
Kubernetes: I can't access Kibana through the nginx ingress and Route 53

Tags: kubernetes, kubernetes-ingress, nginx-ingress, amazon-eks, external-dns

I've deployed the nginx ingress controller with an internal load balancer and ExternalDNS on an EKS cluster, and I'm trying to expose Kibana with a hostname registered in a private hosted zone (my-hostname.com) on Route 53. But when I open it in a browser over a VPN, the browser says the site can't be reached. I'd like to know what I'm doing wrong.

Here are all the resources:

Ingress controller:

apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: default-http-backend

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
spec:
  selector:
    matchLabels:
      app: default-http-backend
  template:
    metadata:
      labels:
        app: default-http-backend
    spec:
      containers:
      - name: default-http-backend
        image: gcr.io/google_containers/defaultbackend:1.3

---

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: internal-ingress
  name: internal-ingress-controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app: internal-ingress
  template:
    metadata:
      labels:
        app: internal-ingress
    spec:
      containers:
      - args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/internal-ingress-configuration
        - --tcp-services-configmap=$(POD_NAMESPACE)/internal-tcp-services
        - --udp-services-configmap=$(POD_NAMESPACE)/internal-udp-services
        - --annotations-prefix=nginx.ingress.kubernetes.io
        - --ingress-class=internal-ingress
        - --publish-service=$(POD_NAMESPACE)/internal-ingress
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.11.0
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: internal-ingress-controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1

---

apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
    service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*'
  labels:
    app: internal-ingress
  name: internal-ingress
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app: internal-ingress
  sessionAffinity: None
  type: LoadBalancer
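As a sanity check (my addition, not part of the original post), you can confirm that AWS actually provisioned an internal load balancer for the controller Service, and that the controller pod is healthy:

# EXTERNAL-IP should show an internal-*.elb.amazonaws.com hostname
kubectl get svc internal-ingress
# The controller pod should be Running and Ready
kubectl get pods -l app=internal-ingress
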
External DNS:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: external-dns
rules:
- apiGroups: [""]
  resources: ["services","endpoints","pods"]
  verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs: ["get","watch","list"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list","watch"]

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
- kind: ServiceAccount
  name: external-dns
  namespace: default

---

apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app: external-dns-private
  name: external-dns-private
spec:
  replicas: 1
  selector:
    matchLabels:
      app: external-dns-private
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: external-dns-private
    spec:
      serviceAccountName: external-dns
      containers:
      - args:
        - --source=ingress
        - --domain-filter=my-hostname.com
        - --provider=aws
        - --registry=txt
        - --txt-owner-id=dev.k8s.nexus
        - --annotation-filter=kubernetes.io/ingress.class=internal-ingress
        - --aws-zone-type=private
        image: registry.opensource.zalan.do/teapot/external-dns:latest
        name: external-dns-private
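
To verify that ExternalDNS picked up the Ingress and wrote the record (a diagnostic step I'm adding, not from the original post), its logs show every record set it creates or updates, and the AWS CLI can confirm what actually landed in the zone (the zone ID below is a placeholder):

# ExternalDNS logs a line for each record it creates or updates
kubectl logs deployment/external-dns-private
# Cross-check the records in the private hosted zone
aws route53 list-resource-record-sets --hosted-zone-id <private-zone-id>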
Ingress resource:

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: "internal-ingress"
  labels:
    app: app
  name: app-private
spec:
  rules:
  - host: kibana.my-hostname.com
    http:
      paths:
      - backend:
          serviceName: kibana
          servicePort: 5601
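
Since the controller runs with --publish-service, the Ingress status should be populated with the internal load balancer's hostname; that is the value ExternalDNS copies into Route 53. A quick check (my addition; resource names match the manifests above):

# ADDRESS should show the internal ELB hostname, not be empty
kubectl get ingress app-private
kubectl describe ingress app-private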
Kibana service:

apiVersion: v1
kind: Service
metadata:
  name: kibana
spec:
  selector:
    app: kibana
  ports:
  - name: client
    port: 5601
    protocol: TCP
  type: ClusterIP
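
A Service only forwards traffic if its selector matches running pods; an empty ENDPOINTS list would produce the default backend's 404 even once DNS works. A check worth running (my addition, assuming the Kibana pods carry the label app=kibana):

# A non-empty ENDPOINTS column means the selector matched pods
kubectl get endpoints kibana
kubectl get pods -l app=kibana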

I've checked the record sets in my private hosted zone and kibana.my-hostname.com has been added, but I still can't access it.

Route 53 private hosted zones only respond to queries that originate from inside the VPCs associated with the zone. You can't resolve the domain over a plain VPN connection.

To work around this, either change your zone to a public hosted zone, or use a VPN together with Simple AD to forward DNS requests into your private zone, as described in the linked references.
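
One way to see this behavior directly (a sketch I'm adding, with illustrative names) is to resolve the hostname from inside the VPC, where the private hosted zone is visible, and compare with the VPN client:

# From inside the cluster (and thus the VPC): resolves to the internal ELB
kubectl run dns-test --rm -it --image=busybox --restart=Never -- nslookup kibana.my-hostname.com
# The same nslookup from the VPN client returns NXDOMAIN, because the
# query never reaches the VPC's resolver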

References:


Please post the output of curl -v kibana.my-hostname.com, nslookup kibana.my-hostname.com, and kubectl get pod --show-labels.

curl shows: curl: (7) Failed to connect. nslookup returns: ** server can't find kibana.my-hostname.com: NXDOMAIN. The kubectl command returns the labels correctly. But I'm already using a VPN to reach resources inside my VPC, and as mentioned above I can access services exposed on the internal load balancer over that VPN.

But are you reaching those services through the load balancer's own DNS name, or through a custom DNS record registered in your internal zone? If you're using the DNS name provided by Amazon, it works... but for a custom internal record, Route 53 only accepts requests from resources inside the VPC, as described in the links in the answer.
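
The distinction raised in that last comment can be seen from the VPN client itself (hostnames below are placeholders, not taken from the original post):

# The load balancer's own name lives in public DNS, so it resolves
# anywhere, to private IPs the VPN can route to:
nslookup internal-xxxxxxxxxx.us-east-1.elb.amazonaws.com
# The custom record lives only in the private hosted zone, so the same
# client gets NXDOMAIN:
nslookup kibana.my-hostname.com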