<img src="//i.stack.imgur.com/RUiNP.png" height="16" width="18" alt="" class="sponsor tag img">elasticsearch Can';t通过filebeat将日志发送到Kubernetes的logstash 配置_<img Src="//i.stack.imgur.com/RUiNP.png" Height="16" Width="18" Alt="" Class="sponsor Tag Img">elasticsearch_Kubernetes_Logstash_Filebeat - Fatal编程技术网 elasticsearch Can';t通过filebeat将日志发送到Kubernetes的logstash 配置,elasticsearch,kubernetes,logstash,filebeat,elasticsearch,Kubernetes,Logstash,Filebeat" /> elasticsearch Can';t通过filebeat将日志发送到Kubernetes的logstash 配置,elasticsearch,kubernetes,logstash,filebeat,elasticsearch,Kubernetes,Logstash,Filebeat" />

elasticsearch Can';t通过filebeat将日志发送到Kubernetes的logstash 配置

elasticsearch Can';t通过filebeat将日志发送到Kubernetes的logstash 配置,elasticsearch,kubernetes,logstash,filebeat,elasticsearch,Kubernetes,Logstash,Filebeat,nginx.yaml --- apiVersion: v1 kind: Namespace metadata: name: beats --- apiVersion: apps/v1 kind: Deployment metadata: namespace: beats name: nginx spec: replicas: 1 selector: matchLabels: app: nginx template: metadata:

nginx.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats

---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: beats
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
        volumeMounts:
          - name: nginx-data
            mountPath: /var/log/nginx
      volumes:
        - name: nginx-data
          persistentVolumeClaim:
            claimName: nginx-data-pvc

---
apiVersion: v1
kind: Service
metadata:
  namespace: beats
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
    - port: 80
  selector:
    app: nginx

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-data-pv
  namespace: beats
spec:
  storageClassName: manual
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /mnt/data/nginx-data-pv
    type: DirectoryOrCreate

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-data-pvc
  namespace: beats
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  volumeName: nginx-data-pv
filebeat.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: beats
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.modules:
      - module: nginx

    filebeat.autodiscover:
      providers:
        - type: kubernetes
          hints.enabled: true
          templates:
            - condition.contains:
                kubernetes.namespace: beats
              config:
                - module: nginx
                  access:
                    enabled: true
                    var.paths: ["/mnt/data/nginx-data-pv/access.log*"]
                    subPath: access.log
                    tags: ["access"]
                  error:
                    enabled: true
                    var.paths: ["/mnt/data/nginx-data-pv/error.log*"]
                    subPath: error.log
                    tags: ["error"]

    output.logstash:
      hosts: ["logstash:5044"]

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: beats
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.10.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-e",
          ]
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              subPath: filebeat.yml
              readOnly: true
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
logstash.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats

---
apiVersion: v1
kind: Service
metadata:
  namespace: beats
  labels:
    app: logstash
  name: logstash
spec:
  ports:
    - name: "5044"
      port: 5044
      targetPort: 5044
  selector:
    app: logstash
status:
  loadBalancer: {}

---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: beats
  name: logstash-configmap
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
    xpack.monitoring.enabled: false
  logstash.conf: |
    input {
        beats {
            port => 5044
        }
    }

    filter {
        grok {
            match => { "message" => "%{COMBINEDAPACHELOG}"}
        }

        date {
            match => [ "time", "dd/MMM/YYYY:HH:mm:ss Z" ]
        }

        geoip {
            source => "remote_ip"
            target => "geoip"
        }

        useragent {
            source => "agent"
            target => "user_agent"
        }
    }

    output {
        stdout { codec => rubydebug }
    }

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: logstash-nginx-to-X
  namespace: beats
spec:
  serviceName: "logstash"
  selector:
    matchLabels:
      app: logstash
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: logstash
    spec:
      initContainers:
      - name: init-logstash
        image: docker.elastic.co/logstash/logstash:7.10.0
        securityContext:
          privileged: true
        command: ['sh', '-c', 'bin/logstash-plugin install logstash-output-XXX']
      containers:
      - name: logstash
        image: docker.elastic.co/logstash/logstash:7.10.0
        resources:
          limits:
            memory: 2Gi
        ports:
          - containerPort: 5044
        volumeMounts:
          - name: config-volume
            mountPath: /usr/share/logstash/config
          - name: logstash-pipeline-volume
            mountPath: /usr/share/logstash/pipeline
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
Logs

Filebeat pod:
    kubectl logs -f filebeat-t2585 -n beats
    
    2020-12-07T10:46:01.635Z    INFO    [monitoring]    log/log.go:145  Non-zero metrics in the last 30s    {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":1700,"time":{"ms":200}},"total":{"ticks":2210,"time":{"ms":244},"value":2210},"user":{"ticks":510,"time":{"ms":44}}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":10},"info":{"ephemeral_id":"6aa78d6d-8b17-43fc-9e2a-c456eeee4f61","uptime":{"ms":240241}},"memstats":{"gc_next":18802656,"memory_alloc":10736760,"memory_total":48437904},"runtime":{"goroutines":43}},"filebeat":{"harvester":{"open_files":0,"running":0}},"libbeat":{"config":{"module":{"running":0}},"pipeline":{"clients":3,"events":{"active":0}}},"registrar":{"states":{"current":0}},"system":{"load":{"1":1.28,"15":1.26,"5":1.39,"norm":{"1":0.32,"15":0.315,"5":0.3475}}}}}}
    E1207 10:46:15.956993       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:beats:default" cannot list resource "pods" in API group "" at the cluster scope

Logstash pod:
    kubectl logs -f logstash-nginx-to-X-0 -n beats
    
    [INFO ] 2020-12-07 12:26:39.559 [main] runner - Starting Logstash {"logstash.version"=>"7.10.0", "jruby.version"=>"jruby 9.2.13.0 (2.5.7) 2020-08-03 9a89c94bcc OpenJDK 64-Bit Server VM 11.0.8+10 on 11.0.8+10 +jit [linux-x86_64]"}
    [INFO ] 2020-12-07 12:26:39.641 [main] writabledirectory - Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
    [INFO ] 2020-12-07 12:26:39.645 [main] writabledirectory - Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
    [WARN ] 2020-12-07 12:26:40.278 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified
    [INFO ] 2020-12-07 12:26:40.324 [LogStash::Runner] agent - No persistent UUID file found. Generating new UUID {:uuid=>"3ed4d3f3-8de9-4747-ab5d-67c9f8175a5c", :path=>"/usr/share/logstash/data/uuid"}
    [INFO ] 2020-12-07 12:26:41.764 [Converge PipelineAction::Create<main>] Reflections - Reflections took 80 ms to scan 1 urls, producing 23 keys and 47 values
    [INFO ] 2020-12-07 12:26:43.568 [[main]-pipeline-manager] geoip - Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"}
    [INFO ] 2020-12-07 12:26:44.032 [[main]-pipeline-manager] javapipeline - Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>125, "pipeline.sources"=>["/usr/share/logstash/pipeline/logstash.conf"], :thread=>"#<Thread:0x7d2f64b2@/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:125 run>"}
    [INFO ] 2020-12-07 12:26:46.277 [[main]-pipeline-manager] javapipeline - Pipeline Java execution initialization time {"seconds"=>2.24}
    [INFO ] 2020-12-07 12:26:46.279 [[main]-pipeline-manager] beats - Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
    [INFO ] 2020-12-07 12:26:46.339 [[main]-pipeline-manager] javapipeline - Pipeline started {"pipeline.id"=>"main"}
    [INFO ] 2020-12-07 12:26:46.385 [Agent thread] agent - Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
    [INFO ] 2020-12-07 12:26:46.575 [Api Webserver] agent - Successfully started Logstash API endpoint {:port=>9600}
    [INFO ] 2020-12-07 12:26:46.760 [[main]<beats] Server - Starting server on port: 5044
    
    [WARN ] 2020-12-07 12:49:13.107 [nioEventLoopGroup-2-2] DefaultChannelPipeline - An exceptionCaught() event was fired, and it reached at the tail of the pipeline. It usually means the last handler in the pipeline did not handle the exception.
    io.netty.handler.codec.DecoderException: org.logstash.beats.InvalidFrameProtocolException: Invalid version of beats protocol: 69
        at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:471) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.handler.codec.ByteToMessageDecoder.channelInputClosed(ByteToMessageDecoder.java:404) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.handler.codec.ByteToMessageDecoder.channelInputClosed(ByteToMessageDecoder.java:371) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.handler.codec.ByteToMessageDecoder.channelInactive(ByteToMessageDecoder.java:354) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:262) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.channel.AbstractChannelHandlerContext.access$300(AbstractChannelHandlerContext.java:61) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.channel.AbstractChannelHandlerContext$4.run(AbstractChannelHandlerContext.java:253) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.util.concurrent.DefaultEventExecutor.run(DefaultEventExecutor.java:66) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) [netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) [netty-all-4.1.49.Final.jar:4.1.49.Final]
        at java.lang.Thread.run(Thread.java:834) [?:?]
    Caused by: org.logstash.beats.InvalidFrameProtocolException: Invalid version of beats protocol: 69
        at org.logstash.beats.Protocol.version(Protocol.java:22) ~[logstash-input-beats-6.0.11.jar:?]
        at org.logstash.beats.BeatsParser.decode(BeatsParser.java:62) ~[logstash-input-beats-6.0.11.jar:?]
        at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
        ... 11 more
    
Answer:

• Change hosts: ["logstash:5044"] to hosts: ["logstash.beats.svc.cluster.local:5044"]
• Create a service account
• Remove this:

      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
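
For the first point, a sketch of the adjusted output section of filebeat.yml, assuming the logstash Service stays in the beats namespace as defined above:

output.logstash:
  # fully-qualified in-cluster DNS name of the logstash Service in the beats namespace
  hosts: ["logstash.beats.svc.cluster.local:5044"]

For the second point, the "pods is forbidden" error in the Filebeat pod log indicates that the default service account in the beats namespace is not allowed to list pods, which the kubernetes autodiscover provider needs. A minimal sketch of a dedicated service account with the required RBAC (the name filebeat for the ServiceAccount, ClusterRole and ClusterRoleBinding is illustrative, not taken from the question):

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: beats
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
rules:
  # the kubernetes autodiscover provider watches pods (and typically nodes and namespaces) cluster-wide
  - apiGroups: [""]
    resources: ["namespaces", "pods", "nodes"]
    verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: beats
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io

The Filebeat DaemonSet pod spec would then reference it with serviceAccountName: filebeat.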