Logstash filter in Logstash's beats.config


I am using the ELK stack with Filebeat. I send logs from Filebeat to Logstash, from there to Elasticsearch, and visualize them in Kibana. The JSON document that shows up in the Kibana log results looks like this:

{
  "_index": "filebeat-6.4.2-2018.10.30",
  "_type": "doc",
  "_source": {
    "@timestamp": "2018-10-30T09:15:31.697Z",
    "fields": {
      "server": "server1"
    },
    "prospector": {
      "type": "log"
    },
    "host": {
      "name": "kushmathapa"
    },
    "message": "{ \"datetime\": \"2018-10-23T18:04:00.811660Z\", \"level\": \"ERROR\", \"message\": \"No response from remote. Handshake timed out or transport failure detector triggered.\" }",
    "source": "C:\\logs\\batch-portal\\error.json",
    "input": {
      "type": "log"
    },
    "beat": {
      "name": "kushmathapa",
      "hostname": "kushmathapa",
      "version": "6.4.2"
    },
    "offset": 0,
    "tags": [
      "lighthouse1",
      "controller",
      "trt"
    ]
  },
  "fields": {
    "@timestamp": [
      "2018-10-30T09:15:31.697Z"
    ]
  }
}
I want it to be displayed like this:

{
  "_index": "filebeat-6.4.2-2018.10.30",
  "_type": "doc",
  "_source": {
    "@timestamp": "2018-10-30T09:15:31.697Z",
    "fields": {
      "server": "server1"
    },
    "prospector": {
      "type": "log"
    },
    "host": {
      "name": "kushmathapa"
    },
    "datetime": 2018-10-23T18:04:00.811660Z,
    "log_level": ERROR,
    "message": "{ \"No response from remote. Handshake timed out or transport failure detector triggered.\" }",
    "source": "C:\\logs\\batch-portal\\error.json",
    "input": {
      "type": "log"
    },
    "beat": {
      "name": "kushmathapa",
      "hostname": "kushmathapa",
      "version": "6.4.2"
    },
    "offset": 0,
    "tags": [
      "lighthouse1",
      "controller",
      "trt"
    ]
  },
  "fields": {
    "@timestamp": [
      "2018-10-30T09:15:31.697Z"
    ]
  }
}
My beats.config currently looks like this:

input {
  beats {
    port => 5044
  }
}

output {
  elasticsearch {
    hosts => "localhost:9200"
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
  stdout {
    codec => rubydebug { metadata => true }
  }
}

I have applied filters, but it seems I am missing something.

Logstash needs to know that the "message" field you are receiving is in JSON format. You can use the json filter here, and it gets you almost everything you want out of the box:

filter {
  json {
    source => "message"
  }
}
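
For placement, here is a minimal sketch of the beats.config from the question with that filter block added between the existing input and output (everything else unchanged):

input {
  beats {
    port => 5044
  }
}

filter {
  # Parse the JSON string in "message"; its keys (datetime, level,
  # message) become top-level fields on the event.
  json {
    source => "message"
  }
}

output {
  elasticsearch {
    hosts => "localhost:9200"
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
  stdout {
    codec => rubydebug { metadata => true }
  }
}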

If you need those exact names, you can use a mutate filter (rename, or add/remove field) to rename "level" to "log.level" and "datetime" to "@datetime".
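
A minimal sketch of that rename, extending the filter block (the "log_level" name here follows the desired output in the question; swap in "log.level" / "@datetime" if you want those names instead):

filter {
  json {
    source => "message"
  }
  mutate {
    # Rename the parsed "level" field to match the desired output above.
    rename => {
      "level" => "log_level"
    }
  }
}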

You can use a config file like the one below. In the grok filter, add the format of the logs you expect to receive into Elasticsearch (see the configuration below for an example).

Note: since the message is in JSON format (the question has been updated), the grok filter will not work on it; the config below should still give you an idea of the approach.

input {
  beats {
    port => 5044
    id => "my_plugin_id"
    tags => ["logs"]
    type => "abc"
  }
}

filter {
  if [type] == "abc" {
    # Strip carriage returns so the grok pattern matches cleanly.
    mutate {
      gsub => [ "message", "\r", "" ]
    }

    grok {
      break_on_match => true
      match => {
        "message" => [
          "%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:log_level}%{SPACE}%{GREEDYDATA:message}"
        ]
      }
      overwrite => [ "message" ]
    }

    date {
      match => [ "timestamp", "yyyy-MM-dd HH:mm:ss,SSS" ]
    }
  }
}
output {
  if [type] == "abc" {
    elasticsearch {
      hosts => ["ip of elasticsearch:port_number of elasticsearch"]
      index => "logfiles"
    }
  }
  else {
    elasticsearch {
      hosts => ["ip of elasticsearch:port_number of elasticsearch"]
      index => "task_log"
    }
  }
  stdout {
    codec => rubydebug { metadata => true }
  }
}
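
Either way, before restarting Logstash you can check that the pipeline compiles with bin/logstash -f beats.config --config.test_and_exit (the file name beats.config is taken from the question).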