elasticsearch 日志存储已用过滤器
我正在尝试使用 ELK（Elasticsearch/Logstash/Kibana）堆栈中的 elapsed.rb 过滤器，但似乎无法让它正常工作。我对 grok 不是很熟悉，我相信这就是我的问题所在。有人能帮忙吗？日志文件示例：
{
"application_name": "Application.exe",
"machine_name": "Machine1",
"user_name": "testuser",
"entry_date": "2015-03-12T18:12:23.5187552Z",
"chef_environment_name": "chefenvironment1",
"chef_logging_cookbook_version": "0.1.9",
"logging_level": "INFO",
"performance": {
"process_name": "account_search",
"process_id": "Machine1|1|635617555435187552",
"event_type": "enter"
},
"thread_name": "1",
"logger_name": "TestLogger",
"@version": "1",
"@timestamp": "2015-03-12T18:18:48.918Z",
"type": "rabbit",
"log_from": "rabbit"
}
{
"application_name": "Application.exe",
"machine_name": "Machine1",
"user_name": "testuser",
"entry_date": "2015-03-12T18:12:23.7527462Z",
"chef_environment_name": "chefenvironment1",
"chef_logging_cookbook_version": "0.1.9",
"logging_level": "INFO",
"performance": {
"process_name": "account_search",
"process_id": "Machine1|1|635617555435187552",
"event_type": "exit"
},
"thread_name": "1",
"logger_name": "TestLogger",
"@version": "1",
"@timestamp": "2015-03-12T18:18:48.920Z",
"type": "rabbit",
"log_from": "rabbit"
}
Example.conf文件
input {
  rabbitmq {
    host => "SERVERNAME"
    add_field => ["log_from", "rabbit"]
    type => "rabbit"
    user => "testuser"
    password => "testuser"
    durable => "true"
    exchange => "Logging"
    queue => "testqueue"
    # The json codec decodes each message into event fields, so there is
    # no raw "message" string left for grok to match against.
    codec => "json"
    exclusive => "false"
    passive => "true"
  }
}
filter {
  # Tag start/end events directly from the already-decoded JSON fields
  # instead of grokking a "message" field that does not exist here.
  if [performance][event_type] == "enter" {
    mutate { add_tag => ["taskStarted"] }
  } else if [performance][event_type] == "exit" {
    mutate { add_tag => ["taskTerminated"] }
  }
  elapsed {
    start_tag => "taskStarted"
    end_tag => "taskTerminated"
    # Nested fields must use Logstash field-reference syntax, not dots.
    unique_id_field => "[performance][process_id]"
    timeout => 10000
    new_event_on_match => false
  }
}
output {
  file {
    codec => json { charset => "UTF-8" }
    path => "test.log"
  }
}
input {
  rabbitmq {
    host => "SERVERNAME"
    add_field => ["log_from", "rabbit"]
    type => "rabbit"
    user => "testuser"
    password => "testuser"
    durable => "true"
    exchange => "Logging"
    queue => "testqueue"
    codec => "json"
    exclusive => "false"
    passive => "true"
  }
}
filter {
  grok {
    match => ["message", "%{TIMESTAMP_ISO8601} START id: (?&lt;process_id&gt;.*)"]
    add_tag => [ "taskStarted" ]
  }
  grok {
    match => ["message", "%{TIMESTAMP_ISO8601} END id: (?&lt;process_id&gt;.*)"]
    add_tag => [ "taskTerminated" ]
  }
  elapsed {
    start_tag => "taskStarted"
    end_tag => "taskTerminated"
    unique_id_field => "process_id"
    timeout => 10000
    new_event_on_match => false
  }
}
output {
  file {
    codec => json { charset => "UTF-8" }
    path => "test.log"
  }
}
您不需要使用grok筛选器,因为您的输入已经是json格式。您需要这样做:
# The input is already parsed by the json codec, so tag the start/end
# events from the decoded [performance][event_type] field — no grok needed.
if [performance][event_type] == "enter" {
  mutate { add_tag => ["taskStarted"] }
} else if [performance][event_type] == "exit" {
  mutate { add_tag => ["taskTerminated"] }
}
elapsed {
  start_tag => "taskStarted"
  end_tag => "taskTerminated"
  # Logstash does not support dot notation ("performance.process_id");
  # nested fields use the [parent][child] field-reference syntax.
  unique_id_field => "[performance][process_id]"
  timeout => 10000
  new_event_on_match => false
}
我对 unique_id_field 的这种写法不是很肯定——我认为它应该可以工作。如果不行，你可以先用 mutate 的 add_field => { "process_id" => "%{[performance][process_id]}" } 把嵌套字段复制成顶层字段，再把 unique_id_field 改为 "process_id"。
你不需要使用grok过滤器,因为你的输入已经是json格式的。您需要这样做:
# The input is already parsed by the json codec, so tag the start/end
# events from the decoded [performance][event_type] field — no grok needed.
if [performance][event_type] == "enter" {
  mutate { add_tag => ["taskStarted"] }
} else if [performance][event_type] == "exit" {
  mutate { add_tag => ["taskTerminated"] }
}
elapsed {
  start_tag => "taskStarted"
  end_tag => "taskTerminated"
  # Logstash does not support dot notation ("performance.process_id");
  # nested fields use the [parent][child] field-reference syntax.
  unique_id_field => "[performance][process_id]"
  timeout => 10000
  new_event_on_match => false
}
我对 unique_id_field 的这种写法不是很肯定——我认为它应该可以工作。如果不行，你可以先用 mutate 的 add_field => { "process_id" => "%{[performance][process_id]}" } 把嵌套字段复制成顶层字段，再把 unique_id_field 改为 "process_id"。
——那么，用这个输入和这个配置会发生什么呢？标签有没有被添加？END 事件上有没有出现新的字段？
——是的，标签被添加了。我试过在 unique_id_field 中使用和你相同的语法，但也不起作用。我喜欢 add_field 的办法，会采用它。
——unique_id_field => "[performance][process_id]" 应该可以工作。