Shipping HAProxy logs with Logstash via Redis and Kafka
Logstash client side: collecting the HAProxy HTTP/TCP logs
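The file inputs below assume HAProxy is already writing its HTTP and TCP logs to separate files, which is typically arranged through a local syslog daemon since HAProxy logs via syslog. A minimal rsyslog sketch under that assumption (the facility names local2/local3 are hypothetical and must match the log directives in haproxy.cfg):

# /etc/rsyslog.d/haproxy.conf
$ModLoad imudp
$UDPServerRun 514
local2.* /data/haproxy/logs/haproxy_http.log
local3.* /data/haproxy/logs/haproxy_tcp.log

Each proxy then points at its facility in haproxy.cfg, e.g. log 127.0.0.1 local2 in the HTTP frontend and log 127.0.0.1 local3 in the TCP frontend.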
input {
  file {
    path => "/data/haproxy/logs/haproxy_http.log"
    start_position => "beginning"
    type => "haproxy_http"
  }
  file {
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
  }
}
filter {
  if [type] == "haproxy_http" {
    grok {
      patterns_dir => "/data/logstash/patterns"
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    geoip {
      source => "client_ip"
      fields => ["ip","city_name","country_name","location"]
      add_tag => [ "geoip" ]
    }
  } else if [type] == "haproxy_tcp" {
    grok {
      match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
  }
}
output {
  if [type] == "haproxy_http" {
    redis {
      host => "192.168.20.166"
      port => "6379"
      db => "5"
      data_type => "list"
      key => "haproxy_http.log"
    }
  } else if [type] == "haproxy_tcp" {
    redis {
      host => "192.168.20.166"
      port => "6379"
      db => "4"
      data_type => "list"
      key => "haproxy_tcp.log"
    }
  }
}
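Note that the HTTP grok expression references a custom %{FENG} pattern, so a definition must exist in a file under /data/logstash/patterns or the filter will fail to compile. A minimal sketch of such a pattern file (the definition is an assumption; the original only gives the pattern name, and the fields it captures are the HAProxy cookie columns, which are non-space tokens):

# /data/logstash/patterns/haproxy
FENG \S+

To sanity-check that the shipper is actually pushing events, you can inspect the Redis lists directly (assuming redis-cli is available on a host that can reach Redis):

redis-cli -h 192.168.20.166 -p 6379 -n 5 LLEN haproxy_http.log
redis-cli -h 192.168.20.166 -p 6379 -n 4 LLEN haproxy_tcp.log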
Logstash server side: read the HAProxy HTTP/TCP logs from Redis and write them into Elasticsearch
[root@logstashserver etc]# cat logstash.conf
input {
  # Conditionals are not supported inside input blocks in Logstash, so both
  # Redis inputs are declared unconditionally; each event carries the type
  # field set by the shipper, which the output section below switches on.
  redis {
    host => "192.168.20.166"
    port => "6379"
    db => "5"
    data_type => "list"
    key => "haproxy_http.log"
  }
  redis {
    host => "192.168.20.166"
    port => "6379"
    db => "4"
    data_type => "list"
    key => "haproxy_tcp.log"
  }
}
output {
  if [type] == "haproxy_http" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
  }
  if [type] == "haproxy_tcp" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
  }
}
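Once both pipelines are running, the daily indices should appear in the cluster. A quick way to verify (assuming the es1 hostname resolves from wherever you run this):

curl -s 'http://es1:9200/_cat/indices/logstash-haproxy-*?v'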
#########################################kafka###############################################
Client side
input {
  file {
    path => "/data/haproxy/logs/haproxy_http.log"
    start_position => "beginning"
    type => "haproxy_http"
  }
  file {
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
  }
}
filter {
  if [type] == "haproxy_http" {
    grok {
      patterns_dir => "/data/logstash/patterns"
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    geoip {
      source => "client_ip"
      fields => ["ip","city_name","country_name","location"]
      add_tag => [ "geoip" ]
    }
  } else if [type] == "haproxy_tcp" {
    grok {
      match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
  }
}
output {
  if [type] == "haproxy_http" {
    kafka {                                                      # ship to Kafka; these agents act as the producers
      bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
      topic_id => "haproxy_http.log"                             # used as the topic name; created automatically if missing
      compression_type => "snappy"                               # compression codec
    }
  } else if [type] == "haproxy_tcp" {
    kafka {                                                      # ship to Kafka; these agents act as the producers
      bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
      topic_id => "haproxy_tcp.log"                              # used as the topic name; created automatically if missing
      compression_type => "snappy"                               # compression codec
    }
  }
}
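To confirm the topics exist and events are flowing, the stock Kafka CLI tools can be run from any broker. The ZooKeeper quorum below matches the consumer config in the next section; the tool names assume an older Kafka release of the zk_connect era:

kafka-topics.sh --zookeeper zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 --list
kafka-console-consumer.sh --zookeeper zookeeper1:2181 --topic haproxy_http.log --from-beginning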
Server side
input {
  # As with the Redis variant, conditionals cannot be used inside an input
  # block, so both consumers are declared unconditionally; the type field
  # set by the shipper travels with each event.
  kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_http.log"
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
  }
  kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_tcp.log"
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
  }
}
output {
  if [type] == "haproxy_http" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
  }
  if [type] == "haproxy_tcp" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
  }
}
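As before, you can verify that documents are landing in Elasticsearch; the hostnames are the ones used in the output above:

curl -s 'http://es1:9200/logstash-haproxy-http.log-*/_count'
curl -s 'http://es1:9200/logstash-haproxy-tcp.log-*/_count'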
Reposted from: https://www.cnblogs.com/fengjian2016/p/5919702.html