This article explains in detail how to deploy an ELK log analysis system on CentOS 7.6. It is quite practical, so it is shared here for reference; hopefully you will get something out of it after reading.
# Create the elk user and prepare the data and log directories
useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch2
chown -R elk:elk /home/elk/elasticsearch3
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk
/home/elk/elasticsearch/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]
su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
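After the first node starts, a quick sanity check is to query the cluster health API (the address and port come from the elasticsearch.yml above):

# Should report the node and a cluster status of green or yellow
curl http://192.168.1.70:9200/_cluster/health?pretty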
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target

[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target
[root@localhost system]#
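With the unit file saved, the node can also be managed through systemd instead of the manual su command. A minimal sketch, assuming the unit is named elasticsearch.service as shown above:

systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch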
/home/elk/elasticsearch2/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#
su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target

[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target
[root@localhost system]#
/home/elk/elasticsearch3/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#
su elk -l -c '/home/elk/elasticsearch3/bin/elasticsearch -d'
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch3.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target

[Service]
RuntimeDirectory=elasticsearch3
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch3
Environment=ES_PATH_CONF=/home/elk/elasticsearch3/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch3
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch3/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target
[root@localhost system]#
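Once all three nodes are up, you can confirm that they have joined the same cluster. A quick check against the master node's HTTP port (standard cat API):

# Should list node0, node1 and node2
curl http://192.168.1.70:9200/_cat/nodes?v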
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#
# Run in the foreground (useful for debugging):
./logstash -f ../dev.conf
# Or run in the background:
nohup ./logstash -f ../dev.conf &
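Before leaving it running in the background, it can be worth validating the pipeline file first; --config.test_and_exit is a standard Logstash flag that checks the syntax and exits:

./logstash -f ../dev.conf --config.test_and_exit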
[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v "^#" kibana.yml
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"
# Run in the foreground:
./kibana
# Or run in the background:
nohup ./kibana &
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service
[Unit]
Description=Kibana Server Manager

[Service]
ExecStart=/home/elk/kibana/bin/kibana

[Install]
WantedBy=multi-user.target
[root@localhost system]#
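After reloading systemd, Kibana can be started as a service. The config above does not set server.port, so this assumes the default port 5601:

systemctl daemon-reload
systemctl start kibana
# Then open http://192.168.1.70:5601 in a browser.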
yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#
npm install
# Run in the foreground:
npm run start
# Or run in the background:
nohup npm run start &
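elasticsearch-head listens on port 9100 by default, and the http.cors.* settings added to each elasticsearch.yml earlier are what allow the head UI to talk to the cluster. A quick check that it is serving:

curl http://192.168.1.70:9100
# Or open http://192.168.1.70:9100 in a browser and connect to http://192.168.1.70:9200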
[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]# ./zookeeper-server-start.sh ../config/zookeeper.properties
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[root@localhost system]#
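With this unit file saved, ZooKeeper can be started through systemd (standard systemctl usage; note that the unit has no [Install] section, so it can be started but not enabled at boot unless one is added):

systemctl daemon-reload
systemctl start zookeeper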
./kafka-server-start.sh ../config/server.properties
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service
[Unit]
Description=Apache kafka
After=network.target

[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[root@localhost system]#
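Kafka itself can then be managed the same way (assuming ZooKeeper is already running):

systemctl daemon-reload
systemctl start kafka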
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test
./kafka-topics.sh --list --zookeeper 192.168.1.70:2181
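To inspect the partition and replica layout of a topic, the same script can describe it (standard Kafka CLI usage for this ZooKeeper-based version):

./kafka-topics.sh --describe --zookeeper 192.168.1.70:2181 --topic test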
./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test
bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning
If the messages sent on the producer side show up on the consumer side, Kafka is working correctly.
Install Filebeat version 6.5 on the machine whose logs are to be collected.
[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
    tag: 54_tcp_catalina_out

- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
    tag: 54_web_catalina_out

name: 192.168.1.54

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 3

output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
[root@localhost filebeat]#
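With the config in place, Filebeat can be started from /usr/local/filebeat. A minimal sketch, assuming the standalone binary layout shown above (-e logs to stderr, which is handy for a first run):

# Foreground, for a first test:
./filebeat -e -c filebeat.yml
# Background:
nohup ./filebeat -c filebeat.yml &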
After Filebeat is installed, go back to Logstash and edit its configuration file.
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf
input {
  kafka{
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}

filter {
  if [fields][tag]=="jpwebmap" {
    json{
      source => "message"
      remove_field => "message"
    }
    geoip {
      source => "client"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  }

  if [fields][tag] == "54_tcp_catalina_out"{
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "54_web_catalina_out"{
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_tcp_catalina_out"{
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_web_catalina_out"{
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "51_nginx80_access_log" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr" , "/"]
      # save the last element of the array as the api_method.
      add_field => ["src", "%{[spstr][-1]}" ]
    }
    mutate{
      remove_field => [ "friends", "ecs", "agent" , "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/home/elk/logstash/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  }
}

output {
  if [fields][tag] == "wori"{
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "zabbix"
    }
  }
  if [fields][tag] == "54_tcp_catalina_out"{
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_tcp_catalina_out"
    }
  }
  if [fields][tag] == "54_web_catalina_out"{
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_web_catalina_out"
    }
  }
  if [fields][tag] == "55_tcp_catalina_out"{
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_tcp_catalina_out"
    }
  }
  if [fields][tag] == "55_web_catalina_out"{
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_web_catalina_out"
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    stdout{}
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "51_nginx80_access_log"
    }
  }
}
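Once Logstash is running with this pipeline and Filebeat is shipping data, the corresponding indices should start appearing in Elasticsearch. A simple way to check:

# The tag-named indices (54_tcp_catalina_out, etc.) should be listed here
curl http://192.168.1.70:9200/_cat/indices?v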
The fragments below are additional Logstash configuration snippets for reference. First, splitting the log file path to extract the source file name into a src field:

filter {
  mutate {
    add_field => { "spstr" => "%{[log][file][path]}" }
  }
  mutate {
    split => ["spstr" , "/"]
    # save the last element of the array as the api_method.
    add_field => ["src", "%{[spstr][-1]}" ]
  }
  mutate{
    remove_field => [ "friends", "ecs", "agent" , "spstr" ]
  }
}
filter { if [fields][tag] == "java"{ grok { match => ["message", "%{TIMESTAMP_ISO8601:logdate}"] } date { match => ["logdate", "ISO8601"] } mutate { remove_field => [ "logdate" ] } } #End if }
input {
  kafka{
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat" ]
    topics => ["ql-prod-dubbo","ql-prod-nginx","ql-prod-tomcat" ]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }
}

#input {
#  kafka{
#    bootstrap_servers => "172.16.11.68:9092"
#    topics => ["ql-prod-java-dubbo","ql-prod","ql-prod-java" ]
#    codec => "json"
#    consumer_threads => 15
#    decorate_events => true
#    auto_offset_reset => "latest"
#    group_id => "logstash-1"
#    ############################# HELK Optimizing Latency #############################
#    fetch_min_bytes => "1"
#    request_timeout_ms => "305000"
#    ############################# HELK Optimizing Availability #############################
#    session_timeout_ms => "10000"
#    max_poll_records => "550"
#    max_poll_interval_ms => "300000"
#  }
#}
filter {
  if [fields][tag] == "nginx-access" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr" , "/"]
      # save the last element of the array as the api_method.
      add_field => ["src", "%{[spstr][-1]}" ]
    }
    mutate{
      remove_field => [ "friends", "ecs", "agent" , "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  }
  #endif
}
output{
  if [fields][tag] == "nginx-access" {
    stdout{}
    elasticsearch {
      user => elastic
      password => WR141bp2sveJuGFaD4oR
      hosts => ["172.16.11.67:9200"]
      index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
    }
  }
  #stdout{}
  if [fields][tag] == "java" {
    elasticsearch {
      user => elastic
      password => WR141bp2sveJuGFaD4oR
      hosts => ["172.16.11.66:9200","172.16.11.68:9200"]
      index => "%{[host][name]}-%{[src]}"
    }
  }
}
That concludes this article on how to deploy an ELK log analysis system on CentOS 7.6. I hope the content above is helpful and that you have learned something from it; if you found the article worthwhile, please share it so more people can see it.