How to Deploy an ELK Log Analysis System on CentOS 7.6

Published: 2021-11-15 15:38:27 | Source: 億速云 | Reads: 160 | Author: 小新 | Column: Cloud Computing

This article gives a detailed walkthrough of how to deploy an ELK log analysis system on CentOS 7.6. It is quite practical, so it is shared here for reference; hopefully you will get something out of it after reading.

Download Elasticsearch
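
The download itself is not shown in the article; a minimal sketch, assuming the 6.5.4 tarball from elastic.co (the exact version and URL are assumptions) and three unpacked copies for the three instances used below:

mkdir -p /home/elk
cd /home/elk
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.5.4.tar.gz
tar -xzf elasticsearch-6.5.4.tar.gz
mv elasticsearch-6.5.4 elasticsearch
# two more copies for the second and third instances on the same host
cp -r elasticsearch elasticsearch2
cp -r elasticsearch elasticsearch3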

Create the elk user and grant permissions

useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch2
chown -R elk:elk /home/elk/elasticsearch3
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk

Master node

Extract Elasticsearch and edit the configuration file

/home/elk/elasticsearch/config
[root@localhost config]# grep -v  "^#" elasticsearch.yml 
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]

Manual start command

su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'

Startup unit file: elasticsearch.service

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service 
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#
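
Once the unit file is in place, reload systemd and enable the service; the same steps apply to the elasticsearch2 and elasticsearch3 units below:

systemctl daemon-reload
systemctl enable elasticsearch.service
systemctl start elasticsearch.service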

Node1

/home/elk/elasticsearch2/config
[root@localhost config]# grep -v  "^#" elasticsearch.yml 
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#

Manual start command

su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'

Startup unit file: elasticsearch2.service

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service 
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#

Node2

/home/elk/elasticsearch3/config
[root@localhost config]# grep -v  "^#" elasticsearch.yml 
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#

Manual start command

su elk -l -c '/home/elk/elasticsearch3/bin/elasticsearch -d'

Startup unit file: elasticsearch3.service

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch3.service 
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch3
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch3
Environment=ES_PATH_CONF=/home/elk/elasticsearch3/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch3
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch3/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#
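
With all three instances running, the cluster state can be checked through the master's HTTP port (9200, as configured above), for example:

curl 'http://192.168.1.70:9200/_cat/nodes?v'
curl 'http://192.168.1.70:9200/_cluster/health?pretty'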

Download Logstash

The directory is as follows; the default configuration can be used as-is.

[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#

Manual start command

./logstash -f ../dev.conf 
nohup ./logstash -f ../dev.conf &
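
Before loading the full pipeline, Logstash can also be smoke-tested with a simple stdin-to-stdout pipeline (run from the bin directory, as above):

./logstash -e 'input { stdin {} } output { stdout {} }'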

Download Kibana

The configuration file is as follows

[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v  "^#" kibana.yml 
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"

Manual start command

./kibana
nohup ./kibana &

Kibana startup unit file

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service 
[Unit]
Description=Kibana  Server Manager
[Service]
ExecStart=/home/elk/kibana/bin/kibana
[Install]
WantedBy=multi-user.target
[root@localhost system]#
The port is 5601; access Kibana at 192.168.1.70:5601.
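
After starting, Kibana's availability can also be checked from the command line, for example:

curl -s 'http://192.168.1.70:5601/api/status'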

Install Elasticsearch-head

yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git 
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#

Start

npm install 
npm run start
nohup npm run start &
curl -XPUT '192.168.2.67:9100/book'
Then open 192.168.2.67:9100 in a browser to access it.
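
Note that creating an index normally goes to the Elasticsearch HTTP port rather than the head plugin's 9100; with the master node configured above that would be, for example:

curl -XPUT 'http://192.168.1.70:9200/book'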

Download Kafka

Edit the configuration file as follows

[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties 
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
Configure and start the ZooKeeper bundled with Kafka

Manual start

[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]#
./zookeeper-server-start.sh ../config/zookeeper.properties

Start ZooKeeper via systemctl

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service 
[Unit]
Description=Zookeeper
After=network.target
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Start the Kafka service

Manual start

./kafka-server-start.sh ../config/server.properties

Start Kafka via systemctl

[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service 
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh  /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
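
With both unit files in place, reload systemd and start ZooKeeper before Kafka, for example:

systemctl daemon-reload
systemctl start zookeeper.service
systemctl start kafka.service
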
Test Kafka

Create a topic named test

./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test

List the topics in Kafka

./kafka-topics.sh --list  --zookeeper 192.168.1.70:2181

Produce messages to the test topic

./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test

Consume messages from the test topic

bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning

If the messages produced on one side are received on the consumer side, Kafka is working correctly.

Install Filebeat on the target machine

Install version 6.5.

[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml 
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
     tag: 54_tcp_catalina_out
- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
     tag: 54_web_catalina_out
name: 192.168.1.54
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1

[root@localhost filebeat]#
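
The start command is not shown in the article; from /usr/local/filebeat, Filebeat can be run in the foreground for testing or in the background, for example:

./filebeat -e -c filebeat.yml          # foreground, logs to stderr
nohup ./filebeat -c filebeat.yml &     # background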

After installation is complete, go back to Logstash and edit its configuration file.

Logstash configuration

[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf 
input {
  kafka{
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}
filter {
        if [fields][tag]=="jpwebmap" {
        	json{
                source => "message"
                remove_field => "message"
        	}
        	geoip {
        	source => "client"
        	target => "geoip"
         	add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
         	add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}"  ]
        	}
         	mutate {
                convert => [ "[geoip][coordinates]", "float"]
                }
        }
	if [fields][tag] == "54_tcp_catalina_out"{
    		grok {
        		match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    		}
    		date {
        		match => ["logdate", "ISO8601"]
    		}
    		mutate {
         		remove_field => [ "logdate" ]
    		}
  	}
	if [fields][tag] == "54_web_catalina_out"{
                grok {
                        match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
                }
                date {
                        match => ["logdate", "ISO8601"]
                }
                mutate {
                        remove_field => [ "logdate" ]
                }
        }
	if [fields][tag] == "55_tcp_catalina_out"{
                grok {
                        match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
                }
                date {
                        match => ["logdate", "ISO8601"]
                }
                mutate {
                        remove_field => [ "logdate" ]
                }
        }
        if [fields][tag] == "55_web_catalina_out"{
                grok {
                        match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
                }
                date {
                        match => ["logdate", "ISO8601"]
                }
                mutate {
                        remove_field => [ "logdate" ]
                }
        }
	if [fields][tag] == "51_nginx80_access_log" {
        	mutate {
        		add_field => { "spstr" => "%{[log][file][path]}" }
        	}
       		mutate {
        		split => ["spstr" , "/"]
        		# save the last element of the array as the api_method.
        		add_field => ["src", "%{[spstr][-1]}" ]
        	}
        	mutate{
        		remove_field => [ "friends", "ecs", "agent" , "spstr" ]
        	}
    		grok {
        		match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
        		remove_field => "message"
    		}
    		date {
                	match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
                	target => "@timestamp"
        	}
    		geoip {
        		source => "x_forwarded_for"
        		target => "geoip"
        		database => "/home/elk/logstash/GeoLite2-City.mmdb"
        		add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
        		add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}"  ]
        	}
    		mutate {
        		convert => [ "[geoip][coordinates]", "float"]
    		}
  	}
}
output {
if [fields][tag] == "wori"{
  elasticsearch {
   hosts => ["192.168.1.70:9200"]
   index => "zabbix"
       }
   }
if [fields][tag] == "54_tcp_catalina_out"{
  elasticsearch {
   hosts => ["192.168.1.70:9200"]
   index => "54_tcp_catalina_out"
       } 
   }
if [fields][tag] == "54_web_catalina_out"{
  elasticsearch {
   hosts => ["192.168.1.70:9200"]
   index => "54_web_catalina_out"
       } 
   }
if [fields][tag] == "55_tcp_catalina_out"{
  elasticsearch {
   hosts => ["192.168.1.70:9200"]
   index => "55_tcp_catalina_out"
       } 
   }   
if [fields][tag] == "55_web_catalina_out"{
  elasticsearch {
   hosts => ["192.168.1.70:9200"]
   index => "55_web_catalina_out"
       } 
   }
if [fields][tag] == "51_nginx80_access_log" {
       stdout{}
  	elasticsearch {
   	hosts => ["192.168.1.70:9200"]
   	index => "51_nginx80_access_log"
   	}
   }
}
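
Before restarting Logstash with this pipeline, the configuration can be validated without starting it, for example:

./logstash -f ../dev.conf --config.test_and_exit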

Other configuration files

index.conf

filter {
	mutate {
        add_field => { "spstr" => "%{[log][file][path]}" }
	}
        mutate {
        split => ["spstr" , "/"]
        # save the last element of the array as the api_method.
        add_field => ["src", "%{[spstr][-1]}" ]
        }
        mutate{
	remove_field => [ "friends", "ecs", "agent" , "spstr" ]
	}
}

java.conf

filter {
if [fields][tag] == "java"{
    grok {
        match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
        match => ["logdate", "ISO8601"]
    }
    mutate {
         remove_field => [ "logdate" ]
    }
  } #End if
}

kafkainput.conf

input {
  kafka{
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat" ]
    topics => ["ql-prod-dubbo","ql-prod-nginx","ql-prod-tomcat" ]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }

}
#input {
#  kafka{
#    bootstrap_servers => "172.16.11.68:9092"
#    topics => ["ql-prod-java-dubbo","ql-prod","ql-prod-java" ]
#    codec => "json"
#    consumer_threads => 15
#    decorate_events => true
#    auto_offset_reset => "latest"
#    group_id => "logstash-1"
#    ############################# HELK Optimizing Latency #############################
#    fetch_min_bytes => "1"
#    request_timeout_ms => "305000"
#    ############################# HELK Optimizing Availability #############################
#    session_timeout_ms => "10000"
#    max_poll_records => "550"
#    max_poll_interval_ms => "300000"
#  }

#}

nginx.conf

filter {
if [fields][tag] == "nginx-access" {
        mutate {
        add_field => { "spstr" => "%{[log][file][path]}" }
        }
        mutate {
        split => ["spstr" , "/"]
        # save the last element of the array as the api_method.
        add_field => ["src", "%{[spstr][-1]}" ]
        }
        mutate{
        remove_field => [ "friends", "ecs", "agent" , "spstr" ]
        }

    grok {
        match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
        remove_field => "message"
    }
    date {
                match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
                target => "@timestamp"
        }
    geoip {
        source => "x_forwarded_for"
        target => "geoip"
        database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
        add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
        add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}"  ]

        }
    mutate {
        convert => [ "[geoip][coordinates]", "float"]
    }

  } #endif
}

output.conf

output{
  if [fields][tag] == "nginx-access" {
       stdout{}
  	elasticsearch {
   	user => elastic
   	password => WR141bp2sveJuGFaD4oR
   	hosts => ["172.16.11.67:9200"]
   	index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
   	}
   }
       #stdout{}
   if [fields][tag] == "java" {
        elasticsearch {
        user => elastic
        password => WR141bp2sveJuGFaD4oR
        hosts => ["172.16.11.66:9200","172.16.11.68:9200"]
        index => "%{[host][name]}-%{[src]}"
        }
  }
}
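
These files split the pipeline by function (input, filter, output). Assuming they are kept together in one directory, Logstash can load them all by pointing -f at that directory (the conf.d path below is an assumption):

nohup ./logstash -f /home/elk/logstash/conf.d/ &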

That concludes "How to Deploy an ELK Log Analysis System on CentOS 7.6". Hopefully the content above is of some help and lets you learn something new. If you found the article useful, please share it so more people can see it.
