【1】Resource Preparation
# 3 servers, 4C/8G each: Zookeeper, Kafka, and the Logstash broker (input: filebeat; output: Kafka)
10.101.2.23 10.101.2.24 10.101.2.25
# 2 servers, 4C/8G each: the Logstash indexer (input: Kafka; output: Elasticsearch)
10.101.2.26 10.101.2.27
# 3 servers, 8C/16G each: Elasticsearch
10.101.2.28 10.101.2.29 10.101.2.30
# 2 servers, 2C/4G each: Kibana
10.101.2.31 10.101.2.32
# Packages to download
elasticsearch-5.3.1.tar.gz
filebeat-5.3.1-linux-x86_64.tar.gz
jdk-8u131-linux-x64.tar.gz
kafka_2.12-0.10.2.0.tgz
kibana-5.3.1-linux-x86_64.tar.gz
logstash-5.3.1.tar.gz
node-v7.9.0-linux-x64.tar.gz
zookeeper-3.4.10.tar.gz
nginx-1.12.0.tar.gz
Upload all of these to /usr/local/src on each server.
【2】Common Configuration
# Configure hosts
vi /etc/hosts
10.101.2.23 vmserver2x23
10.101.2.24 vmserver2x24
10.101.2.25 vmserver2x25
10.101.2.26 vmserver2x26
10.101.2.27 vmserver2x27
10.101.2.28 vmserver2x28
10.101.2.29 vmserver2x29
10.101.2.30 vmserver2x30
10.101.2.31 vmserver2x31
10.101.2.32 vmserver2x32
# Restrict SSH access if necessary (a sketch of the rules follows)
vi /etc/hosts.allow
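A minimal TCP Wrappers sketch; the subnet below is an assumption based on the IPs above:
sshd: 10.101.2. # in /etc/hosts.allow — permit SSH from the cluster subnet
sshd: ALL # in /etc/hosts.deny — reject SSH from everywhere else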
【3】Install the Elasticsearch Cluster
# System settings
vi /etc/sysctl.conf
vm.max_map_count=262144
fs.file-max=65536
Run sysctl -p to apply the changes.
vi /etc/security/limits.conf # raise the open-file and process limits
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
* - memlock unlimited
vi /etc/security/limits.d/90-nproc.conf
* soft nproc 2048
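After logging in again, you can confirm the new limits took effect:
ulimit -n # expect 65536 (soft open-file limit)
ulimit -u # expect 2048 (soft max user processes)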
# Configure the Java environment
cd /usr/local/src/
tar -xvf jdk-8u131-linux-x64.tar.gz
mv jdk1.8.0_131 /usr/share/
vi /etc/profile # append the following 3 lines, then save and exit
export JAVA_HOME=/usr/share/jdk1.8.0_131
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
source /etc/profile # apply the changes immediately
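A quick check that the JDK is picked up:
java -version # should report java version "1.8.0_131"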
# Extract Elasticsearch
cd /usr/local/src
tar -xvf elasticsearch-5.3.1.tar.gz
mv elasticsearch-5.3.1 /usr/local
vi /usr/local/elasticsearch-5.3.1/bin/elasticsearch # set the ES_JAVA_OPTS parameter
ES_JAVA_OPTS="-Xms8g -Xmx8g" # after uncommenting this line, be sure to delete the trailing "./bin/elasticsearch"
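In other words, the shipped comment becomes a plain variable assignment, roughly:
# shipped: # ES_JAVA_OPTS="-Xms8g -Xmx8g" ./bin/elasticsearch
# edited to: ES_JAVA_OPTS="-Xms8g -Xmx8g"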
# Create the elastic group and user, since Elasticsearch refuses to start as root
groupadd elastic
useradd elastic -g elastic
passwd elastic # set the user's password
chown -R elastic:elastic /usr/local/elasticsearch-5.3.1/
# Configure elasticsearch.yml; the key parameters are:
cluster.name: bsd-elk
node.name: elk-2-30 # different on each node
node.master: true
node.data: true
bootstrap.memory_lock: true
bootstrap.system_call_filter: false # must be false on CentOS versions below 7
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.101.2.28:9300", "10.101.2.29:9300", "10.101.2.30:9300"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 60s # most articles online write this parameter as discovery.zen.ping.timeout
http.cors.enabled: true
http.cors.allow-origin: "*"
# Download node-v7.9.0-linux-x64.tar.gz, extract it, and mv it to /usr/local/nodejs-7.9.0
chown -R elastic:elastic nodejs-7.9.0/
cd /usr/local/nodejs-7.9.0
ln -s /usr/local/nodejs-7.9.0/bin/node /usr/local/bin
ln -s /usr/local/nodejs-7.9.0/bin/npm /usr/local/bin
# Install the head plugin. No offline install method was found for Elasticsearch 5.x, so the server needs outbound internet access
# 5.x was a milestone release; most articles online cover plugin installation for earlier versions
cd /usr/local/elasticsearch-5.3.1
git clone https://github.com/mobz/elasticsearch-head.git
If git is missing, install it first: yum install git
cd elasticsearch-head
npm install -g grunt --registry=https://registry.npm.taobao.org # install grunt
npm install # install head
npm install grunt --save # run this if node_modules/grunt/bin/ contains no grunt file
vi Gruntfile.js # in the connect section, add this host's IP to options: hostname: '10.101.2.30',
cd /usr/local/elasticsearch-5.3.1
bin/elasticsearch -d # start Elasticsearch as a daemon (run as the elastic user, since root is refused)
cd elasticsearch-head
node_modules/grunt/bin/grunt server & # start the head plugin
Open http://10.101.2.30:9100
# Install the bigdesk plugin
cd /usr/local/elasticsearch-5.3.1
git clone https://github.com/hlstudio/bigdesk
cd bigdesk/_site
python -m SimpleHTTPServer & # serve bigdesk on port 8000 (Python 2)
Open http://10.101.2.30:8000
Configure the other two servers (10.101.2.28 and 10.101.2.29) the same way. Choose master and data roles to suit your needs; my 3 nodes are all mixed master/data nodes.
Once every Elasticsearch instance is up, open head; if it shows a 3-node cluster, you are done.
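You can also verify from the shell; any node's IP works here:
curl http://10.101.2.30:9200/_cluster/health?pretty # "number_of_nodes": 3 with status green means the cluster formed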
【4】Install the ZooKeeper Cluster
# ZooKeeper depends on Java; configure it as described above
# Extract zookeeper-3.4.10.tar.gz
cd /usr/local/src
tar -xvf zookeeper-3.4.10.tar.gz
mv zookeeper-3.4.10 /usr/local
mkdir /usr/local/zookeeper-3.4.10/data # create a data directory on each node
# Create the myid file
echo 23 >/usr/local/zookeeper-3.4.10/data/myid # set myid to 23, 24, and 25 on 10.101.2.23, .24, and .25 respectively
# Configure zoo.cfg
cd /usr/local/zookeeper-3.4.10/conf/
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg # key parameters:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.10/data
clientPort=2181
server.23=10.101.2.23:2888:3888
server.24=10.101.2.24:2888:3888
server.25=10.101.2.25:2888:3888
# Copy the config file to the other nodes
scp zoo.cfg root@ip:/usr/local/zookeeper-3.4.10/conf/
# Start the ZooKeeper cluster
cd /usr/local/zookeeper-3.4.10/
bin/zkServer.sh start
bin/zkServer.sh status # the leader reports Mode: leader, the others Mode: follower
The ZooKeeper cluster is now configured.
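ZooKeeper also answers the four-letter "stat" command, handy for checking a node remotely (assumes nc is installed):
echo stat | nc 10.101.2.23 2181 # prints the node's Mode and connected clients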
【5】Configure the Kafka Cluster
# Extract kafka_2.12-0.10.2.0.tgz and create a data directory
cd /usr/local
tar -xvf src/kafka_2.12-0.10.2.0.tgz
mkdir /usr/local/kafka_2.12-0.10.2.0/data
# Configure server.properties
cd /usr/local/kafka_2.12-0.10.2.0/config
vi server.properties # key parameters:
broker.id=23 # set to 23, 24, and 25 on 10.101.2.23, .24, and .25 respectively
delete.topic.enable=true
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka_2.12-0.10.2.0/data
num.partitions=6
num.recovery.threads.per.data.dir=1
#log.flush.interval.messages=10000
#log.flush.interval.ms=1000
log.retention.hours=72
#log.retention.bytes=1073741824
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.101.2.23:2181,10.101.2.24:2181,10.101.2.25:2181
zookeeper.connection.timeout.ms=6000
# Copy the config to the other nodes; don't forget to change broker.id
scp server.properties root@ip:/usr/local/kafka_2.12-0.10.2.0/config/
# Start the Kafka cluster
cd /usr/local/kafka_2.12-0.10.2.0/
bin/kafka-server-start.sh config/server.properties > /dev/null &
# A few handy commands as a bonus
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test # create a topic
bin/kafka-topics.sh --list --zookeeper localhost:2181 # list existing topics
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test # show a topic's details
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test # produce messages; type a line and press Enter to send
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test # consume messages; run on another Kafka node to receive what the producer sends
bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic test --partitions 6 # add partitions to a topic
bin/kafka-topics.sh --delete --zookeeper localhost:2181 --topic test1 # delete a topic (requires delete.topic.enable=true)
If it still won't delete, you can remove it directly in ZooKeeper:
cd /usr/local/zookeeper-3.4.10/
bin/zkCli.sh
ls /brokers/topics # list topics
rmr /brokers/topics/test1 # delete the topic
The Kafka cluster is now configured.
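While in zkCli.sh, a quick way to confirm all three brokers registered:
ls /brokers/ids # should return [23, 24, 25], matching the broker.id values above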
【6】Configure the Logstash Broker Cluster
# Java setup omitted (see above)
# Extract logstash-5.3.1.tar.gz
cd /usr/local
tar -xvf src/logstash-5.3.1.tar.gz
# Add the config file beat_to_kafka.conf
cd logstash-5.3.1
vi config/beat_to_kafka.conf # enter the following and save
input {
beats {
port => 5044
}
}
filter {
}
# topic_id is taken from the document_type configured in each beat, so events go to per-type topics for grouping and filtering in Kibana
output {
kafka {
bootstrap_servers => "10.101.2.23:9092,10.101.2.24:9092,10.101.2.25:9092"
# topic_id => "bsd-log"
topic_id => '%{[type]}'
}
}
# Start Logstash
bin/logstash -f config/beat_to_kafka.conf > /dev/null &
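You can also ask Logstash 5.x to validate the config without starting a pipeline:
bin/logstash -f config/beat_to_kafka.conf --config.test_and_exit # prints "Configuration OK" if the file parses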
The Logstash broker cluster is now configured.
【7】Install Filebeat on the Application Servers
# Extract filebeat-5.3.1-linux-x86_64.tar.gz
cd /usr/local/
tar -xvf src/filebeat-5.3.1-linux-x86_64.tar.gz
mv filebeat-5.3.1-linux-x86_64 filebeat-5.3.1
# Configure filebeat.yml; start by trying it on one DRDS server
cd filebeat-5.3.1
vi filebeat.yml # key parameters:
#=========================== file content begins =============================
filebeat.prospectors:
-
input_type: log
paths:
- /home/admin/drds-server/3306/logs/rms/slow.log
- /home/admin/drds-server/3306/logs/engineering/slow.log
- /home/admin/drds-server/3306/logs/sc_file/slow.log
- /home/admin/drds-server/3306/logs/sc_user/slow.log
- /home/admin/drds-server/3306/logs/sc_order/slow.log
- /home/admin/drds-server/3306/logs/sc_inventory/slow.log
- /home/admin/drds-server/3306/logs/sc_marketing/slow.log
- /home/admin/drds-server/3306/logs/sc_message/slow.log
- /home/admin/drds-server/3306/logs/sc_channel/slow.log
#exclude_lines: ["^DBG"]
#include_lines: ['Exception','ERR_CODE']
#exclude_files: [".gz$"]
document_type: drds-slow
# multiline merge pattern: lines not beginning with a timestamp are appended to the previous event
multiline.pattern: ^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}
multiline.negate: true
multiline.match: after
# a second prospector on the same machine, with a different document_type
-
input_type: log
paths:
- /home/admin/drds-server/3306/logs/test/sql.log
document_type: drds-sql
multiline.pattern: ^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}
multiline.negate: true
multiline.match: after
#----------------------------- Logstash output --------------------------------
output.logstash:
# The Logstash hosts
hosts: ["10.101.2.23:5044","10.101.2.24:5044","10.101.2.25:5044"]
#=========================== file content ends =============================
# Start Filebeat
./filebeat -c filebeat.yml > /dev/null &
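To validate the file without shipping anything, Filebeat 5.x supports a config test:
./filebeat -configtest -c filebeat.yml # checks filebeat.yml and exits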
【8】Configure the Logstash Indexer Cluster
# Java setup omitted (see above)
# Extract logstash-5.3.1.tar.gz
cd /usr/local
tar -xvf src/logstash-5.3.1.tar.gz
# Add the config file kafka_to_es.conf
cd logstash-5.3.1
vi config/kafka_to_es.conf # enter the following and save
# the server and topic options in the kafka input differ between 5.x and earlier versions
input {
kafka {
bootstrap_servers => "10.101.2.23:9092,10.101.2.24:9092,10.101.2.25:9092"
group_id => "logstash"
topics => ["drds-sql","drds-slow","sc_user","sc_channel","sc_order","sc_inventory","sc_message","sc_file","sc_marketing","rms",'scm','engineering']
consumer_threads => 50
decorate_events => true
}
}
filter {
}
output {
elasticsearch {
hosts => ["10.101.2.28:9200","10.101.2.29:9200","10.101.2.30:9200"]
index => "logstash-%{+YYYY.MM.dd.HH}" # HH (24-hour) gives one index per hour; lowercase hh would fold AM and PM hours together
manage_template => true
template_overwrite => true
template_name => "drdsLogstash"
flush_size => 50000
idle_flush_time => 10
}
}
# Start Logstash
bin/logstash -f config/kafka_to_es.conf > /dev/null &
The Logstash indexer cluster is now configured; barring surprises, data should already be flowing into elasticsearch-head.
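A shell-level check that documents are arriving (any ES node works):
curl 'http://10.101.2.28:9200/_cat/indices?v' # logstash-* indices with a growing docs.count confirm ingestion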
【9】Configure the Kibana Cluster
# Extract kibana-5.3.1-linux-x86_64.tar.gz
cd /usr/local
tar -xvf src/kibana-5.3.1-linux-x86_64.tar.gz
mv kibana-5.3.1-linux-x86_64/ kibana-5.3.1
# Configure kibana.yml
cd kibana-5.3.1
vi config/kibana.yml # key parameters:
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://10.101.2.28:9200" # point at one of the ES cluster's master nodes
# Start Kibana
bin/kibana > /dev/null &
# Kibana is picky about browsers; old versions of Chrome and IE get stuck at the loading screen
Open http://ip:5601
# Configure the other Kibana node the same way (you can point elasticsearch.url at a different ES node). Kibana queries support boolean operators, wildcards, and more; operators must be uppercase (AND, OR). Search online for details.
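A couple of example query strings for the Kibana search bar, using the document types defined earlier (the "timeout" keyword is just an illustration):
type:drds-slow AND message:*timeout* # slow-log entries mentioning "timeout"
type:drds-sql OR type:drds-slow # everything shipped from the DRDS servers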
【10】Configure the Nginx Proxy
# Install the dependencies Nginx needs
yum -y install pcre-devel
yum -y install gd-devel
# Extract nginx-1.12.0.tar.gz
cd /usr/local/
tar -xvf src/nginx-1.12.0.tar.gz
# Build and install Nginx
cd nginx-1.12.0
./configure --prefix=/usr/local/nginx-1.12.0/ --conf-path=/usr/local/nginx-1.12.0/nginx.conf
make
make install
# Configure nginx.conf; we only need load balancing here, so a simple setup will do
vi /usr/local/nginx-1.12.0/nginx.conf
worker_processes 1;
error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
keepalive_timeout 65;
upstream kibana {
server 10.101.2.31:5601;
server 10.101.2.32:5601;
}
server {
listen 15601;
server_name 10.101.2.31;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
proxy_pass http://kibana;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}
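Before starting, validate the syntax; nginx -t tests the conf-path baked in at configure time:
sbin/nginx -t # "syntax is ok" and "test is successful" mean the config parses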
# Start Nginx
sbin/nginx
# Then browse to http://nginx_ip:15601
All cluster components are now configured.