EFK搭建
环境:
centos7
192.168.232.135
jdk,zookeeper,kafka,filebeat,elasticsearch
192.168.232.136
jdk,zookeeper,kafka,filebeat,logstash
192.168.232.137
jdk,zookeeper,kafka,filebeat,kibana
1:关闭防火墙 selinux
systemctl stop firewalld
setenforce 0
2 :时间同步
yum -y install ntpdate
ntpdate pool.ntp.org
3 :上传软件安装包
4: 修改主机名称
hostnamectl set-hostname kafka01
hostnamectl set-hostname kafka02
hostnamectl set-hostname kafka03
5: 修改hosts文件
vim /etc/hosts
192.168.232.135 kafka01
192.168.232.136 kafka02
192.168.232.137 kafka03
测试:ping kafka01
6:安装jdk
rpm -ivh jdk-8u131-linux-x64.rpm
验证:
java -version
java version "1.8.0_131"
Java(TM) SE Runtime Environment (build 1.8.0_131-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode)
7:安装zookeeper
tar zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
7.1编辑 zoo.cfg
cd /usr/local/zookeeper/conf
mv zoo_sample.cfg zoo.cfg
vim zoo.cfg
7.2 参数详解:
tickTime=2000 # zk服务器之间的心跳时间
initLimit=10 # zk连接失败的时间
syncLimit=5 # zk的同步通信时间
dataDir=/tmp/zookeeper #zk的数据目录
clientPort=2181 # zk的监听端口号
最后一行添加:
server.1=192.168.232.135:2888:3888 # 服务器编号,2888:通信端口 3888: 选举端口
server.2=192.168.232.136:2888:3888
server.3=192.168.232.137:2888:3888
7.3 创建myid文件
mkdir /tmp/zookeeper
kafka01:
echo "1" > /tmp/zookeeper/myid
kafka02:
echo "2" > /tmp/zookeeper/myid
kafka03:
echo "3" > /tmp/zookeeper/myid
8:开启zk服务
/usr/local/zookeeper/bin/zkServer.sh start
验证服务状态
/usr/local/zookeeper/bin/zkServer.sh status
一个leader
两个follower
9 部署kafka
tar zxvf kafka_2.11-2.2.0.tgz
mv kafka_2.11-2.2.0 /usr/local/kafka
9.1 编辑主配置文件
vim /usr/local/kafka/config/server.properties
kafka01:
broker.id=1
listeners=PLAINTEXT://192.168.232.135:9092
zookeeper.connect=192.168.232.135:2181,192.168.232.136:2181,192.168.232.137:2181
kafka02:
broker.id=2
listeners=PLAINTEXT://192.168.232.136:9092
zookeeper.connect=192.168.232.135:2181,192.168.232.136:2181,192.168.232.137:2181
kafka03:
broker.id=3
listeners=PLAINTEXT://192.168.232.137:9092
zookeeper.connect=192.168.232.135:2181,192.168.232.136:2181,192.168.232.137:2181
9.2 开启kafka 服务
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
验证:
netstat -lptnu|grep 9092
10.1 创建topic(创建一台就可以)
/usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.232.135:2181 --replication-factor 2 --partitions 3 --topic wg007
10.2 查看当前的topic
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.232.135:2181
10.3 模拟生产者
/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.232.135:9092 --topic wg007
10.4 模拟消费者
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.232.135:9092 --topic wg007 --from-beginning
11:安装filebeat(收集日志的)
rpm -ivh filebeat-6.8.12-x86_64.rpm
cd /etc/filebeat/
备份filebeat.yml
mv filebeat.yml filebeat.yml.bak
12:编辑filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/messages
output.kafka:
enabled: true
hosts: ["192.168.232.135:9092","192.168.232.136:9092","192.168.232.137:9092"]
topic: messages
13 安装elasticsearch
rpm -ivh elasticsearch-6.6.2.rpm
13.1 编辑elasticsearch的配置文件
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: wg007
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.232.135
http.port: 9200
13.2启动elasticsearch服务
systemctl enable elasticsearch
systemctl start elasticsearch
13.3验证:
netstat -lptnu|egrep java
14 部署logstash
rpm -ivh logstash-6.6.0.rpm
cd /etc/logstash/conf.d/
14.1编辑messages.conf
vim messages.conf
input {
kafka {
bootstrap_servers => ["192.168.232.135:9092,192.168.232.136:9092,192.168.232.137:9092"]
group_id => "logstash"
topics => "messages"
consumer_threads => 5
}
}
output {
elasticsearch {
hosts => "192.168.232.135:9200"
index => "msg-log-%{+YYYY.MM.dd}"
}
}
14.2启动logstash
systemctl start logstash
14.3验证:
netstat -nltp |grep 9600
15:部署kibana
rpm -ivh kibana-6.6.2-x86_64.rpm
15.1编辑kibana的主配置文件
vim /etc/kibana/kibana.yml
15.2 开启kibana
systemctl start kibana
15.3 验证
netstat -lptnu|grep 5601
15.4 没有index 怎么办?
1: chmod 777 /var/log/messages (确保filebeat有读取权限)
2: echo "test" >> /var/log/messages