ELK Setup (kibana + zookeeper + kafka + filebeat)

https://blog.csdn.net/fenghumen/article/details/109083538

Environment:
CentOS 7
192.168.1.4
jdk, zookeeper, kafka, filebeat, elasticsearch
192.168.1.5
jdk, zookeeper, kafka, filebeat, logstash
192.168.1.6
jdk, zookeeper, kafka, filebeat, kibana

1: Disable the firewall and SELinux

systemctl stop firewalld
setenforce 0
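
The two commands above only last until the next reboot; a minimal sketch of making the change persistent (standard CentOS 7 steps):

systemctl disable firewalld
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config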

2: Time synchronization (on all three nodes)

yum -y install ntpdate
ntpdate pool.ntp.org
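
ntpdate only performs a one-shot sync; a hedged example of keeping the clocks aligned with a root cron entry (the /usr/sbin/ntpdate path is the CentOS 7 default):

(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate pool.ntp.org >/dev/null 2>&1") | crontab -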

3: Upload the installation packages to /usr/local/src

[root@bogon src]# ll
total 265296
-rw-r--r--. 1 root root 169983496 Aug 20 16:53 jdk-8u131-linux-x64_.rpm
-rw-r--r--. 1 root root  63999924 Aug 20 16:53 kafka_2.11-2.2.0.tgz
-rw-r--r--. 1 root root  37676320 Aug 20 16:52 zookeeper-3.4.14.tar.gz

4: Set the hostname (run the matching command on its node)

hostnamectl set-hostname kafka01
hostnamectl set-hostname kafka02
hostnamectl set-hostname kafka03

5: Edit /etc/hosts (append these entries on all three nodes)

192.168.1.4 kafka01
192.168.1.5 kafka02
192.168.1.6 kafka03
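
A minimal sketch of appending these mappings on each node (same three entries as above):

cat >> /etc/hosts <<'EOF'
192.168.1.4 kafka01
192.168.1.5 kafka02
192.168.1.6 kafka03
EOF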

6: Install the JDK

rpm -ivh jdk-8u131-linux-x64_.rpm

Verification:

[root@kafka01 src]# java -version
java version "1.8.0_131"
Java(TM) SE Runtime Environment (build 1.8.0_131-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode)

7: Install ZooKeeper

tar zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper

7.1 Edit zoo.cfg

cd /usr/local/zookeeper/conf
mv zoo_sample.cfg  zoo.cfg

7.2 Parameter reference:

tickTime=2000 # basic time unit in ms; also the heartbeat interval between servers
initLimit=10 # max ticks a follower may take to connect to and sync with the leader at startup
syncLimit=5 # max ticks allowed between a request to a follower and its acknowledgement
dataDir=/tmp/zookeeper  # ZooKeeper data directory
clientPort=2181 # client listening port
server.1=192.168.1.4:2888:3888  # server id; 2888: peer communication port, 3888: leader election port
server.2=192.168.1.5:2888:3888
server.3=192.168.1.6:2888:3888
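
The first five settings already exist in zoo_sample.cfg with these values, so only the three server.N lines need to be appended; a minimal sketch, assuming the stock sample config, run on every node:

cat >> /usr/local/zookeeper/conf/zoo.cfg <<'EOF'
server.1=192.168.1.4:2888:3888
server.2=192.168.1.5:2888:3888
server.3=192.168.1.6:2888:3888
EOF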

7.3 Create the myid file (the value must match that node's server.N id)

mkdir /tmp/zookeeper
kafka01:
echo "1" > /tmp/zookeeper/myid
kafka02:
echo "2" > /tmp/zookeeper/myid
kafka03:
echo "3" > /tmp/zookeeper/myid

8: Start the ZooKeeper service (on all three nodes)

/usr/local/zookeeper/bin/zkServer.sh start

Verify the service status (one node should report leader, the others follower)

/usr/local/zookeeper/bin/zkServer.sh status
Mode: follower
Mode: leader
[root@kafka02 conf]# netstat  -lptnu|grep java
tcp6       0      0 :::2181                 :::*                    LISTEN      3372/java           
tcp6       0      0 192.168.1.5:2888        :::*                    LISTEN      3372/java           
tcp6       0      0 192.168.1.5:3888        :::*                    LISTEN      3372/java  
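
As an extra sanity check, the bundled zkCli client can connect to any node; at its prompt, listing the root znodes should succeed (a hedged example, output varies):

/usr/local/zookeeper/bin/zkCli.sh -server 192.168.1.4:2181
ls /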

9 Deploy Kafka

tar zxvf kafka_2.11-2.2.0.tgz
mv kafka_2.11-2.2.0 /usr/local/kafka
cd /usr/local/kafka/config

9.1 Edit the main configuration file

vim server.properties
kafka01:
broker.id=0
advertised.listeners=PLAINTEXT://kafka01:9092
zookeeper.connect=192.168.1.4:2181,192.168.1.5:2181,192.168.1.6:2181
kafka02:
broker.id=1
advertised.listeners=PLAINTEXT://kafka02:9092
zookeeper.connect=192.168.1.4:2181,192.168.1.5:2181,192.168.1.6:2181
kafka03:
broker.id=2
advertised.listeners=PLAINTEXT://kafka03:9092
zookeeper.connect=192.168.1.4:2181,192.168.1.5:2181,192.168.1.6:2181
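
Only the lines that differ per node are shown above; a hedged sketch of the other server.properties settings worth checking on kafka01 (the property names are standard Kafka options, the listener address is an assumption for this host, the rest are 2.2.0 defaults):

listeners=PLAINTEXT://192.168.1.4:9092
log.dirs=/tmp/kafka-logs
num.partitions=1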

9.2 Start the Kafka service (on all three nodes)

/usr/local/kafka/bin/kafka-server-start.sh  -daemon /usr/local/kafka/config/server.properties

Verification:

[root@kafka01 config]# netstat  -lptnu|grep 9092
tcp6       0      0 :::9092                 :::*                    LISTEN      14930/java   
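
Each broker also registers itself in ZooKeeper, so a further hedged check is to connect with the bundled client and list the broker ids at the zkCli prompt (0, 1 and 2 should all be present):

/usr/local/zookeeper/bin/zkCli.sh -server 192.168.1.4:2181
ls /brokers/ids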

10 Test Kafka

10.1 Create a topic

[root@kafka01 config]# /usr/local/kafka/bin/kafka-topics.sh  --create  --zookeeper 192.168.1.4:2181 --replication-factor 2 --partitions 3 --topic wg007

10.2 List the current topics

[root@kafka01 config]# /usr/local/kafka/bin/kafka-topics.sh  --list  --zookeeper 192.168.1.4:2181
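
To see how partitions and replicas were spread across the three brokers, the same tool can describe the topic (a hedged example using the same connection parameters):

/usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.1.4:2181 --topic wg007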

10.3 Simulate a producer

[root@kafka01 config]# /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.1.4:9092 --topic wg007
>

10.4 Simulate a consumer (lines typed at the producer prompt on kafka01 should appear here)

[root@kafka02 config]# /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.4:9092  --topic wg007 --from-beginning

11: Deploy Filebeat

11.1 Configure the yum repository

[root@kafka01 yum.repos.d]# vim filebeat.repo
[filebeat-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

Then install:

yum -y install filebeat
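
With gpgcheck=1 enabled, the Elastic signing key may need to be imported before the install succeeds (standard step from Elastic's repository documentation):

rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch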

11.2 Edit filebeat.yml

[root@kafka01 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths: 
    - /var/log/messages

output.kafka:
  enabled: true
  hosts: ["192.168.1.4:9092","192.168.1.5:9092","192.168.1.6:9092"]
  topic: messages
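
The rpm installs a systemd unit, so Filebeat can then be enabled and started; once it ships events, the messages topic should appear in the Kafka topic list (a hedged check reusing the command from step 10.2):

systemctl enable filebeat
systemctl start filebeat
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.4:2181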

12 Install Elasticsearch

rpm -ivh elasticsearch-6.6.2.rpm

12.1 Edit the ES configuration file

[root@kafka01 src]# cat  /etc/elasticsearch/elasticsearch.yml |grep -v "^#"
cluster.name: wg007
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.1.4
http.port: 9200
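
Heap size is configured separately in /etc/elasticsearch/jvm.options; the values below are the package defaults and may need raising on larger machines (a hedged reminder, not a required change):

-Xms1g
-Xmx1g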

12.2 Start the ES service

systemctl enable elasticsearch
systemctl start elasticsearch

Verification:

[root@kafka01 src]# netstat  -lptnu|egrep  "9200|9300"
tcp6       0      0 192.168.1.4:9200        :::*                    LISTEN      80284/java          
tcp6       0      0 192.168.1.4:9300        :::*                    LISTEN      80284/java   
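
The HTTP API gives one more check that the node answers queries (hedged example; a fresh single-node cluster usually reports green or yellow status):

curl http://192.168.1.4:9200/_cluster/health?pretty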

13 Deploy Logstash

13.1 Upload and install the rpm package

rpm -ivh logstash-6.6.0.rpm

13.2 Edit /etc/logstash/conf.d/messages.conf

[root@kafka02 conf.d]# cat messages.conf 
input {
  kafka {
    bootstrap_servers => "192.168.1.4:9092,192.168.1.5:9092,192.168.1.6:9092"
    group_id => "logstash"
    topics => ["messages"]
    consumer_threads => 5
  }
}

output {
  elasticsearch {
    hosts => "192.168.1.4:9200"
    index => "msg-log-%{+YYYY.MM.dd}"
  }
}
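
Nothing above actually starts Logstash, so the port check below would fail until it runs. A minimal sketch using the systemd unit installed by the rpm; the config-test invocation is optional and assumes the rpm's default install path /usr/share/logstash:

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/messages.conf --config.test_and_exit
systemctl enable logstash
systemctl start logstash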

Verification:

[root@kafka02 conf.d]# netstat  -lptnu|grep 9600
tcp6       0      0 127.0.0.1:9600          :::*                    LISTEN      77046/java  
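
Once events flow through the pipeline, the daily msg-log-* index should show up in Elasticsearch; a hedged check via the cat API:

curl http://192.168.1.4:9200/_cat/indices?v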

14 Deploy Kibana

14.1 Install the rpm package

rpm -ivh kibana-6.6.2-x86_64.rpm

14.2 Edit the Kibana main configuration file

[root@kafka03 src]# cat /etc/kibana/kibana.yml |grep -v "^#"|sed '/^$/d'
server.port: 5601
server.host: "192.168.1.6"
elasticsearch.hosts: ["http://192.168.1.4:9200"]

14.3 Start Kibana

systemctl enable kibana
systemctl start kibana

14.4 Verification:

[root@kafka03 src]# netstat  -lptnu|grep 5601
tcp        0      0 192.168.1.6:5601        0.0.0.0:*               LISTEN      72621/node 

15: If no index appears in Kibana, try the following

1: chmod 777 /var/log/messages    (make the collected log file readable by Filebeat)
2: echo "test" >> /var/log/messages    (append a line so a fresh event is shipped through the pipeline)
