docker-compose部署zk集群+kafka集群

1,新建网络

docker network create --driver bridge --subnet 172.23.0.0/16 --gateway 172.23.0.1  zookeeper_network


2,查看新建的网络是否成功

docker network ls 


3,如果建错了,可删除

docker network rm {id}

4,安装zookeeper以及kafka镜像+kafka管理工具

 docker pull zookeeper
 docker pull wurstmeister/kafka
docker pull hlebalbau/kafka-manager

5.创建需要的文件夹以及文件

cd /mnt

mkdir zookeeper


cd zookeeper

mkdir zoo1 zoo2 zoo3 kafka1 kafka2 kafka3

#1,创建zoo1对应的挂载目录

mkdir ./zoo1/data
mkdir ./zoo1/datalog

#2,创建zoo2对应的挂载目录

mkdir ./zoo2/data
mkdir ./zoo2/datalog

#3,创建zoo3对应的挂载目录

mkdir ./zoo3/data
mkdir ./zoo3/datalog


#4,创建kafka1对应的挂载目录

mkdir ./kafka1/data
mkdir ./kafka1/logs


#5,创建kafka2对应的挂载目录

mkdir ./kafka2/data
mkdir ./kafka2/logs


#6,创建kafka3对应的挂载目录

mkdir ./kafka3/data
mkdir ./kafka3/logs

6,针对zoo1,zoo2 ,zoo3 分别创建对应的myid文件以及zoo.cfg文件

#1,进入zoo1文件夹
cd zoo1
vi myid
1

#2,进入zoo2文件夹
cd zoo2
vi myid
2


#3,进入zoo3文件夹
cd zoo3
vi myid
3

6.1 zoo.cfg文件三个相同,就是对应的ip+端口(同一份zoo.cfg需分别放到zoo1、zoo2、zoo3三个目录下)

cd zoo1

vi zoo.cfg

6.2 zoo.cfg文件内容

# ZooKeeper ensemble config — the identical file is mounted into all three
# containers (zoo1/zoo2/zoo3); only each node's myid file differs.
# Basic time unit in ms; all other timeouts are multiples of this.
tickTime=2000
# Max ticks a follower may take to connect and sync with the leader at startup.
initLimit=10
# Max ticks a follower may lag behind the leader before being dropped.
syncLimit=5
# Paths are container-internal; mapped to ./zooX/data and ./zooX/datalog on the host.
dataDir=/data
dataLogDir=/datalog
clientPort=2181
# server.<myid>=<ip>:<peer-port>:<leader-election-port>;<client-port>
# IPs match the fixed ipv4_address values assigned in docker-compose.yaml.
server.1=172.23.0.11:2888:3888;2181
server.2=172.23.0.12:2888:3888;2181
server.3=172.23.0.13:2888:3888;2181

7,创建docker-compose.yaml

cd zookeeper

vi docker-compose.yaml

7.1 docker-compose.yaml具体内容


version: '3.0'

services:
  # --- ZooKeeper ensemble: three nodes with fixed IPs 172.23.0.11-13 ---
  zoo1:
    image: zookeeper
    restart: always
    container_name: zoo1
    hostname: zoo1
    ports:
      - "2181:2181"  # quoted: port mappings should always be strings in YAML
    volumes:
      # Mounted zoo.cfg takes effect inside the container; ZOO_* env vars are
      # kept as a fallback for config generation by the image entrypoint.
      - "./zoo1/zoo.cfg:/conf/zoo.cfg"
      - "./zoo1/data:/data"
      - "./zoo1/datalog:/datalog"
    environment:
      ZOO_MY_ID: "1"
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.11

  zoo2:
    image: zookeeper
    restart: always
    container_name: zoo2
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - "./zoo2/zoo.cfg:/conf/zoo.cfg"
      - "./zoo2/data:/data"
      - "./zoo2/datalog:/datalog"
    environment:
      ZOO_MY_ID: "2"
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.12

  zoo3:
    image: zookeeper
    restart: always
    container_name: zoo3
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - "./zoo3/zoo.cfg:/conf/zoo.cfg"
      - "./zoo3/data:/data"
      - "./zoo3/datalog:/datalog"
    environment:
      ZOO_MY_ID: "3"
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888
    networks:
      default:
        ipv4_address: 172.23.0.13

  # --- Kafka brokers: three nodes with fixed IPs 172.23.0.14-16 ---
  kafka1:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka1
    hostname: kafka1
    privileged: true
    ports:
      - "9092:9092"
    environment:
      # Address advertised to clients; reachable from inside the docker network.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.14:9092
      # Legacy settings below are superseded by KAFKA_ADVERTISED_LISTENERS but
      # kept for compatibility with the original setup.
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.14
      KAFKA_HOST_NAME: kafka1
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_ADVERTISED_PORT: "9092"
      KAFKA_BROKER_ID: "0"
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.14:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    volumes:
      - "./kafka1/data:/kafka"
      - "./kafka1/logs:/opt/kafka/logs"
    # depends_on replaces the deprecated links: — on a user-defined network the
    # services already resolve each other by name; only start order is needed.
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.14

  kafka2:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka2
    hostname: kafka2
    privileged: true
    ports:
      - "9093:9092"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.15:9092
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.15
      KAFKA_HOST_NAME: kafka2
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      # 9093 is the host-mapped port; inside the network the listener is 9092
      # (KAFKA_ADVERTISED_LISTENERS above takes precedence).
      KAFKA_ADVERTISED_PORT: "9093"
      KAFKA_BROKER_ID: "1"
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.15:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    volumes:
      - "./kafka2/data:/kafka"
      - "./kafka2/logs:/opt/kafka/logs"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.15

  kafka3:
    image: wurstmeister/kafka
    restart: always
    container_name: kafka3
    hostname: kafka3
    privileged: true
    ports:
      - "9094:9092"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.16:9092
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.16
      KAFKA_HOST_NAME: kafka3
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_ADVERTISED_PORT: "9094"
      KAFKA_BROKER_ID: "2"
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.16:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    volumes:
      - "./kafka3/data:/kafka"
      - "./kafka3/logs:/opt/kafka/logs"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    networks:
      default:
        ipv4_address: 172.23.0.16

  # --- Kafka management UI, reachable at http://<host>:9000 ---
  kafka-manager:
    image: hlebalbau/kafka-manager
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    depends_on:
      - kafka1
      - kafka2
      - kafka3
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      # Fixed: second entry was garbled ("172.23.0.15.52.70:9093"). kafka-manager
      # runs inside the compose network, where every broker listens on 9092.
      KAFKA_BROKERS: 172.23.0.14:9092,172.23.0.15:9092,172.23.0.16:9092
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_AUTH_ENABLED: "true"  # enable basic auth for the UI
      KAFKA_MANAGER_USERNAME: "admin"
      KAFKA_MANAGER_PASSWORD: "admin"
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.23.0.10

networks:
  default:
    external:
      # Pre-created in step 1: docker network create ... zookeeper_network
      name: zookeeper_network

8,用docker-compose 启动(需要进入到docker-compose.yaml所在的文件夹)

cd /mnt/zookeeper

docker-compose up -d

9,查看是否启动成功

docker ps 

启动成功会显示下图:

  10,验证zk集群

docker exec -ti zoo1 /bin/bash

cd bin

./zkServer.sh status

zoo1 显示状态为follower

zoo2 状态也是follower

 

 zoo3状态为leader

 到这里,zk集群是正常的

11,创建topic并验证消费者和生产者

11.1 创建topic

docker exec -ti kafka1 bash

cd opt

#进入安装的版本下面

cd  kafka_2.13-2.7.1/bin

#执行创建topic 

 kafka-topics.sh --create --zookeeper 172.23.0.11:2181 --replication-factor 1 --partitions 3 --topic chattest

成功了会显示如下

新打开一个shell,验证其他节点是否能看到新建的topic

docker exec -ti kafka2 bash

cd opt/kafka_2.13-2.7.1/bin

kafka-topics.sh --list --zookeeper 172.23.0.13:2181

 能看到,说明没问题

11.2 验证生产者和消费者

docker exec -ti kafka1 bash

cd opt/kafka_2.13-2.7.1/bin

./kafka-console-producer.sh --broker-list 172.23.0.14:9092,172.23.0.15:9092,172.23.0.16:9092 --topic chattest

新打开一个shell,打开消费者

docker exec -ti kafka2 bash

cd opt/kafka_2.13-2.7.1/bin

./kafka-console-consumer.sh --bootstrap-server 172.23.0.14:9092,172.23.0.15:9092,172.23.0.16:9092 --topic chattest --from-beginning

显示结果如下

12,kafka-manager 配置

kafka-manager 地址为

ip:9000

 

大功告成了!

后续可以加上zk的管理界面 

猜你喜欢

转载自blog.csdn.net/weixin_36755535/article/details/121428164