Spring Boot + Kafka: relaying messages between two Kafka clusters

Use cases:

1. Relaying messages between topics within the same Kafka cluster. This is done with the two annotations
@KafkaListener(topics = {"topic1"}, errorHandler = "consumerAwareErrorHandler")
@SendTo("topic2")
and is not covered in depth here; a minimal sketch follows this list.
2. Consuming messages from one Kafka cluster, reshaping them into a custom format, and writing them into another Kafka cluster for third parties to consume.
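
For completeness, a minimal sketch of case 1 (the method name is illustrative; @SendTo forwards the listener's return value, and the container factory must have a reply template configured, e.g. factory.setReplyTemplate(kafkaTemplate)):

// Case 1 sketch: consume from topic1 and forward the (optionally transformed)
// value to topic2 on the same cluster.
@KafkaListener(topics = {"topic1"}, errorHandler = "consumerAwareErrorHandler")
@SendTo("topic2")
public String forwardToTopic2(String message) {
    return message; // transform here if needed before forwarding
}
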
The example below walks through case 2.

1. Add the spring-kafka dependency. Mind the compatibility between the Spring Boot and spring-kafka versions; this example uses Spring Boot 2.2.5 with spring-kafka 2.3.6.
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
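
The starter's version is usually managed by the Spring Boot parent; Spring Boot 2.2.5 resolves spring-kafka to the 2.3.6 line mentioned above. If the project does not inherit that dependency management, pin the version explicitly:

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- only needed when no Spring Boot dependency management supplies the version -->
    <version>2.3.6.RELEASE</version>
</dependency>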

2. application.yml. The Kafka settings are commented inline rather than explained at length here.
spring:
  kafka:
    ########### [Kafka cluster] ###########
    bootstrap-servers: xxx.xxx.xxx.xxx:9092
    ########### [Producer configuration] ###########
    producer:
      # Kafka transactional messages
      # transaction-id-prefix: kafka_tx.
      # enable:
      #   idempotence: true
      # Number of retries
      retries: 3
      # Ack level: how many partition replicas must confirm a write before the producer gets an ack (0, 1, or all/-1)
      # acks: 1
      # Batch size in bytes
      batch-size: 16384
      # Send delay: a batch is sent once batch-size bytes have accumulated or linger.ms has elapsed;
      # with linger.ms = 0 every record is sent as soon as it arrives, so batch-size is effectively unused
      properties:
        linger:
          ms: 0
      # Producer buffer size in bytes
      buffer-memory: 33554432
      # Serializer classes provided by Kafka
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    ########### [Consumer configuration] ###########
    consumer:
      properties:
        # Default consumer group ID
        group:
          id: consumerGroup
        # Session timeout: if the consumer sends no heartbeat within this window, a rebalance is triggered
        session:
          timeout:
            ms: 120000
        # Request timeout
        request:
          timeout:
            ms: 180000
      # Whether to auto-commit offsets
      enable-auto-commit: false
      # Auto-commit interval (how long after a message is received its offset is committed)
      auto:
        commit:
          interval:
            ms: 1000
      # What to do when Kafka has no initial offset or the stored offset is out of range:
      # earliest: reset to the smallest offset in the partition
      # latest: reset to the newest offset (only consume data produced from now on)
      # none: throw an exception if any partition has no committed offset
      auto-offset-reset: latest
      # Deserializer classes provided by Kafka
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Maximum number of records fetched per poll (batch consumption)
      # max-poll-records: 50
    listener:
      # If true, the application fails to start when a listened-to topic does not exist; set false to disable the check
      missing-topics-fatal: true
  outkafka:
    ########### [Kafka cluster] ###########
    bootstrap-servers: xxx.xxx.xxx.xxx:9093
    ########### [Producer configuration] ###########
    producer:
      # Kafka transactional messages
      # transaction-id-prefix: kafka_tx.
      # enable:
      #   idempotence: true
      # Number of retries
      retries: 3
      # Ack level: how many partition replicas must confirm a write before the producer gets an ack (0, 1, or all/-1)
      # acks: 1
      # Batch size in bytes
      batch-size: 16384
      # Send delay: a batch is sent once batch-size bytes have accumulated or linger.ms has elapsed;
      # with linger.ms = 0 every record is sent as soon as it arrives, so batch-size is effectively unused
      properties:
        linger:
          ms: 0
      # Producer buffer size in bytes
      buffer-memory: 33554432
      # Serializer classes provided by Kafka
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    ########### [Consumer configuration] ###########
    consumer:
      properties:
        # Default consumer group ID
        group:
          id: outConsumerGroup
        # Session timeout: if the consumer sends no heartbeat within this window, a rebalance is triggered
        session:
          timeout:
            ms: 120000
        # Request timeout
        request:
          timeout:
            ms: 180000
      # Whether to auto-commit offsets
      enable-auto-commit: false
      # Auto-commit interval (how long after a message is received its offset is committed)
      auto:
        commit:
          interval:
            ms: 1000
      # What to do when Kafka has no initial offset or the stored offset is out of range:
      # earliest: reset to the smallest offset in the partition
      # latest: reset to the newest offset (only consume data produced from now on)
      # none: throw an exception if any partition has no committed offset
      auto-offset-reset: latest
      # Deserializer classes provided by Kafka
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Maximum number of records fetched per poll (batch consumption)
      # max-poll-records: 50
    listener:
      # If true, the application fails to start when a listened-to topic does not exist; set false to disable the check
      missing-topics-fatal: true
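
Note that spring.outkafka.* is a custom prefix: Spring Boot's auto-configuration only binds spring.kafka.*, so the second cluster's settings must be read by hand, which step 3 does with @Value. As a hedged alternative sketch (class and field names here are illustrative, not from the original code), the same block could be bound with @ConfigurationProperties:

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

// Illustrative alternative to the @Value fields in step 3: relaxed binding maps
// spring.outkafka.bootstrap-servers onto bootstrapServers, and so on.
@Component
@ConfigurationProperties(prefix = "spring.outkafka")
public class OutKafkaProperties {
    private String bootstrapServers;

    public String getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(String bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }
}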

3. KafkaConfig: initialize the configuration for both Kafka clusters.
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@Configuration
@EnableKafka
public class KafkaConfig {
    // Consumer settings for the first (inner) cluster
    @Value("${spring.kafka.bootstrap-servers}")
    private String innerServers;
    @Value("${spring.kafka.consumer.properties.group.id}")
    private String innerGroupid;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String innerEnableAutoCommit;
    @Value("${spring.kafka.consumer.auto.commit.interval.ms}")
    private String innerIntervalMs;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String innerOffsetReset;

    // Producer settings for the first cluster
    @Value("${spring.kafka.producer.retries}")
    private String retries;
    @Value("${spring.kafka.producer.batch-size}")
    private String batchSize;
    @Value("${spring.kafka.producer.buffer-memory}")
    private String bufferMemory;

    /** Consumer config for the first cluster */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, innerServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, innerGroupid);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, innerEnableAutoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, innerIntervalMs);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, innerOffsetReset);
        props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }


    @Bean("kafkaListenerContainerFactory")//理解为默认优先选择当前容器下的消费者工厂
    @Primary
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    @Bean // consumer factory for the first cluster
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }


    /**
     * Producer config for the first cluster
     */
    private Map<String, Object> senderProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, innerServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    @Bean // producer factory for the first cluster
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProps());
    }

    @Bean // KafkaTemplate for sending to the first cluster
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    // Consumer settings for the second (out) cluster
    @Value("${spring.outkafka.bootstrap-servers}")
    private String outServers;
    @Value("${spring.outkafka.consumer.properties.group.id}")
    private String outGroupid;
    @Value("${spring.outkafka.consumer.enable-auto-commit}")
    private String outEnableAutoCommit;
    @Value("${spring.outkafka.consumer.auto.commit.interval.ms}")
    private String outIntervalMs;
    @Value("${spring.outkafka.consumer.auto-offset-reset}")
    private String outOffsetReset;
    // Producer settings for the second cluster
    @Value("${spring.outkafka.producer.retries}")
    private String outRetries;
    @Value("${spring.outkafka.producer.batch-size}")
    private String outBatchSize;
    @Value("${spring.outkafka.producer.buffer-memory}")
    private String outBufferMemory;

    /**
     * Consumer config for the second cluster
     */
    public Map<String, Object> consumerConfigsOutSchedule() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, outServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, outGroupid);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, outEnableAutoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, outIntervalMs);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, outOffsetReset);
        props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactoryOutSchedule() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigsOutSchedule());
    }

    /**
     * Listener container factory for the second cluster
     */
    @Bean("kafkaListenerContainerFactoryOutSchedule")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactoryOutSchedule() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactoryOutSchedule());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /** Producer config for the second cluster */
    private Map<String, Object> senderOutProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, outServers);
        props.put(ProducerConfig.RETRIES_CONFIG, outRetries);
        props.put(ProducerConfig.ACKS_CONFIG, "1"); 
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, outBatchSize);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, outBufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    @Bean // producer factory for the second cluster
    public ProducerFactory<String, String> producerOutFactory() {
        return new DefaultKafkaProducerFactory<>(senderOutProps());
    }

    @Bean // KafkaTemplate for sending to the second cluster
    public KafkaTemplate<String, String> kafkaOutTemplate() {
        return new KafkaTemplate<>(producerOutFactory());
    }
}
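
Since enable-auto-commit is false in the YAML, offset commits are handled by the listener container; the default BATCH ack mode commits after the records from each poll have been processed. If explicit commits are preferred, a minimal sketch of switching the factories above to manual acknowledgment (an option, not what this article's code does; needs org.springframework.kafka.listener.ContainerProperties and org.springframework.kafka.support.Acknowledgment):

// Inside kafkaListenerContainerFactory() / kafkaListenerContainerFactoryOutSchedule():
factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);

// A listener then receives an Acknowledgment and commits on its own:
@KafkaListener(topics = "topic1", containerFactory = "kafkaListenerContainerFactory")
public void onMessage(ConsumerRecord<?, ?> record, Acknowledgment ack) {
    // process the record ...
    ack.acknowledge(); // commit the offset only after processing succeeds
}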

4. KafkaConsumer: consume from the first cluster and forward to the second.
import java.util.List;

import javax.annotation.Resource;

import cn.hutool.json.JSONUtil; // assumed: JSONUtil.isJsonObj matches Hutool's cn.hutool.json.JSONUtil
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ConsumerAwareListenerErrorHandler;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class KafkaConsumer {
    @Resource(name = "kafkaOutTemplate")
    private KafkaTemplate<String, String> kafkaOutTemplate; // <String, String> to match the kafkaOutTemplate bean

    // Error handler for listener exceptions
    @Bean
    public ConsumerAwareListenerErrorHandler consumerAwareErrorHandler() {
        return (message, exception, consumer) -> {
            log.info("consume error, payload: {}", message.getPayload());
            log.info("consume error, exception: {}", exception.getMessage());
            return null;
        };
    }

    // Listener on the first cluster: transform each message and forward it to the second cluster
    @KafkaListener(topics = {"topic1", "topic2"}, containerFactory = "kafkaListenerContainerFactory", errorHandler = "consumerAwareErrorHandler")
    public void onMessage(ConsumerRecord<?, ?> record) {
        switch (record.topic()) {
            case "topic1":
                log.info("consuming: {}-{}-{}", record.topic(), record.partition(), record.value());
                String freezeJsonStr = String.valueOf(record.value());
                if (JSONUtil.isJsonObj(freezeJsonStr)) {
                    List<String> jsonObjs = SendToKafkaEnum.OUT_TOPIC1.dataDeal(freezeJsonStr);
                    jsonObjs.forEach(t -> kafkaOutTemplate.send(SendToKafkaEnum.OUT_TOPIC1.topicName, t));
                } else {
                    log.info("not a JSON object, value: {}", record.value());
                }
                break;
            case "topic2":
                log.info("consuming: {}-{}-{}", record.topic(), record.partition(), record.value());
                String curveJsonStr = String.valueOf(record.value());
                if (JSONUtil.isJsonObj(curveJsonStr)) {
                    List<String> jsonObjs = SendToKafkaEnum.OUT_TOPIC2.dataDeal(curveJsonStr);
                    jsonObjs.forEach(t -> kafkaOutTemplate.send(SendToKafkaEnum.OUT_TOPIC2.topicName, t));
                } else {
                    log.info("not a JSON object, value: {}", record.value());
                }
                break;
            default:
                break;
        }
    }

    @KafkaListener(topics = {"outtopic1","outtopic2"}, containerFactory = "kafkaListenerContainerFactoryOutSchedule", errorHandler = "consumerAwareErrorHandler")
    public void onMessage2(ConsumerRecord<?, ?> record) {
        log.info("简单消费2:{}", record.topic() + "-" + record.partition() + "-" + record.value());
    }
}
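
One caveat: kafkaOutTemplate.send(...) above is fire-and-forget, so a record that never reaches the second cluster is easy to miss. In spring-kafka 2.3, send() returns a ListenableFuture<SendResult<K, V>>; a minimal sketch of attaching a callback to surface forwarding failures:

// Make cross-cluster forwarding failures visible instead of silently dropping them.
kafkaOutTemplate.send(SendToKafkaEnum.OUT_TOPIC1.topicName, t)
        .addCallback(
                result -> log.debug("forwarded to {}", result.getRecordMetadata()),
                ex -> log.error("forwarding to {} failed", SendToKafkaEnum.OUT_TOPIC1.topicName, ex));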

5. SendToKafkaEnum: the data-processing enum.
import java.util.ArrayList;
import java.util.List;

// JSON.parseObject / JSONObject usage matches Alibaba fastjson; assumed to be the library used here
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

public enum SendToKafkaEnum {
    OUT_TOPIC1("outtopic1") {
        @Override
        public List<String> dataDeal(String sourceData) {
            return commonDeal(this, sourceData);
        }
    },
    OUT_TOPIC2("outtopic2") {
        @Override
        public List<String> dataDeal(String sourceData) {
            return commonDeal(this, sourceData);
        }
    };

    public abstract List<String> dataDeal(String sourceData);

    // Generic handling: route each record through the branch for the target topic.
    // NOTE: the original article elides the payload structure and the per-topic
    // transformations; this sketch assumes a top-level "records" array in the JSON.
    private static List<String> commonDeal(SendToKafkaEnum ste, String sourceData) {
        List<String> vos = new ArrayList<>();
        JSONObject jsonObject = JSON.parseObject(sourceData);
        JSONArray records = jsonObject.getJSONArray("records"); // assumed field name
        if (records == null) {
            return vos;
        }
        for (Object rec : records) {
            switch (ste) {
                case OUT_TOPIC1:
                    // transform rec into the outtopic1 format here
                    vos.add(rec.toString());
                    break;
                case OUT_TOPIC2:
                    // transform rec into the outtopic2 format here
                    vos.add(rec.toString());
                    break;
                default:
                    break;
            }
        }
        return vos;
    }

    // Target topic name on the second cluster; KafkaConsumer reads it directly
    // (package-private access, so both classes must share a package)
    final String topicName;

    SendToKafkaEnum(String topicName) {
        this.topicName = topicName;
    }
}
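
A quick usage example under the payload shape assumed in commonDeal (the records array is this sketch's assumption, not something the original article specifies):

// Hypothetical input matching the assumed structure.
String payload = "{\"records\":[{\"id\":1},{\"id\":2}]}";
List<String> out = SendToKafkaEnum.OUT_TOPIC1.dataDeal(payload);
out.forEach(System.out::println); // one transformed message per record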

Reposted from blog.csdn.net/u011445756/article/details/113923705