Kafka: Producing and Consuming POJO-Type Data

Requirements

For testing we need to generate test data and send it to Kafka, with the payload being a POJO. Below is an example of producing POJOs to Kafka and consuming them back.

Module organization

utils

BeanUtils: converts an Object to a byte[] and a byte[] back to an Object.

PropertiesUtils: utility class for loading Properties files.

DataUtils: helper used by OrderProducer to generate random test data (its implementation is not shown in this post).

encoder

ObjectEncoder: the Kafka value serializer. Implements the org.apache.kafka.common.serialization.Serializer interface and overrides its serialize method.

ObjectDecoder: the Kafka value deserializer. Implements the org.apache.kafka.common.serialization.Deserializer interface and overrides its deserialize method.

beans

OrderBean: the POJO for the test order data. The simplified version has only four fields: orderID, user, amount, and creatTime.

controler

OrderProducer: entry point that creates the Kafka producer and generates the data.

OrderConsumer: entry point that creates the Kafka consumer and consumes the data.

resources

producer.properties: the producer configuration file.

consumer.properties: the consumer configuration file.

Implementation

utils

BeanUtils

package com.zixuan.kafka.utils;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class BeanUtils {
    private BeanUtils(){}

    /**
     * Convert an object to a byte array.
     * @param obj the object to serialize; must implement Serializable
     * @return the serialized bytes, or null on failure
     */
    public static byte[] ObjectToBytes(Object obj){
        try (ByteArrayOutputStream bo = new ByteArrayOutputStream();
             ObjectOutputStream oo = new ObjectOutputStream(bo)) {
            oo.writeObject(obj);
            //flush the ObjectOutputStream's internal buffer before reading the
            //byte array, otherwise toByteArray() can return truncated data
            oo.flush();
            return bo.toByteArray();
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Convert a byte array back to an object.
     * @param bytes the serialized bytes
     * @return the deserialized object, or null on failure
     */
    public static Object BytesToObject(byte[] bytes){
        try (ByteArrayInputStream bi = new ByteArrayInputStream(bytes);
             ObjectInputStream oi = new ObjectInputStream(bi)) {
            return oi.readObject();
        } catch (IOException | ClassNotFoundException e) {
            e.printStackTrace();
            return null;
        }
    }
}
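
A quick round-trip sanity check for BeanUtils. The BeanUtilsTest class below is not part of the original project; it only verifies that an OrderBean survives serialization and deserialization:

package com.zixuan.kafka.utils;

import com.zixuan.kafka.bean.OrderBean;

public class BeanUtilsTest {
    public static void main(String[] args) {
        OrderBean in = new OrderBean(1, "alice", "9.99", System.currentTimeMillis());
        //serialize to bytes and back again
        byte[] bytes = BeanUtils.ObjectToBytes(in);
        OrderBean out = (OrderBean) BeanUtils.BytesToObject(bytes);
        //both lines should print the same order
        System.out.println(in);
        System.out.println(out);
    }
}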

PropertiesUtils

package com.zixuan.kafka.utils;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertiesUtils {

    //load the producer properties file and register the value serializer
    public Properties loadProducerProp(Properties props, String encoderClass){
        return loadProp(props, "producer.properties", "value.serializer", encoderClass);
    }

    //load the consumer properties file and register the value deserializer
    public Properties loadConsumerProp(Properties props, String decoderClass){
        return loadProp(props, "consumer.properties", "value.deserializer", decoderClass);
    }

    //load a properties file from the classpath and set the value (de)serializer class;
    //passing the concrete key avoids setting value.serializer on the consumer
    //and value.deserializer on the producer
    private Properties loadProp(Properties props, String propsName, String coderKey, String coderClass){
        InputStream fis = this.getClass().getClassLoader().getResourceAsStream(propsName);
        try {
            props.load(fis);
            props.put(coderKey, coderClass);
            fis.close();
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
        return props;
    }
}
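
A minimal usage sketch (the PropertiesUtilsTest class is hypothetical, assuming producer.properties is on the classpath; the printed entries should include the value.serializer set above):

package com.zixuan.kafka.utils;

import java.util.Properties;

public class PropertiesUtilsTest {
    public static void main(String[] args) {
        Properties props = new PropertiesUtils()
                .loadProducerProp(new Properties(), "com.zixuan.kafka.encoder.ObjectEncoder");
        //print every loaded key/value pair, including the injected value.serializer
        props.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}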

encoder

ObjectEncoder

package com.zixuan.kafka.encoder;

import com.zixuan.kafka.utils.BeanUtils;

import java.util.Map;

public class ObjectEncoder implements org.apache.kafka.common.serialization.Serializer<Object> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        //no configuration needed
    }

    @Override
    public byte[] serialize(String topic, Object data) {
        //Java-serialize the POJO into a byte array
        return BeanUtils.ObjectToBytes(data);
    }

    @Override
    public void close() {
        //nothing to clean up
    }
}

ObjectDecoder

package com.zixuan.kafka.encoder;

import com.zixuan.kafka.utils.BeanUtils;

import java.util.Map;

public class ObjectDecoder implements org.apache.kafka.common.serialization.Deserializer<Object> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        //no configuration needed
    }

    @Override
    public Object deserialize(String topic, byte[] data) {
        //rebuild the POJO from the byte array
        return BeanUtils.BytesToObject(data);
    }

    @Override
    public void close() {
        //nothing to clean up
    }
}
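
Kafka instantiates these two classes reflectively from the value.serializer / value.deserializer class names that PropertiesUtils injects into the configuration. The clients also accept serializer instances directly; a sketch of that variant (not used in this project; the class name is made up):

package com.zixuan.kafka.encoder;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class ExplicitSerializerExample {
    public static void main(String[] args) {
        //with this constructor overload the value.serializer property is unnecessary
        Properties props = new Properties();
        props.put("bootstrap.servers", "hd01:9092,hd02:9092,hd03:9092");
        KafkaProducer<String, Object> producer =
                new KafkaProducer<>(props, new StringSerializer(), new ObjectEncoder());
        producer.close();
    }
}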

beans

OrderBean

package com.zixuan.kafka.bean;

import java.io.Serializable;

public class OrderBean implements Serializable {
    //explicit serialVersionUID so the producer and consumer JVMs agree on the class version
    private static final long serialVersionUID = 1L;

    private int orderID;
    private String user;
    private String amount;
    private long creatTime;

    public OrderBean(){

    }

    public OrderBean(int orderID,String user,String amount,long creatTime){
        this.orderID=orderID;
        this.user=user;
        this.amount=amount;
        this.creatTime=creatTime;
    }

    @Override
    public String toString() {
        return "Order [orderID=" + orderID + ", user=" + user + ", amount=" + amount + ", creatTime=" + creatTime + "]";
    }


    public int getOrderID() {
        return orderID;
    }

    public void setOrderID(int orderID) {
        this.orderID = orderID;
    }

    public String getUser() {
        return user;
    }

    public void setUser(String user) {
        this.user = user;
    }

    public String getAmount() {
        return amount;
    }

    public void setAmount(String amount) {
        this.amount = amount;
    }

    public long getCreatTime() {
        return creatTime;
    }

    public void setCreatTime(long creatTime) {
        this.creatTime = creatTime;
    }


}

controler

OrderProducer

package com.zixuan.kafka.controler;

import com.zixuan.kafka.bean.OrderBean;
import com.zixuan.kafka.utils.DataUtils;
import com.zixuan.kafka.utils.PropertiesUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;

public class OrderProducer {
    public static void main(String[] args) {
        //create the Properties
        Properties props = new Properties();
        //load the properties file and register the value serializer
        props = new PropertiesUtils().loadProducerProp(props, "com.zixuan.kafka.encoder.ObjectEncoder");
        //create the Kafka producer from the Properties
        KafkaProducer<String, Object> kafkaProducer = new KafkaProducer<String, Object>(props);
        //send the test data (DataUtils generates random names, amounts, and timestamps)
        for (int i = 0; i < 100; i++) {
            kafkaProducer.send(new ProducerRecord<String, Object>("test-topic"
                    , new OrderBean(i, DataUtils.createName(), DataUtils.createDouble(100, "0.00"), DataUtils.createCurrentTimeMillis())));
        }
        //close the producer so buffered records are flushed before the JVM exits
        kafkaProducer.close();
    }
}
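
send is asynchronous: records sit in the client buffer until a batch is transmitted, which is why the close() call above matters. For visibility into delivery failures, each send can take a callback; a sketch (the OrderProducerWithCallback class is made up for illustration):

package com.zixuan.kafka.controler;

import com.zixuan.kafka.bean.OrderBean;
import com.zixuan.kafka.utils.PropertiesUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class OrderProducerWithCallback {
    public static void main(String[] args) {
        Properties props = new PropertiesUtils()
                .loadProducerProp(new Properties(), "com.zixuan.kafka.encoder.ObjectEncoder");
        KafkaProducer<String, Object> producer = new KafkaProducer<>(props);
        OrderBean order = new OrderBean(1, "test-user", "9.99", System.currentTimeMillis());
        //the callback runs once the broker acknowledges the record (or the send fails)
        producer.send(new ProducerRecord<>("test-topic", order), (metadata, exception) -> {
            if (exception != null) {
                exception.printStackTrace();
            } else {
                System.out.println("sent to partition " + metadata.partition()
                        + " at offset " + metadata.offset());
            }
        });
        producer.close();
    }
}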

OrderConsumer

package com.zixuan.kafka.controler;

import com.zixuan.kafka.utils.PropertiesUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class OrderConsumer {
    public static void main(String[] args) {
        //create the Properties
        Properties properties = new Properties();
        //load the properties file and register the value deserializer
        properties = new PropertiesUtils().loadConsumerProp(properties, "com.zixuan.kafka.encoder.ObjectDecoder");
        //create the Kafka consumer from the Properties
        KafkaConsumer<String, Object> consumer = new KafkaConsumer<String, Object>(properties);
        //subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("test-topic"));
        //poll and print the data
        while (true){
            ConsumerRecords<String, Object> records = consumer.poll(100);
            for (ConsumerRecord<String, Object> record : records) {
                System.out.println(record.value().toString());
            }
        }
    }
}
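
poll(long) has been deprecated since Kafka 2.0; on a 2.0-or-newer client the poll loop takes a Duration instead. A sketch of that variant (the OrderConsumerModern class is made up for illustration):

package com.zixuan.kafka.controler;

import com.zixuan.kafka.utils.PropertiesUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class OrderConsumerModern {
    public static void main(String[] args) {
        Properties properties = new PropertiesUtils()
                .loadConsumerProp(new Properties(), "com.zixuan.kafka.encoder.ObjectDecoder");
        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("test-topic"));
        while (true) {
            //Duration-based poll: wait up to 100 ms for records
            ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, Object> record : records) {
                System.out.println(record.value());
            }
        }
    }
}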

resources

producer.properties

#Kafka brokers
bootstrap.servers=hd01:9092,hd02:9092,hd03:9092
#ZooKeeper quorum (not used by the new producer API; the client only needs bootstrap.servers)
zookeeper.connect=hd01:2181,hd02:2181,hd03:2181
#acks: -1 (all) waits for the full ISR to acknowledge
acks=-1
#key serializer (the key is a String here)
key.serializer=org.apache.kafka.common.serialization.StringSerializer
#value serializer (set programmatically by PropertiesUtils)
#value.serializer=
#number of retries on failure
retries=3
#batch size: records sent to the same partition are merged into one batch
batch.size=16384
#buffer size: when records are produced faster than they can be sent, they are buffered here
buffer.memory=33554432
#compression type used for data in transit
compression.type=lz4
#maximum time from calling send() to receiving the ack, i.e. time until the data is
#actually sent + time spent on retries + time waiting for the response;
#should therefore be at least request.timeout.ms + linger.ms
delivery.timeout.ms=120000
#wait this long to group records into one batch before sending; the default is 0,
#raising it reduces the number of send requests
linger.ms=0
#maximum time to wait for a response after sending; a timeout counts as a failure;
#should be greater than replica.lag.time.max.ms (a broker-side setting)
request.timeout.ms=30000
#TCP send buffer size; -1 uses the OS default
send.buffer.bytes=131072

consumer.properties

#Kafka brokers
bootstrap.servers=hd01:9092,hd02:9092,hd03:9092
#ZooKeeper quorum (not used by the new consumer API; the client only needs bootstrap.servers)
zookeeper.connect=hd01:2181,hd02:2181,hd03:2181
#consumer group id
group.id=test
#key deserializer
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
#value deserializer (set programmatically by PropertiesUtils)
#value.deserializer=
#minimum amount of data per fetch; the default is 1, i.e. fetch as soon as a single
#byte is available, otherwise wait until at least this many bytes have accumulated
fetch.min.bytes=1
#heartbeat interval within the consumer group; must be lower than session.timeout.ms
heartbeat.interval.ms=3000
#session timeout: if no heartbeat arrives within this window, the consumer is removed
#from the group; must fall between the broker's group.min.session.timeout.ms and
#group.max.session.timeout.ms
session.timeout.ms=10000
#maximum amount of data fetched per partition per request
max.partition.fetch.bytes=1048576
#allow topics to be auto-created
allow.auto.create.topics=true
#where to start reading when there is no committed offset: latest, earliest, or none
auto.offset.reset=earliest
#commit offsets automatically
enable.auto.commit=true
#auto-commit interval in milliseconds
auto.commit.interval.ms=5000
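
With enable.auto.commit=true, offsets are committed every auto.commit.interval.ms whether or not processing succeeded, so a crash can skip records. For at-least-once processing, auto-commit can be disabled and offsets committed after each batch; a sketch (the ManualCommitConsumer class is made up for illustration):

package com.zixuan.kafka.controler;

import com.zixuan.kafka.utils.PropertiesUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumer {
    public static void main(String[] args) {
        Properties properties = new PropertiesUtils()
                .loadConsumerProp(new Properties(), "com.zixuan.kafka.encoder.ObjectDecoder");
        //override the file setting: commit offsets manually
        properties.put("enable.auto.commit", "false");
        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("test-topic"));
        while (true) {
            ConsumerRecords<String, Object> records = consumer.poll(100);
            for (ConsumerRecord<String, Object> record : records) {
                System.out.println(record.value());
            }
            //commit only after the whole batch has been processed
            consumer.commitSync();
        }
    }
}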

Reprinted from blog.csdn.net/x950913/article/details/107372294