1. Prerequisites
Kafka ships with built-in Producer/Consumer command-line tools for talking to the Broker, but where message handling is concerned these are mainly meant for simple, server-side (Broker) operations, such as:
1. Start ZooKeeper: ./zkServer.sh start
2. Start Kafka: ./kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
3. Create a Topic
4. Produce/Consume tests against an existing Topic
Like other messaging systems, Kafka provides client APIs implemented in several languages, such as Java, Python, Ruby, and Go. These APIs make it far easier to work with a Kafka cluster; this article demonstrates only the Java API.
- kafka_2.11 installed on a local virtual machine
- JDK 1.8 installed locally
- IntelliJ IDEA
- Maven 3
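With the environment ready, add the Kafka client dependency to the project's pom.xml. The ${kafka.version} property is assumed to be declared elsewhere in the pom and must point to a release that still ships the old kafka.javaapi / kafka.consumer classes used below.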
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>${kafka.version}</version>
</dependency>
2. Configuration
public class KafkaProperties {

    public static final String ZK = "192.168.1.120:2181";
    public static final String TOPIC = "hello_topic";
    public static final String BROKER_LIST = "192.168.1.120:9092";
    public static final String GROUP_ID = "test_group1"; // any name you like
}
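Adjust ZK, BROKER_LIST and TOPIC to match your own environment (here the single virtual machine at 192.168.1.120 from the prerequisites); GROUP_ID can be any identifier you choose.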
3. Producer setup
import com.lu.spark.kafka.properties.KafkaProperties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

public class KafkaProducer extends Thread {

    private String topic;
    private Producer<Integer, String> producer;

    public KafkaProducer(String topic) {
        this.topic = topic; // topic is passed in by the caller

        // Producer configuration
        Properties properties = new Properties();
        properties.put("metadata.broker.list", KafkaProperties.BROKER_LIST); // Kafka broker host:port
        properties.put("serializer.class", "kafka.serializer.StringEncoder"); // message serializer
        properties.put("request.required.acks", "1"); // acknowledgement strategy
        /* 0: the producer does not wait for any acknowledgement from the broker before sending the
              next message (batch). Lowest latency but weakest durability: if the server fails
              (e.g. the leader dies without the producer noticing), messages already sent are lost.
           1: the producer sends the next message only after the leader has confirmed receipt.
              Better durability, since the client waits for the server to acknowledge the request
              (a message written only to a leader that dies before replication is still lost).
          -1: the producer considers a send complete only after the follower replicas have also
              confirmed receipt. Best durability: no message is lost as long as at least one
              in-sync replica stays alive.
           Across the three settings, producer throughput decreases and durability increases in that order. */
        producer = new Producer<Integer, String>(new ProducerConfig(properties));
    }

    @Override
    public void run() {
        int messageNo = 1;
        while (true) {
            String message = "message_" + messageNo;
            producer.send(new KeyedMessage<Integer, String>(topic, message)); // send one message
            System.out.println("Send:" + message);
            messageNo++;
            try {
                Thread.sleep(2000); // send a message every 2 seconds
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
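Because KafkaProducer extends Thread, the send loop in run() only begins once start() is called (see section 5). The producer therefore keeps publishing in the background without blocking the consumer that is started afterwards.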
4. Consumer setup
import com.lu.spark.kafka.properties.KafkaProperties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class KafkaConsumer extends Thread {

    private String topic;

    public KafkaConsumer(String topic) {
        this.topic = topic;
    }

    private ConsumerConnector createConnector() {
        // Consumer configuration
        Properties properties = new Properties();
        properties.put("zookeeper.connect", KafkaProperties.ZK);
        properties.put("group.id", KafkaProperties.GROUP_ID);
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));
    }

    @Override
    public void run() {
        ConsumerConnector consumerConnector = createConnector();

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 1);
        // topicCountMap.put(topic2, 1); in production there may be several topics
        // topicCountMap.put(topic3, 1); (see the sketch after this class)

        // The return type mirrors the signature of createMessageStreams:
        // String: the topic; List<KafkaStream<byte[], byte[]>>: the streams for that topic
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStream =
                consumerConnector.createMessageStreams(topicCountMap);

        KafkaStream<byte[], byte[]> stream = messageStream.get(topic).get(0); // the single stream requested above
        ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator(); // blocking iterator over incoming messages
        while (consumerIterator.hasNext()) {
            String message = new String(consumerIterator.next().message());
            System.out.println("receive: " + message);
        }
    }
}
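The commented-out topicCountMap lines above hint that a consumer often handles more than one topic or stream. As a minimal sketch (not part of the original code), the same high-level consumer API can also hand back several KafkaStreams for a single topic, each drained on its own thread; the class name MultiStreamConsumerSketch and the numStreams parameter below are made up for illustration.
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MultiStreamConsumerSketch {

    // connector would be built exactly as in createConnector() above;
    // numStreams is how many parallel streams to request for the topic.
    public static void consume(ConsumerConnector connector, String topic, int numStreams) {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, numStreams); // request numStreams streams instead of 1

        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams =
                connector.createMessageStreams(topicCountMap);

        // one worker thread per stream
        ExecutorService executor = Executors.newFixedThreadPool(numStreams);
        for (final KafkaStream<byte[], byte[]> stream : messageStreams.get(topic)) {
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    ConsumerIterator<byte[], byte[]> it = stream.iterator();
                    while (it.hasNext()) {
                        System.out.println("receive: " + new String(it.next().message()));
                    }
                }
            });
        }
    }
}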
5. Application entry point
import com.lu.spark.kafka.consumer.KafkaConsumer;
import com.lu.spark.kafka.producer.KafkaProducer;
import com.lu.spark.kafka.properties.KafkaProperties;

/**
 * Created by XQL on 2018/5/20.
 * Kafka Java API test
 */
public class KafkaClientApp {

    public static void main(String[] args) {
        // start the Producer
        KafkaProducer kafkaProducer = new KafkaProducer(KafkaProperties.TOPIC);
        kafkaProducer.start();

        // start the Consumer
        KafkaConsumer kafkaConsumer = new KafkaConsumer(KafkaProperties.TOPIC);
        kafkaConsumer.start();
    }
}
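With ZooKeeper, the broker, and the hello_topic topic already up, running KafkaClientApp should print a "Send:message_N" line roughly every two seconds from the producer thread, each followed by the matching "receive: message_N" line from the consumer thread.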