Integrating Flume with Kafka

An end-to-end example of chaining two Flume agents: one tails a log file and ships events over Avro, the other receives them and writes them to a Kafka topic (exec source -> memory channel -> avro sink => avro source -> memory channel -> Kafka sink).


The first agent listens for Avro events and writes them to a Kafka topic.

avro-memory-kafka.conf
avro-memory-kafka.sources = avro-source
avro-memory-kafka.sinks = kafka-sink
avro-memory-kafka.channels = memory-channel


avro-memory-kafka.sources.avro-source.type = avro
avro-memory-kafka.sources.avro-source.bind = node1.oracle.com
avro-memory-kafka.sources.avro-source.port = 44444


# Flume 1.6-era KafkaSink property names (newer releases use kafka.bootstrap.servers / kafka.topic).
# rollInterval and serializer(.appendNewline) are HDFS-sink settings and are ignored by KafkaSink, so they are omitted.
avro-memory-kafka.sinks.kafka-sink.type = org.apache.flume.sink.kafka.KafkaSink
avro-memory-kafka.sinks.kafka-sink.brokerList = node1.oracle.com:9092
avro-memory-kafka.sinks.kafka-sink.topic = flume-kafka
avro-memory-kafka.sinks.kafka-sink.batchSize = 100
avro-memory-kafka.sinks.kafka-sink.requiredAcks = 1


# transactionCapacity must not exceed capacity
avro-memory-kafka.channels.memory-channel.type = memory
avro-memory-kafka.channels.memory-channel.capacity = 1000
avro-memory-kafka.channels.memory-channel.transactionCapacity = 1000




avro-memory-kafka.sources.avro-source.channels = memory-channel
avro-memory-kafka.sinks.kafka-sink.channel = memory-channel

The second agent tails the application log with an exec source and forwards each line over Avro to the first agent.

exec-memory-avro.conf
exec-memory-avro.sources = exec-source
exec-memory-avro.sinks = avro-sink
exec-memory-avro.channels = memory-channel


exec-memory-avro.sources.exec-source.type = exec
exec-memory-avro.sources.exec-source.command = tail -F /home/hadoop/data/data.log
exec-memory-avro.sources.exec-source.shell = /bin/sh -c


# hostname/port must match the bind address of the avro-memory-kafka source
exec-memory-avro.sinks.avro-sink.type = avro
exec-memory-avro.sinks.avro-sink.hostname = node1.oracle.com
exec-memory-avro.sinks.avro-sink.port = 44444


# capacity and transactionCapacity are left at their defaults (100 each)
exec-memory-avro.channels.memory-channel.type = memory


exec-memory-avro.sources.exec-source.channels = memory-channel
exec-memory-avro.sinks.avro-sink.channel = memory-channel




 


Start the avro-memory-kafka agent first, so its Avro source is already listening when the second agent's Avro sink connects:

flume-ng agent \
--name avro-memory-kafka \
--conf $FLUME_HOME/conf \
--conf-file $FLUME_HOME/conf/avro-memory-kafka.conf \
-Dflume.root.logger=INFO,console
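
As an optional sanity check, Flume's built-in Avro client can send a file's lines straight to the listening source; the file path here is just an illustrative placeholder:

echo "avro client test" > /tmp/avro-test.txt
flume-ng avro-client \
--host node1.oracle.com \
--port 44444 \
--filename /tmp/avro-test.txt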




Then start the exec-memory-avro agent in another terminal:

flume-ng agent \
--name exec-memory-avro \
--conf $FLUME_HOME/conf \
--conf-file $FLUME_HOME/conf/exec-memory-avro.conf \
-Dflume.root.logger=INFO,console
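
To confirm the Avro source came up, check that port 44444 is listening (this assumes netstat is installed; ss -tnlp works the same way):

netstat -tnlp | grep 44444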




Create the log file /home/hadoop/data/data.log that the exec source tails (an empty file is fine to start with).
In the hadoop user's home directory, create a script kafkaoutput.sh (be sure to make it executable) that appends numbered kafka_test-* lines to data.log:


#!/bin/bash
# Append 101 numbered test lines to the file tailed by the exec source.
for ((i=0; i<=100; i++)); do
  echo "kafka_test-$i" >> /home/hadoop/data/data.log
done
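
Make the script executable and run it:

chmod +x ~/kafkaoutput.sh
~/kafkaoutput.sh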






Create the topic:
bin/kafka-topics.sh --zookeeper node1.oracle.com:2181/kafka0.9 --create --topic flume-kafka --partitions 3 --replication-factor 1
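
To double-check which topics exist on this cluster:

bin/kafka-topics.sh --zookeeper node1.oracle.com:2181/kafka0.9 --list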


Describe it to verify the partition count and replication factor:
bin/kafka-topics.sh --zookeeper node1.oracle.com:2181/kafka0.9 --describe --topic flume-kafka


A console producer, handy for testing the topic directly (optional here, since Flume is the producer):
bin/kafka-console-producer.sh --topic flume-kafka --broker-list node1.oracle.com:9092


And a console consumer to watch what arrives on the topic:
bin/kafka-console-consumer.sh --zookeeper node1.oracle.com:2181/kafka0.9 --topic flume-kafka
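
To also replay messages produced before the consumer started, add --from-beginning:

bin/kafka-console-consumer.sh --zookeeper node1.oracle.com:2181/kafka0.9 --topic flume-kafka --from-beginning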




With both agents and the consumer running, append a few lines to data.log and watch them flow through the whole pipeline:

[hadoop@node1 data]$ echo hello spark >> data.log
[hadoop@node1 data]$ echo hello mysql >> data.log
[hadoop@node1 data]$ echo hello oracle >> data.log
[hadoop@node1 data]$ echo hello hbase >> data.log
[hadoop@node1 data]$ echo hello hive >> data.log
[hadoop@node1 data]$ echo hello zookeeper >> data.log




The console consumer prints each line as it arrives:

[hadoop@node1 kafka_2.11-0.9.0.0]$ bin/kafka-console-consumer.sh --zookeeper node1.oracle.com:2181/kafka0.9 --topic flume-kafka
hello spark
hello mysql
hello oracle
hello hbase
hello hive
hello zookeeper


For reference, jps shows everything running: the two Flume agents appear as Application, alongside Kafka, ZooKeeper (QuorumPeerMain), the console producer and consumer, and the Hadoop daemons:

[hadoop@node1 data]$ jps
32417 Application
19522 SecondaryNameNode
19668 ResourceManager
34740 Jps
34661 ConsoleConsumer
19206 NameNode
19351 DataNode
29960 QuorumPeerMain
19769 NodeManager
34636 ConsoleProducer
33198 Application
34431 Kafka

Reposted from blog.csdn.net/wjl7813/article/details/80161159