# Flume agent configuration: httpSource -> kafkaChannel -> hdfsSink

tier1.sources = httpSource
tier1.channels = kafkaChannel
tier1.sinks = hdfsSink

tier1.sources.httpSource.channels = kafkaChannel
# NOTE: sinks use the singular "channel" property (sources use "channels")
tier1.sinks.hdfsSink.channel = kafkaChannel


#--------httpSource--------
tier1.sources.httpSource.type = org.apache.flume.source.http.HTTPSource
tier1.sources.httpSource.port = 9999
tier1.sources.httpSource.channels = kafkaChannel
tier1.sources.httpSource.handler = com.bolo.flume.http.BoloJSONHandler

#--------kafkaChannel---------
tier1.channels.kafkaChannel.type = org.apache.flume.channel.kafka.KafkaChannel
tier1.channels.kafkaChannel.capacity = 10000
tier1.channels.kafkaChannel.transactionCapacity = 1000
tier1.channels.kafkaChannel.brokerList=localhost:9092
tier1.channels.kafkaChannel.topic=TOPIC_USER_BEHAVIOR_NEW
tier1.channels.kafkaChannel.zookeeperConnect = localhost:2181
tier1.channels.kafkaChannel.groupId    = test


#--------- hdfsSink configuration ---------
tier1.sinks.hdfsSink.type = org.apache.flume.sink.hdfs.HDFSEventSink
tier1.sinks.hdfsSink.hdfs.path = /data/suiyue/data-ods/client-event/%Y%m%d
tier1.sinks.hdfsSink.hdfs.writeFormat = Text
tier1.sinks.hdfsSink.hdfs.fileType = DataStream
tier1.sinks.hdfsSink.hdfs.rollSize = 33554432
tier1.sinks.hdfsSink.hdfs.rollCount = 0
tier1.sinks.hdfsSink.hdfs.rollInterval = 30
tier1.sinks.hdfsSink.hdfs.batchSize=1000

# (tags: log collection, big data, flume)