The topology consists of three agents: a1, which monitors the source file; a3, which receives the stream from a1's sink k2 and writes it to a local directory; and a2, which receives the stream from a1's sink k1 and uploads it to HDFS.
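The wiring, with component names and ports as defined in the three configs below:

Example4.txt --exec source--> a1 (replicating selector)
  |--> c1 --> k1 (avro, master:4141) --> a2 (avro source) --> HDFS sink --> hdfs://master:9000/flume1
  `--> c2 --> k2 (avro, master:4142) --> a3 (avro source) --> file_roll sink --> /usr/wang/data/flume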
1. In this example, Flume monitors the file /usr/wang/data/Example4.txt
2. Create a job/job1 folder at the same level as Flume's conf directory, and inside job1 create flume-file-flume.conf, flume-flume-dir.conf, and flume-flume-hdfs.conf
cd /usr/wang/flume
mkdir -p job/job1
cd job/job1
touch flume-file-flume.conf
touch flume-flume-dir.conf
touch flume-flume-hdfs.conf
vim flume-file-flume.conf
Copy the following into flume-file-flume.conf:
# Name the components on this agent
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2
# Send every event coming from the source to all channels
a1.sources.r1.selector.type = replicating
# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /usr/wang/data/Example4.txt
a1.sources.r1.shell = /bin/bash -c
# Describe the sinks
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = master
a1.sinks.k1.port = 4141
a1.sinks.k2.type = avro
a1.sinks.k2.hostname = master
a1.sinks.k2.port = 4142
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2
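As an aside, replicating is one of two built-in channel selectors; the multiplexing selector routes each event to a channel based on a header value instead of copying it everywhere. A minimal sketch, assuming a hypothetical "state" header set upstream (e.g. by an interceptor):
# hypothetical: route on the value of the "state" event header
a1.sources.r1.selector.type = multiplexing
a1.sources.r1.selector.header = state
a1.sources.r1.selector.mapping.local = c2
a1.sources.r1.selector.mapping.hdfs = c1
a1.sources.r1.selector.default = c1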
Copy the following into flume-flume-dir.conf:
# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c1
# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = master
a3.sources.r1.port = 4142
# Describe the sink
a3.sinks.k1.type = file_roll
a3.sinks.k1.sink.directory = /usr/wang/data/flume
# Use a channel which buffers events in memory
a3.channels.c1.type = memory
a3.channels.c1.capacity = 1000
a3.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a3.sources.r1.channels = c1
a3.sinks.k1.channel = c1
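Note that the file_roll sink rolls to a new file every 30 seconds by default, even when no events arrive, which leaves many small or empty files. To get fewer, larger files you can raise the interval; the 600 below is just an illustrative value:
a3.sinks.k1.sink.rollInterval = 600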
Copy the following into flume-flume-hdfs.conf:
# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1
# Describe/configure the source
a2.sources.r1.type = avro
a2.sources.r1.bind = master
a2.sources.r1.port = 4141
# Describe the sink
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = hdfs://master:9000/flume1/%Y%m%d/%H
# Prefix for files uploaded to HDFS
a2.sinks.k1.hdfs.filePrefix = flume1-
# Round down the timestamp used in the directory path
a2.sinks.k1.hdfs.round = true
# Create a new folder every roundValue time units
a2.sinks.k1.hdfs.roundValue = 1
# The time unit used for rounding
a2.sinks.k1.hdfs.roundUnit = hour
# Use the local timestamp rather than one from the event headers
a2.sinks.k1.hdfs.useLocalTimeStamp = true
# Number of events to accumulate before flushing to HDFS; keep this no larger than the channel's transactionCapacity (100 below), or takes will fail under load
a2.sinks.k1.hdfs.batchSize = 100
# File type; DataStream writes plain text (CompressedStream enables compression)
a2.sinks.k1.hdfs.fileType = DataStream
# Roll to a new file every 600 seconds
a2.sinks.k1.hdfs.rollInterval = 600
# Roll when a file reaches this size in bytes (just under a 128 MB block)
a2.sinks.k1.hdfs.rollSize = 134217700
# 0 disables rolling by event count
a2.sinks.k1.hdfs.rollCount = 0
# Minimum number of HDFS block replicas
a2.sinks.k1.hdfs.minBlockReplicas = 1
# Use a channel which buffers events in memory
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1
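If compressed output is wanted instead of plain text, the HDFS sink pairs fileType = CompressedStream with a codec; gzip is one of the codecs Flume supports:
a2.sinks.k1.hdfs.fileType = CompressedStream
a2.sinks.k1.hdfs.codeC = gzip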
1. Create the directory the local files will be written to (the file_roll sink does not create it automatically)
cd /usr/wang/data
mkdir flume
2. Before starting the agents, bring up the HDFS cluster
cd /usr/wang/hadoop/sbin
./start-dfs.sh
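Optionally, confirm the HDFS daemons are up before starting the agents; jps should list at least NameNode and DataNode (process IDs will differ on your machine):
jps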
3. Start the three agents, each in its own terminal; start the downstream agents a3 and a2 first, so that a1's Avro sinks have listeners to connect to
cd /usr/wang/flume
bin/flume-ng agent --conf conf/ --name a3 --conf-file /usr/wang/flume/job/job1/flume-flume-dir.conf
bin/flume-ng agent --conf conf/ --name a2 --conf-file /usr/wang/flume/job/job1/flume-flume-hdfs.conf
bin/flume-ng agent --conf conf/ --name a1 --conf-file /usr/wang/flume/job/job1/flume-file-flume.conf
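While testing, it helps to watch events and connection logs in the terminal; Flume's standard console-logger option can be appended to any of the three commands, e.g. for a1:
bin/flume-ng agent --conf conf/ --name a1 --conf-file /usr/wang/flume/job/job1/flume-file-flume.conf -Dflume.root.logger=INFO,console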
4. Write data to /usr/wang/data/Example4.txt
cd /usr/wang/data
echo "Hello" >> Example4.txt
5. Check that the directory and files were created on HDFS
/usr/wang/hadoop/bin/hdfs dfs -ls /
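The HDFS sink writes under the date/hour path from hdfs.path, and open files keep an in-use suffix (.tmp by default) until they roll; a recursive listing shows what actually landed, with date components matching your local time:
/usr/wang/hadoop/bin/hdfs dfs -ls -R /flume1
/usr/wang/hadoop/bin/hdfs dfs -cat /flume1/*/*/flume1-*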
6. Check the local directory
cd /usr/wang/data/flume
ll
7. Shut down
Press Ctrl+C in the terminal running each agent, or kill the process directly (a plain kill sends SIGTERM and lets Flume shut down cleanly; -9 is a last resort):
sudo kill -9 <PID>
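To find the process number, note that each flume-ng agent runs as a Java process whose command line includes org.apache.flume.node.Application and its --name, so something like this will list them:
ps -ef | grep org.apache.flume.node.Application | grep -v grep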