Flume Use Case: Reading a Local File into HDFS in Real Time
1. Create the flume-hdfs.conf file
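Create the file under the Flume job configuration directory; a minimal sketch, assuming the jobconf path that step 2 also uses (adjust to your own install):

mkdir -p /opt/module/flume1.8.0/jobconf
vim /opt/module/flume1.8.0/jobconf/flume-hdfs.conf

Give it the following contents: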
# 1. Agent components
a2.sources = r2
a2.sinks = k2
a2.channels = c2

# 2. Source: tail the local file in real time
a2.sources.r2.type = exec
a2.sources.r2.command = tail -F /opt/Andy
a2.sources.r2.shell = /bin/bash -c

# 3. Sink: write events to HDFS
a2.sinks.k2.type = hdfs
a2.sinks.k2.hdfs.path = hdfs://dtstack_hdfs:9000/flume/%Y%m%d/%H
# Prefix for the uploaded files
a2.sinks.k2.hdfs.filePrefix = logs-
# Whether to roll directories by time
a2.sinks.k2.hdfs.round = true
# Number of time units per new directory
a2.sinks.k2.hdfs.roundValue = 1
# Time unit used for rolling directories
a2.sinks.k2.hdfs.roundUnit = hour
# Whether to use the local timestamp
a2.sinks.k2.hdfs.useLocalTimeStamp = true
# Number of events to accumulate before flushing to HDFS
a2.sinks.k2.hdfs.batchSize = 1000
# File type; compression is supported
a2.sinks.k2.hdfs.fileType = DataStream
# How often to roll a new file, in seconds
a2.sinks.k2.hdfs.rollInterval = 600
# Roll size for each file, in bytes
a2.sinks.k2.hdfs.rollSize = 134217700
# Rolling is independent of the event count
a2.sinks.k2.hdfs.rollCount = 0
# Minimum number of block replicas
a2.sinks.k2.hdfs.minBlockReplicas = 1

# Use a channel which buffers events in memory
a2.channels.c2.type = memory
a2.channels.c2.capacity = 1000
a2.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r2.channels = c2
a2.sinks.k2.channel = c2
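The exec source simply runs tail -F, so once the agent is started (step 2) any line appended to /opt/Andy becomes an event. A minimal way to produce test input, using the file path from the source definition above:

echo "hello flume $(date)" >> /opt/Andy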
2. Run the monitoring configuration
/opt/module/flume1.8.0/bin/flume-ng agent \
--conf /opt/module/flume1.8.0/conf/ \
--name a2 \
--conf-file /opt/module/flume1.8.0/jobconf/flume-hdfs.conf
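Once the agent is running and lines are being appended to /opt/Andy, you can verify that events are landing in HDFS. A minimal check, assuming the date/hour-bucketed path from the sink configuration (files still being written carry the default .tmp suffix):

# List the %Y%m%d/%H buckets created by the sink
hdfs dfs -ls -R /flume/
# Inspect a rolled file (substitute a date/hour path from the listing)
hdfs dfs -cat /flume/<yyyyMMdd>/<HH>/logs-*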