The usual pipeline is Flume -> Kafka -> Spark Streaming. If you want to feed data from Flume directly into Spark Streaming instead, there are two approaches, shown below.

Method 1: push-based. Flume pushes events through an avro sink to a receiver that Spark Streaming creates with FlumeUtils.createStream. The program is as follows:
package cn.lijie

import org.apache.log4j.Level
import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
  * User: lijie
  * Date: 2017/8/3
  * Time: 15:19
  */
object Flume2SparkStreaming01 {

  // Update function for updateStateByKey: for each key, add the counts collected
  // in the current batch (Seq[Int]) to the previously stored total (Option[Int]).
  def myFunc = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
    it.map(x => {
      (x._1, x._2.sum + x._3.getOrElse(0))
    })
  }

  def main(args: Array[String]): Unit = {
    MyLog.setLogLevel(Level.ERROR)
    val conf = new SparkConf().setAppName("fs01").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(10))
    // Push-based receiver: Spark listens on 10.1.9.102:6666 for events sent by
    // Flume's avro sink.
    val ds = FlumeUtils.createStream(ssc, "10.1.9.102", 6666)
    // Checkpoint directory for the state maintained by updateStateByKey.
    sc.setCheckpointDir("C:\\Users\\Administrator\\Desktop\\checkpoint")
    val res = ds.flatMap(x => {
      new String(x.event.getBody.array()).split(" ")
    }).map((_, 1)).updateStateByKey(myFunc, new HashPartitioner(sc.defaultParallelism), true)
    res.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
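updateStateByKey drives the running word count: for every key it hands myFunc the counts collected in the current batch together with the previously stored total, and keeps whatever myFunc returns as the new state. A minimal sketch of that contract on hard-coded data (the key and values are made up for illustration, not from the original post):

object MyFuncDemo {
  def main(args: Array[String]): Unit = {
    // One key, three new occurrences in this batch, and a previous total of 4.
    val perKey: Iterator[(String, Seq[Int], Option[Int])] =
      Iterator(("hello", Seq(1, 1, 1), Some(4)))
    val updated = perKey.map { case (word, newCounts, prevTotal) =>
      (word, newCounts.sum + prevTotal.getOrElse(0))
    }
    println(updated.toList) // List((hello,7))
  }
}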
The Flume configuration is as follows:
# Agent name and the names of its source, channel and sink
a1.sources = r1
a1.channels = c1
a1.sinks = k1
# Source: spooling-directory source watching /home/hadoop/monitor
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /home/hadoop/monitor
# Channel: in-memory channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 100
# Sink: avro sink that pushes events to the Spark Streaming receiver
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = 10.1.9.102
a1.sinks.k1.port = 6666
# Wire source, channel and sink together
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Start Flume:
/usr/java/flume/bin/flume-ng agent -n a1 -c conf -f /usr/java/flume/mytest/push.properties
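To feed the job some input, put a finished text file into the spooling directory /home/hadoop/monitor; the spooldir source only processes files that are complete and are not modified afterwards. A small sketch (the file name and contents are made up for illustration):

import java.nio.file.{Files, Paths}

// Hypothetical helper, not part of the original post: write the file elsewhere
// first, then move it into the spooling directory so Flume never reads a
// half-written file.
object FeedSpoolDir {
  def main(args: Array[String]): Unit = {
    val staging = Paths.get("/home/hadoop/words.txt")
    Files.write(staging, "hello spark hello flume".getBytes("UTF-8"))
    Files.move(staging, Paths.get("/home/hadoop/monitor/words.txt"))
  }
}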
Result: the running word counts are printed to the console every 10 seconds.
Method 2: pull-based. Flume buffers events in a custom sink provided by Spark, and Spark Streaming pulls them with FlumeUtils.createPollingStream. This approach requires putting an official Spark jar into the lib directory of the Flume installation; see the Spark Streaming + Flume integration guide. For the versions used here that jar is spark-streaming-flume-sink_2.10 (1.6.1); the guide also lists scala-library and commons-lang3 in case they are not already on Flume's classpath.

The program is as follows:
package cn.lijie

import java.net.InetSocketAddress

import org.apache.log4j.Level
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
  * User: lijie
  * Date: 2017/8/3
  * Time: 15:19
  */
object Flume2SparkStreaming02 {

  // Same update function as in method 1: per key, add this batch's counts to the
  // previously accumulated total.
  def myFunc = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
    it.map(x => {
      (x._1, x._2.sum + x._3.getOrElse(0))
    })
  }

  def main(args: Array[String]): Unit = {
    MyLog.setLogLevel(Level.WARN)
    val conf = new SparkConf().setAppName("fs01").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(10))
    // Pull-based stream: Spark polls the SparkSink that Flume exposes on
    // 192.168.80.123:10086; more addresses can be added for multiple agents.
    val addrs = Seq(new InetSocketAddress("192.168.80.123", 10086))
    val ds = FlumeUtils.createPollingStream(ssc, addrs, StorageLevel.MEMORY_AND_DISK_2)
    // Checkpoint directory for the state maintained by updateStateByKey.
    sc.setCheckpointDir("C:\\Users\\Administrator\\Desktop\\checkpointt")
    val res = ds.flatMap(x => {
      new String(x.event.getBody.array()).split(" ")
    }).map((_, 1)).updateStateByKey(myFunc, new HashPartitioner(sc.defaultParallelism), true)
    res.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
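Both programs set a checkpoint directory only so that updateStateByKey has somewhere to persist its running totals. If the application itself should also pick that state up again after a restart, the usual pattern is StreamingContext.getOrCreate. A rough sketch under the same paths as above, not part of the original post (the stream definition from Flume2SparkStreaming02 would have to move inside createContext so it can be restored from the checkpoint):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object Flume2SparkStreamingRecoverable {
  val checkpointDir = "C:\\Users\\Administrator\\Desktop\\checkpointt"

  def createContext(): StreamingContext = {
    val conf = new SparkConf().setAppName("fs02").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(10))
    ssc.checkpoint(checkpointDir)
    // ... define the polling Flume stream and the updateStateByKey pipeline here,
    // exactly as in Flume2SparkStreaming02 ...
    ssc
  }

  def main(args: Array[String]): Unit = {
    // Rebuild the context from the checkpoint if one exists, otherwise create it fresh.
    val ssc = StreamingContext.getOrCreate(checkpointDir, createContext _)
    ssc.start()
    ssc.awaitTermination()
  }
}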
The Flume configuration is as follows:
# Agent name and the names of its source, channel and sink
a1.sources = r1
a1.channels = c1
a1.sinks = k1
# Source: spooling-directory source watching /home/hadoop/monitor
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /home/hadoop/monitor
# Channel: in-memory channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 100
# Sink: Spark's custom sink; Flume buffers events here and Spark Streaming pulls them
a1.sinks.k1.type = org.apache.spark.streaming.flume.sink.SparkSink
a1.sinks.k1.hostname = 192.168.80.123
a1.sinks.k1.port = 10086
# Wire source, channel and sink together
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Start Flume:
/usr/java/flume/bin/flume-ng agent -n a1 -c conf -f /usr/java/flume/mytest/push.properties
Result: as with method 1, the running word counts are printed every 10 seconds.

Shared code:

The MyLog object:
package cn.lijie

import org.apache.log4j.{Level, Logger}
import org.apache.spark.Logging

/**
  * User: lijie
  * Date: 2017/8/3
  * Time: 15:36
  */
object MyLog extends Logging {

  /**
    * Set the root log level, but only if no log4j appenders have been configured
    * yet (i.e. we are running with Spark's default logging setup).
    *
    * @param level the log4j level to apply to the root logger
    */
  def setLogLevel(level: Level): Unit = {
    val flag = Logger.getRootLogger.getAllAppenders.hasMoreElements
    if (!flag) {
      logInfo("set log level ->" + level)
      Logger.getRootLogger.setLevel(level)
    }
  }
}
The POM file:
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>flume-sparkstreaming</groupId>
    <artifactId>flume-sparkstreaming</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>1.7</maven.compiler.source>
        <maven.compiler.target>1.7</maven.compiler.target>
        <encoding>UTF-8</encoding>
        <scala.version>2.10.6</scala.version>
        <spark.version>1.6.1</spark.version>
        <hadoop.version>2.6.4</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.10</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-flume_2.10</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
        </dependency>
    </dependencies>

    <build>
        <sourceDirectory>src/main/scala</sourceDirectory>
        <testSourceDirectory>src/test/scala</testSourceDirectory>
        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                        <configuration>
                            <args>
                                <arg>-dependencyfile</arg>
                                <arg>${project.build.directory}/.scala_dependencies</arg>
                            </args>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>cn.lijie.Flume2SparkStreaming01</mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
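With this POM, the shade plugin produces a fat jar during the package phase, with cn.lijie.Flume2SparkStreaming01 as the manifest main class. For example (command lines are illustrative; adjust paths and the main class to the program you want to run):

mvn clean package
spark-submit --class cn.lijie.Flume2SparkStreaming01 --master local[2] target/flume-sparkstreaming-1.0-SNAPSHOT.jar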