Integrating Spark Streaming with Flume and MySQL (Flume push mode): saving data to MySQL in real time

The cluster is laid out as follows:

192.168.58.11	spark01
192.168.58.12	spark02
192.168.58.13	spark03
Spark version: spark-2.1.0-bin-hadoop2.7
Flume version: apache-flume-1.7.0-bin

The Flume configuration (conf/a4.conf) is as follows:

#Flume start command
#bin/flume-ng agent -n a4 -f conf/a4.conf -c conf -Dflume.root.logger=INFO,console

#Name the agent's source, channel and sink
a4.sources = r1
a4.channels = c1
a4.sinks = k1

#Define the source: a spooling directory source watching /opt/kevin/log
a4.sources.r1.type = spooldir
a4.sources.r1.spoolDir = /opt/kevin/log

#Define the channel
a4.channels.c1.type = memory
a4.channels.c1.capacity = 10000
a4.channels.c1.transactionCapacity = 100

#Define the sink: an avro sink that pushes events to the Spark Streaming receiver
a4.sinks.k1.type = avro
#Host and port on which the Spark Streaming receiver listens (see FlumeUtils.createStream below)
a4.sinks.k1.hostname = 192.168.58.11
a4.sinks.k1.port = 1234

#Wire the source and sink to the channel
a4.sources.r1.channels = c1
a4.sinks.k1.channel = c1
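
The spooldir source requires the monitored directory to exist before the agent starts, and it expects files to be complete once they appear there. A minimal sketch for preparing the directory and dropping in a test file (the file name is illustrative; the sample line follows the comma-separated format the Spark job below parses):

mkdir -p /opt/kevin/log
# Write the file elsewhere first, then move it in, so the spooldir source never sees a partially written file
echo "1,201.105.101.108,http://mystore.jsp/?productid=1,2017020029,2,1" > /tmp/access_001.log
mv /tmp/access_001.log /opt/kevin/log/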

The Spark Streaming program:

package com.kk.sparkstreaming.flume

import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.storage.StorageLevel
import org.apache.log4j.Logger
import org.apache.log4j.Level
import org.apache.spark.streaming.flume.FlumeUtils
import java.sql.Connection
import java.sql.PreparedStatement
import java.sql.DriverManager

object FlumePush {

  def main(args: Array[String]): Unit = {
    // Reduce log output
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Create the StreamingContext: StreamingContext(conf: SparkConf, batchDuration: Duration)
    val sparkConf = new SparkConf().setAppName("TestFlume")
    val streamConf = new StreamingContext(sparkConf, Seconds(3)) // 3-second batch interval

    // Push mode: Flume's avro sink pushes events to a receiver listening on this host and port
    val flumeStream = FlumeUtils.createStream(streamConf, "192.168.58.11", 1234, StorageLevel.DISK_ONLY)

    // Each element is one Flume event; extract its body as a String
    val data = flumeStream.map(e => new String(e.event.getBody.array()))

    val datas = data.map(line => {
      // Sample line: 1,201.105.101.108,http://mystore.jsp/?productid=1,2017020029,2,1
      val fields: Array[String] = line.split(",")
      val ip = fields(1)
      (ip, 1)
    })

    datas.print() // also print each batch to the console

    // Write each batch to MySQL. The JDBC connection is created inside
    // foreachPartition so it lives on the executor and never has to be serialized.
    datas.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        var conn: Connection = null
        var ps: PreparedStatement = null
        try {
          Class.forName("com.mysql.jdbc.Driver")
          conn = DriverManager.getConnection("jdbc:mysql://192.168.58.11:3306/storm?useUnicode=true&characterEncoding=utf8", "root", "kevin")
          ps = conn.prepareStatement("insert into result values(?,?)")
          partition.foreach(s => {
            ps.setString(1, s._1)
            ps.setInt(2, s._2)
            ps.executeUpdate()
          })
        } catch {
          case t: Throwable => t.printStackTrace()
        } finally {
          if (ps != null) ps.close()
          if (conn != null) conn.close()
        }
      })
    })
    streamConf.start()
    streamConf.awaitTermination()
  }
}
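
The insert statement above assumes a two-column result table in the storm database. A minimal sketch of the schema, purely as an assumption (column names and types are not given in the original setup):

-- Assumed schema for the target table; adjust names and types as needed
CREATE TABLE result (
  ip  VARCHAR(64),  -- second field of each log line
  cnt INT           -- always 1 per event in this job
);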

pom.xml configuration:


<properties>
	<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
	<spark.version>2.2.1</spark.version>
	<scala.version>2.11.1</scala.version>
</properties>

<dependencies>
	<!-- Scala -->
	<dependency>
		<groupId>org.scala-lang</groupId>
		<artifactId>scala-library</artifactId>
		<version>${scala.version}</version>
	</dependency>
	<dependency>
		<groupId>org.scala-lang</groupId>
		<artifactId>scala-compiler</artifactId>
		<version>${scala.version}</version>
	</dependency>
	<dependency>
		<groupId>org.scala-lang</groupId>
		<artifactId>scala-reflect</artifactId>
		<version>${scala.version}</version>
	</dependency>
	<!-- Spark -->
	<dependency>
		<groupId>org.apache.spark</groupId>
		<artifactId>spark-core_2.11</artifactId>
		<version>${spark.version}</version>
	</dependency>
	<dependency>
		<groupId>org.apache.spark</groupId>
		<artifactId>spark-streaming_2.11</artifactId>
		<version>${spark.version}</version>
	</dependency>
	<dependency>
		<groupId>org.apache.spark</groupId>
		<artifactId>spark-sql_2.11</artifactId>
		<version>${spark.version}</version>
	</dependency>
	<!-- Spark Streaming + Flume integration -->
	<dependency>
		<groupId>org.apache.spark</groupId>
		<artifactId>spark-streaming-flume_2.11</artifactId>
		<version>2.2.2</version>
	</dependency>
</dependencies>

Required jar: spark-streaming-flume_2.10-2.1.0.jar
To run on the cluster, copy spark-streaming-flume_2.10-2.1.0.jar into the jars directory of the Spark installation on every machine in the cluster; the MySQL JDBC driver jar is needed there as well.
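
The MySQL driver is not declared in the pom.xml above. A minimal sketch of the extra dependency (the version shown is only an example; any release that provides com.mysql.jdbc.Driver will do):

<!-- MySQL JDBC driver; version is an example -->
<dependency>
	<groupId>mysql</groupId>
	<artifactId>mysql-connector-java</artifactId>
	<version>5.1.47</version>
</dependency>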
Testing:
1. Start the Spark Streaming program first (a sample launch command is sketched after this list).
2. Start the Flume agent.
3. Copy log files into /opt/kevin/log.
Note: watch out for serialization (create the JDBC connection inside foreachPartition, as in the code above) and for the startup order (in push mode the receiver must be running before the Flume agent starts).
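
For step 1, a minimal launch sketch, assuming a standalone master on spark01:7077 and that the program was packaged as sparkstreaming-flume.jar (both the master URL and the jar path are assumptions):

bin/spark-submit \
  --master spark://spark01:7077 \
  --class com.kk.sparkstreaming.flume.FlumePush \
  /path/to/sparkstreaming-flume.jar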
