pom:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.tzb.bigdata</groupId>
    <artifactId>spark-test</artifactId>
    <version>1.0</version>

    <properties>
        <scala.version>2.10.6</scala.version>
        <hadoop.version>2.6.0</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>com.typesafe.play</groupId>
            <artifactId>play-mailer_2.11</artifactId>
            <version>7.0.0</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.41</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
            <version>2.3.0</version>
        </dependency>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.11.0.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>2.0.1</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
            <!-- version not given in the original post -->
        </dependency>
        <dependency>
            <groupId>net.sf.json-lib</groupId>
            <artifactId>json-lib</artifactId>
            <version>2.4</version>
            <classifier>jdk15</classifier>
        </dependency>
        <dependency>
            <groupId>org.neo4j.driver</groupId>
            <artifactId>neo4j-java-driver</artifactId>
            <version>4.0.0</version>
        </dependency>
        <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>2.8.5</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
        </dependency>
        <dependency>
            <groupId>net.minidev</groupId>
            <artifactId>json-smart</artifactId>
            <version>2.3</version>
        </dependency>
        <dependency>
            <groupId>joda-time</groupId>
            <artifactId>joda-time</artifactId>
            <version>2.10.1</version>
        </dependency>
        <dependency>
            <groupId>com.huaban</groupId>
            <artifactId>jieba-analysis</artifactId>
            <version>1.0.2</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.68</version>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch-spark-20_2.11</artifactId>
            <version>6.2.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.poi</groupId>
            <artifactId>poi</artifactId>
            <version>3.12</version>
        </dependency>
    </dependencies>

    <build>
        <finalName>spark-test</finalName>
        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <archive>
                        <manifest>
                            <mainClass>WordCount</mainClass>
                        </manifest>
                    </archive>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
Straight to the code:
DataChangeStreaming:
package com.tzb.sparkstreaming.prod
import java.io.{FileNotFoundException, IOException}
import java.util
import com.alibaba.fastjson.{JSON, JSONObject}
import com.tzb.utils.{ConfigUtils, HBaseUtil, StringUtil}
import net.sf.json.JSONArray
import org.apache.hadoop.hbase.TableExistsException
import org.apache.hadoop.hbase.client._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.slf4j
import org.slf4j.LoggerFactory
import scala.collection.mutable.ArrayBuffer
/**
*
* Spark Streaming with the Kafka 0.10 integration
* Note: this program is an example of combining Spark Streaming with Kafka and HBase. In the test environment,
* Kafka and the ZooKeeper it depends on run on machine 210; HBase and the ZooKeeper it depends on run on machine 211.
*
* Verified both locally and on the 210 Linux test machine:
* open Kafka Tool and push data to a topic,
* then run the main method to start consuming.
* Example JSON message sent from Kafka Tool:
* {
* "tableName": "hbasetable6",
* "option": "put",
* "rowKey": "1001",
* "families": [
* "info1",
* "info2"],
* "cols_data": {
* "name":"tom",
* "age":"20"
* }
* }
* How to check the offsets of a topic for your consumer group:
* https://blog.51cto.com/13639264/2135877
* [root@xg kafka_2.11-2.0.0]# bin/kafka-consumer-groups.sh --bootstrap-server 10.21.0.210:9092 --group testgroup --describe
* Consumer group 'testgroup' has no active members.
* TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID
* testTopic 0 8 9 1 - - -
*
* Resetting a consumer group's offset for a topic (this seemed not to take effect at first; note that the
* reset is only applied while the consumer group has no active members):
* bin/kafka-consumer-groups.sh --bootstrap-server 10.21.0.210:9092 --group testgroup --topic testTopic --execute --reset-offsets --to-offset 9
*
* Submitting the packaged jar (verified):
* spark-submit --master yarn-client --conf spark.driver.memory=2g --class com.tzb.sparkstreaming.prod.DataChangeStreaming --executor-memory 8G --num-executors 5 --executor-cores 2 /var/lib/hadoop-hdfs/spride_sqoop_beijing/bi_table/test/spark-test-jar-with-dependencies.jar >> /var/lib/hadoop-hdfs/spride_sqoop_beijing/bi_table/test/sparkstreaming_datachange.log
* For production, change the IPs/hostnames of Kafka, ZooKeeper, HBase, etc. in the code to the production ones,
* change spark-submit to spark-submit2 when submitting, and append & to the command so the job runs in the
* background and the current terminal can be closed.
*
* How to stop the job:
* ps -ef | grep DataChangeStreaming, then kill the resulting process.
*/
object DataChangeStreaming {
// Set the log level
Logger.getLogger("org.apache").setLevel(Level.ERROR)
val logger: slf4j.Logger = LoggerFactory.getLogger(this.getClass.getSimpleName)
def main(args: Array[String]): Unit = {
val sparkConf: SparkConf = new SparkConf()
.setAppName(this.getClass.getSimpleName)
.setMaster("local[*]")
val ssc: StreamingContext = new StreamingContext(sparkConf, Seconds(5))
// Location strategy
val preferredHosts: LocationStrategy = LocationStrategies.PreferConsistent
//kafka topic
val topics = Array("testTopic")
val groupId = "testgroup"
val kafkaParams: Map[String, Object] = Map[String, Object](
"bootstrap.servers" -> ConfigUtils.brokers, //kafka producer 生产者地址
"key.deserializer" -> classOf[StringDeserializer].getName,
"value.deserializer" -> classOf[StringDeserializer].getName,
"group.id" -> groupId,
//latest, earliest, none
"auto.offset.reset" -> "earliest",
"enable.auto.commit" -> "false" // 不自动提交
)
val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
ssc,
preferredHosts,
ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
)
logger.info(s"开始消费 kafka --- 主题名:${topics(0)} --- 消费组:${groupId} --- brokers:${ConfigUtils.brokers}")
println(s"开始消费 kafka --- 主题名:${topics(0)} --- 消费组:${groupId} --- brokers:${ConfigUtils.brokers}")
stream.foreachRDD(fr => {
// Get the offset ranges of this batch
val offsetRanges: Array[OffsetRange] = fr.asInstanceOf[HasOffsetRanges].offsetRanges
println("获取offset---offsetRanges:" + offsetRanges.mkString(","))
// Business logic starts here
fr.foreachPartition(it => {
// HBase connection (one per partition)
val connection: Connection = HBaseUtil.initHbase
println("Created HBase Connection ==> " + connection)
var tableName = ""
try
it.foreach(record => {
val jsonString: String = record.value()
println("接受到的一条 json 消息 ==>" + jsonString)
val jSONObject: JSONObject = JSON.parseObject(jsonString)
tableName = jSONObject.getString("tableName")
val option: String = jSONObject.getString("option") // action flag: put or delete
val rowKey: String = jSONObject.getString("rowKey")
val families: String = jSONObject.getString("families")
val cols_data: String = jSONObject.getString("cols_data")
val familysArr = ArrayBuffer[String]()
if(families != null && StringUtil.isNotBlank(families)){
val familyjsonArr: JSONArray = JSONArray.fromObject(families)
val familyIter: util.Iterator[_] = familyjsonArr.iterator()
while (familyIter.hasNext) {
val family : String = familyIter.next()+""
familysArr += family
}
}
println("所有列族:familylist "+familysArr.mkString(","))
//创建新表 不存在就创建,存在就报错抛异常,但是数据依然会插入
// HBaseUtil.createTable(connection, tableName)
// HBaseUtil.createTable(connection, tableName, Array[String]("info1", "info2")) //创建两个列族
HBaseUtil.createTable(connection, tableName,familysArr) //创建n列族
println("tableName:" + tableName + " " + "rowKey:" + rowKey + " " + "option:" + option + "data:" + cols_data)
val dataObject: JSONObject = JSON.parseObject(cols_data)
if(dataObject !=null){
val keys: util.Set[String] = dataObject.keySet()
val columns = new ArrayBuffer[String]
val values = new ArrayBuffer[String]
import scala.collection.JavaConversions._
for (key <- keys) {
columns += key
values += dataObject.getString(key)
println(s"Columns: $key -> values: ${dataObject.getString(key)}")
}
// Save to HBase
HBaseUtil.putData(connection, tableName, rowKey, columns.toArray, values.toArray, familysArr(0)) // write only into the first column family, familysArr(0)
logger.info(s"Save succeeded -> table: $tableName --> column families: ${familysArr.mkString(",")} rowkey: $rowKey")
println(s"Save succeeded -> table: $tableName --> column families: ${familysArr.mkString(",")} rowkey: $rowKey")
}
// Perform the delete operation
// if (option != null && HBaseUtil.OPTION_DELETE == option.toLowerCase) {
//   // option is not null and equals 'delete': perform the delete
//   HBaseUtil.deleteByRowKey(tableName, rowKey)
// } else {
//
// }
})
catch {
case e: FileNotFoundException =>
println("Missing file exception")
case e: TableExistsException =>
// TableExistsException extends IOException, so it must be matched before the IOException case
println(s"Table already exists in HBase! Table name: ${tableName}")
case e: IOException =>
println("IO Exception")
case e: IllegalArgumentException =>
println("do something when an illegal argument appears.")
case e: Exception => e.printStackTrace()
} finally {
connection.close()
}
})
// Commit the offsets manually (stored by Kafka); duplicate consumption is still possible, i.e. at-least-once semantics
stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
})
ssc.start()
ssc.awaitTermination()
}
}
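For local testing without Kafka Tool, the same example message can also be pushed from code. Below is a minimal producer sketch using the kafka-clients dependency already in the pom; the broker address is the 210 test machine from the comments above and is an assumption for your environment:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

// Minimal test producer: pushes one example JSON message to testTopic.
object TestMessageProducer {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "10.21.0.210:9092") // assumed test broker
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](props)
    // Same shape as the example message in the doc comment above
    val json = """{"tableName":"hbasetable6","option":"put","rowKey":"1001","families":["info1","info2"],"cols_data":{"name":"tom","age":"20"}}"""
    producer.send(new ProducerRecord[String, String]("testTopic", json))
    producer.flush()
    producer.close()
  }
}

Once the streaming job has consumed the message, HBaseUtil.getNoDealData("hbasetable6") can be called to print the table contents and verify the put.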
HBaseUtil:
package com.tzb.utils
import java.io.IOException
import java.text.MessageFormat
import java.util
import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* HBase helper: connection initialization plus simple create / put / scan / delete operations.
*/
object HBaseUtil {
private val logger: Logger = LoggerFactory.getLogger(this.getClass.getSimpleName)
private var configuration: Configuration = _
private var connection: Connection = _
final val OPTION_DELETE = "delete"
/**
* Initialize the configuration
*/
def init(): Unit = {
try
if (configuration == null) {
configuration = HBaseConfiguration.create()
configuration.set("hbase.zookeeper.quorum", ConfigUtils.zkconnect) //zk地址
configuration.set("hbase.zookeeper.property.clientPort", "2181")
}
catch {
case e: Exception =>
logger.error("HBase Configuration Initialization failure !")
throw new RuntimeException(e)
}
}
/**
* Connect to the cluster
*
* @return
*/
def initHbase: Connection = {
init()
try
if (connection == null || connection.isClosed) connection = ConnectionFactory.createConnection(configuration)
// System.out.println("---------- " + conn.hashCode());
catch {
case e: IOException =>
logger.error("HBase 建立链接失败 ", e)
}
connection
}
/**
* List all HBase tables
*/
def list(): Unit = {
val admin: Admin = initHbase.getAdmin
val tableNames: Array[TableName] = admin.listTableNames
for (name <- tableNames) {
System.out.println(name.getNameAsString)
}
}
/**
* Create a table
*
* @param tableName table name
* @param cols      column families
*/
def createTable(connection: Connection, tableName: String, cols: ArrayBuffer[String] = ArrayBuffer[String]("information")): Unit = { // "information" is the default column family when creating a table
try {
val tn: TableName = TableName.valueOf(tableName)
val admin: Admin = connection.getAdmin
val tableNames: Array[TableName] = admin.listTableNames
for (name <- tableNames) {
//System.out.println(name.getNameAsString)
if (tableName == name.getNameAsString) {
logger.info(s"~~~表名 :$tableName 已经存在~~~")
println(s"~~~表名 :$tableName 已经存在~~~")
return
}
}
//if (admin.tableExists(tn)) {
//  println("Table already exists!")
//} else {
val hTableDescriptor: HTableDescriptor = new HTableDescriptor(tn)
for (col <- cols) {
val hColumnDescriptor = new HColumnDescriptor(col)
hTableDescriptor.addFamily(hColumnDescriptor)
}
admin.createTable(hTableDescriptor) // create the table with the n column families
//}
}
catch {
case e: IOException =>
e.printStackTrace()
}
}
/**
* Delete a table
*
* @param tableName table name
* @return
*/
def deleteTable(tableName: String): Boolean = {
var admin: Admin = null
try {
admin = initHbase.getAdmin
admin.disableTable(TableName.valueOf(tableName))
admin.deleteTable(TableName.valueOf(tableName))
} catch {
case e: IOException =>
logger.error(MessageFormat.format("删除指定的表失败,tableName:{0}", tableName), e)
return false
} finally admin.close()
true
}
/**
* Scan and print the raw data of a table
*
* @param tableName table name
*/
def getNoDealData(tableName: String): Unit = {
try {
val table: Table = initHbase.getTable(TableName.valueOf(tableName))
val scan = new Scan()
// rowKey -> (qualifier -> value)
val result = new mutable.HashMap[String, mutable.HashMap[String, String]]()
// get a scanner over the table
val rs: ResultScanner = table.getScanner(scan)
import scala.collection.JavaConversions._
for (r <- rs) { // each row
val columnMap = new mutable.HashMap[String, String]()
var rowKey: String = null
for (cell <- r.listCells) {
if (rowKey == null) rowKey = Bytes.toString(cell.getRowArray, cell.getRowOffset, cell.getRowLength)
columnMap.put(Bytes.toString(cell.getQualifierArray, cell.getQualifierOffset, cell.getQualifierLength), Bytes.toString(cell.getValueArray, cell.getValueOffset, cell.getValueLength))
}
if (rowKey != null) result.put(rowKey, columnMap)
}
result.foreach(println(_))
} catch {
case e: IOException =>
e.printStackTrace()
}
}
/**
* Insert data; if the given rowkey already exists, the old values are overwritten
*
* @param connection
* @param tableName
* @param rowKey
* @param columns
* @param values
* @param familyName
*/
def putData(connection: Connection, tableName: String, rowKey: String, columns: Array[String], values: Array[String], familyName: String = "information"): Unit = { // "information" is the default column family
try {
val table: Table = connection.getTable(TableName.valueOf(tableName))
// set the rowkey
val put = new Put(Bytes.toBytes(rowKey))
if (columns != null && values != null && columns.length == values.length) {
var i = 0
while (i < columns.length) {
if (columns(i) != null && values(i) != null) {
put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes(columns(i)), Bytes.toBytes(values(i)))
}
else{
throw new NullPointerException(MessageFormat.format("Column name and value must not be null, column:{0},value:{1}", columns(i), values(i)))
}
i += 1
}
}
table.put(put)
table.close()
} catch {
case e: Exception =>
logger.error(MessageFormat.format("为表添加 or 更新数据失败,tableName:{0},rowKey:{1},familyName:{2}", tableName, rowKey, familyName), e)
}
}
/**
* Delete an entire row (all column families, all columns, all versions) by rowkey
*
* @param tableName table name
* @param rowKey    rowkey
*/
def deleteByRowKey(tableName: String, rowKey: String): Boolean = {
var table: Table = null
try {
table = initHbase.getTable(TableName.valueOf(tableName))
val delete = new Delete(Bytes.toBytes(rowKey))
table.delete(delete)
} catch {
case e: IOException =>
logger.error(MessageFormat.format("删除指定的表失败,tableName:{0}", tableName), e)
return false
} finally table.close()
logger.info(s"Deleted -> 表名:$tableName -------------- rowkey:$rowKey")
true
}
def main(args: Array[String]): Unit = {
//HBaseUtil.list()
//HBaseUtil.createTable("pvuv",Array[String]{"information"})
//HBaseUtil.deleteTable("t_user_search_1")
//val connection: Connection = HBaseUtil.initHbase
//HBaseUtil.putData(connection,"www", "002", Array[String]{"url"}, Array[String]{"www.goole.com"});
//HBaseUtil.getNoDealData("www")
// Parse the JSON payload
//{
// "tableName": "pvuv",
// "pk": "001",
// "option": "put",
// "data": {
// "id": "21908627",
// "system_id": "10001",
// "user_id": "",
// "monitor_point": "10001",
// "client_ip": "183.15.177.28",
// "client_user_agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
// }
//}
val jsonString = "{\"tableName\":\"pvuv\",\"pk\":\"21908637\",\"option\":\"put\",\"data\":{\"id\":\"21908637\",\"system_id\":\"20001\",\"user_id\":\"\",\"monitor_point\":\"10001\",\"client_ip\":\"183.15.177.28\",\"client_user_agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\"}}"
val jSONObject: JSONObject = JSON.parseObject(jsonString)
//val tableName: String = jSONObject.getString("tableName")
//val rowKey: String = jSONObject.getString("pk")
//val option: String = jSONObject.getString("option")
//val familyName = "information"
val dataObject: JSONObject = JSON.parseObject(jSONObject.getString("data"))
val keys: util.Set[String] = dataObject.keySet()
val columns = new ArrayBuffer[String]
val values = new ArrayBuffer[String]
import scala.collection.JavaConversions._
for (key <- keys) {
columns += key
values += dataObject.getString(key)
//println(s"${key} -> ${dataObject.getString(key)}")
}
//HBaseUtil.createTable(connection, "t_pvuv_log")
//HBaseUtil.putData(connection,tableName,rowKey,columns.toArray,values.toArray)
//HBaseUtil.getNoDealData("t_monitor_system")
//HBaseUtil.deleteByRowKey("t_pvuv_log", "10690786")
}
}
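ConfigUtils and StringUtil are referenced above but not shown in the post. Here is a minimal sketch of what they would need to provide for this code to compile; the properties file name, key names, and default addresses are assumptions:

import java.util.Properties

// Stand-ins for the two helpers used above; replace with the real implementations.
object ConfigUtils {
  private val props = new Properties()
  props.load(getClass.getClassLoader.getResourceAsStream("config.properties")) // assumed file name

  val brokers: String = props.getProperty("kafka.brokers", "10.21.0.210:9092") // Kafka broker list
  val zkconnect: String = props.getProperty("hbase.zk.quorum", "10.21.0.211")  // HBase ZooKeeper quorum
}

object StringUtil {
  // true if the string is non-null and contains at least one non-whitespace character
  def isNotBlank(s: String): Boolean = s != null && s.trim.nonEmpty
}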