Spark 2.0: per-day statistics on real-time data with a broadcast variable

The job consumes a Kafka topic through a direct stream, splits each record into words, and keeps a running count per word with updateStateByKey; each result key is suffixed with the current date and the rows are appended to a PostgreSQL table over JDBC. A broadcast Map<String, Boolean> carries a flag for the current day: on the first batch of a new day the old state is discarded so the counts restart from zero, after which the flag is flipped to false and re-broadcast from the driver. Three files follow: App.java, ResultRow.java, and pom.xml.

App.java:

package com.gm.hive.SparkHive;

import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

import scala.Tuple2;
import scala.reflect.ClassManifestFactory;


public class App {

	private static volatile Broadcast<Map<String, Boolean>> bcMap = null;
	
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setMaster("local[2]").setAppName(
				"SparkStreaming");
		
		JavaSparkContext sc = new JavaSparkContext(conf);
		sc.setLogLevel("ERROR");
		sc.setCheckpointDir("./checkpoint");
		
		JavaStreamingContext ssc = new JavaStreamingContext(sc,
				Durations.seconds(10));
		
		Date date = new Date();
		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
		
		Map<String, Boolean> map = new HashMap<String, Boolean>();
		map.put(sdf.format(date), true);
		bcMap = sc.broadcast(map); // initial broadcast variable: current day -> true
	
		// Kafka parameters; these are required, the job fails without them
		Map<String, Object> kafkaParams = new HashMap<>();
		kafkaParams.put("bootstrap.servers", "192.168.174.200:9092");
		kafkaParams.put("key.deserializer", StringDeserializer.class);
		kafkaParams.put("value.deserializer", StringDeserializer.class);
		kafkaParams.put("group.id", "newgroup2");
		kafkaParams.put("auto.offset.reset", "latest");
		kafkaParams.put("enable.auto.commit", false);

		Collection<String> topics = Arrays.asList("test");

		JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils
				.createDirectStream(ssc, LocationStrategies.PreferConsistent(),
						ConsumerStrategies.<String, String> Subscribe(topics,
								kafkaParams));

		// Note: each element of the stream is a ConsumerRecord
		JavaPairDStream<String, Integer> counts = stream
				.flatMap(
						x -> Arrays.asList(x.value().toString().split(" "))
								.iterator())
				.mapToPair(x -> new Tuple2<String, Integer>(x, 1))
				.reduceByKey((x, y) -> x + y);
		// counts.print();

		
		// Refresh the broadcast day-flag once per batch (runs on the driver)
		stream.foreachRDD(rdd -> {

			Map<String, Boolean> map1 = bcMap.value();
			Date newDate = new Date();
			SimpleDateFormat newsdf = new SimpleDateFormat("yyyy-MM-dd");
			String newDay = newsdf.format(newDate);

			if (map1.get(newDay) != null) { // the current day is already in the map
				if (bcMap.value().get(newDay)) { // flag is still true: flip it to false and re-broadcast
					map1.put(newDay, false);
					bcMap = rdd.context().broadcast(map1, ClassManifestFactory.classType(Map.class));
				}
			} else { // a new day: drop the old broadcast, add the new day with flag true and re-broadcast
				if (bcMap != null) {
					bcMap.unpersist();
				}
				map1.put(newDay, true);
				bcMap = rdd.context().broadcast(map1, ClassManifestFactory.classType(Map.class));
			}
		});
		
		
		JavaPairDStream<String, Integer> result = counts
				.updateStateByKey(new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {

					private static final long serialVersionUID = 1L;

					@Override
					public Optional<Integer> call(List<Integer> values,
							Optional<Integer> state) throws Exception {
						/**
						 * values: the values collected for this key in the current batch, e.g. [1,1,1,1,1]
						 * state: the state of this key before this batch
						 */
						Integer updateValue = 0;

						Date newDate = new Date();
						SimpleDateFormat newsdf = new SimpleDateFormat("yyyy-MM-dd");
						String newDay = newsdf.format(newDate);

						Map<String, Boolean> map1 = bcMap.value();
						if (map1.get(newDay) != null) {
							if (map1.get(newDay)) { // first batch of a new day: ignore the old state, restart the count
								for (Integer value : values) {
									updateValue += value;
								}
							} else { // same day, data already counted: accumulate on top of the previous state
								if (state.isPresent()) {
									updateValue = state.get();
								}
								for (Integer value : values) {
									updateValue += value;
								}
							}
						}
						return Optional.of(updateValue);
					}
				});
		
		
		// Database connection settings
		String url = "jdbc:postgresql://192.168.174.200:5432/postgres?charSet=utf-8";
		Properties connectionProperties = new Properties();
		connectionProperties.put("user","postgres");
		connectionProperties.put("password","postgres");
		connectionProperties.put("driver","org.postgresql.Driver");
		
		result.print();

		result.foreachRDD(new VoidFunction<JavaPairRDD<String, Integer>>() {
			public void call(JavaPairRDD<String, Integer> rdd)
					throws Exception {
				JavaRDD<ResultRow> rowRDD = rdd.map(new Function<Tuple2<String, Integer>, ResultRow>() {

					public ResultRow call(Tuple2<String, Integer> arg0)
							throws Exception {
						Date newDate = new Date();
						SimpleDateFormat newsdf = new SimpleDateFormat("yyyy-MM-dd");
						String newDay = newsdf.format(newDate);

						ResultRow rr = new ResultRow();
						rr.setTypeid(arg0._1 + "_" + newDay); // key: word + "_" + date
						rr.setKczs(arg0._2);                  // value: running count
						return rr;
					}

				});
				SparkSession spark = SparkSession.builder().config(rdd.context().getConf()).getOrCreate();
				Dataset<Row> data = spark.createDataFrame(rowRDD, ResultRow.class);
				// append the batch results to the kcssqktj table
				data.write().mode(SaveMode.Append).jdbc(url, "kcssqktj", connectionProperties);
			}
		});
		
		ssc.start();
		try {
			ssc.awaitTermination();
		} catch (InterruptedException e) {
			e.printStackTrace();
		}
		ssc.close();
	}

}
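
To feed the job during a local test, a few space-separated words can be pushed into the test topic with the plain Kafka producer API. The sketch below is not part of the original post; the class name TestProducer and the sample sentence are assumptions, and the broker address simply mirrors the one configured in App.

package com.gm.hive.SparkHive;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical test producer: pushes a few space-separated words into the
// "test" topic so the streaming job above has something to count.
public class TestProducer {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.174.200:9092"); // same broker as in App
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

		KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
		producer.send(new ProducerRecord<String, String>("test", "hello spark hello streaming"));
		producer.flush();
		producer.close();
	}
}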

ResultRow.java:

package com.gm.hive.SparkHive;

import java.io.Serializable;

public class ResultRow implements Serializable {
	private static final long serialVersionUID = 6681372116317508248L;
	String typeid; // key: word + "_" + date (yyyy-MM-dd)
	int kczs;      // running count for that word and day

	public String getTypeid() {
		return typeid;
	}

	public void setTypeid(String typeid) {
		this.typeid = typeid;
	}

	public int getKczs() {
		return kczs;
	}

	public void setKczs(int kczs) {
		this.kczs = kczs;
	}

}
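
The bean above is what SaveMode.Append writes to the kcssqktj table: each bean property becomes a column, so the table ends up with a typeid (string) and a kczs (integer) column. The quick check below is not part of the original post; the class name SchemaCheck, the local master and the sample values are assumptions for illustration.

package com.gm.hive.SparkHive;

import java.util.Arrays;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

// Hypothetical schema check: shows the columns that the streaming job
// appends to the kcssqktj table (bean properties map to columns by name).
public class SchemaCheck {
	public static void main(String[] args) {
		SparkSession spark = SparkSession.builder().master("local[1]")
				.appName("SchemaCheck").getOrCreate();

		ResultRow rr = new ResultRow();
		rr.setTypeid("hello_2019-05-20"); // word + "_" + day
		rr.setKczs(3);                    // running count for that day

		Dataset<Row> df = spark.createDataFrame(Arrays.asList(rr), ResultRow.class);
		df.printSchema(); // columns: kczs (integer), typeid (string)
		df.show();

		spark.stop();
	}
}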

pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<groupId>com.test</groupId>
	<artifactId>kcssqktj_spark</artifactId>
	<version>0.0.1-SNAPSHOT</version>
	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
	</properties>

	<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>3.8.1</version>
			<scope>test</scope>
		</dependency>

		<dependency>
			<groupId>org.slf4j</groupId>
			<artifactId>slf4j-log4j12</artifactId>
			<version>1.7.22</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-client</artifactId>
			<version>2.8.0</version>
			<exclusions>
				<exclusion>
					<groupId>javax.servlet</groupId>
					<artifactId>*</artifactId>
				</exclusion>
			</exclusions>
		</dependency>

		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-sql_2.11</artifactId>
			<version>2.0.0</version>
		</dependency>
		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-hive_2.11</artifactId>
			<version>2.0.0</version>
		</dependency>

		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-streaming_2.11</artifactId>
			<version>2.0.0</version>
			<exclusions>
				<exclusion>
					<artifactId>slf4j-log4j12</artifactId>
					<groupId>org.slf4j</groupId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-core_2.11</artifactId>
			<version>2.0.0</version>
		</dependency>

		<dependency>
			<groupId>org.apache.hive</groupId>
			<artifactId>hive-jdbc</artifactId>
			<version>2.1.1</version>
		</dependency>

		<dependency>
			<groupId>org.apache.hive</groupId>
			<artifactId>hive-exec</artifactId>
			<version>2.1.1</version>
		</dependency>

		<dependency>
			<groupId>org.postgresql</groupId>
			<artifactId>postgresql</artifactId>
			<version>9.4-1201-jdbc4</version>
		</dependency>

		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
			<version>2.0.0</version>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-compiler-plugin</artifactId>
				<configuration>
					<source>1.8</source>
					<target>1.8</target>
				</configuration>
			</plugin>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-shade-plugin</artifactId>
				<executions>
					<execution>
						<phase>package</phase>
						<goals>
							<goal>shade</goal>
						</goals>
						<configuration>
							<shadedArtifactAttached>true</shadedArtifactAttached>
							<shadedClassifierName>allinone</shadedClassifierName>
							<artifactSet>
								<includes>
									<include>*:*</include>
								</includes>
							</artifactSet>
							<filters>
								<filter>
									<artifact>*:*</artifact>
									<excludes>
										<exclude>META-INF/*.SF</exclude>
										<exclude>META-INF/*.DSA</exclude>
										<exclude>META-INF/*.RSA</exclude>
									</excludes>
								</filter>
							</filters>
							<transformers>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
									<resource>reference.conf</resource>
								</transformer>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
									<resource>META-INF/spring.handlers</resource>
								</transformer>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
									<resource>META-INF/spring.schemas</resource>
								</transformer>
								<transformer
									implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
									<manifestEntries>
										<Main-Class></Main-Class>
									</manifestEntries>
								</transformer>
							</transformers>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
</project>
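
A brief build note: because the shade plugin attaches a shaded artifact with the allinone classifier, mvn package should produce, next to the regular jar, a self-contained kcssqktj_spark-0.0.1-SNAPSHOT-allinone.jar with the signature files stripped as configured above; that is the jar to hand to spark-submit when running the job outside the IDE.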