Real-time word count over a Kafka message queue with Spark Streaming


package sparkTestJava;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import kafka.serializer.StringDecoder;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import scala.Tuple2;

public class KafkaDirectWordCount {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("wordcount").setMaster("local[2]");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));
		
		// First, build the Kafka parameter map.
		Map<String, String> kafkaParams = new HashMap<String, String>();
		// The direct approach reads from the brokers without going through ZooKeeper,
		// so only broker.list is needed here.
		kafkaParams.put("metadata.broker.list", 
				"192.168.*.*:9092,192.168.*.*:9092,192.168.*.*:9092");
		
		// Next, create a set of the topics to read. One nice property of the
		// direct API is that it can consume several topics in parallel.
		Set<String> topics = new HashSet<String>();
		topics.add("wordcount20170605");
		
		JavaPairInputDStream<String, String> lines = KafkaUtils.createDirectStream(
				jssc, 
				String.class, // key type
				String.class, // value type
				StringDecoder.class, // key decoder
				StringDecoder.class, // value decoder
				kafkaParams, 
				topics);
		
		JavaDStream<String> words = lines.flatMap(new FlatMapFunction<Tuple2<String, String>, String>() {

			private static final long serialVersionUID = 1L;

			@Override
			public Iterable<String> call(Tuple2<String, String> tuple) throws Exception {
				// The message value is the second element of the tuple; split it into words.
				return Arrays.asList(tuple._2.split(" "));
			}

		});
		
		JavaPairDStream<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {

			private static final long serialVersionUID = 1L;

			@Override
			public Tuple2<String, Integer> call(String word) throws Exception {
				return new Tuple2<String, Integer>(word, 1);
			}

		});
		
		JavaPairDStream<String, Integer> wordcounts = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {

			private static final long serialVersionUID = 1L;

			@Override
			public Integer call(Integer v1, Integer v2) throws Exception {
				return v1 + v2;
			}

		});
		
		wordcounts.print();
		
		jssc.start();
		jssc.awaitTermination();
		jssc.close();
	}
}
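
Note: this example targets the Spark 1.x integration for Kafka 0.8 (org.apache.spark.streaming.kafka.KafkaUtils with kafka.serializer.StringDecoder), so a matching spark-streaming-kafka artifact (e.g. spark-streaming-kafka_2.10 for Scala 2.10 builds) must be on the classpath; on Spark 2.x the same API lives in the spark-streaming-kafka-0-8 artifact.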

Before running the program, first create a topic named wordcount20170605 in Kafka.

Then produce data to the topic through the brokers at hadoop:9092, hadoop1:9092, and hadoop2:9092; the streaming job picks up these messages and computes word counts in real time, one batch every 5 seconds.
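
For reference, here is a minimal producer sketch using the standard kafka-clients API to feed test data into the topic; the class name, sample message text, and loop count are illustrative assumptions, not part of the original post:

package sparkTestJava;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical helper for producing test data into the wordcount topic.
public class WordCountProducer {

	public static void main(String[] args) {
		Properties props = new Properties();
		// Same brokers the streaming job reads from.
		props.put("bootstrap.servers", "hadoop:9092,hadoop1:9092,hadoop2:9092");
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

		KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
		// Send a few space-separated lines; the streaming job splits them into words.
		for (int i = 0; i < 100; i++) {
			producer.send(new ProducerRecord<String, String>("wordcount20170605", "hello spark hello kafka"));
		}
		producer.close();
	}
}

If all 100 records land in the same 5-second batch, that batch would print pairs such as (hello,200), (spark,100), (kafka,100).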

Screenshots of the run:

[Screenshot 1] [Screenshot 2]

