spark学习03之wordCount统计并排序(java)

       wordCount就是对一大堆单词进行个数统计,然后排序。从网上找篇英文文章放到本地文档。

spark学习03之wordCount统计并排序(java)_第1张图片

  pom.xml


  4.0.0
  com.fei
  word-count
  0.0.1-SNAPSHOT
  
  
    UTF-8
  

  
    
      junit
      junit
      3.8.1
      test
    
     
	  org.apache.spark
	  spark-core_2.10
	  1.3.0
	
	
	
  
   
     
         
            org.apache.maven.plugins 
            maven-compiler-plugin 
            2.0.2 
             
                1.8 
                1.8 
             
         
     
 

WordCount.java

package com.fei;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;

import scala.Tuple2;

/**
 * 单词统计,并按降序排序,输出前10个单词及个数
 * @author Jfei
 *
 */
/**
 * Word count: counts the occurrences of each word in a local text file,
 * sorts descending by count, and prints the top 10 words with their counts.
 *
 * <p>Written against Spark 1.x (spark-core_2.10 1.3.0), where
 * {@code JavaRDD.flatMap} takes a function returning {@code Iterable};
 * Spark 2.x+ expects an {@code Iterator} instead, so this code would need
 * {@code .iterator()} appended there to migrate.
 *
 * @author Jfei
 */
public class WordCount {

	public static void main(String[] args) {
		// 1. Local mode: create the Spark configuration and context.
		SparkConf conf = new SparkConf().setAppName("wordCount").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);

		// 2. Read the local file into an RDD of lines.
		JavaRDD<String> linesRDD = sc.textFile("e:\\words.txt");

		// 3. Split each line into words. Use "\\s+" (one-or-more whitespace)
		//    rather than "\\s": with "\\s", consecutive spaces/tabs produce
		//    empty-string "words" that would be counted. A leading space can
		//    still yield one empty first token, so filter empties out too.
		JavaRDD<String> wordsRDD = linesRDD
				.flatMap(s -> Arrays.asList(s.split("\\s+")))
				.filter(w -> !w.isEmpty());
		// Equivalent without a lambda (pre-Java-8):
		/*JavaRDD<String> wordsRDD = linesRDD.flatMap(new FlatMapFunction<String, String>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Iterable<String> call(String line) throws Exception {
				return Arrays.asList(line.split("\\s+"));
			}
		});*/

		// 4. Map each word to a (word, 1) pair.
		JavaPairRDD<String, Integer> wordsPairRDD =
				wordsRDD.mapToPair(s -> new Tuple2<>(s, 1));
		// Equivalent without a lambda:
		/*JavaPairRDD<String, Integer> wordsPairRDD = wordsRDD.mapToPair(new PairFunction<String, String, Integer>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Tuple2<String, Integer> call(String word) throws Exception {
				return new Tuple2<>(word, 1);
			}
		});*/

		// 5. Sum the counts for each word.
		JavaPairRDD<String, Integer> wordsCountRDD =
				wordsPairRDD.reduceByKey((a, b) -> a + b);
		// Equivalent without a lambda:
		/*JavaPairRDD<String, Integer> wordsCountRDD = wordsPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
			@Override
			public Integer call(Integer v1, Integer v2) throws Exception {
				return v1 + v2;
			}
		});*/

		// 6. sortByKey only sorts on the key, so swap each pair to
		//    (count, word) before sorting.
		JavaPairRDD<Integer, String> wordsCountRDD2 =
				wordsCountRDD.mapToPair(t -> new Tuple2<>(t._2, t._1));
		// Equivalent without a lambda:
		/*JavaPairRDD<Integer, String> wordsCountRDD2 = wordsCountRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Tuple2<Integer, String> call(Tuple2<String, Integer> t) throws Exception {
				return new Tuple2<>(t._2, t._1);
			}
		});*/

		// 7. Sort by count, descending (false = not ascending).
		JavaPairRDD<Integer, String> wordsCountRDD3 = wordsCountRDD2.sortByKey(false);

		// 8. Take only the first 10 entries.
		List<Tuple2<Integer, String>> result = wordsCountRDD3.take(10);

		// 9. Print as "word   count" (pairs are (count, word) after the swap).
		result.forEach(t -> System.out.println(t._2 + "   " + t._1));

		sc.close();
	}
}
spark学习03之wordCount统计并排序(java)_第2张图片
如果JDK不是1.8的,那修改下pom.xml及代码中不要使用lambda表达式


你可能感兴趣的:(spark)