SparkWordCount and JavaWordCount

(1) SparkWordCount

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object SparkWordCount {

  def main(args: Array[String]): Unit = {
    // Create the configuration
    val sparkConf = new SparkConf()
    // "local" runs Spark inside a single JVM to simulate a cluster:
    //   local     uses a single worker thread
    //   local[N]  uses N threads
    //   local[*]  uses all available local cores
    sparkConf.setAppName("SparkWC")
      .setMaster("local[*]")

    // Create the SparkContext
    val sc: SparkContext = new SparkContext(sparkConf)

    // Read the input data
    // val lines: RDD[String] = sc.textFile(args(0))
    val lines: RDD[String] = sc.textFile("D:\\ceshi\\input\\1.txt")

    // Split every line into words and flatten them into one RDD of words
    val words: RDD[String] = lines.flatMap(_.split(" "))
    // Map each word to a (word, 1) tuple
    val tuple: RDD[(String, Int)] = words.map((_, 1))

    // Note: println on an RDD only prints its lineage description,
    // not its contents; use take(n) or collect() to inspect the data
    println(tuple)

    // Reduce by key, accumulating the counts for each word
    val reduced: RDD[(String, Int)] = tuple.reduceByKey(_ + _)
    println(reduced)
    // Sort by count, descending
    val res: RDD[(String, Int)] = reduced.sortBy(_._2, ascending = false)
    // Save the result
    // res.saveAsTextFile(args(1))
    res.saveAsTextFile("D:\\ceshi\\out")
    sc.stop()
  }
}
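For comparison, the whole Scala job can also be written as a single transformation chain. This is a minimal sketch that keeps the hard-coded paths from the listing above (the object name SparkWordCountCompact is just illustrative):

import org.apache.spark.{SparkConf, SparkContext}

object SparkWordCountCompact {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("SparkWC").setMaster("local[*]"))
    sc.textFile("D:\\ceshi\\input\\1.txt")
      .flatMap(_.split(" "))            // split lines into words
      .map((_, 1))                      // pair each word with a count of 1
      .reduceByKey(_ + _)               // sum the counts per word
      .sortBy(_._2, ascending = false)  // highest counts first
      .saveAsTextFile("D:\\ceshi\\out")
    sc.stop()
  }
}

saveAsTextFile writes each tuple with its toString, so the output part files contain lines of the form (word,count).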

(2) JavaWordCount

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;

public class JavaWordCount {
    public static void main(String[] args) {
        final SparkConf conf = new SparkConf().setAppName("JavaWc").setMaster("local[2]");

        final JavaSparkContext jsc = new JavaSparkContext(conf);
        // Read the input file into an RDD
        // final JavaRDD<String> lines = jsc.textFile(args[0]);
        final JavaRDD<String> lines = jsc.textFile("D:\\ceshi\\input\\1.txt");

        // Split each line to get the collection of words
        // (Spark 1.x API: call() returns an Iterable; in Spark 2.x+ it returns an Iterator)
        final JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) {
                return Arrays.asList(s.split(" "));
            }
        });

        // Map each word to a (word, 1) tuple
        final JavaPairRDD<String, Integer> tuples = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<>(s, 1);
            }
        });

        // Aggregate: sum the counts for each word
        final JavaPairRDD<String, Integer> reduced = tuples.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        // Swap word and count so the count becomes the key
        final JavaPairRDD<Integer, String> swapped = reduced.mapToPair(
                new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tup) throws Exception {
                return tup.swap();
            }
        });

        // Sort by key, i.e. by count (ascending by default; pass false for descending)
        final JavaPairRDD<Integer, String> sorted = swapped.sortByKey();

        // Swap the positions back to (word, count)
        final JavaPairRDD<String, Integer> res = sorted.mapToPair(
                new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tup) throws Exception {
                return tup.swap();
            }
        });

        // Write the final result to a file
        // res.saveAsTextFile(args[1]);
        res.saveAsTextFile("D:\\ceshi\\out");
        jsc.stop();
    }
}
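The anonymous-class style above targets the Spark 1.x Java API, where FlatMapFunction.call returns an Iterable. On Spark 2.x+ with Java 8, call returns an Iterator and the same job can be expressed with lambdas. A minimal sketch under that assumption, keeping the same hard-coded paths (the class name JavaWordCountLambda is just illustrative):

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class JavaWordCountLambda {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("JavaWc").setMaster("local[2]");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        JavaPairRDD<String, Integer> counts = jsc
                .textFile("D:\\ceshi\\input\\1.txt")
                // Spark 2.x+: the flatMap function returns an Iterator
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey(Integer::sum);

        // Swap to (count, word), sort descending, then swap back to (word, count)
        counts.mapToPair(Tuple2::swap)
              .sortByKey(false)
              .mapToPair(Tuple2::swap)
              .saveAsTextFile("D:\\ceshi\\out");

        jsc.stop();
    }
}

The swap/sortByKey/swap sequence is still needed because sortByKey only sorts a pair RDD by its key; here sortByKey(false) sorts descending so the most frequent words come first.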
