2. Ease of use
3. Generality
4. Compatibility
sc.textFile("hdfs://node1:9000/wc.txt").flatMap(_.split(" ")).map(x=>(x,1)).reduceByKey(_+_).collect
1. Modify the configuration file (spark-env.sh) so Spark picks up the Hadoop configuration (a sketch follows this list)
2. The file can then be read with a short path: sc.textFile("/wc.txt").flatMap(_.split(" ")).map(x=>(x,1)).reduceByKey(_+_).collect
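A minimal sketch of the spark-env.sh change, assuming Hadoop's configuration directory is /export/servers/hadoop-2.7.4/etc/hadoop (an example path, substitute your own installation). Pointing Spark at the Hadoop configuration lets paths without a scheme, such as /wc.txt, resolve against the fs.defaultFS declared in core-site.xml:

# spark-env.sh (the directory below is an assumed example path)
export HADOOP_CONF_DIR=/export/servers/hadoop-2.7.4/etc/hadoop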
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

// Word count program for Spark written in Scala
object WordCount {
  def main(args: Array[String]): Unit = {
    // 1. Create a SparkConf object and set the appName and master address;
    //    local[2] means the computation runs locally with 2 threads
    val sparkConf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local[2]")
    // 2. Create the SparkContext object; it is important because it creates the DAGScheduler and TaskScheduler
    val sc: SparkContext = new SparkContext(sparkConf)
    // Set the log output level
    sc.setLogLevel("WARN")
    // 3. Read the data file
    //val data: RDD[String] = sc.textFile(args(0))
    val data: RDD[String] = sc.textFile("E:\\wordcount\\input\\words.txt")
    // 4. Split each line and flatten the result: hello, you, me
    val words: RDD[String] = data.flatMap(_.split(" "))
    // 5. Map each word to a count of 1
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    // 6. Sum the counts of identical words
    val result: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    // Sort by word count in descending order
    val sortResult: RDD[(String, Int)] = result.sortBy(_._2, false)
    // 7. Collect the data and print it
    val finalResult: Array[(String, Int)] = sortResult.collect()
    // Print the result
    finalResult.foreach(x => println(x))
    // Shut down
    sc.stop()
  }
}
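The commented-out sc.textFile(args(0)) line hints at how the same program looks when submitted to a cluster: the input path comes from the program arguments, setMaster is omitted (spark-submit supplies the master), and the result is written back to HDFS instead of being printed. A minimal sketch under those assumptions; the object name WordCountCluster, the output-path argument args(1), and the saveAsTextFile call are additions for illustration, not part of the original listing:

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

// Cluster-oriented variant: input and output paths come from spark-submit arguments
object WordCountCluster {
  def main(args: Array[String]): Unit = {
    // No setMaster here; the master is passed on the spark-submit command line
    val sparkConf: SparkConf = new SparkConf().setAppName("WordCountCluster")
    val sc: SparkContext = new SparkContext(sparkConf)
    val data: RDD[String] = sc.textFile(args(0))   // e.g. hdfs://node1:9000/wc.txt
    val result: RDD[(String, Int)] = data
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, false)
    result.saveAsTextFile(args(1))                 // e.g. hdfs://node1:9000/wc_out
    sc.stop()
  }
}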
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Word count program for Spark implemented in Java
public class WordCount_Java {
    public static void main(String[] args) {
        // 1. Create a SparkConf object and set the appName and master address
        SparkConf sparkConf = new SparkConf().setAppName("WordCount_Java").setMaster("local[2]");
        // 2. Create the JavaSparkContext object
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        // 3. Read the data file
        JavaRDD<String> dataJavaRDD = jsc.textFile("E:\\wordcount\\input\\words.txt");
        // 4. Split and flatten each line
        JavaRDD<String> wordsJavaRDD = dataJavaRDD.flatMap(new FlatMapFunction<String, String>() {
            @Override // line is one record of the input
            public Iterator<String> call(String line) throws Exception {
                // Split the line
                String[] words = line.split(" ");
                return Arrays.asList(words).iterator();
            }
        });
        // 5. Map each word to a count of 1
        JavaPairRDD<String, Integer> wordAndOneJavaPairRDD = wordsJavaRDD.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });
        // 6. Sum the counts of identical words (_+_)
        JavaPairRDD<String, Integer> resultJavaPairRDD = wordAndOneJavaPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Sort by word count in descending order:
        // swap (word, count) to (count, word) so sortByKey can sort on the count
        JavaPairRDD<Integer, String> sortJavaPairRDD = resultJavaPairRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> t) throws Exception {
                return new Tuple2<>(t._2, t._1);
            }
        }).sortByKey(false);
        // Swap (count, word) back to (word, count)
        JavaPairRDD<String, Integer> finalSortJavaPairRDD = sortJavaPairRDD.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> t) throws Exception {
                return new Tuple2<>(t._2, t._1);
            }
        });
        // 7. Collect and print
        List<Tuple2<String, Integer>> finalResult = finalSortJavaPairRDD.collect();
        for (Tuple2<String, Integer> t : finalResult) {
            System.out.println(t);
        }
        jsc.stop();
    }
}
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>cn.itcast</groupId>
    <artifactId>Spark</artifactId>
    <version>1.0-SNAPSHOT</version>
    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <encoding>UTF-8</encoding>
        <scala.version>2.11.8</scala.version>
        <scala.compat.version>2.11</scala.compat.version>
        <hadoop.version>2.7.4</hadoop.version>
        <spark.version>2.0.2</spark.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
    </dependencies>
    <build>
        <sourceDirectory>src/main/scala</sourceDirectory>
        <testSourceDirectory>src/test/scala</testSourceDirectory>
        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                        <configuration>
                            <args>
                                <arg>-dependencyfile</arg>
                                <arg>${project.build.directory}/.scala_dependencies</arg>
                            </args>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.18.1</version>
                <configuration>
                    <useFile>false</useFile>
                    <disableXmlReport>true</disableXmlReport>
                    <includes>
                        <include>**/*Test.*</include>
                        <include>**/*Suite.*</include>
                    </includes>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass></mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
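Once mvn package has produced the shaded jar, the job can be submitted to the cluster with spark-submit. A minimal sketch, assuming the cluster variant sketched earlier is the entry point (class name cn.itcast.WordCountCluster), the jar is Spark-1.0-SNAPSHOT.jar, and the standalone master runs on node1; all of these names and paths are illustrative, substitute your own:

spark-submit \
  --class cn.itcast.WordCountCluster \
  --master spark://node1:7077 \
  --executor-memory 1g \
  --total-executor-cores 2 \
  Spark-1.0-SNAPSHOT.jar \
  hdfs://node1:9000/wc.txt hdfs://node1:9000/wc_out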