Writing Spark with Python for the first time:
1) Writing Spark jobs in Python is done through the pyspark library;
2) Spark itself also needs to be installed locally on the Windows machine (a setup sketch follows the reference links below).
pyspark official docs: http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD
References: https://blog.csdn.net/lc_1123/article/details/79007231
https://www.jianshu.com/p/b5e949261cfd
https://blog.csdn.net/cymy001/article/details/78483723
https://blog.csdn.net/weixin_41512727/article/details/100131995 (Windows environment setup)
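Before running the example it is worth confirming that pyspark can actually be imported on the local machine. The snippet below is only a minimal sketch of one common Windows setup: the install paths are hypothetical, and the findspark helper package is an assumption (it is not needed if pyspark was installed with pip and is already on sys.path).
import os
# Hypothetical install locations -- adjust to your own machine (assumption, not from the original post)
os.environ.setdefault("SPARK_HOME", r"C:\spark-2.4.5-bin-hadoop2.7")
os.environ.setdefault("HADOOP_HOME", r"C:\hadoop")  # on Windows, winutils.exe is expected under HADOOP_HOME\bin

import findspark       # optional helper; only needed when pyspark is not already importable
findspark.init()       # puts the Spark Python libraries from SPARK_HOME onto sys.path

import pyspark
print(pyspark.__version__)  # if this prints a version, the local installation is usable
Once the import succeeds, the word count example below can be run as an ordinary Python script.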
from pyspark import SparkConf, SparkContext
# create the SparkConf and SparkContext
conf = SparkConf().setMaster("local").setAppName("wordcount")
sc = SparkContext(conf=conf)
# a local Python list
data = ["hello", "world", "hello", "word", "count", "count", "hello"]
# a file on HDFS
inputFile = "hdfs://node1.hadoop:9000/README.md"
# turn the data collection (or a file) into a Spark RDD and operate on it
#rdd = sc.parallelize(data)
rdd = sc.textFile(inputFile)
countRdd = rdd.count()  # count how many lines the RDD contains
# a text file is read line by line, so each RDD element is one line
print('countRdd:', countRdd)
from operator import add
#resultRdd = rdd.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b)
resultRdd = rdd.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(add)
# convert the RDD into a local collection and print it
resultColl = sorted(resultRdd.collect())
for line in resultColl:
    print(line)
# done: shut down the SparkContext
sc.stop()
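For reference, the commented-out sc.parallelize(data) line above can replace the HDFS input entirely. The following is a minimal sketch of the same word count running purely on the in-memory data list (no HDFS needed); the app name "wordcount-local" is just an illustrative choice.
from operator import add
from pyspark import SparkConf, SparkContext

conf = SparkConf().setMaster("local").setAppName("wordcount-local")
sc = SparkContext(conf=conf)

data = ["hello", "world", "hello", "word", "count", "count", "hello"]
# build the RDD from the local list instead of an HDFS file
rdd = sc.parallelize(data)
# each element is already a single word, so no flatMap/split is needed here
resultRdd = rdd.map(lambda word: (word, 1)).reduceByKey(add)
for pair in sorted(resultRdd.collect()):
    print(pair)

sc.stop()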
Notes: