[PySpark] A Small Example of the RDD map Operation

################ test.py ################
# cat /Users/mparsian/spark-1.6.1-bin-hadoop2.6/test.py
#!/usr/bin/python
import sys

# each RDD element arrives on stdin as one line; note that
# `line` keeps its trailing newline (this matters for the output below)
for line in sys.stdin:
    print "hello " + line
################ test2.py ################
# cat /Users/mparsian/spark-1.6.1-bin-hadoop2.6/test2.py
#!/usr/bin/python
# fun2() will be applied to every RDD element via map();
# the parameter is named s to avoid shadowing the built-in str
def fun2(s):
    return s + " zaza"
>>> data = ["john","paul","george","ringo"]
>>> data
['john', 'paul', 'george', 'ringo']
>>> rdd = sc.parallelize(data)
>>> rdd.collect()
['john', 'paul', 'george', 'ringo']
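As a short aside (not part of the original session), sc.parallelize() also accepts an explicit partition count, and glom() reveals the resulting per-partition grouping:

>>> rdd3 = sc.parallelize(data, 2)   # request two partitions
>>> rdd3.glom().collect()            # glom() groups elements by partition
[['john', 'paul'], ['george', 'ringo']]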
>>> # keep test.py as a plain path string for pipe(); "import test" would both
>>> # shadow this name and run the script's stdin loop at import time
>>> test = "/Users/mparsian/spark-1.6.1-bin-hadoop2.6/test.py"
>>> import test2   # imported as a module so map() can call test2.fun2()
>>> pipeRDD = rdd.pipe(test)   # note the pipe() transformation
>>> pipeRDD.collect()
[u'hello john', u'', u'hello paul', u'', u'hello george', u'', u'hello ringo', u'']

The empty strings appear because each incoming line keeps its trailing newline and the print statement in test.py appends another, so every element yields two output lines.
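A newline-safe variant of the pipe script avoids the blank entries (a minimal sketch; the file name test_strip.py is made up here):

################ test_strip.py ################
#!/usr/bin/python
# sys.stdout.write() adds no newline of its own, so the newline already
# carried by `line` is reused and exactly one line is emitted per element
import sys
for line in sys.stdin:
    sys.stdout.write("hello " + line)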
>>> rdd.collect()
['john', 'paul', 'george', 'ringo']
>>> rdd2 = rdd.map(lambda x: test2.fun2(x))  # the key step: map() applies fun2() to every element of the RDD
>>> rdd2.collect()
['john zaza', 'paul zaza', 'george zaza', 'ringo zaza']
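For reference, the whole session condenses into one standalone script; a minimal sketch, assuming PySpark is installed and sed is available on the workers (the sed one-liner stands in for test.py):

#!/usr/bin/python
# Standalone sketch of the two transformations demonstrated above.
from pyspark import SparkContext

sc = SparkContext(appName="map-pipe-demo")
rdd = sc.parallelize(["john", "paul", "george", "ringo"])

# map(): run an ordinary Python function over every element
print(rdd.map(lambda s: s + " zaza").collect())
# ['john zaza', 'paul zaza', 'george zaza', 'ringo zaza']

# pipe(): stream the elements, one line at a time, through a shell command
print(rdd.pipe("sed 's/^/hello /'").collect())
# ['hello john', 'hello paul', 'hello george', 'hello ringo']

sc.stop()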

Reposted from: https://my.oschina.net/u/3575262/blog/1591431
