#################test.py######################
#cat /Users/mparsian/spark-1.6.1-bin-hadoop2.6/test.py
#!/usr/bin/python
import sys
# read each element from stdin (one per line) and prepend a greeting;
# note: line keeps its trailing newline and print adds its own, which is
# what produces the empty entries in the pipe() output further below
for line in sys.stdin:
    print "hello " + line
################test2.py######################
#cat /Users/mparsian/spark-1.6.1-bin-hadoop2.6/test2.py
#!/usr/bin/python
def fun2(s):
    # append " zaza" to the given string (parameter renamed from str to s,
    # to avoid shadowing the builtin)
    return s + " zaza"
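# For "import test2" below to succeed, test2.py must be importable from the
# driver, i.e. pyspark was started from that directory or the directory was
# added to sys.path. A minimal sketch, assuming the same path used below:
#   >>> import sys
#   >>> sys.path.append("/Users/mparsian/spark-1.6.1-bin-hadoop2.6")
#   >>> import test2
#   >>> test2.fun2("john")
#   'john zaza'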
>>> data = ["john","paul","george","ringo"]
>>> data
['john', 'paul', 'george', 'ringo']
>>> rdd = sc.parallelize(data)
>>> rdd.collect()
['john', 'paul', 'george', 'ringo']
>>> test = "/Users/mparsian/spark-1.6.1-bin-hadoop2.6/test.py"
>>> test2 = "/Users/mparsian/spark-1.6.1-bin-hadoop2.6/test2.py"
>>> import test2 # imported as a module for rdd.map() below; test.py is never imported, only executed via pipe()
>>> pipeRDD = rdd.pipe(test) # note the pipe: each element is written as one line to test.py's stdin
>>> pipeRDD.collect()
[u'hello john', u'', u'hello paul', u'', u'hello george', u'', u'hello ringo', u'']
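# The u'' entries are the blank lines emitted by test.py (the element's
# trailing newline plus print's own newline). A minimal sketch to drop them
# on the Spark side, reusing the pipeRDD from above:
#   >>> cleanRDD = pipeRDD.filter(lambda x: len(x) > 0)
#   >>> cleanRDD.collect()   # -> the four greetings, without the blank entries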
>>> rdd.collect()
['john', 'paul', 'george', 'ringo']
>>> rdd2 = rdd.map(lambda x : test2.fun2(x)) # the key step: fun2() is applied to every element of the rdd
>>> rdd2.collect()
['john zaza', 'paul zaza', 'george zaza', 'ringo zaza']
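# Since fun2 already takes a single element, the lambda wrapper is optional;
# passing the function object directly is equivalent:
#   >>> rdd.map(test2.fun2).collect()
#   ['john zaza', 'paul zaza', 'george zaza', 'ringo zaza']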