Single-table join with Spark

## Hacked something together following the Hadoop way of thinking, but the result is wrong...

## Needs more thought

from pyspark import SparkContext

sc = SparkContext('local','STJoin')

rdd = sc.textFile("file:///usr/local/spark/mycode/TestPackage/ST.txt")

# Split every line into words; the (a, 1) / keys() round trip returns the
# words unchanged, so num is simply a flat list of every token in the file.
num = rdd.flatMap(lambda line : line.split(" ")).map(lambda a : (a, 1)).keys().collect()
# Drop the header tokens "child" and "parent".
num.remove('child')
num.remove('parent')
child = []
parents = []
STs = []
# The remaining tokens alternate child, parent, child, parent, ...
for i in range(0, len(num)):
    if i % 2 == 0:
        child.append(num[i])
    else:
        parents.append(num[i])
# Cross every child with every parent. This is where the result goes wrong:
# it pairs people from unrelated rows instead of joining on a shared person.
for i in child:
    for j in parents:
        STs.append(i + " " + j)

# Ship the cross product back into an RDD.
STsRDD = sc.parallelize(STs)
# Count how many times each "child parent" string appears and print the pairs
# (foreach(print) runs on the worker, which is fine in local mode).
relation = STsRDD.map(lambda a : (a, 1)).reduceByKey(lambda a, b : a + b)
relation.foreach(print)
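
The result comes out wrong because the nested loop above crosses every child in the file with every parent in the file, pairing people from completely unrelated rows. The single-table join exercise asks for grandchild-grandparent pairs, which means joining the table with itself on the shared middle person (someone who appears as a parent in one row and as a child in another). Below is a minimal sketch of that self-join in PySpark, reusing the rdd loaded above; it assumes ST.txt holds whitespace-separated "child parent" rows with a header line, which is inferred from the remove('child') / remove('parent') calls rather than stated here.

```python
# Parse each row into a (child, parent) tuple and skip the header line.
pairs = (rdd.map(lambda line: line.split())
            .filter(lambda t: len(t) == 2 and t[0] != 'child')
            .map(lambda t: (t[0], t[1])))

# Key one copy of the table by the parent column and one by the child column,
# so both RDDs are keyed by the same "middle" person.
children_of = pairs.map(lambda cp: (cp[1], cp[0]))  # (person, that person's child)
parents_of = pairs                                  # (person, that person's parent)

# Joining on the middle person yields (middle, (grandchild, grandparent)).
grand = children_of.join(parents_of).values().distinct()

for grandchild, grandparent in grand.collect():
    print(grandchild, grandparent)
```

distinct() drops duplicate pairs when a person appears in several rows, and collecting to the driver keeps the printout in one place instead of scattered across executors.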