from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel

# Run on YARN under the app name 'pyspark_test'.
string_test = 'pyspark_test'
conf = SparkConf().setAppName(string_test).setMaster('yarn')
sc = SparkContext(conf=conf)

hdfs_data = sc.textFile("hdfs://dap/basicdata/dianzhang/phoenix/tmp_dianzhang_train_type/ds=2018-04-15/type=R1_C1/000000_0")

#a = hdfs_data.randomSplit((0.7, 0.3))
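# Alternatively, randomSplit could carve a held-out set from the same
# file instead of scoring a separate day; train_rdd/test_rdd are
# illustrative names, not part of the original script:
# train_rdd, test_rdd = hdfs_data.randomSplit([0.7, 0.3], seed=42)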

# Each row is tab-separated; the last column is the label, and columns 3
# up to (but excluding) the label are the features. Cast both to float
# so LabeledPoint receives numbers rather than strings.
a = hdfs_data.map(lambda x: x.split('\t'))
b = a.map(lambda x: LabeledPoint(float(x[-1]), [float(v) for v in x[3:-1]]))
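# Optional sanity check: the first parsed record should print as
# something like LabeledPoint(1.0, [ ... ]).
# print(b.first())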

'''
Dead end (kept for reference):
c = b.collect()
model = GradientBoostedTrees.trainClassifier(sc.parallelize(c), {}, numIterations=10)
'''
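# The block above is a detour: collect() pulls every record to the
# driver only for parallelize() to ship it straight back to the
# executors. trainClassifier accepts the RDD directly.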

# Train a GBT classifier; the empty dict is categoricalFeaturesInfo
# (every feature treated as continuous), and numIterations sets the
# number of boosted trees.
model = GradientBoostedTrees.trainClassifier(b, {}, numIterations=10)
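# Optionally inspect the trained ensemble:
# print(model.numTrees())        # 10, one tree per boosting iteration
# print(model.toDebugString())   # full dump of every tree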

# Persist the model to HDFS, then reload it to confirm it round-trips.
# Note: save() fails if the target path already exists, so remove the
# path before re-running.
model.save(sc, 'hdfs://dap/basicdata/dianzhang/phoenix/tmp_model')
model = GradientBoostedTreesModel.load(sc, 'hdfs://dap/basicdata/dianzhang/phoenix/tmp_model')

# Score the previous day's data as a held-out test set, parsing
# features and labels the same way as in training.
hdfs_data = sc.textFile("hdfs://dap/basicdata/dianzhang/phoenix/tmp_dianzhang_train_type/ds=2018-04-14/type=R1_C1/000000_0")
a = hdfs_data.map(lambda x: [float(v) for v in x.split('\t')[3:-1]])  # features
b = hdfs_data.map(lambda x: x.split('\t')[-1])                        # labels

# predict() on an RDD returns an RDD of predictions; zip pairs each true
# label with its prediction (both RDDs derive from the same source, so
# the pairing is element-for-element).
c = model.predict(a)
d = b.zip(c)

# Count the rows where the prediction matches the label.
e = d.filter(lambda x: int(x[0]) - x[1] == 0)
e.count()
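
# Illustrative follow-up (not in the original script): turn the raw
# count of correct predictions into an accuracy ratio.
accuracy = e.count() / float(d.count())
print('accuracy = %.4f' % accuracy)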