KMeans Clustering Algorithm

from pyspark.ml.clustering import KMeans
from pyspark.sql import SparkSession, Row
from pyspark.ml.linalg import Vectors

# Create the SparkSession and reuse its SparkContext for the RDD-based file read below
spark = SparkSession.builder.master('local').appName('KMeansClustering').getOrCreate()
sc = spark.sparkContext

# Wrap the first four fields of each parsed line into a dense feature vector
def f(x):
    rel = {}
    rel['features'] = Vectors.dense(float(x[0]), float(x[1]), float(x[2]), float(x[3]))
    return rel

# Read iris.txt, split each comma-separated line, and build a DataFrame with a 'features' column
df = sc.textFile("file:///usr/local/spark/mycode/exercise/iris.txt") \
    .map(lambda line: line.split(",")) \
    .map(lambda p: Row(**f(p))) \
    .toDF()
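For reference, the same DataFrame could also be built without the RDD detour. This is a minimal sketch, assuming iris.txt holds four numeric feature columns followed by a label column; the column names below are made up for illustration:

from pyspark.ml.feature import VectorAssembler

# Assumed column names; adjust to the actual layout of iris.txt
iris_raw = spark.read.csv("file:///usr/local/spark/mycode/exercise/iris.txt", inferSchema=True) \
    .toDF("sepal_length", "sepal_width", "petal_length", "petal_width", "label")
# Assemble the four numeric columns into a single 'features' vector column
assembler = VectorAssembler(
    inputCols=["sepal_length", "sepal_width", "petal_length", "petal_width"],
    outputCol="features")
df_alt = assembler.transform(iris_raw)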

# Fit a KMeans model on the feature column; k defaults to 2 unless set explicitly (e.g. .setK(3) for the three iris species)
kmeansmodel = KMeans().setFeaturesCol('features').setPredictionCol('prediction').fit(df)

# Assign each sample to a cluster and collect the results to the driver
results = kmeansmodel.transform(df).collect()
# for item in results:
#     print(str(item[0]) + ' is predicted as cluster ' + str(item[1]))

# Retrieve the coordinates of each cluster center
results2 = kmeansmodel.clusterCenters()
# for item in results2:
#     print(item)

# Within-set sum of squared errors (WSSSE) of the model on the training data
wssse = kmeansmodel.computeCost(df)
print(wssse)
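A side note: computeCost was deprecated in Spark 2.4 and removed in 3.0. A minimal sketch of the replacement, using ClusteringEvaluator (available since Spark 2.3) to score the clustering by silhouette, could look like this:

from pyspark.ml.evaluation import ClusteringEvaluator

# Silhouette score (the default metric) over the predicted clusters; values closer to 1 are better
evaluator = ClusteringEvaluator(featuresCol='features', predictionCol='prediction')
silhouette = evaluator.evaluate(kmeansmodel.transform(df))
print('Silhouette with squared euclidean distance: ' + str(silhouette))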