2025/1/29

import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.sql.SparkSession

object KMeansExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("KMeansExample")
      .master("local[*]")
      .getOrCreate()

    // Load the data in LibSVM format
    val data = spark.read.format("libsvm").load("data/sample_kmeans_data.txt")
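    // Each line of a LibSVM file has the form "label index1:value1 index2:value2 ...";
    // the reader produces a DataFrame with "label" and "features" columns, and
    // KMeans ignores the label, clustering on the "features" vector only.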

    // Split the data into training and test sets
    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3), seed = 1234L)

    // Create the KMeans estimator with two clusters and a fixed seed
    val kmeans = new KMeans()
      .setK(2)
      .setSeed(1L)
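    // Other commonly tuned parameters include setMaxIter and
    // setDistanceMeasure ("euclidean" or "cosine"); the defaults are used here.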

    // Train the model
    val model = kmeans.fit(trainingData)
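    // The learned centroids can be inspected if needed:
    // model.clusterCenters.foreach(println)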

    // Assign each test point to a cluster
    val predictions = model.transform(testData)
    predictions.select("features", "prediction").show(10)

    // Evaluate clustering quality with the silhouette score
    val evaluator = new ClusteringEvaluator()
    val silhouette = evaluator.evaluate(predictions)
    println(s"Silhouette with squared euclidean distance = $silhouette")

    spark.stop()
  }
}
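To compile and run the example, the project needs the Spark SQL and MLlib artifacts on the classpath. Below is a minimal build.sbt sketch; the Scala and Spark versions are assumptions and should be matched to your own installation.

// build.sbt (a minimal sketch; versions are assumed, adjust as needed)
name := "kmeans-example"
scalaVersion := "2.12.18"
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql"   % "3.5.0",
  "org.apache.spark" %% "spark-mllib" % "3.5.0"
)

With these dependencies in compile scope, sbt run starts the program locally, since .master("local[*]") keeps the whole job on the local machine.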
