Spark--transform operator--reduceByKey

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by liupeng on 2017/6/16.
  */
object T_reduceByKey {
  System.setProperty("hadoop.home.dir","F:\\hadoop-2.6.5")

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("reduceByKey_test").setMaster("local")
    val sc = new SparkContext(conf)

    val list1 = List(("liupeng", 120), ("liupeng", 120), ("liusi", 120))
    val rdd = sc.parallelize(list1)
    // reduceByKey groups values by key and reduces them with the given function;
    // unlike reduce (which is an action), it is still a transformation (T) operator
    val sum = rdd.reduceByKey((x, y) => x + y)
    sum.foreach(println)
  }
}
Output:
(liusi,120)
(liupeng,240)
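
For a more typical use, here is a minimal word-count sketch (the object name T_reduceByKey_wordCount and the sample data are illustrative, not from the original post). Because reduceByKey combines values on the map side before the shuffle, it usually transfers less data than an equivalent groupByKey followed by a sum.

import org.apache.spark.{SparkConf, SparkContext}

object T_reduceByKey_wordCount {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("reduceByKey_wordcount").setMaster("local")
    val sc = new SparkContext(conf)

    val lines = sc.parallelize(List("spark spark hadoop", "spark hive"))
    // split each line into words, map each word to (word, 1),
    // then sum the counts per key with reduceByKey
    val wordCounts = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    wordCounts.foreach(println)
    // expected output (order may vary):
    // (spark,3)
    // (hadoop,1)
    // (hive,1)

    sc.stop()
  }
}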