Spark transform operator: sortByKey

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by liupeng on 2017/6/16.
  */
object T_sortByKey {
  // Point at a local Hadoop installation (needed for winutils on Windows).
  System.setProperty("hadoop.home.dir", "F:\\hadoop-2.6.5")

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("sortByKey_test").setMaster("local")
    val sc = new SparkContext(conf)

    val list1 = List(("liupeng", 120), ("liupeng", 120), ("liusi", 120), ("mayun", 100))
    val rdd = sc.parallelize(list1)

    // sortByKey sorts the pair RDD by key; the default order is ascending.
    rdd.sortByKey().foreach(println)

    // Pass ascending = false to sort in descending order.
    rdd.sortByKey(ascending = false).foreach(println)

    sc.stop()
  }
}
Output:
(liupeng,120)
(liupeng,120)
(liusi,120)
(mayun,100)

(mayun,100)
(liusi,120)
(liupeng,120)
(liupeng,120)
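
Two related options worth knowing beyond the default key order. sortByKey resolves its comparison through an implicit Ordering on the key type, so a custom Ordering in scope changes the sort; and the more general sortBy sorts by any derived value, such as the Int value of each pair. Below is a minimal sketch of both (the object name T_sortByKey_ordering and the Ordering chosen here are my own for illustration, not from the original post):

import org.apache.spark.{SparkConf, SparkContext}

object T_sortByKey_ordering {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("sortByKey_ordering_test").setMaster("local")
    val sc = new SparkContext(conf)

    val rdd = sc.parallelize(List(("liupeng", 120), ("liusi", 120), ("mayun", 100)))

    // sortByKey compares keys through an implicit Ordering[K]. A custom
    // Ordering defined locally takes precedence over the default String
    // ordering, so here keys sort by length first, then alphabetically.
    implicit val byLength: Ordering[String] = Ordering.by(k => (k.length, k))
    rdd.sortByKey().foreach(println)

    // sortBy is the more general form: it sorts by any derived value,
    // e.g. by the Int value of each pair rather than the String key.
    rdd.sortBy(_._2, ascending = false).foreach(println)

    sc.stop()
  }
}

Note that sortByKey (like sortBy) is a transformation, not an action: it returns a new RDD and only triggers the shuffle and sort when an action such as foreach or collect runs.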