Spark: taking the first rows with sort, then limit

To take the first N rows of a DataFrame in a given order, sort on the relevant column (ascending or descending) and then call limit(N).

scala> val df = sc.parallelize(Seq(
     |   (0,"cat26",30.9), 
     |   (1,"cat67",28.5), 
     |   (2,"cat56",39.6),
     |   (3,"cat8",35.6))).toDF("Hour", "Category", "Value")
df: org.apache.spark.sql.DataFrame = [Hour: int, Category: string ... 1 more field]

scala> df.show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
|   0|   cat26| 30.9|
|   1|   cat67| 28.5|
|   2|   cat56| 39.6|
|   3|    cat8| 35.6|
+----+--------+-----+
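
Outside spark-shell (where sc, spark, and the implicits are already loaded), the same DataFrame can be built in a standalone application. A minimal sketch, assuming Spark 2.x+ and a hypothetical TopRows object:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object TopRows {
  def main(args: Array[String]): Unit = {
    // spark-shell provides `spark` automatically; a standalone app must build it
    val spark = SparkSession.builder()
      .appName("TopRows")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._ // enables .toDF on local Seqs

    val df = Seq(
      (0, "cat26", 30.9),
      (1, "cat67", 28.5),
      (2, "cat56", 39.6),
      (3, "cat8", 35.6)
    ).toDF("Hour", "Category", "Value")

    // sort ascending by Hour, then keep only the first row
    df.sort(col("Hour").asc).limit(1).show()

    spark.stop()
  }
}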


scala> df.sort(col("Hour").asc).limit(1)
res6: org.apache.spark.sql.Dataset[org.apache.spark.sql.Row] = [Hour: int, Category: string ... 1 more field]

scala> df.sort(col("Hour").asc).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
|   0|   cat26| 30.9|
+----+--------+-----+
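
As res6 above shows, sort(...).limit(1) is still a Dataset: limit returns a new, lazily evaluated DataFrame that stays on the cluster until an action such as show runs. If you instead want the rows on the driver as a local array, take(n) (or head(n)) does that directly; a small sketch:

// lazy: nothing is computed until an action runs
val top1 = df.sort(col("Hour").asc).limit(1)

// eager: sorts, then brings the first row to the driver as Array[Row]
val rows: Array[org.apache.spark.sql.Row] = df.sort(col("Hour").asc).take(1)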


scala> df.sort(col("Hour").desc).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
|   3|    cat8| 35.6|
+----+--------+-----+
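
The same pattern works for any column: to pull the row with the largest Value instead of the largest Hour, sort descending on that column (a hypothetical variation on the example above):

scala> df.sort(col("Value").desc).limit(1).show

This would print the cat56 row, since 39.6 is the largest Value.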

// ascending is the default sort order
scala> df.sort(col("Hour")).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
|   0|   cat26| 30.9|
+----+--------+-----+
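
sort is an alias of orderBy, so the same top-1 query can also be written with orderBy, or in SQL against a temp view (the view name t below is just for illustration):

scala> df.orderBy(col("Hour")).limit(1).show

scala> df.createOrReplaceTempView("t")

scala> spark.sql("SELECT * FROM t ORDER BY Hour LIMIT 1").show

Both forms produce the same single-row output as the default-ascending example above.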