scala> val df = sc.parallelize(Seq(
| (0,"cat26",30.9),
| (1,"cat67",28.5),
| (2,"cat56",39.6),
| (3,"cat8",35.6))).toDF("Hour", "Category", "Value")
df: org.apache.spark.sql.DataFrame = [Hour: int, Category: string ... 1 more field]
scala> df.show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
| 0| cat26| 30.9|
| 1| cat67| 28.5|
| 2| cat56| 39.6|
| 3| cat8| 35.6|
+----+--------+-----+
scala> df.sort(col("Hour").asc).limit(1)
res6: org.apache.spark.sql.Dataset[org.apache.spark.sql.Row] = [Hour: int, Category: string ... 1 more field]
scala> df.sort(col("Hour").asc).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
| 0| cat26| 30.9|
+----+--------+-----+
scala> df.sort(col("Hour").desc).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
| 3| cat8| 35.6|
+----+--------+-----+
// Sort order is ascending by default (col("Hour") is equivalent to col("Hour").asc)
scala> df.sort(col("Hour")).limit(1).show
+----+--------+-----+
|Hour|Category|Value|
+----+--------+-----+
| 0| cat26| 30.9|
+----+--------+-----+