04. Spark RDD

1.
We translate your query to the Spark SQL DataFrame API in the following way:

from pyspark.sql.functions import mean, desc

(df.filter(df["country"] == "france")                # only French stations
   .groupBy("station_id")                            # group by station
   .agg(mean("temperature").alias("average_temp"))   # compute the average
   .orderBy(desc("average_temp"))                    # order by average, descending
   .take(100))                                       # return the first 100 rows
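
For comparison, the same query can also be run as literal SQL after registering the DataFrame as a temporary view. A minimal sketch, assuming the view name stations (the column names are the ones used above):

# The view name "stations" is an assumption; column names are as above.
df.createOrReplaceTempView("stations")
spark.sql("""
    SELECT station_id, AVG(temperature) AS average_temp
    FROM stations
    WHERE country = 'france'
    GROUP BY station_id
    ORDER BY average_temp DESC
""").take(100)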
The same query, using the RDD API and anonymous functions:

(df.rdd
   .filter(lambda x: x[1] == "france")                     # only French stations
   .map(lambda x: (x[0], x[2]))                            # select (station_id, temperature)
   .mapValues(lambda x: (x, 1))                            # pair each temperature with a count of 1
   .reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))   # sum temperatures and counts per station
   .mapValues(lambda x: x[0] / x[1])                       # sum / count = average
   .sortBy(lambda x: x[1], ascending=False)                # sort by average, descending
   .take(100))                                             # return the first 100 results
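
The mapValues/reduceByKey pair can also be fused into a single aggregateByKey step, which avoids building the intermediate (value, 1) tuples explicitly. A minimal sketch, under the same column-order assumptions as above:

(df.rdd
   .filter(lambda x: x[1] == "france")                       # only French stations
   .map(lambda x: (x[0], x[2]))                              # (station_id, temperature)
   .aggregateByKey((0.0, 0),                                 # zero value: (sum, count)
                   lambda acc, t: (acc[0] + t, acc[1] + 1),  # fold one temperature into an accumulator
                   lambda a, b: (a[0] + b[0], a[1] + b[1]))  # merge partial (sum, count) pairs
   .mapValues(lambda x: x[0] / x[1])                         # sum / count = average
   .sortBy(lambda x: x[1], ascending=False)                  # sort by average, descending
   .take(100))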

2.
# -*- coding: utf-8 -*-
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql import Row

if __name__ == "__main__":
    # Initialize the SparkSession
    spark = SparkSession \
        .builder \
        .appName("RDD_and_DataFrame") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()

    sc = spark.sparkContext

    # Read the text file and parse each line into a Row
    lines = sc.textFile("employee.txt")
    parts = lines.map(lambda l: l.split(","))
    employee = parts.map(lambda p: Row(name=p[0], salary=int(p[1])))

    # Convert the RDD to a DataFrame
    employee_temp = spark.createDataFrame(employee)

    # Show the DataFrame contents
    employee_temp.show()

    # Register a temporary view
    employee_temp.createOrReplaceTempView("employee")
    # Filter the data with SQL
    employee_result = spark.sql(
        "SELECT name, salary FROM employee WHERE salary >= 14000 AND salary <= 20000")

    # Convert the DataFrame back to an RDD
    result = employee_result.rdd \
        .map(lambda p: "name: " + p.name + "  salary: " + str(p.salary)) \
        .collect()

    # Print the RDD contents
    for n in result:
        print(n)
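
An alternative to letting Spark infer the schema from Row objects is to declare it explicitly with StructType. A minimal sketch that reuses the parts RDD and the spark session from the script above:

from pyspark.sql.types import StructType, StructField, StringType, IntegerType

# Build the same DataFrame with an explicit schema instead of Row inference
schema = StructType([
    StructField("name", StringType(), True),
    StructField("salary", IntegerType(), True),
])
employee_df = spark.createDataFrame(parts.map(lambda p: (p[0], int(p[1]))), schema)
employee_df.printSchema()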
