Computing Averages with Spark

1. Background and Requirement

A text file, salary.txt, stores the salary details of every employee in each department of a company. The file has four space-separated columns, formatted as follows:
deptId name id salary
1001 张三 01 2000
1002 李四 02 2500
1003 张三 05 3000
1002 王五 01 2600
Write a program that computes the average salary of each department and prints the results in descending order.
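For the sample data above, the expected result (worked out by hand; department 1002 averages (2500 + 2600) / 2 = 2550) is:
(1003,3000.0)
(1002,2550.0)
(1001,2000.0)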

2. Spark Implementation

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Encoders, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
case class Salary(deptId: String, name: String, id: String, salary: Double)
object DeptAvgSalaryApp {
  val path = "./spark/src/main/resources/salary.txt"

  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("DeptAvgSalaryApp")
    val spark: SparkContext = new SparkContext(sparkConf)
    val inputRDD: RDD[String] = spark.textFile(path)
    val dataRDD = inputRDD
      .filter(line => !line.contains("deptId")) // drop the header line
      .map(line => {
        // println(s"------------:${line}")
        val arrs = line.split(" ")
        Tuple2(arrs(0), arrs(3).toDouble) // (deptId, salary)
      })
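    // For the sample salary.txt, dataRDD now holds:
    // (1001,2000.0), (1002,2500.0), (1003,3000.0), (1002,2600.0)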
    // Method 1: groupByKey + mapValues
    useMapValue(dataRDD)

    // Method 2: reduceByKey
    useReduceByKey(dataRDD)

    // Method 3: combineByKey
    useCombineByKey(dataRDD)

    // Method 4: Spark SQL
    useSparkSQL()
    spark.stop()
  }

  def useMapValue(dataRDD: RDD[(String, Double)]) = {
    /**
     * Group the records by key:
     * (1001,CompactBuffer(2000.0))
     * (1003,CompactBuffer(3000.0))
     * (1002,CompactBuffer(2500.0, 2600.0))
     * Then process the value side of each tuple
     * -- compute the average
     */
    dataRDD
      .groupByKey()
      .mapValues(iter => {
        var cnt: Int = 0
        var sum: Double = 0
        val it = iter.iterator
        while (it.hasNext) {
          cnt += 1
          sum += it.next()
        }
        (sum / cnt)
      }).sortBy(_._2, false)
      .collect()
      .foreach(println(_))
  }
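  // Sketch (not in the original post): the manual iterator loop above can be
  // replaced by the collection helpers on the grouped Iterable, e.g.
  //   dataRDD.groupByKey().mapValues(iter => iter.sum / iter.size)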

  def useReduceByKey(dataRDD: RDD[(String, Double)]) = {
    /**
     * Reshape each record first:
     * (1001,2000) ===> (1001,(2000,1))
     * (1002,2500) ===> (1002,(2500,1))
     * (1002,2600) ===> (1002,(2600,1))
     * Then reduceByKey on the key produces:
     * (1001,(2000.0,1))
     * (1003,(3000.0,1))
     * (1002,(5100.0,2))
     * Finally divide the sum by the count to get the averages:
     * (1001,2000.0)
     * (1003,3000.0)
     * (1002,2550.0)
     */
    dataRDD.map(a => (a._1, (a._2, 1)))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .map(t => (t._1, t._2._1 / t._2._2))
      .sortBy(_._2, false)
      .collect()
      .foreach(println(_))
  }

  def useCombineByKey(dataRDD: RDD[(String, Double)]) = {
    /**
     * combineByKey(createCombiner, mergeValue, mergeCombiners) parameters:
     * 1. createCombiner: turns the first value V seen for a key into a combiner of type C;
     *    input is the V of RDD[(K, V)], output is C
     *    -- here the salary of a key (deptId) becomes (1, salary)
     * 2. mergeValue: merges a C with another V of the same key within one partition;
     *    input is (C, V), output is C
     *    -- (1, salary1) merged with salary2 becomes (1 + 1, salary1 + salary2)
     * 3. mergeCombiners: merges two C values of the same key from different partitions;
     *    input is (C, C), output is C
     *    -- (n, salary1) and (m, salary2) become (n + m, salary1 + salary2)
     * Optional parameters of the other overloads:
     * 4. numPartitions: number of partitions of the result RDD; by default the parent's is kept
     * 5. partitioner: partitioning function, HashPartitioner by default
     * 6. mapSideCombine: whether to combine on the map side (like the combiner in MapReduce),
     *    true by default
     */
    val createCombiner = (salary: Double) => {
      Tuple2(1, salary)
    }
    val mergeValue = (c: (Int, Double), salary: Double) => {
      Tuple2(c._1 + 1, c._2 + salary)
    }
    val mergeCombiner = (t1: Tuple2[Int, Double], t2: Tuple2[Int, Double]) => {
      Tuple2(t1._1 + t2._1, t1._2 + t2._2)
    }
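    // Sketch (assumption, not from the original post): combineByKey also has an
    // overload that takes the explicit partitioner and mapSideCombine flag
    // described in points 5-6 above, e.g.
    //   dataRDD.combineByKey(createCombiner, mergeValue, mergeCombiner,
    //     new org.apache.spark.HashPartitioner(2), mapSideCombine = true)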

    dataRDD.combineByKey(createCombiner, mergeValue, mergeCombiner)
      .collect()
      .map { case (id, (cnt, sum)) => (id, sum / cnt) }
      .sortBy(-_._2) // descending, consistent with the other methods
      .foreach(println(_))
  }

  def useSparkSQL() = {
    import org.apache.spark.sql.functions._
    val spark = SparkSession.builder()
      .appName("DeptAvgSalaryApp")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    import spark.implicits._

    // Read the source file, drop the header line, and build a Dataset[Salary]
    val dataFrame = spark
      .read
      .text(path)
      .filter(row => {
        val str = row.getString(0)
        !str.contains("deptId")
      })
      /*.foreach(row => {
        println("--: " + row.getString(0)) // --: 1001 张三 01 2000
        println(row.toString())            // [1001 张三 01 2000]
      })*/
      .map(row => {
        val line = row.getString(0)
        val arrs = line.split(" ")
        Salary(arrs(0), arrs(1), arrs(2), arrs(3).toDouble)
      })
    dataFrame.show()

    // Register the Dataset as a temp view and aggregate with SQL
    dataFrame.createOrReplaceTempView("dept")
    val frame = spark.sql("select deptId, avg(salary) from dept group by deptId order by avg(salary) desc")
    frame.show()
    spark.stop()
  }
}
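As a side note, the same aggregation could also be written with the DataFrame API instead of a SQL string, using the avg and desc helpers from the org.apache.spark.sql.functions import already present in useSparkSQL. A minimal sketch, not part of the original code, assuming the same dataFrame Dataset[Salary] built above:

    dataFrame
      .groupBy("deptId")
      .agg(avg("salary").as("avg_salary"))
      .orderBy(desc("avg_salary"))
      .show()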

 
