package com.shujia.spark.streaming
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}
object Demo4SSCToMysql {
def main(args: Array[String]): Unit = {
val spark: SparkSession = SparkSession.builder()
.master("local[2]")
.appName("mysql")
.getOrCreate()
import spark.implicits._
/**
 * Create the streaming context and set the batch interval,
 * i.e. how often a micro-batch is computed.
 */
val ssc = new StreamingContext(spark.sparkContext, Durations.seconds(5))
// Set the checkpoint directory (required for stateful operators such as updateStateByKey)
ssc.checkpoint("data/checkpoint")
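// A hedged side note: if the job should also recover its state after a driver restart,
// the context is usually built through StreamingContext.getOrCreate with the same
// checkpoint directory; createContext here is a hypothetical helper that would wrap
// the setup code in this method:
// val recoveredSsc = StreamingContext.getOrCreate("data/checkpoint", () => createContext())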
/**
 * ReceiverInputDStream: passively receives data and stores it in memory or on disk.
 * The receiver permanently occupies one core while it runs, which is why the master
 * is local[2] (at least two threads: one for the receiver, one for processing).
 *
 * Start a test source on the host first (the port must match the one used below):
 * yum install nc
 * nc -lk 8888
 */
// Read lines from the socket source
val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)
// Split each line on commas and map every word to (word, 1)
val wordsDS: DStream[String] = linesDS.flatMap(_.split(","))
val kvDS: DStream[(String, Int)] = wordsDS.map((_, 1))
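// A hedged side note: reduceByKey alone would only give per-batch counts; the
// stateful updateStateByKey below keeps a running total across batches instead.
// val batchCountDS: DStream[(String, Int)] = kvDS.reduceByKey(_ + _)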
/**
 * State update function for updateStateByKey:
 * seq    - the counts of one word in the current batch
 * option - the accumulated count of that word from previous batches, if any
 */
def updateFun(seq: Seq[Int], option: Option[Int]): Option[Int] = {
// Count of the word in the current batch
val currCount: Int = seq.sum
// Accumulated count from previous batches
val lastCount: Int = option.getOrElse(0)
// Return the new running total
Some(currCount + lastCount)
}
val countDS: DStream[(String, Int)] = kvDS.updateStateByKey(updateFun)
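// A hedged alternative sketch: mapWithState only visits keys that appear in the
// current batch, so it usually scales better than updateStateByKey for large key
// spaces. Requires: import org.apache.spark.streaming.{State, StateSpec}
// val stateSpec = StateSpec.function((word: String, one: Option[Int], state: State[Int]) => {
//   val newCount: Int = state.getOption().getOrElse(0) + one.getOrElse(0)
//   state.update(newCount)
//   (word, newCount)
// })
// val stateCountDS: DStream[(String, Int)] = kvDS.mapWithState(stateSpec)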
// Alternatively, the results could be saved as text files (e.g. on HDFS):
//countDS.saveAsTextFiles("data/sparkstream", "txt")
countDS.foreachRDD((rdd: RDD[(String, Int)]) => {
// Convert the batch RDD to a DataFrame with columns (word, c)
val countDF: DataFrame = rdd.toDF("word", "c")
// Write the running counts to MySQL; SaveMode.Overwrite replaces the table on
// every batch, which is fine here because the counts are already cumulative
countDF
.write
.format("jdbc")
.mode(SaveMode.Overwrite)
.option("url", "jdbc:mysql://master:3306?useUnicode=true&characterEncoding=utf-8")
.option("dbtable", "student.wordcount")
.option("user", "root")
.option("password", "123456")
.save()
})
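// A hedged alternative sketch: write through plain JDBC with one connection per
// partition instead of the DataFrame writer. Assumes the table student.wordcount
// has a unique key on word so that REPLACE INTO can upsert the running counts.
// countDS.foreachRDD((rdd: RDD[(String, Int)]) => {
//   rdd.foreachPartition((iter: Iterator[(String, Int)]) => {
//     val conn = java.sql.DriverManager.getConnection(
//       "jdbc:mysql://master:3306/student?useUnicode=true&characterEncoding=utf-8",
//       "root", "123456")
//     val stmt = conn.prepareStatement("REPLACE INTO wordcount(word, c) VALUES (?, ?)")
//     iter.foreach { case (word, count) =>
//       stmt.setString(1, word)
//       stmt.setInt(2, count)
//       stmt.executeUpdate()
//     }
//     stmt.close()
//     conn.close()
//   })
// })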
// Start the streaming job and block until it is terminated
ssc.start()
ssc.awaitTermination()
ssc.stop()
}
}