package cn.spark.study.dataFramecore

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext

// Note: the case class must be defined at the top level. If it is defined
// inside the body of `object ... extends App`, Scala reflection cannot
// produce a TypeTag for it and toDF() fails at runtime.
case class Student(id: Int, name: String, age: Int)

object RDD2DataFrameReflection extends App {
  val conf = new SparkConf().setAppName("RDD2DataFrameReflection").setMaster("local")
  val sc = new SparkContext(conf)
  val sqlContext = new SQLContext(sc)
  // To convert an RDD to a DataFrame via reflection in Scala,
  // the implicit conversions must be imported manually.
  import sqlContext.implicits._

  // This is just an ordinary RDD whose elements are instances of a case class.
  // Calling toDF() on it directly converts it into a DataFrame.
  // Read the file, split each line into an array, map each array into a
  // Student object, and finally call toDF() to produce the DataFrame.
  val studentDF = sc.textFile("D:/students.txt", 1)
    .map(line => line.split(","))
    .map(arr => Student(arr(0).trim.toInt, arr(1).trim, arr(2).trim.toInt))
    .toDF()
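
  // Optional sanity check (a sketch): printSchema() and show() are standard
  // DataFrame methods for inspecting the schema inferred via reflection
  // and the first few rows of data.
  studentDF.printSchema()
  studentDF.show()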

  // (registerTempTable was deprecated in Spark 2.0 in favor of
  // createOrReplaceTempView; it is kept here to match the SQLContext API.)
  studentDF.registerTempTable("student")
  val teenageDF = sqlContext.sql("select * from student where age <= 18")
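
  // Equivalently (a sketch), the same filter can be expressed through the
  // DataFrame DSL instead of SQL; `teenageDF2` is a hypothetical name.
  val teenageDF2 = studentDF.filter(studentDF("age") <= 18)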

  // Convert the DataFrame back to an RDD of Rows.
  val teenagerRDD = teenageDF.rdd
  // Rebuild Student objects from each Row and print them.
  teenagerRDD
    .map(row => Student(row(0).toString.toInt, row(1).toString, row(2).toString.toInt))
    .collect()
    .foreach(student => println(student.id + " " + student.name + " " + student.age))
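
  // Alternatively (a sketch): Row provides typed accessors, which avoids the
  // toString round-trips above; getAs[T](fieldName) is part of the Row API.
  teenagerRDD
    .map(row => Student(row.getAs[Int]("id"), row.getAs[String]("name"), row.getAs[Int]("age")))
    .collect()
    .foreach(student => println(student.id + " " + student.name + " " + student.age))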

  // row.getValuesMap() returns a Map of the requested columns, which can be
  // used to select specific fields and recombine them into a new RDD.
  val studentRDD = teenagerRDD.map { row =>
    val map = row.getValuesMap[Any](Array("id", "name", "age"))
    Student(map("id").toString.toInt, map("name").toString, map("age").toString.toInt)
  }
  studentRDD.collect().foreach(stu => println(stu.id + ":" + stu.name + ":" + stu.age))
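
  // The reconstructed RDD of case-class instances can be turned back into a
  // DataFrame the same way (a sketch of the round trip), since the implicit
  // conversions are still in scope.
  studentRDD.toDF().show()

  // Release the SparkContext's resources when done.
  sc.stop()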
}