Querying data in a remote Hive 3 from local Spark on Windows
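
Before the code below will compile and run, the project needs both the spark-sql and spark-hive modules on the classpath; enableHiveSupport() fails at startup if the Hive classes are missing. A minimal build.sbt sketch — the version numbers here are assumptions, align them with your cluster (Hive 3 is commonly paired with Spark 3.x):

// build.sbt -- a minimal sketch; versions are placeholders, match your cluster
scalaVersion := "2.12.18"

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql"  % "3.3.2",
  "org.apache.spark" %% "spark-hive" % "3.3.2" // required by enableHiveSupport()
)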

package main.scala.work

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession} // DataFrame is used by the JDBC variant below

object connectHive {
  def main(args: Array[String]): Unit = {

    val confs: SparkConf = new SparkConf().setMaster("local[*]").setAppName("jobs") // local mode, all available cores

    val spark = SparkSession.builder()
      .appName("Hive Integration")
      .config(confs)
      .config("spark.sql.warehouse.dir", "hdfs://ip:9000/opt/hiveDataFile") // 配置master映射远程ip,window  hosts文件里面配置
      .config("hive.metastore.uris", "thrift://ip:9083") //  配置远程 hive-site.xml  hive.metastore.uris
      .enableHiveSupport()
      .getOrCreate()


//    // Alternative: read over JDBC through HiveServer2 instead of the metastore
//    val df: DataFrame = spark.read.format("jdbc")
//      .option("url", "jdbc:hive2://ip:10000/test_db")
//      .option("driver", "org.apache.hive.jdbc.HiveDriver")
//      .option("user", "hadoop")
//      .option("password", "123456")
//      .option("query", "select cast(ID as string) as idstr, name, cast(salary as string) as salary, department from test_db.employees")
//      .load()
//
//    df.printSchema()
//    df.show()


//    // Minimal alternative: for reads, only the metastore URI is required
//    val spark = SparkSession.builder()
//      .appName("SparkOnHive")
//      .config("hive.metastore.uris", "thrift://ip:9083") // point straight at the metastore
//      .enableHiveSupport() // enable Hive support
//      .getOrCreate()


    // Query example
    //spark.sql("USE test_db") // optional: set the current database instead of qualifying table names
    val df = spark.sql("SELECT * FROM test_db.room2")
    df.printSchema()
    df.show()

//    // Insert examples (two approaches)
//    // Approach 1: write the DataFrame directly to a Hive table
//    df.write.mode("overwrite").saveAsTable("high_value_sales")
//
//    // Approach 2: insert through a temporary view
//    df.createOrReplaceTempView("temp_sales")
//    spark.sql("INSERT INTO sales_archive SELECT * FROM temp_sales")

    spark.stop() // release the session when done
  }
}
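
If the metastore wiring is correct, a quick way to confirm it before running real queries is to ask the remote metastore what it exposes. A short sketch, assuming the same test_db/room2 names used above:

    // Sanity-check the metastore connection:
    spark.sql("SHOW DATABASES").show()         // test_db should appear
    spark.sql("SHOW TABLES IN test_db").show() // room2 should appear
    spark.sql("DESCRIBE FORMATTED test_db.room2").show(truncate = false) // schema plus HDFS location

One more Windows-specific note: local Spark generally also needs HADOOP_HOME pointing at a directory containing winutils.exe, otherwise startup complains about the missing winutils binary.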

  
