Spark学习笔记——使用CatalystSqlParser解析Spark SQL
Spark的parser使用的是antlr来实现,其g4文件如下
https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
如果想解析spark SQL的语句,可以使用其原生的parser来进行解析,代码如下
package com.bigdata.spark
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.datasources.CreateTable
object SparkSQLParser {

  /**
   * Parses a Spark SQL statement with [[CatalystSqlParser]], classifies it
   * (CREATE / INSERT / SELECT), and for INSERT statements extracts
   * table-level lineage: the input tables and the output table.
   *
   * Note: `CatalystSqlParser.parsePlan` throws a `ParseException` for
   * statements Catalyst alone cannot parse (e.g. Hive-style CREATE TABLE).
   */
  def main(args: Array[String]): Unit = {
    // Alternative: parse through a full SparkSession (requires Hive classes
    // on the classpath for enableHiveSupport):
    // val spark = SparkSession.builder()
    //   .appName("SQL Create Table Parser")
    //   .master("local[*]")
    //   .enableHiveSupport() // enables Hive DDL syntax support
    //   .getOrCreate()
    // val logicalPlan: LogicalPlan = spark.sessionState.sqlParser.parsePlan(sql)

    val sql = "SELECT id, name FROM users WHERE age > 18" // select
    // val sql = "CREATE TABLE users (id INT, name STRING)" // create
    // val sql = "INSERT INTO users VALUES (1, 'Alice')" // insert
    // val sql = "INSERT INTO xx.table2 SELECT * FROM xx.table1" // insert

    // Parse into an unresolved logical plan (no catalog resolution happens).
    val logicalPlan: LogicalPlan = CatalystSqlParser.parsePlan(sql)
    println(logicalPlan)

    logicalPlan match {
      // CREATE TABLE statement
      case _: CreateTable =>
        println(s"SQL: [$sql] -> 这是一个 CREATE TABLE 语句")

      // INSERT statement — also extract lineage from the plan tree.
      case _: InsertIntoTable =>
        println(s"SQL: [$sql] -> 这是一个 INSERT 语句")

        // Input tables: every source relation reachable through the plan's
        // children. The insert target is NOT collected here because
        // InsertIntoTable exposes only its query as a child node.
        val inputTables: Set[String] = logicalPlan.collect {
          case UnresolvedRelation(tableIdentifier) => tableIdentifier.quotedString
        }.toSet

        // Output table: the target relation of the InsertIntoTable node;
        // fall back to the raw plan string for non-relation targets.
        val outputTable: Option[String] = logicalPlan.collectFirst {
          case InsertIntoTable(UnresolvedRelation(tableIdentifier), _, _, _, _) =>
            tableIdentifier.quotedString
          case InsertIntoTable(table, _, _, _, _) =>
            table.toString
        }

        println(s"输入表: ${inputTables.mkString(", ")}")
        println(s"输出表: ${outputTable.getOrElse("无")}")

      // SELECT query — match the common top-level query operators only.
      // (The original also listed `_: LogicalPlan` here, which matched every
      // plan and made the `case _` branch below unreachable.)
      case _: Project | _: Filter | _: Aggregate | _: Join =>
        println(s"SQL: [$sql] -> 这是一个 SELECT 查询语句")

      case _ =>
        println(s"SQL: [$sql] -> 未知类型")
    }
  }
}
1.解析insert语句
'InsertIntoTable 'UnresolvedRelation `xx`.`table2`, false, false
+- 'Project [*]
   +- 'UnresolvedRelation `xx`.`table1`
SQL: [INSERT INTO xx.table2 SELECT * FROM xx.table1] -> 这是一个 INSERT 语句
输入表: `xx`.`table1`
输出表: `xx`.`table2`
2.解析select语句
'Project ['id, 'name]
+- 'Filter ('age > 18)
+- 'UnresolvedRelation `users`
SQL: [SELECT id, name FROM users WHERE age > 18] -> 这是一个 SELECT 查询语句
3.解析create语句
由于 Spark 的 CatalystSqlParser 默认不支持解析 CREATE TABLE 这类 Hive 风格的 DDL 语句,需要通过 enableHiveSupport 依赖 Hive 的解析能力
使用sparksession解析会报
Exception in thread "main" java.lang.IllegalArgumentException: Unable to instantiate SparkSession with Hive support because Hive classes are not found. at org.apache.spark.sql.SparkSession$Builder.enableHiveSupport(SparkSession.scala:869) at com.bigdata.spark.SparkSQLParser$.main(SparkSQLParser.scala:17) at com.bigdata.spark.SparkSQLParser.main(SparkSQLParser.scala)
使用CatalystSqlParser解析会报
Exception in thread "main" org.apache.spark.sql.catalyst.parser.ParseException: Unsupported SQL statement == SQL == CREATE TABLE users (id INT, name STRING) at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:74) at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:69) at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:100) at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:69) at com.bigdata.spark.SparkSQLParser$.main(SparkSQLParser.scala:26) at com.bigdata.spark.SparkSQLParser.main(SparkSQLParser.scala)
可以使用hive parser来解析建表语句,参考:antlr解析hive语句
本文只发表于博客园和tonglin0325的博客,作者:tonglin0325,转载请注明原文链接:https://www.cnblogs.com/tonglin0325/p/4581960.html

浙公网安备 33010602011771号