
Below is the code with detailed comments; sensitive information (such as database passwords) has been removed:
# coding:utf-8

# Spark imports
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import count
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.types import StringType, StructField, IntegerType, StructType, FloatType
from pyspark.sql.functions import col, sum, when, desc, asc

if __name__ == "__main__":
    # Create the SparkSession
    spark = SparkSession.builder.appName("sparkSql").master("local[*]"). \
        config("spark.sql.shuffle.partitions", 2). \
        config("spark.sql.warehouse.dir", "hdfs://192.168.116.131:8020/user/hive/warehouse"). \
        config("hive.metastore.uris", "thrift://192.168.116.131:9083").enableHiveSupport().getOrCreate()

    sc = spark.sparkContext

    # Read the Hive table
    commoditydata = spark.read.table("commoditydata")
    # commoditydata.show()

    # Requirement 1: count listings by address
    # Group on the address field
    # result1 = commoditydata.groupBy("address").count()

    # Convert the result to a pandas DataFrame
    # df = result1.toPandas()

    # Write to MySQL (sensitive information removed)
    # result1.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "addresssort") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    # result1.write.mode("overwrite").saveAsTable("addresssort", "parquet")
    # spark.sql("select * from addresssort").show()

    # Requirement 2: sales share of each type
    # Group on the type field
    # result2 = commoditydata.groupBy("type").sum("buy_len").withColumnRenamed("sum(buy_len)", "total_buy_len")

    # Write to MySQL (sensitive information removed)
    # result2.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "typesort") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    # result2.write.mode("overwrite").saveAsTable("typesort", "parquet")
    # spark.sql("select * from typesort").show()

    # Requirement 3: count the stores in each province with daily sales of 100+
    # Group by address and name
    # result3 = commoditydata.groupBy("address", "name"). \
    #     agg({"buy_len": "sum"}).withColumnRenamed("sum(buy_len)", "total_buy_len"). \
    #     filter(col("total_buy_len") > 100). \
    #     drop_duplicates(subset=["name"]). \
    #     groupBy("address").count()
    # result3 = result3.orderBy(col("count").desc()).limit(10)
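    # result3: for each province, the number of distinct stores whose total buy_len exceeds 100; only the top 10 provinces are kept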

    # Write to MySQL (sensitive information removed)
    # result3.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "provincesort") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    # result3.write.mode("overwrite").saveAsTable("provincesort", "parquet")
    # spark.sql("select * from provincesort").show()

    # Requirement 4: per type, total buy_len and total of buy_len * price
    # result4 = commoditydata.groupBy("type").agg(
    #     F.sum("buy_len").alias("total_buy_len"),
    #     F.sum(F.col("buy_len") * F.col("price")).alias("sum_buy_len_price")
    # )
    # result4 = result4.withColumn("ration", F.col("sum_buy_len_price") / F.col("total_buy_len"))
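    # ration = sum_buy_len_price / total_buy_len, i.e. the quantity-weighted average price per type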

    # Write to MySQL (sensitive information removed)
    # result4.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "salerate") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    # result4.write.mode("overwrite").saveAsTable("salerate", "parquet")
    # spark.sql("select * from salerate").show()

    # Requirement 5: store / product sales-revenue distribution
    # resultDouble = commoditydata.withColumn("volume", when(col("buy_len") * col("price") < 100, "小于100")
    #                                         .when(col("buy_len") * col("price") < 200, "小于200")
    #                                         .when(col("buy_len") * col("price") < 500, "小于500")
    #                                         .when(col("buy_len") * col("price") < 1000, "小于1000")
    #                                         .when(col("buy_len") * col("price") < 5000, "小于5000")
    #                                         .when(col("buy_len") * col("price") < 10000, "小于10000")
    #                                         .otherwise("大于10000")
    #                                         )
    # result5 = resultDouble.drop_duplicates(["title"])
    # result5 = result5.groupby("volume").agg(count("title").alias("title_count"))
    #
    # result6 = resultDouble.drop_duplicates(["name"])
    # result6 = result6.groupby("volume").agg(count("name").alias("name_count"))
    #
    # result_combine = result5.join(result6, ["volume"])
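    # result_combine: per sales-revenue band, the number of distinct products (title_count) and distinct stores (name_count)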

    # Write to MySQL (sensitive information removed)
    # result_combine.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "storeproduct") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    # result_combine.write.mode("overwrite").saveAsTable("storeproduct", "parquet")
    # spark.sql("select * from storeproduct").show()

    # Requirement 6: top 10 products by sales volume
    resultList = commoditydata.orderBy("buy_len", ascending=False).limit(10)
    resultList.show()

    # Write to MySQL (sensitive information removed)
    # resultList.write.mode("overwrite") \
    #     .format("jdbc") \
    #     .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
    #     .option("dbtable", "productTop") \
    #     .option("user", "root") \
    #     .option("password", "your_password") \
    #     .option("driver", "com.mysql.cj.jdbc.Driver") \
    #     .save()

    # Write to Hive
    resultList.write.mode("overwrite").saveAsTable("productTop", "parquet")
    spark.sql("select * from productTop").show()

Changes:

  1. Removed all sensitive information, such as database passwords.
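
Since the database password was removed from the JDBC options, one way to supply it at runtime is through an environment variable. Below is a minimal sketch of a reusable writer; the helper name write_to_mysql and the variable name MYSQL_PASSWORD are assumptions, not part of the original script:

import os

# Hypothetical helper: write a DataFrame to MySQL without hardcoding the password
# (MYSQL_PASSWORD is an assumed environment variable name)
def write_to_mysql(df, table_name):
    df.write.mode("overwrite") \
        .format("jdbc") \
        .option("url", "jdbc:mysql://192.168.116.131:3306/bigdata") \
        .option("dbtable", table_name) \
        .option("user", "root") \
        .option("password", os.environ.get("MYSQL_PASSWORD", "")) \
        .option("driver", "com.mysql.cj.jdbc.Driver") \
        .save()

# Usage, e.g. for Requirement 1: write_to_mysql(result1, "addresssort")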