Spark

Tuning

#!/bin/bash

/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.impl.MainCtl \
--master yarn \
--deploy-mode cluster \
--num-executors 6 \
--driver-memory 2g \
--executor-memory 5g \
--executor-cores 2 \
--conf spark.default.parallelism=24 \
--queue thequeue \
/opt/spark_jar/dataAnalys-1.0.jar



Queue resources: 24 CPU cores, 72 GB memory


As a rule of thumb, num-executors * executor-cores should not exceed roughly 1/3 to 1/2 of the queue's total CPU cores.


The Spark documentation therefore recommends setting this parameter (spark.default.parallelism) to 2~3x num-executors * executor-cores. For example, if the executors have 300 CPU cores in total, setting around 1000 tasks is reasonable and makes full use of the cluster's resources.

2~3 x (6 executors x 2 cores) = 24-36, the suggested spark.default.parallelism range for the job above
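To make the arithmetic explicit, a minimal Scala sketch using the figures from the submit script above (6 executors, 2 cores each) and the 2~3x rule:

// Rule of thumb: spark.default.parallelism ~= 2~3 x (num-executors x executor-cores)
val numExecutors  = 6
val executorCores = 2
val totalCores    = numExecutors * executorCores   // 12
val low  = 2 * totalCores                          // 24
val high = 3 * totalCores                          // 36
println(s"set spark.default.parallelism between $low and $high")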



./bin/spark-submit \
  --master spark://192.168.1.1:7077 \
  --num-executors 100 \
  --executor-memory 6G \
  --executor-cores 4 \
 --total-executor-cores 400 \  # standalone mode only; defaults to all available cores
  --driver-memory 1G \
  --conf spark.default.parallelism=1000 \
  --conf spark.storage.memoryFraction=0.5 \
  --conf spark.shuffle.memoryFraction=0.3 \


flink  run -t yarn-per-job \
-d -ynm FlinkRetention \
-Dyarn.application.name=FlinkRetention \
-c com.bigdata.etl.FlinkRetention  /data/bigdata/flink-dw/target/flink-dw.jar \
--consumer.bootstrap.servers ${consumer_bootstrap_servers} \
--producer.bootstrap.servers ${producer_bootstrap_servers} \
--retentionGroupId ${retentionGroupId} \
--flinkSeconds ${flinkSeconds} \
--redis.ip ${redis_ip} \
--redis.port ${redis_port} \
--redis.password ${redis_password}



flink  run -t yarn-per-job \   // run mode
-d -ynm FlinkRetention \   // name shown in the JobManager UI
-Dyarn.application.name=FlinkRetention \    // application name on YARN
-c com.bigdata.etl.FlinkRetention   // entry class
 /data/bigdata/flink-dw/target/flink-dw.jar \   //  your job's jar
--consumer.bootstrap.servers ${consumer_bootstrap_servers} \  // parameters passed to the job
--producer.bootstrap.servers ${producer_bootstrap_servers} \
--retentionGroupId ${retentionGroupId} \
--flinkSeconds ${flinkSeconds} \
--redis.ip ${redis_ip} \
--redis.port ${redis_port} \
--redis.password ${redis_password}

result.write.mode(SaveMode.Append).format("jdbc")
      .option(JDBCOptions.JDBC_URL,"jdbc:mysql://127.0.0.1:3306/db?rewriteBatchedStatements=true")	// enable batched statements on the MySQL driver
      .option("user","root")
      .option("password","XXX")
      .option(JDBCOptions.JDBC_TABLE_NAME,"xxx")
      .option(JDBCOptions.JDBC_TXN_ISOLATION_LEVEL,"NONE")    // no transaction isolation
      .option(JDBCOptions.JDBC_BATCH_INSERT_SIZE,10000)   // batch insert size
      .save()

import spark.implicits._
      val repEnergyDysDF3 = spark.sql(sqlStr).as[MachinePlatforms]
      println("resultBatch的分区数: " + repEnergyDysDF3.rdd.partitions.size)
      // 批量写入MySQL
      // 此处最好对处理的结果进行一次重分区
      // 由于数据量特别大,会造成每个分区数据特别多
      repEnergyDysDF3.repartition(500).foreachPartition(record => {
        val list = new ListBuffer[MachinePlatforms]
        record.foreach(mp => {
          val name = mp.name
          val output_add_m = mp.output_add_m
          val output_add_kg = mp.output_add_kg
          list.append(MachinePlatforms(name, output_add_m,output_add_kg))
        })
//        upsertDateMatch(list) // perform the batch insert (a sketch of such a method follows below)
      })
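upsertDateMatch itself is not shown in these notes; below is a minimal sketch of what such a batch writer could look like using plain JDBC with rewriteBatchedStatements. The connection URL, credentials, target table name, and the Double column types are assumptions, not the actual implementation.

import java.sql.DriverManager
import scala.collection.mutable.ListBuffer

// Hypothetical batch upsert; adjust URL, credentials, table and column names to the real schema.
def upsertDateMatch(list: ListBuffer[MachinePlatforms]): Unit = {
  if (list.isEmpty) return
  val conn = DriverManager.getConnection(
    "jdbc:mysql://127.0.0.1:3306/db?rewriteBatchedStatements=true", "root", "XXX")
  val sql =
    "INSERT INTO hs_machine_platforms (name, output_add_m, output_add_kg) VALUES (?, ?, ?) " +
      "ON DUPLICATE KEY UPDATE output_add_m = VALUES(output_add_m), output_add_kg = VALUES(output_add_kg)"
  val ps = conn.prepareStatement(sql)
  try {
    conn.setAutoCommit(false)
    list.foreach { mp =>
      ps.setString(1, mp.name)
      ps.setDouble(2, mp.output_add_m)   // field types assumed to be Double
      ps.setDouble(3, mp.output_add_kg)
      ps.addBatch()
    }
    ps.executeBatch()                    // sent as one round trip thanks to rewriteBatchedStatements
    conn.commit()
  } finally {
    ps.close()
    conn.close()
  }
}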
Do not use:

 //Use an ExecutorService to run the jobs in multiple threads
      val executorService = Executors.newFixedThreadPool(2)
      executorService.submit(new Callable[Unit]() {
        def call()  = {
          println("x1")
          repEnergyDysDF.repartition(1).write.format("jdbc").mode(SaveMode.Append)
            .options(Map("url" -> mySqlConfig.url, "dbtable" -> "hs_machine_platforms_copy", "driver" -> mySqlConfig.driver)).save()
          //      3. Delete the previous data
          DeleteTableUtils.delete("hs_machine_platforms_copy","and crt = '"+crt+"'",mySqlConfig)
        }
      })
      executorService.submit(new Callable[Unit]() {
        def call()   = {
          ZipTabUtils.sinkToMySQL2(spark, repEnergyDysDF, "hs_machine_platforms_crt", mySqlConfig)
          println("x2")
        }
      })
      executorService.shutdown()

FROM_UNIXTIME(UNIX_TIMESTAMP(),'yyyy-MM-dd HH:mm:ss') crt
'$this_hour' crt

//Use an ExecutorService to run the jobs in multiple threads
      java.util.concurrent.ExecutorService executorService = Executors.newFixedThreadPool(2);
      executorService.submit(new Callable<Void>() {
        @Override
        public Void call(){
          df.rdd().saveAsTextFile(rb.getString("hdfspath") + "/file3",com.hadoop.compression.lzo.LzopCodec.class);
          return null;
        }
      });
      executorService.submit(new Callable<Void>() {
        @Override
        public Void call(){
          df.rdd().saveAsTextFile(rb.getString("hdfspath") + "/file4",com.hadoop.compression.lzo.LzopCodec.class);
          return null;
        }
      });

      executorService.shutdown();


//Use an ExecutorService to run the jobs in multiple threads
  val dfList = Array(df,df)
  val executorService = Executors.newFixedThreadPool(2)
  for(df <- dfList) {
    executorService.submit(new Callable[Boolean]() {
      def call() : Boolean  = {
        df.show()
        true
      }
    })
  }

  executorService.shutdown()

export SPARK_SUBMIT_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=41999
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005


--conf spark.serializer=org.apache.spark.serializer.KryoSerializer
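Beyond passing the flag on spark-submit, Kryo can also be enabled and classes registered in code. A minimal sketch; registering MachinePlatforms is just an example of the kind of case class worth registering, adjust to your own types:

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

val conf = new SparkConf()
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .registerKryoClasses(Array(classOf[MachinePlatforms]))   // register classes that get shuffled or cached

val spark = SparkSession.builder().config(conf).appName("kryo-demo").master("local[3]").getOrCreate()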

Table name: hs_spin:ods_work_team
Column family: cf

'hs_spin:ods_work_team', {NAME => 'cf', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => '604800 SECONDS (7 DAYS)', COMPRESSION => 'SNAPPY', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}

data.coalesce(1).write.mode(SaveMode.Overwrite).option("header","true").option("sep","#").csv("rerr")
    data.coalesce(1).write.mode(SaveMode.Overwrite).option("header","true").option("sep","#").csv("ressssssss")
    println("存入完成!!!!!!!!!!")

hdfs dfs -ls
scan 'ns_hs_flink:word'




java -jar arthas-boot.jar -h
java -jar arthas-boot.jar

val spark = SparkSession.builder().appName(this.getClass.getSimpleName).master("local[3]").getOrCreate()

/opt/yarn_shell/yarn.log
cat /opt/yarn_shell/yarn.log | grep "<failed task ID>"
cat /opt/yarn_shell/yarn.log | grep "<failed task ID>" -A4
more /opt/yarn_shell/yarn.log | grep "<failed task ID>" -B4


cat filename | grep abc -A4
 
 
-s {query}

Then type "keyword <file name to search>" to have Everything search the whole machine (fuzzy matching is supported); you can also combine this with Everything's filter parameters for targeted searches. To search the current folder instead, change the parameter to
-s " """%path%""" {query} "
Note: searching the current directory requires the second invocation method described above.
To search only PDF files, change the parameter to
-s pdf:{query}


Listary ships with some common web searches: bd + xxx searches Baidu for xxx, gg + xxx searches Google, and so on. By analyzing a site's URL you can add your own web searches. For example, Google Translate's English-to-Chinese URL is https://translate.google.cn/#view=home&op=translate&sl=auto&tl=zh-CN&text= , and appending xxx to it translates the English text xxx into Chinese. In Listary this can be configured as follows:

English to Chinese
https://translate.google.cn/#view=home&op=translate&sl=auto&tl=zh-CN&text={query}

Chinese to English
https://translate.google.cn/#view=home&op=translate&sl=auto&tl=en&text={query}


输入"fy xxx"即自动打开谷歌翻译将xxx英译中

 Bilibili's search URL is https://search.bilibili.com/all?keyword= , so a similar entry can be configured;
 typing "bili xxx" then opens the Bilibili site and searches for xxx.
 Once you work out the URL's query variable you can customize many more web searches; you can also open a fixed page without any parameter by simply entering its address as the URL.

https://search.bilibili.com/all?keyword={query}

bk | Baidu Baike | https://baike.baidu.com/item/{query}
wj | Wikipedia (zh) | https://zh.wikipedia.org/wiki/{query}
cd | Chinese dictionary (zdic) | https://www.zdic.net/hans/{query}
zh | Zhihu | https://www.zhihu.com/search?type=content&q={query}
db | Douban movie info | https://movie.douban.com/subject_search?search_text={query}
wp | Netdisk search | https://www.xalssy.com.cn/search/kw{query}
pdd | Panduoduo search | http://nduoduo.net/s/name/{query}
zz | Torrent/magnet search | http://zhongzijidiso.xyz/list/{query}/1/0/0/
yk | Youku | https://so.youku.com/search_video/q_{query}
Iqy | iQIYI | https://so.iqiyi.com/so/q_{query}
tx | Tencent Video | https://v.qq.com/x/search/?q={query}
eb | eBooks | http://cn.epubee.com/books/?s={query}
wk | Baidu Wenku | https://wenku.baidu.com/search?word={query}
lm | Freeware downloads | http://www.xdowns.com/search.php?ac=search&keyword={query}
app | Android apps | http://www.appchina.com/sou/?keyword={query}






repEnergyDysDF.describe("year" ).show()
This method takes one or more String column names and returns a DataFrame of summary statistics (count, mean, stddev, min, max) for numeric columns.
The following four methods are similar:
  (1) first returns the first row.
  (2) head returns the first row; head(n: Int) returns the first n rows.
  (3) take(n: Int) returns the first n rows.
  (4) takeAsList(n: Int) returns the first n rows as a List.
  They return one or more rows as Row or Array[Row]; first and head behave identically.
  take and takeAsList pull the fetched data back to the driver, so mind the data volume when using them to avoid an OutOfMemoryError on the driver.
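A minimal sketch contrasting these operators, using the jdbcDF naming from these notes:

jdbcDF.describe("id", "c4").show()     // count / mean / stddev / min / max of the numeric columns

val firstRow  = jdbcDF.first()         // Row                 - same as head()
val firstFive = jdbcDF.head(5)         // Array[Row]
val taken     = jdbcDF.take(5)         // Array[Row]          - collected back to the driver
val asList    = jdbcDF.takeAsList(5)   // java.util.List[Row] - also collected to the driver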

jdbcDF .where("id = 1 or c1 = 'b'" ).show()
 
jdbcDF .filter("id = 1 or c1 = 'b'" ).show()

There is also an overloaded select method that takes Column arguments instead of Strings; it can express logic such as select id, id+1 from test.
jdbcDF.select(jdbcDF( "id" ), jdbcDF( "id") + 1 ).show( false)
Column objects are obtained with the apply and col methods; apply is usually the more convenient.

jdbcDF .selectExpr("id" , "c3 as time" , "round(c4)" ).show(false)
val idCol1 = jdbcDF.apply("id")
val idCol2 = jdbcDF("id")

Returns a new DataFrame that no longer contains the dropped column(s); the Column overload removes one column at a time, while the String overload accepts one or more column names.
jdbcDF.drop("id")
jdbcDF.drop(jdbcDF("id"))


jdbcDF.limit(3).show( false)
The limit method returns the first n rows of the DataFrame as a new DataFrame. Unlike take and head, limit is not an action (it is a transformation).


(1) orderBy and sort: sort by the specified columns, ascending by default.
  Example 1: sort by a column; prefix the column with - (or use .desc) for descending order. sort is used the same way as orderBy.

jdbcDF.orderBy(- jdbcDF("c4")).show(false)
// or
jdbcDF.orderBy(jdbcDF("c4").desc).show(false)

Example 2: sort ascending by column name (String form)

jdbcDF.orderBy("c4").show(false)


jdbcDF .groupBy("c1" )
jdbcDF.groupBy( jdbcDF( "c1"))
(2) cube and rollup: extensions of group by
  Equivalent to SQL's GROUP BY CUBE / ROLLUP.

(3) GroupedData
  groupBy returns a GroupedData object; the GroupedData API provides the post-group-by operations, for example (a short sketch follows this list):

max(colNames: String*): maximum of the specified (or all) numeric columns within each group; numeric columns only
min(colNames: String*): minimum of the specified (or all) numeric columns within each group; numeric columns only
mean(colNames: String*): average of the specified (or all) numeric columns within each group; numeric columns only
sum(colNames: String*): sum of the specified (or all) numeric columns within each group; numeric columns only
count(): number of elements in each group
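A minimal sketch of these GroupedData operations plus cube/rollup, using the jdbcDF column names from the examples above:

jdbcDF.groupBy("c1").max("c4").show()                                  // max of c4 per group
jdbcDF.groupBy("c1").agg(Map("c4" -> "mean", "id" -> "count")).show()

// cube / rollup group the same way but also emit subtotal rows, like SQL GROUP BY CUBE / ROLLUP
jdbcDF.rollup("c1", "c3").count().show()
jdbcDF.cube("c1", "c3").sum("c4").show()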

 The stat method computes statistics for specified columns or between columns (variance, covariance, etc.) and returns a DataFrameStatFunctions object.
  The code below finds the values of column c1 whose frequency of occurrence is above 30%. In jdbcDF, column c1 contains "a, b, a, c, d, b", so a and b each occur with frequency 2/6 > 0.3.

jdbcDF.stat.freqItems(Seq ("c1") , 0.3).show()

jdbcDF.explode( "c3" , "c3_" ){time: String => time.split( " " )}



--  Drop a database
drop database if exists db_name;
--  Force-drop a database (with its tables)
drop database if exists db_name cascade;

--  Drop a table
drop table if exists employee;

--  Truncate a table
truncate table employee;
--  Truncate a table, alternative way
insert overwrite table employee select * from employee where 1=0;

--  Drop partitions
alter table employee_table drop partition (stat_year_month>='2018-01');

--  Delete data by condition (keep only the rows matching the WHERE clause)
insert overwrite table employee_table select * from employee_table where id>'180203a15f';



spark-hive_2.11-2.1.1.jar
spark-catalyst_2.11-2.1.1.jar

/opt/spark/spark-2.1.1-bin-hadoop2.6/jars/spark-catalyst_2.11-2.1.1.jar

$SPARK_HOME/jars



--jars a.jar,b.jar,c.jar


--jars /opt/spark/spark-2.1.1-bin-hadoop2.6/jars/spark-catalyst_2.11-2.1.1.jar

/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.impl.MainDataMinCtl \
--jars /opt/spark/spark-2.1.1-bin-hadoop2.6/jars/spark-catalyst_2.11-2.1.1.jar \
--master yarn \
--deploy-mode cluster \
--num-executors 3 \
--driver-memory 2g \
--executor-memory 3g \
--executor-cores 1 \
--conf spark.default.parallelism=18 \
--queue thequeue \
/opt/spark_jar/dataw/dataAnalys-1.0.jar



MyConfig.run(spark, mySqlConfig, DataTableConstants.ODS_MIN_YIELD_VAL, thisHour)

ZipTabUtils.sinkToMySQL5(spark, repEnergyDysDF.drop("output_m","output_kg"), "hs_spin.hs_machine_platforms",dateHour, mySqlConfig)


//    ZipTabUtils.sinkToMySQL(spark, repEnergyDysDF, "hs_spin.dws_staff_dys", mySqlConfig)
//    ZipTabUtils.sinkToMySQL5(spark, repEnergyDysDF, "hs_spin.hs_machine_platforms",dateHour, mySqlConfig)


hs_machine_platforms.ds
Data-source flag: 0 = offline batch, 1 = real-time insert,
3 = inserted after special processing


WhWideAll.runDYS(spark, mySqlConfig, tablename, OneHourAgo,x)

mv dataAnalys-1.0.jar dataAnalys-1.0.jar.backup



MobaXterm


VMware Workstation Pro 15.5.0
License keys:

UG5J2-0ME12-M89WY-NPWXX-WQH88

GA590-86Y05-4806Y-X4PEE-ZV8E0

YA18K-0WY8P-H85DY-L4NZG-X7RAD

UA5DR-2ZD4H-089FY-6YQ5T-YPRX6

B806Y-86Y05-GA590-X4PEE-ZV8E0

ZF582-0NW5N-H8D2P-0XZEE-Z22VA

UY758-0RXEQ-M81WP-8ZM7Z-Y3HDA

VF750-4MX5Q-488DQ-9WZE9-ZY2D6

UU54R-FVD91-488PP-7NNGC-ZFAX6

YC74H-FGF92-081VZ-R5QNG-P6RY4

YC34H-6WWDK-085MQ-JYPNX-NZRA2


Image OCR:
https://web.baimiaoapp.com/

Text compression:
http://www.ab173.com/other/compress.php


DeleteTableUtils.deleteByDs3(targetTB,dateHour,mySqlConfig)
dys142
application_1622442245318_35645

yarn logs -applicationId application_1622442245318_35645 > logs.txt

yarn logs -applicationId application_1622442245318_35645 | grep "dys142" | more 
du -h --max-depth=1
du -sh .
ll -h


application_1622442245318_35645

yarn logs -applicationId application_1622442245318_35645 > logs.txt

yarn logs -applicationId application_1622442245318_35645 | grep "dys142" | more 
yarn logs -applicationId application_1622442245318_35645 | grep "select" | more 
yarn logs -applicationId application_1622442245318_35645 | grep "a.itemname" | more 

yarn logs -applicationId application_1622442245318_35645 | grep -C 100 "a.itemname" | more 
           CONCAT_WS("_",hs.producer_id,hs.host_id,hs.dev_group,hs.class_order,r.variety,hs.htime) as akey,
ZipTabUtils.sinkToMySQL(spark, repEnergyDysDF.drop("akey"), targetTable, mySqlConfig)

CHINER元数建模-3.0

Source: 码农网
Link: https://www.codercto.com/a/126382.html

Double-click "CHINER元数建模" (chiner, pronounced [kaɪˈnər])



Check whether the code does its processing with ProcessingTime or EventTime; if the business logic uses event time, the corresponding event-time timestamps and a watermark must be assigned (see the sketch after the window definitions below).

.window(TumblingEventTimeWindows.of(Time.seconds(8))) // tumbling window, driven by event time
.window(SlidingProcessingTimeWindows.of(Time.seconds(10),Time.seconds(5))) // sliding window, driven by processing time



Tumbling window: .timeWindow(Time.seconds(5))

Sliding window: .timeWindow(Time.seconds(5),Time.seconds(1))
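A minimal sketch of an event-time job (Flink 1.12+ Scala API): timestamps and watermarks are assigned before the event-time tumbling window. The Click case class, the sample data, and the 2-second out-of-orderness bound are assumptions for illustration.

import java.time.Duration
import org.apache.flink.api.common.eventtime.{SerializableTimestampAssigner, WatermarkStrategy}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time

case class Click(userId: String, ts: Long)   // ts: event time in epoch millis

object EventTimeWindowSketch {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    val clicks: DataStream[Click] =
      env.fromElements(Click("u1", 1000L), Click("u1", 5000L), Click("u2", 9000L))

    clicks
      // assign event-time timestamps and tolerate 2 seconds of out-of-order events
      .assignTimestampsAndWatermarks(
        WatermarkStrategy
          .forBoundedOutOfOrderness[Click](Duration.ofSeconds(2))
          .withTimestampAssigner(new SerializableTimestampAssigner[Click] {
            override def extractTimestamp(e: Click, recordTs: Long): Long = e.ts
          }))
      .map(c => (c.userId, 1))
      .keyBy(_._1)
      .window(TumblingEventTimeWindows.of(Time.seconds(8)))   // 8s tumbling window on event time
      .sum(1)
      .print()

    env.execute("EventTimeWindowSketch")
  }
}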

Nexus private repository


%HADOOP_HOME%\bin;%HADOOP_HOME%\sbin
D:\hadoop-2.6.0-cdh5.9.0\bin
D:\hadoop-2.6.0-cdh5.9.0\sbin
D:\开发工具\spark-2.0.1-bin-hadoop2.6\bin

Set the TightVNC password uniformly to: hangshu-2016
Set the TightVNC password uniformly to: 123456

D:\noteMe\业务逻辑\数据字典


-- Export dependency jars to the D:\tmp\lib folder
mvn dependency:copy-dependencies -DoutputDirectory=D:\tmp\lib


-- Manually install a jar into the local Maven repository
mvn install:install-file -Dfile=C:\Users\admin\AppData\Roaming\JetBrains\Datagrip2020.2\jdbc-drivers\Hive\1.1\hive-jdbc-uber-2.6.3.0-235.jar -DgroupId=org.apache.hive -DartifactId=hive-jdbc-uber -Dversion=2.6.3.0 -Dpackaging=jar


spark-sql --master spark://hadoop361:7077  --executor-memory 1g  --total-executor-cores 2
spark-sql \
--master spark://node01:7077 \
--executor-memory 1g \
--total-executor-cores 2 \
--conf  spark.sql.warehouse.dir=hdfs://node01:8020/user/hive/warehouse


java:git@192.168.3.71:hs-local/z41.git

The recommended practice is to import the following package in your code:

import org.apache.flink.streaming.api.scala._

If the data is bounded (a static dataset), import instead:

import org.apache.flink.api.scala._
GMT = UTC+0
serverTimezone=UTC 
serverTimezone=GMT%2B8
Recommended:

serverTimezone=Asia/Shanghai

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, Table,_}

import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

spark.sql("").show()

from_unixtime(unix_timestamp(),'yyyy-MM-dd HH:mm:ss') crt,
mvn package -Dmaven.test.skip=true 

import scala.collection.JavaConversions._
import spark.implicits._


import org.apache.flink.api.scala._



create table 'ns_hs_flink:word2','cf','f1'   # wrong: the HBase shell command is create, not create table
create 'ns_hs_flink:word2','cf','f1'


// Register a DataStream as a table in the catalog
tableEnv.registerDataStream(String name, DataStream<T> dataStream)
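registerDataStream is the older API name; a minimal sketch using the equivalent createTemporaryView from the Scala bridge (the table name, fields, and sample data are assumptions):

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

val env = StreamExecutionEnvironment.getExecutionEnvironment
val tableEnv = StreamTableEnvironment.create(env)

val words: DataStream[(String, Int)] = env.fromElements(("hello", 1), ("flink", 2))

// Register the DataStream as a (temporary) table in the catalog, then query it with SQL.
tableEnv.createTemporaryView("word_counts", words)
val result: Table = tableEnv.sqlQuery("SELECT * FROM word_counts")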

Caused by: java.lang.IllegalArgumentException: open() failed.Table 'test.a2' doesn't exist


CREATE TABLE mysource (id BIGINT ,name STRING, password STRING,age INT) WITH ('connector.type' = 'jdbc','connector.url' = 'jdbc:mysql://127.0.0.1:3306/feature','connector.driver' = 'com.mysql.cj.jdbc.Driver','connector.table' = 'student', 'connector.username' = 'root', 'connector.password' = '123456' )


String sourceTableSQL = "create table score(" +
        "id int," +
        "student_no string," +
        "class_name string," +
        "score double," +
        "primary key(id) not enforced" + // 多个联合主键用逗号分隔
        ") with (" +
        "'connector'='upsert-kafka'," +
        "'topic' = 'ods'," +
        "'properties.bootstrap.servers'='localhost:9092'," +
        "'key.format'='raw'," +
        "'value.format'='json'" +
        ")";

spark-shell --master yarn --deploy-mode client --jars /usr/lib/hive/lib/mysql-connector-java-5.1.46.jar

spark-shell --master yarn --deploy-mode client --jars /opt/hadoop/mysql-connector-java-5.1.46/mysql-connector-java-5.1.46.jar

import spark.implicits._
import spark.sql._

spark.sql("select floor(0),floor(1.8),floor(null)").show
spark.sql("show databases").show
spark.sql("show tables").show	
spark.sql("select * from hs_spin.ext_wide_host_increment limit 10").show()
spark.sql("select * from hs_spin.ods_wide_host_hive limit 10").show()


spark.sql("select key, id, serial_no, index, name, dev_type from hs_spin.ext_wide_host_increment limit 10").show()

spark.sql("select key, id, serial_no, index, name, dev_type from hs_spin.ext_wide_host_increment limit 10").show()
spark.sql("""insert into dwd.p_min_yield_val partition(year,month,day,producer_id)  select key, id, name, host_id, model_id, dev_group, path_id, staff_id, staff_name, class_type, variety, yarn_count, class_order, class_order_alias, efficiency, factor, output_m, output_kg, output_add_m, output_add_kg, htime, crt, online, current_shift
,SUBSTR(htime,1,4) year,SUBSTR(htime,1,7) month,SUBSTR(htime,1,10) day,producer_id from hs_spin.ext_min_yield_val limit 30""").show()

spark.sql("""SELECT producer_id,count(1) cnt FROM  dwd.p_min_yield_val where day = '2021-12-08' GROUP BY producer_id""").show()
spark.sql("""SELECT * FROM  dwd.p_min_yield_val where day = '2021-12-08' """).show()
spark.sql("""SELECT * FROM  hs_spin.ods_wide_host_hive limit 3 """).show()
spark.sql("""SELECT * FROM  dwd.p2_min_yield_val limit 3 """).show()



val df = spark.sql("""select key, id, name, host_id, model_id, dev_group, path_id, staff_id, staff_name, class_type, variety, yarn_count, class_order, class_order_alias, efficiency, factor, output_m, output_kg, output_add_m, output_add_kg, htime, crt, online, current_shift
,SUBSTR(htime,1,4) year,SUBSTR(htime,1,7) month,SUBSTR(htime,1,10) day,producer_id from hs_spin.ext_min_yield_val limit 30""")



import org.apache.spark.sql.{SaveMode, SparkSession}
df.coalesce(1).write.format("parquet").mode(SaveMode.Append).partitionBy("year","month","day","producer_id").saveAsTable(s"dwd.p_min_yield_val")
df.coalesce(1).write.format("parquet").mode(SaveMode.Append).partitionBy("year","month","day","producer_id").saveAsTable(s"dwd.p2_min_yield_val")
df.coalesce(1).write.format("parquet").mode(SaveMode.Append).partitionBy("year").saveAsTable(s"dwd.p2_min_yield_val")
df.coalesce(1).write.format("parquet").mode(SaveMode.Append).saveAsTable(s"dwd.p2_min_yield_val")


maven gson

hadoop fs -du -h /
hadoop fs -du -h /hbase
hdfs dfs -du -h /

su hdfs -l -c "hdfs dfs -du -h /"
su hdfs -l -c "hdfs dfs -du -h /user"
su hdfs -l -c "hdfs dfs -du -h /user/hive"
su hdfs -l -c "hdfs dfs -du -h /user/hive/warehouse/dwd.db"


python3 -m http.server 8888 --bind 127.0.0.1
python -m http.server 8888 --bind 127.0.0.1
python -m http.server 6666
python -m http.server 8888

python -m http.server 8080 --directory /tmp/


python2
python -m SimpleHTTPServer 3001

python3
python -m http.server 3000


http://localhost:8888/
git clone git@github.com:bkfish/Apache-Log4j-Learning.git
cd Apache-Log4j-Learning/tools
java -cp marshalsec-0.0.3-SNAPSHOT-all.jar marshalsec.jndi.LDAPRefServer "http://127.0.0.1:8888/#Log4jRCE"


Shortcut: Ctrl + Alt + Shift + U

java -jar arthas-boot.jar
java -jar arthas-boot.jar --telnet-port 9998 --http-port -1


dashboard	real-time data panel for the current system
thread	view the current JVM's thread stacks
watch	observe method invocation data (arguments, return values)
trace	trace the call path inside a method and report the time spent at each node
stack	print the call path by which the current method was invoked
tt	"time tunnel" of method invocations: records the arguments and return value of every call to a given method for later inspection
monitor	monitor method execution
jvm	view current JVM information
vmoption	view and update JVM diagnostics-related options
sc	view classes loaded by the JVM
sm	view the methods of loaded classes
jad	decompile the source of a loaded class
classloader	view the classloader hierarchy, urls, and class-loading info
heapdump	heap dump, similar to the jmap command


# Decompile and show only the source
jad --source-only com.Arthas
# Decompile a specific method of a class
jad --source-only com.Arthas mysql

jad --source-only com.wdbyte.arthas.Arthas deadThread
Use the sc -d -f command to view a class's field information.

sc -d -f com.wdbyte.arthas.Arthas
sm com.wdbyte.arthas.Arthas
ognl '@com.wdbyte.arthas.Arthas@hashSet'
ognl '@com.wdbyte.arthas.Arthas@hashSet.size()'
ognl '@com.wdbyte.arthas.Arthas@hashSet.add("test")'

ognl '@com.wdbyte.arthas.Arthas@hashSet' grep test


# Watch the arguments and return value
$ watch com.wdbyte.arthas.Arthas addHashSetThread '{params[0],returnObj}'
# Watch the arguments and the size of the return value
$ watch com.wdbyte.arthas.Arthas addHashSetThread '{params[0],returnObj.size}'
# Check whether the return value contains 'count10'
$ watch com.wdbyte.arthas.Arthas addHashSetThread '{params[0],returnObj.contains("count10")}'
# Watch the arguments and the return value's toString
$ watch com.wdbyte.arthas.Arthas addHashSetThread '{params[0],returnObj.toString()}'

-Xms128m -Xmx1024m -XX:-UseGCOverheadLimit
Add -XX:-UseGCOverheadLimit to the startup script. This only turns "java.lang.OutOfMemoryError: GC overhead limit exceeded" into the more common java.lang.OutOfMemoryError: Java heap space error.


D:\ws\gitee\dromara_ws\spring-boot-demo-master
http://127.0.0.1:8080/demo/ureport/designer


docker-compose up -d


mv dataAnalys-1.0.jar dataAnalys-1.0.jar.backup

cd /opt/spark_jar/offline/
mv dataAnalys-1.0.jar dataAnalys-1.0.jar.backup
cd /opt/spark_jar/offline/
mv dataAnalys-1.0.jar /bak/dataAnalys-1.0.jar.backup

Password x:
589367


Driver	URL
com.mysql.cj.jdbc.Driver	jdbc:mysql://localhost:端口号/<数据库名>?useSSL=false&serverTimezone=UTC
jdbc:mysql://frs.hang-shu.com:14518/hs_spin?useSSL=false&serverTimezone=UTC
jdbc:mysql://frs.hang-shu.com:14518/hs_spin?user=Hsapi&password=Hsapi-2016&useUnicode=true&characterEncoding=utf8&allowMultiQueries=true&autoReconnect=true&failOverReadOnly=false

-- Stop FineBI
ps -ef | grep finebi | grep -v 'grep' | awk '{print $2}' | xargs -I {} kill -9 {}
-- Start FineBI
nohup /opt/FineBI5.1/bin/finebi &

cd /opt/spark_jar/offline/
mv dataAnalys-1.0.jar /bak/dataAnalys-1.0.jar.backup

vlookup:
?mode=figure&padding=true&srcset=@2x
?mode=figure&padding=true&srcset=@2x&darksrc=invert
?mode=figure&darksrc=swBrss.png&padding=true&srcset=swDeyj.png@2x,swDmOs.png@3x&darksrcset=swBgoV.png@2x,swB6Zq.png@3x

Go proxy:
https://goproxy.io/;https://mirrors.aliyun.com/goproxy/


from_unixtime(unix_timestamp(),'yyyy-MM-dd') crt,from_unixtime(unix_timestamp(),'yyyy-MM-dd hh:mm:ss') lut
git tag -a v1.4 -m 'version 1.4'

yarn queue -status default
/opt/spark_shell/spin_offline_calc.sh



/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.start.CalcProApp \
--master yarn \
--deploy-mode cluster \
--num-executors 6 \
--driver-memory 2g \
--executor-memory 5g \
--executor-cores 2 \
--conf spark.default.parallelism=30 \
--queue thequeue \
/opt/spark_jar/offline/dataAnalys-1.0.jar "2022-02-22 17:35:00"


/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.start.CalcProApp \
--master yarn \
--deploy-mode cluster \
--num-executors 6 \
--driver-memory 2g \
--executor-memory 5g \
--executor-cores 2 \
--conf spark.default.parallelism=30 \
--queue thequeue \
/opt/spark_jar/offline/dataAnalys-1.0.jar "2022-03-15 09:35:00"


shutdown -r -m 192.168.3.139
shutdown -r -m 192.168.3.142


sk-20220321
hangshu-2016


API testing components:
Swagger
knife4j

com.hangshu.start.RecoverProApp
	//    Arguments: producer ID, process name, end time + 1.5h
	Example: "2070154266399473672" "细纱工序" "2022-04-13 08:35:00"




#!/bin/bash

/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.start.RecoverProApp \
--master yarn \
--deploy-mode cluster \
--num-executors 6 \
--driver-memory 2g \
--executor-memory 5g \
--executor-cores 2 \
--conf spark.default.parallelism=30 \
--queue thequeue \
/opt/spark_jar/offline/dataAnalys-1.0.jar \
"2070154266399473672" "细纱工序" "2022-04-13 08:35:00"



/opt/spark/spark-2.1.1-bin-hadoop2.6/bin/spark-submit --class com.hangshu.start.RecoverProApp --master yarn --deploy-mode cluster --num-executors 6 --driver-memory 2g --executor-memory 5g --executor-cores 2 --conf spark.default.parallelism=30 --queue thequeue /opt/spark_jar/offline/dataAnalys-1.0.jar "2162304858206502921" "络筒工序" "2022-05-28 16:35:00"





#!/bin/bash
# author: 菜鸟教程 (runoob)
# url:www.runoob.com

echo "Shell argument-passing example!";
echo "Script file name: $0";
echo "First argument: $1";
echo "Second argument: $2";
echo "Third argument: $3";

echo "Number of arguments: $#";
echo "All arguments as one string: $*";


sh /opt/spark_shell/spin_offline_calc_recovery.sh "2070154266399473672" "细纱工序" "2022-04-13 08:35:00"
sh /opt/spark_shell/spin_offline_calc_recovery.sh "2367874700551389189" "络筒工序" "2022-04-20 09:35:00"
sh /opt/spark_shell/spin_offline_calc_recovery.sh "2162304858206502921" "络筒工序" "2022-05-28 16:35:00"

# shutdown -r -m 192.168.3.174

Command-line steps:
1. Log in with an administrator account.
2. Create a user: CREATE USER 'username'@'host' IDENTIFIED BY 'password';
3. Grant privileges: grant all privileges on orderautocategory.* to xcuser@localhost identified by 'xcuser';
   or: GRANT all ON *.* TO 'root'@'%' WITH GRANT OPTION;
4. Grant the account replication privileges:

    GRANT REPLICATION SLAVE ON *.* TO 'test'@'%';

5. flush privileges;

CREATE USER 'lcg'@'host' IDENTIFIED BY '123456';
grant all privileges on test2.* to lcg@'%' identified by '123456';
flush privileges;



sysdm.cpl
nc -l 12345



// Under META-INF/services/, create a file named after the interface's fully-qualified class name; the file lists the implementation classes to load, e.g. META-INF/services/com.viewscenes.netsupervisor.spi.SPIService
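A minimal sketch of loading such SPI implementations with java.util.ServiceLoader, assuming SPIService is the interface whose fully-qualified name the services file carries:

import java.util.ServiceLoader
import scala.collection.JavaConverters._
import com.viewscenes.netsupervisor.spi.SPIService

// Reads META-INF/services/com.viewscenes.netsupervisor.spi.SPIService from the classpath
// and instantiates every implementation class listed there (one class name per line).
val impls = ServiceLoader.load(classOf[SPIService]).asScala
impls.foreach(impl => println(impl.getClass.getName))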
D:\maven_repo\repo\mysql\mysql-connector-java\8.0.22



${Internal.Entry.Current.Directory}

# List running Java processes
$ jps -mlvV 
# Filter the Java process list
$ jps -mlvV | grep [xxx]



psping node-67:9999
psping hadoop360:9999
psping hadoop360:999
telnet hadoop360 999




	
	
Backup:	
mv dataAnalys-1.0.jar dataAnalys-1.0.jar.backup



# List running Java processes
$ jps -mlvV 
# Filter the Java process list
$ jps -mlvV | grep [xxx]
jps -ml






HDFS path of the jar archive that Spark loads; it corresponds to all the jars under $SPARK_HOME/jars (e.g. referenced via spark.yarn.archive):
/user/root/spark-archive/sparkjar.zip	


su hdfs -l -c "hdfs dfs -du -h /user/hive/warehouse"
su hdfs -l -c "hdfs dfs -du -h /user"
su hdfs -l -c "hdfs dfs -du -h /"
su hdfs -l -c "hdfs dfs -du -h /user/root/spark-archive"



private final static Logger logger= LoggerFactory.getLogger(p3.class);
ps -ef | grep finebi | grep -v 'grep' | awk '{print $1}'
ps -ef | grep finebi | grep -v 'grep' | awk '{print $2}'




^\s*\n

ctrl+r
Select the Regex (regular expression) option
Enter ^\s*\n
Replace All


mvn clean package -DskipTests
mvn package -DskipTests
mvn clean package -Dmaven.test.skip=true


Git Commit Template


数据分析_仪表板

D:\noteMe\md\总结报告.md

git tag -l -n


mysqlbinlog -v --base64-output=decode-rows --start-datetime='2021-12-12 00:00:00' --stop-datetime='2021-12-13 00:00:00' <binlog file> |grep -i -A 50 -B 50  'hs_spin.arrange' >> binlogA.sql
mysqlbinlog --read-from-remote-server --raw --host=192.168.3.83 --port=21020 --user=repl --password=repl  mysql-bin.000001

mysqlbinlog --read-from-remote-server --raw --host=127.0.0.1 --port=3306 --user=root --password=root  binlog.000022
mysqlbinlog --read-from-remote-server --raw --host=192.168.3.217 --port=3306 --user=root -p   binlog.000022
mysqlbinlog --read-from-remote-server --raw --host=192.168.3.217 --port=3306 --user=root --password   binlog.000022


//  import org.apache.flink.api.scala._

import org.apache.flink.streaming.api.scala._


--password=root

### College
---
<u>College ID</u>
College name
Dean


KeyEntities
Key business entities

TableRelations
Table relationship diagram







"ns_hs_flink"."TEST01_20200413"
scan 'hs_spin:ods_telv_pro_history','2881198309639192595_2021-12-05 12:00:00'
get 't1','rowkey1',{COLUMN=>'f1:name',VERSIONS=>3}
get 'hs_spin:ods_telv_pro_history','2881198309639192595_2021-12-05 12:00:00',{VERSIONS=>1}
get 'hs_spin:ods_telv_pro_history','2881198309639192595_2021-12-05 12:00:00',{COLUMN=>'cf:output_add_kg',VERSIONS=>3}

create 'ns_hs_flink:person_hbase','cf'
count 'ns_hs_flink:person_hbase'
scan 'ns_hs_flink:person_hbase',{LIMIT=>1}


put 'ns_hs_flink:TEST01_20200413', 'r1' ,'0:COL5' ,'11.32'
put 'ns_hs_flink:TEST01_20200413', 'r2' ,'0:COL5' ,'15.6'

put 'ns_hs_flink:TEST01_20200413', 'r1' ,'0:COL5' ,11.32
put 'ns_hs_flink:TEST01_20200413', 'r2' ,'0:COL5' ,15.6


spark-catalyst_2.11-2.1.1.jar
spark-hive_2.11-2.1.1


$SPARK_HOME
$SPARK_HOME/lib
D:\开发工具\spark-2.0.1-bin-hadoop2.6

val appName = "HS_"+this.getClass().getSimpleName().filter(!_.equals('$'))
//    hs.run: true = run on the server (cluster), false = run locally
    var spark: SparkSession = null
    if (ConfigurationManager.map.get("hs.run").asInstanceOf[Boolean]) {
      spark = SparkSessionUtils.getSparkByNameEnv(appName)
    } else {
      spark = SparkSessionUtils.getSparkEnv
    }

//Switch the Hive database
     spark.sql("use dwd")
     val repEnergyDysDF = spark.sql(sqlStr)

     //    1. Create the partitioned table; changing append to overwrite drops any existing table and recreates it
//     repEnergyDysDF.write.mode("append").partitionBy("year","month","day","producer_id").saveAsTable("p_min_yield_val")

     repEnergyDysDF.repartition(1)
       //Simply write using SaveMode
       .write.mode(SaveMode.Append).format("hive").insertInto("p_min_yield_val")
//     repEnergyDysDF.write.insertInto(com.hangshu.constant.DataTableConstants.R_DWD_MIN_YIELD_VAL)


cp 2.txt 2.txt.backup


Dynamic partition strict mode requires at least one static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict
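This error comes from Hive's dynamic-partition strict mode. A minimal sketch of relaxing it before writing into the partitioned table, assuming the SparkSession, SaveMode import, and repEnergyDysDF from the snippets above; the settings are the ones named in the error message:

// Allow fully dynamic partitioning, then write with insertInto as above.
spark.sql("set hive.exec.dynamic.partition=true")
spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")

repEnergyDysDF.write.mode(SaveMode.Append).insertInto("p_min_yield_val")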


scan 'hs_spin:ext_p_min_yield_val', {LIMIT=>1}
hdfs://edh/user/hive/warehouse/dwd.db/p_min_yield_val



NVL(r.variety, '未知品种') variety,


  
[Unit]
Description=nacos
After=network.target

[Service]
Type=forking
ExecStart=/opt/apache/nacos/bin/startup.sh -m standalone
ExecReload=/opt/apache/nacos/bin/shutdown.sh
ExecStop=/opt/apache/nacos/bin/shutdown.sh
PrivateTmp=true

[Install]  
WantedBy=multi-user.target  


 systemctl start nacos.service

systemctl stop nacos.service
http://192.168.3.60/nacos/index.html
http://192.168.3.60:8848/nacos/index.html#/login

put  'hs_spin:ods_fault_tolerant_coefficient','4605670556180797632','f:ratio','0.03'
get 'hs_spin:ods_fault_tolerant_coefficient','4605670556180797632',{COLUMN=>['f:ratio','f:host_id']}

hs_spin:ods_fault_tolerant_coefficient


Spark dual-stream (stream-stream) join code example
https://www.cnblogs.com/yangxusun9/p/13137592.html

GitHub file download acceleration
https://toolwa.com/github/
https://shrill-pond-3e81.hunsh.workers.dev/

Chapter 2 - HBase

In the example, TTL => '259200' sets the data expiration time to three days.

create 'NewsClickFeedback',{NAME=>'Toutiao',VERSIONS=>1,BLOCKCACHE=>true,BLOOMFILTER=>'ROW',COMPRESSION=>'SNAPPY',TTL => ' 259200 '},{SPLITS => ['1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']}


create 't_task_log',{NAME => 'f', TTL=>'86400'}
TTL => '86400 SECONDS (1 DAY)'
If you see TTL => 'FOREVER', the data never expires.


hbase(main):004:0> disable "t_task_log"  # disable the table
0 row(s) in 2.3380 seconds
 
hbase(main):005:0> alter "t_task_log",NAME=>'data',TTL=>'86400' # set the TTL on column family data
Updating all regions with the new schema...
1/1 regions updated.
Done.
0 row(s) in 1.9510 seconds
 
hbase(main):006:0> enable  "t_task_log"  # re-enable the table
0 row(s) in 1.2640 seconds


disable "ns_hs_flink:word"
alter "ns_hs_flink:word",NAME=>'f1',TTL=>'259200'
enable  "ns_hs_flink:word"
desc  'ns_hs_flink:word'

create 'NewsClickFeedback',{NAME=>'Toutiao',VERSIONS=>1,BLOCKCACHE=>true,BLOOMFILTER=>'ROW',COMPRESSION=>'SNAPPY',TTL => ' 259200 '},{SPLITS => ['1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']}

create 'NewsClickFeedback',{NAME=>'cf',VERSIONS=>1,BLOCKCACHE=>true,BLOOMFILTER=>'ROW',COMPRESSION=>'SNAPPY',TTL => ' 259200 '}


git reset HEAD file
git reset HEAD hs_spin_spark.zip
npm config set registry https://registry.npmjs.org/


https://github1s.com/niumoo/down-bit


$ git clone git@github.com:conwnet/github1s.git
$ cd github1s
$ yarn
$ yarn watch
$ yarn serve # in another shell
$ # Then visit http://localhost:5000 or http://localhost:5000/conwnet/github1s once the build is completed.

truncate_preserve 'hs_spin:ods_ext_item_origin'
describe 'hs_spin:ods_ext_item_origin'
count 'hs_spin:ods_ext_item_origin'

spark.sql(" ").show(10,false)

SELECT mean(val) AS '均值', variance(val) AS '方差', stddev(val) AS '标准差', corr(val,yearsmarried) AS '两个指标的相关系数', skewness(val) AS 'skewness偏度', kurtosis(val) AS 'kurtosis峰度'
,a.* from hs_spin.ods_ext_item_inc a where a.devid ='1437812751' and htime BETWEEN '2021-08-24 04:59:54.536' and CURRENT_TIMESTAMP

https://mirrors.bfsu.edu.cn/apache/flink/flink-1.13.2/flink-1.13.2-bin-scala_2.11.tgz

In spark-env.sh, add the line export SPARK_DIST_CLASSPATH="$SPARK_DIST_CLASSPATH:/usr/lib/path/*" and put the jars your project needs into that directory.


D:\soft_dev\driver\spark_jar

--jars 


ln -s /home/gamestat /gamestat
rm -rf  b   # note: b, not rm -rf b/

mkdir -p /opt/external_jars
ln -s /opt/external_jars /usr/lib/hadoop-hdfs/lib/external_jars
ln -s /opt/external_jars /usr/lib/hadoop-yarn/lib/external_jars

SPARK_CLASSPATH=$SPARK_CLASSPATH:/opt/external_jars/*
SPARK_CLASSPATH=$SPARK_CLASSPATH:/opt/external_jars/*


spark-submit --class ezviz.bigdata.spark.OfflineJob --master yarn --deploy-mode cluster --queue azkaban --name antarmy_XP_one_day_data --driver-memory 2G --executor-memory 10G --executor-cores 3 --num-executors 10 --conf spark.driver.extraJavaOptions=" -Dfile.encoding=utf-8 " --conf spark.executor.extraJavaOptions=" -Dfile.encoding=utf-8 " --conf spark.yarn.jars=local:/opt/cloudera/parcels/CDH/lib/spark/jars/*,local:/opt/cloudera/parcels/CDH/lib/spark/hive/*:/opt/cloudera/parcels/CDH/lib/hive/lib/* --conf spark.sql.parquet.compression.codec=gzip --conf spark.shuffle.consolidateFiles=true EzBigdataFramework-1.0-SNAPSHOT-shaded.jar "/user/antarmy/antarmy_xp_one_day_data.conf"

--conf spark.yarn.jars=local:/opt/external_jars/* --conf spark.sql.parquet.compression.codec=gzip --conf spark.shuffle.consolidateFiles=true EzBigdataFramework-1.0-SNAPSHOT-shaded.jar


spark.driver.extraClassPath /opt/external_jars/*



http://hadoop360:60010/zk.jsp
http://hadoop360:60010/jmx?qry=Hadoop:service=HBase,name=RegionServer,sub=Server
http://www.kuazhi.com/post/518841.html

HBase Master:
master:
http://hadoop360:60010

HBase regionServer
regionServer:
http://hadoop362:60030
http://hadoop363:60030
http://hadoop364:60030

http://hadoop362:60030/jmx?description=true
http://hadoop362:60030/jmx?qry=hadoop:service=NameNode,name=NameNodeInfo
http://hadoop362:60030/jmx?qry=java.lang:type=OperatingSystem
http://hadoop362:60030/jmx?qry=Hadoop:service=HBase,name=JvmMetrics
http://hadoop362:60030/jmx?qry=Hadoop:service=HBase,name=Master,sub=Server
http://hadoop362:60030/jmx?qry=Hadoop:service=HBase,name=RegionServer,sub=Regions

Hadoop / HBase JMX metrics notes

Chapter 3 - Notable open-source systems



http://demo-jj.dev33.cn/spdj-admin/index.html
