Some Hive statements used at work

-- Force-drop a database, including all tables in it

drop database database_name cascade;

-- Garbled Chinese in metadata: change the encoding to latin1
-- (run this against the MySQL database that backs the Hive metastore, not inside Hive)
alter database hive character set latin1;

-- Inner join
select * from A inner join B on A.id = B.id;
-- LEFT SEMI JOIN behaves as follows
-- Difference: the inner join joins first and then drops B's columns, so if B.id has duplicates the rows are duplicated; a semi join never produces duplicates
select A.* from A inner join B on A.id = B.id;
select * from A left semi join B on A.id = B.id;
-- +-------+---------+-----------+--------+--+
-- | a.ip  | a.time  | a.method  | a.url  |
-- +-------+---------+-----------+--------+--+
-- | 1     | 999     | lucas     | baidu  |
-- | 4     | 444     | wenlong   | liu    |
-- | 5     | 555     | hhh       | ggg    |
-- +-------+---------+-----------+--------+--+

-- Left outer join
select * from A a left join B b on a.id = b.id;
-- +-------+---------+-----------+--------+-------+---------+-----------+--------+--+
-- | a.ip  | a.time  | a.method  | a.url  | b.ip  | b.time  | b.method  | b.url  |
-- +-------+---------+-----------+--------+-------+---------+-----------+--------+--+
-- | 1     | 999     | lucas     | baidu  | 1     | 999     | lucas     | baidu  |
-- | 4     | 444     | wenlong   | liu    | 4     | 444     | wenlong   | liu    |
-- | 5     | 555     | hhh       | ggg    | 5     | 555     | hhh       | ggg    |
-- | 2     | 222     | bbb       | ddd    | NULL  | NULL    | NULL      | NULL   |
-- | 3     | 333     | eee       | fff    | NULL  | NULL    | NULL      | NULL   |
-- +-------+---------+-----------+--------+-------+---------+-----------+--------+--+

-- Venn diagram: all rows of the left table
select a.* from A a left join B b on a.id = b.id;

-- Venn diagram: rows only in the left table (exclude ids that also appear in B)
select A.* from A left join B on A.id = B.id where B.id is null;
-- Venn diagram: all rows of the right table
select B.* from A right join B on A.id = B.id;
-- Venn diagram: rows only in the right table (exclude ids that also appear in A)
select B.* from A right join B on A.id = B.id where A.id is null;
-- Full outer join (the union of both tables)
select * from A full join B on A.id = B.id;
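-- For comparison, a true Cartesian product pairs every row of A with every row of B and is written as a cross join:
select * from A cross join B;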
-- Venn diagram with the intersection removed
select * from A full join B on A.id = B.id where A.id is null or B.id is null;

-- Merge an incremental table into the full (non-partitioned) table: update rows that already exist, insert the ones that don't
-- (note a.*: take only the first table's columns; full_table / incr_table / business_key below are placeholders, business_key being a column whose value never changes)
insert overwrite table full_table
select * from incr_table
union all
select a.* from full_table a left join incr_table b on a.business_key = b.business_key
where b.business_key is null;

-- Map join: in a left join the left table cannot be the small (broadcast) table; in a right join the right table cannot be the small table
set hive.join.emit.interval = 1000;
set hive.mapjoin.size.key = 10000;
set hive.mapjoin.cache.numrows = 10000;
select /*+ mapjoin(u) */ u.user_id,u.gender,pv.url from user_info u join page_view pv on (u.user_id=pv.user_id);

-- Subquery
select a.user_id,b.cnt from (select user_id from user_info) a join (select user_id,count(*) as cnt from page_view group by user_id) b on a.user_id=b.user_id;

-- DISTINCT is usually used only to count distinct rows, not to return every distinct value
select gender,count(distinct user_id) from user_info group by gender;
-- Note: every selected column must appear in GROUP BY, except columns inside aggregate functions and expressions that do not depend on table columns.
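-- A minimal illustration of that rule against the same user_info table:
-- select gender, user_id, count(*) from user_info group by gender;          -- invalid: user_id is neither aggregated nor in GROUP BY
select gender, user_id, count(*) from user_info group by gender, user_id;    -- valid once user_id is added to GROUP BY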

-- RANK usage
SELECT cookieid, vtime, pv FROM (
SELECT cookieid, vtime, pv, RANK() OVER (PARTITION BY cookieid ORDER BY pv DESC, vtime DESC) as pv_rank FROM (
SELECT cookieid, vtime, COUNT(vtime) AS pv
FROM tmp_dh_topN
GROUP BY cookieid, vtime
ORDER BY cookieid, vtime
) tmp_dh_pv
) tmp_dh_rank
WHERE tmp_dh_rank.pv_rank=1;

-- ROW_NUMBER: ties get distinct consecutive numbers, e.g. 1,2,3,4
SELECT name,subject,score,ROW_NUMBER() over(PARTITION BY subject ORDER BY score DESC) AS score_rank FROM class_score;
-- RANK: ties share a rank and the following rank is skipped, e.g. 1,1,3,4
SELECT name,subject,score,RANK() over(PARTITION BY subject ORDER BY score DESC) AS score_rank FROM class_score;
-- DENSE_RANK: ties share a rank with no gap, e.g. 1,1,2,3
SELECT name,subject,score,dense_rank() over(PARTITION BY subject ORDER BY score DESC) AS score_rank FROM class_score;
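-- A side-by-side comparison in one query; with hypothetical scores 95, 95, 90, 85 inside one subject,
-- the three columns would come out as noted in the trailing comments (made-up sample values):
SELECT name, subject, score,
ROW_NUMBER() OVER (PARTITION BY subject ORDER BY score DESC) AS row_num,   -- 1,2,3,4
RANK()       OVER (PARTITION BY subject ORDER BY score DESC) AS rank_num,  -- 1,1,3,4
DENSE_RANK() OVER (PARTITION BY subject ORDER BY score DESC) AS dense_num  -- 1,1,2,3
FROM class_score;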

-- Repair partitions
-- Repair all partitions of a single-level partitioned table
MSCK REPAIR TABLE dw_log_detail;
-- Repair all partitions of a multi-level partitioned table
set hive.msck.path.validation=ignore;
MSCK REPAIR table dw_log_detail;
-- Add a single partition by hand (prefer this when there are too many partitions)
ALTER TABLE table_name ADD PARTITION(country='U.S.A');

-- Insert rows one at a time
INSERT INTO test(name,pwd,createdate)VALUES('name1','pwd1','2017-06-20 14:14:09');
INSERT OVERWRITE TABLE test PARTITION(pt_d = '20190419') select '1','2','3',...;

-- Add partitions
ALTER TABLE table_name ADD IF NOT EXISTS PARTITION(dt=20181224) location '/cdm/table' PARTITION(...) location ... ;

-- Drop a partition
ALTER TABLE table_name DROP IF EXISTS PARTITION(...);

-- Change a partition's location
ALTER TABLE table_name PARTITION(...) SET location '';

-- Rename a column
ALTER TABLE table_name CHANGE COLUMN hms hour_minute_second INT COMMENT '...' AFTER name;  -- or FIRST instead of "AFTER name"

-- Add columns (note COLUMNS is plural)
ALTER TABLE table_name ADD COLUMNS (column_name STRING comment '');

-- Dynamic-partition insert into a partitioned table; by default at most 100 dynamic partitions per node
-- Do not write select *; the partition column must come last in the SELECT list
SET hive.exec.dynamic.partition=true;
SET hive.exec.dynamic.partition.mode=nonstrict;
set hive.exec.max.dynamic.partitions.pernode=1000;
set hive.exec.max.dynamic.partitions=1000;
set hive.exec.max.created.files=1000;
INSERT OVERWRITE TABLE table_name PARTITION(pt_d) SELECT column1,column2,column3,...,partition_clo FROM table_name;

-- Static-partition insert into a partitioned table
-- The SELECT list does not include the partition column
INSERT overwrite TABLE table_name PARTITION(pt_d='...') SELECT column1,column2,column3,... FROM table_name WHERE pt_d='...';

-- Create a table stored in LZO format
CREATE EXTERNAL TABLE foo (
     columnA string,
     columnB string
) PARTITIONED BY (date string)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY "\t"
STORED AS
INPUTFORMAT "com.hadoop.mapred.DeprecatedLzoTextInputFormat"
OUTPUTFORMAT "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
LOCATION '/path/xxxx/foo';

-- For an existing table, use ALTER to switch its storage format to LZO.
ALTER TABLE foo SET FILEFORMAT 
INPUTFORMAT "com.hadoop.mapred.DeprecatedLzoTextInputFormat"
OUTPUTFORMAT "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat";

-- When inserting data into an LZO table, add the following two parameters:
SET hive.exec.compress.output=true;
SET mapred.output.compression.codec=com.hadoop.compression.lzo.LzopCodec;

-- count(*) finds data but select * returns nothing?
-- This happens when data from an LZO-compressed table is loaded into a table that was not created as compressed:
-- the files are clearly present in the HDFS directory and count(*) reports a row count, yet select returns nothing.
-- Reason:
-- the table definition is uncompressed while the files are compressed, so a plain select (which reads according to the table definition) cannot decode the compressed data.
-- Fix:
-- specify the matching compression codec for the query, e.g.:
SET hive.exec.compress.output=true;
SET mapred.output.compression.codec=com.hadoop.compression.lzo.LzopCodec;

SELECT t1.indexNo, t2.indexNo1, t2.correlationCategory
FROM OdsMetaApplicationIndex t1
INNER JOIN (
  SELECT t3.indexNo AS indexNo1, t4.rightIndexNo AS indexNo2, t4.correlationCategory AS correlationCategory
  FROM OdsMetaApplicationIndex t3
  INNER JOIN OdsMetaIndexCorrelation t4 ON t3.rootIndexNo = t4.leftIndexNo
  WHERE t3.indexNo = ?
) t2 ON t1.rootIndexNo = t2.indexNo2
WHERE t1.indexNo = ?;

-- Bucketed table
CREATE TABLE student1(id INT,name string,gender string,age INT) clustered BY(id) INTO 3 buckets ROW FORMAT delimited fields terminated BY ','
-- STORED as orc
location '/data/m0908/tables/';
SET hive.enforce.bucketing = TRUE;
-- Note: LOAD DATA only copies the file and does not hash rows into buckets; use INSERT ... SELECT for properly bucketed data (see the sketch after this block)
load DATA LOCAL inpath '/data/m0908/1.txt' INTO TABLE student1;
-- When overwriting, INTO still cannot be omitted
load DATA LOCAL inpath '/data/m0908/1.txt' overwrite INTO TABLE student1;
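-- A hedged sketch: student1_stage is a hypothetical unbucketed staging table with the same columns;
-- loading the raw file into it first and then inserting lets Hive actually hash rows into the 3 buckets.
SET hive.enforce.bucketing = TRUE;
INSERT OVERWRITE TABLE student1
SELECT id, name, gender, age FROM student1_stage;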

-- Date difference: datediff. Syntax: datediff(string enddate, string startdate)
-- Return type: int
-- Description: returns the number of days from startdate to enddate (end minus start).
select datediff('2016-12-30','2016-12-29');
-- Last 1000 days (registration_time is a table column)
select t.registration_time from ads_rec_user_property t where datediff(current_timestamp,t.registration_time)<=1000;
-- Data from the last 30 days
select * from table where datediff(current_timestamp,create_time)<=30;

-- Date addition: date_add. Syntax: date_add(string startdate, int days)
-- Return type: string
-- Description: returns the date days days after startdate.
select date_add('2016-12-29',10);

-- Date subtraction: date_sub. Syntax: date_sub(string startdate, int days)
-- Return type: string
-- Description: returns the date days days before startdate.
select date_sub('2016-12-29',10);
-- Current date
SELECT from_unixtime(unix_timestamp(),'yyyy-MM-dd');
-- The day before the current date
SELECT DATE_SUB(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),1);
-- The day before the current date, reformatted (MM is month; mm would mean minutes in the format pattern)
SELECT from_unixtime(unix_timestamp(DATE_SUB(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),1),'yyyy-MM-dd'),'yyyyMMdd');
-- Unix timestamp (the format pattern must match the input string)
select unix_timestamp(); select unix_timestamp('2019-08-12','yyyy-MM-dd');
-- Convert a unix timestamp to a formatted date string
select from_unixtime(999999999,'yyyyMMdd'); select from_unixtime(unix_timestamp(),'yyyyMMdd');
-- The date three months ago
select add_months(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),-3);
select from_unixtime(unix_timestamp(add_months(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),-3),'yyyy-MM-dd'),'yyyyMMdd');
-- Time format conversions
SELECT from_unixtime(unix_timestamp(occurrence_time_value),'yyyyMMdd') from dwd_contract_app_event t limit 1;
SELECT regexp_replace(SUBSTRING(occurrence_time_value,1,10),'-','') from dwd_contract_app_event t limit 1;
SELECT date_format(occurrence_time_value,'yyyyMMdd') from dwd_contract_app_event t limit 1;

-- Day of week for a date (1 = Monday, ..., 7 = Sunday; 1900-01-08 was a Monday)
select pmod(datediff('2020-06-12','1900-01-08'),7)+1 as week_day;
-- Sunday of the previous week
select date_sub('2020-06-12 14:10:59',pmod(datediff('2020-06-12 14:10:59','1900-01-08'),7)+1) as Last_Sunday;
-- Monday of the previous week
select date_sub('2020-06-12',pmod(datediff('2020-06-12','1900-01-08'),7)+7) as Last_Monday;
-- Monday of the week the date falls in
select date_sub('2020-06-12',pmod(datediff('2020-06-12','1900-01-08'),7)) as Monday_Date;
-- Week number of the year for a date
select weekofyear(to_date(from_unixtime(unix_timestamp('20170603','yyyyMMdd'),'yyyy-MM-dd')));
select weekofyear(date_format('2017-06-03 14:06:59','yyyy-MM-dd'));
select weekofyear(to_date('2020-06-12 00:06:00'));
select weekofyear('2020-06-12 00:06:00');

-- Generate a UUID in Hive and strip the dashes with regexp_replace
select regexp_replace(java_method("java.util.UUID","randomUUID"),'-','');

-- Split a version string, left-pad each segment to two digits, and re-concatenate (expression excerpt only; T5 is the source table alias)
SELECT CASE WHEN T5.app_version IS NOT NULL THEN (CONCAT(CASE WHEN LENGTH(split(T5.app_version, '\\.')[0]) <= 2 THEN LPAD(split(T5.app_version, '\\.')[0], 2, 0) ELSE split(T5.app_version, '\\.')[0] END, '.',
CASE WHEN LENGTH(split(T5.app_version, '\\.')[1]) <= 2 THEN LPAD(split(T5.app_version, '\\.')[1], 2, 0) ELSE split(T5.app_version, '\\.')[1] END, '.',
CASE WHEN LENGTH(split(T5.app_version, '\\.')[2]) <= 2 THEN LPAD(split(T5.app_version, '\\.')[2], 2, 0) ELSE split(T5.app_version, '\\.')[2] END
)) ELSE T5.app_version END AS current_APP_version;

-- Tuning
-- Merge small files in the map stage to reduce the number of mappers
set mapred.max.split.size = 5368709120;
set mapred.min.split.size.per.node = 5368709120;
set mapred.min.split.size.per.rack = 5368709120;
set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;

-- Increase the number of mappers (mapred.map.tasks is only a hint; shrinking the split sizes above is the reliable way)
set mapred.map.tasks = 10;

-- Adjust the number of reducers
-- Set how much data each reducer processes
set hive.exec.reducers.bytes.per.reducer = 2000000000;
-- Set the number of reducers directly
set mapred.reduce.tasks = 20;

-- Virtual CPU cores per map task
set mapreduce.map.cpu.vcores = 4;
-- Virtual CPU cores per reduce task
set mapreduce.reduce.cpu.vcores = 8;

-- Memory per map task, in MB (a literal value is required; 6144 = 6 GB)
set mapreduce.map.memory.mb = 6144;
-- Memory per reduce task, in MB
set mapreduce.reduce.memory.mb = 10500;


-- Disable running queries locally (on the gateway) based on input size; local mode is only for small data sets, default false, though some people turn it on
set hive.exec.mode.local.auto = false;

-- Do not automatically convert a reduce-side common join into a map join based on the size of the small input table.
set hive.auto.convert.join = false;

-- Whether to print column headers with query results; off by default
set hive.cli.print.header = true;

-- Controls whether independent jobs within the same SQL statement can run in parallel; default false.
set hive.exec.parallel = true;

-- Whether to aggregate on the map side, reducing shuffle traffic and reduce-stage time and improving overall performance (needs enough memory).
set hive.map.aggr = true;

-- Whether to handle skewed data in group-by: starts an extra optimization job to spread skewed keys and avoid data skew
set hive.groupby.skewindata = true;

-- Maximum memory for a local-mode mapper/reducer, in bytes; 0 means no limit.
set hive.mapred.local.mem = 105726524;

-- Default output file format, same options as when creating a table: 'TextFile', 'SequenceFile' or 'RCFile'
set hive.default.fileformat = 'TextFile';

-- Map/Reduce mode: 'strict' disallows Cartesian products (and other risky queries), 'nonstrict' allows them
set hive.mapred.mode = 'strict';
-- Non-strict mode
set hive.mapred.mode = nonstrict;
-- Whether Hive authorization is enabled.
set hive.security.authorization.enabled = false;

-- Whether the output of the last map/reduce job of a query is compressed.
set hive.exec.compress.output = true;

-- Whether the output of intermediate map/reduce jobs (intermediate stages) is compressed.
set hive.exec.compress.intermediate = true;

-- Number of threads used when submitting jobs in parallel.
set hive.exec.parallel.thread.number = 10;

-- Default character encoding of the Hive CLI
set hive.cli.encoding = 'UTF8';

-- Whether to log query-plan progress; default true
set hive.log.plan.progress = true;

-- ORC (write the storage format STORED AS ORC first, then the location, and finally the compression in TBLPROPERTIES)
DROP TABLE IF EXISTS ads_rec_user_property;
CREATE EXTERNAL TABLE IF NOT EXISTS ads_rec_user_property (
user_center_id STRING COMMENT 'unique user id',
customer_id STRING COMMENT 'user nickname',
registration_channel STRING COMMENT 'registration channel',
registration_type STRING COMMENT 'registration method',
registration_time STRING COMMENT 'registration time',
phone_system STRING COMMENT 'phone OS',
phone_sn STRING COMMENT 'SN (serial number)'
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001'
STORED AS ORC
LOCATION '/data/kakasi/ads_rec_user_property'
TBLPROPERTIES("orc.compress" = "ZLIB");
-- The ORCFile storage format has several table properties that can further tune it:
-- +----------------------+-------------------------+-----------------------------------------------+
-- | Property             | Default                 | Description                                   |
-- +----------------------+-------------------------+-----------------------------------------------+
-- | orc.compress         | ZLIB                    | column compression codec (NONE, ZLIB, SNAPPY) |
-- | orc.compress.size    | 262,144 (= 256 KiB)     | size of each compression chunk                |
-- | orc.stripe.size      | 268,435,456 (= 256 MiB) | size of each stripe                           |
-- | orc.row.index.stride | 10,000                  | rows between index entries (at least 1,000)   |
-- | orc.create.index     | true                    | whether to create the inline row index        |
-- +----------------------+-------------------------+-----------------------------------------------+
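-- A hedged sketch (the table name and path below are made up) combining several of the properties above in one DDL:
CREATE EXTERNAL TABLE orc_props_demo (
id STRING COMMENT 'row id',
payload STRING COMMENT 'payload'
)
STORED AS ORC
LOCATION '/data/tmp/orc_props_demo'
TBLPROPERTIES (
"orc.compress" = "SNAPPY",
"orc.compress.size" = "262144",
"orc.row.index.stride" = "10000",
"orc.create.index" = "true"
);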

-- Compress the output data
set hive.exec.compress.output=true;
set mapred.output.compression.codec=org.apache.hadoop.io.compress.BZip2Codec;
set mapred.output.compression.type=BLOCK;
set hive.vectorized.execution.enabled=false;
-- Compress intermediate job output
set hive.exec.compress.intermediate=true;
set hive.intermediate.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
set hive.intermediate.compression.type=BLOCK;
-- Compress map/reduce output
set hive.exec.compress.output=true;
set mapred.output.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
set mapred.output.compression.type=BLOCK;

-- Sorting: use DISTRIBUTE BY + SORT BY instead of ORDER BY (ORDER BY funnels everything through a single reducer)
select * from test_inc distribute by ip sort by ip, method;
select * from test_inc order by ip, method limit 100;

-- Operator precedence
NOT > AND > OR
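-- A small illustration (table t and columns a, b, c are hypothetical): since NOT binds tighter than AND, and AND tighter than OR,
-- "where not a and b or c" is read as ((NOT a) AND b) OR c; add parentheses when a different grouping is intended:
select * from t where (not a and b) or c;
select * from t where not (a and (b or c));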

-- Using map join in SparkSQL
-- Cache the small table so it can be broadcast
CACHE TABLE t_config AS SELECT ad_activity_id,brand FROM lxw1234.t_config;
-- Join the big table to the small table
CREATE TABLE t_lxw1234 AS SELECT
a.cookieid,
b.brand,
a.orderid AS ad_activity_id,
a.orderitemid AS ad_id,
a.siteid AS media_id,
a.inventoryid AS ad_area_id,
SUM(1) AS pv
FROM
lxw1234.t_log a
JOIN t_config b ON (a.orderid = b.ad_activity_id)
WHERE
a.pt = '2015-06-15'
GROUP BY
a.cookieid,
b.brand,
a.orderid,
a.orderitemid,
a.siteid,
a.inventoryid;

-- Hive's four analytic (window) functions
-- LAG(col,n,DEFAULT): the value of col on the nth row above the current row within the window
-- First argument: the column; second: how many rows up (optional, default 1); third: the default when that row is NULL or missing (NULL if not given)
-- Shows the value N rows up; if there is none, shows the default, or NULL when no default is given
SELECT cookieid,
createtime,
url,
ROW_NUMBER() OVER(PARTITION BY cookieid ORDER BY createtime) AS rn,
LAG(createtime,1,'1970-01-01 00:00:00') OVER(PARTITION BY cookieid ORDER BY createtime) AS last_1_time,
LAG(createtime,2) OVER(PARTITION BY cookieid ORDER BY createtime) AS last_2_time
FROM lxw1234;
-- +----------+---------------------+------+---+---------------------+-----------------------+--+
-- |cookieid |createtime |url |rn |last_1_time | last_2_time |
-- +----------+---------------------+------+---+---------------------+-----------------------+--+
-- |cookie1 |2015-04-10 10:00:00 |url1 |1 |1970-01-01 00:00:00 | NULL |
-- |cookie1 |2015-04-10 10:00:02 |url2 |2 |2015-04-10 10:00:00 | NULL |
-- |cookie1 |2015-04-10 10:03:04 |1url3 |3 |2015-04-10 10:00:02 | 2015-04-10 10:00:00 |
-- |cookie1 |2015-04-10 10:10:00 |url4 |4 |2015-04-10 10:03:04 | 2015-04-10 10:00:02 |
-- |cookie1 |2015-04-10 10:50:01 |url5 |5 |2015-04-10 10:10:00 | 2015-04-10 10:03:04 |
-- |cookie1 |2015-04-10 10:50:05 |url6 |6 |2015-04-10 10:50:01 | 2015-04-10 10:10:00 |
-- |cookie1 |2015-04-10 11:00:00 |url7 |7 |2015-04-10 10:50:05 | 2015-04-10 10:50:01 |
-- |cookie2 |2015-04-10 10:00:00 |url11 |1 |1970-01-01 00:00:00 | NULL |
-- |cookie2 |2015-04-10 10:00:02 |url22 |2 |2015-04-10 10:00:00 | NULL |
-- |cookie2 |2015-04-10 10:03:04 |1url33|3 |2015-04-10 10:00:02 | 2015-04-10 10:00:00 |
-- |cookie2 |2015-04-10 10:10:00 |url44 |4 |2015-04-10 10:03:04 | 2015-04-10 10:00:02 |
-- |cookie2 |2015-04-10 10:50:01 |url55 |5 |2015-04-10 10:10:00 | 2015-04-10 10:03:04 |
-- |cookie2 |2015-04-10 10:50:05 |url66 |6 |2015-04-10 10:50:01 | 2015-04-10 10:10:00 |
-- |cookie2 |2015-04-10 11:00:00 |url77 |7 |2015-04-10 10:50:05 | 2015-04-10 10:50:01 |
-- +----------+---------------------+------+---+---------------------+-----------------------+--+

-- LEAD is the opposite of LAG
-- LEAD(col,n,DEFAULT): the value of col on the nth row below the current row within the window
-- First argument: the column; second: how many rows down (optional, default 1); third: the default when that row is NULL or missing (NULL if not given)
SELECT cookieid,
createtime,
url,
ROW_NUMBER() OVER(PARTITION BY cookieid ORDER BY createtime) AS rn,
LEAD(createtime,1,'1970-01-01 00:00:00') OVER(PARTITION BY cookieid ORDER BY createtime) AS next_1_time,
LEAD(createtime,2) OVER(PARTITION BY cookieid ORDER BY createtime) AS next_2_time
FROM lxw1234;

-- FIRST_VALUE: after ordering within the partition, the first value up to the current row
SELECT cookieid,
createtime,
url,
ROW_NUMBER() OVER(PARTITION BY cookieid ORDER BY createtime) AS rn,
FIRST_VALUE(url) OVER(PARTITION BY cookieid ORDER BY createtime) AS first1
FROM lxw1234;
-- +----------+---------------------+--------+----+--------+--+
-- |cookieid |createtime |url |rn |first1 |
-- + ---------+---------------------+--------+----+--------+--+
-- | cookie1 |2015-04-10 10:00:00 |url1 |1 |url1 |
-- | cookie1 |2015-04-10 10:00:02 |url2 |2 |url1 |
-- | cookie1 |2015-04-10 10:03:04 |1url3 |3 |url1 |
-- | cookie1 |2015-04-10 10:10:00 |url4 |4 |url1 |
-- | cookie1 |2015-04-10 10:50:01 |url5 |5 |url1 |
-- | cookie1 |2015-04-10 10:50:05 |url6 |6 |url1 |
-- | cookie1 |2015-04-10 11:00:00 |url7 |7 |url1 |
-- | cookie2 |2015-04-10 10:00:00 |url11 |1 |url11 |
-- | cookie2 |2015-04-10 10:00:02 |url22 |2 |url11 |
-- | cookie2 |2015-04-10 10:03:04 |1url33 |3 |url11 |
-- | cookie2 |2015-04-10 10:10:00 |url44 |4 |url11 |
-- | cookie2 |2015-04-10 10:50:01 |url55 |5 |url11 |
-- | cookie2 |2015-04-10 10:50:05 |url66 |6 |url11 |
-- | cookie2 |2015-04-10 11:00:00 |url77 |7 |url11 |
-- +----------+---------------------+--------+----+--------+--+

-- LAST_VALUE: after ordering within the partition, the last value up to the current row (with the default frame that is the current row itself; see the note after this query)
SELECT cookieid,
createtime,
url,
ROW_NUMBER() OVER(PARTITION BY cookieid ORDER BY createtime) AS rn,
LAST_VALUE(url) OVER(PARTITION BY cookieid ORDER BY createtime) AS last1
FROM lxw1234;
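-- Note on the query above: with the default window frame (from the start of the partition to the current row),
-- LAST_VALUE simply returns the current row's own url. To get the last value of the whole partition, spell out the frame:
SELECT cookieid,
createtime,
url,
LAST_VALUE(url) OVER(PARTITION BY cookieid ORDER BY createtime ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS last_url
FROM lxw1234;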

-- WITH clause (common table expressions)
with table1 as (select ...),
table2 as (select ...),
table3 as (select ...)
select table1.item1, table2.item2, table3.item3 from table1, table2, table3 where xxx;
