Hive DML operations
-- LOAD DATA loads files into a table without transforming the data; it only copies (local to HDFS) or moves (files already on HDFS)
-- If the table is partitioned, a value must be given for every partition column; filepath can refer to a file or a directory
-- If the data is already on HDFS and filepath is not absolute, Hive interprets it relative to /user/, and the files are moved (remember this)
-- If OVERWRITE is specified, the contents of the target table (or partition) are deleted first, then the data is moved in. Without it, if a filename conflicts, the existing file is replaced by the new one.
load data [local] inpath 'filepath' [overwrite]
  into table tablename
  [partition (p1=val1,p2=val2...)]
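-- a minimal sketch (the page_view table and file path are assumptions):
-- load a local file into one partition, replacing its current contents
load data local inpath '/tmp/pv_2014-02-12.txt'
overwrite into table page_view
partition (dt='2014-02-12');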
-- insert query results into a Hive table
insert [overwrite] table table_name1 [partition (part1=val1,part2=val2)]
select * from table_name2
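-- a minimal sketch (table names are assumptions): populate one partition
-- of a summary table from a query
insert overwrite table page_view_summary partition (dt='2014-02-12')
select userid, count(*) from page_view where dt='2014-02-12' group by userid;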
-- extension
from from_statement
insert [overwrite] table table_name1 [partition (part1=val1,part2=val2)]
select_statement1
insert [overwrite] table table_name2 [partition (part1=val1,part2=val2)]
select_statement2
-- write query results to the file system
insert [overwrite] [local] directory directory1 select ... from ...
-- extension
from from_statement
insert [overwrite] [local] directory directory1 select_statement1
insert [overwrite] [local] directory directory2 select_statement2
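-- a minimal sketch of the multi-directory form (table and paths are
-- assumptions): one scan of sales feeds two output directories
from sales s
insert overwrite directory '/tmp/sales_high' select s.* where s.amount > 100
insert overwrite local directory '/tmp/sales_low' select s.* where s.amount <= 100;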
-- the select statement
select [all | distinct] c1,c2...
from table_name
[where where_condition]
[group by col_list]
[
  cluster by col_list |
  distribute by col_list [sort by col_list]
]
[limit number];
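-- a minimal sketch (table and columns are assumptions): distribute by sends
-- all rows with the same region to the same reducer; sort by then orders
-- rows only within each reducer
select region, amount from sales
distribute by region
sort by amount desc;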
-- query all rows
select * from t1;
-- filter and de-duplicate
select * from sales where amount > 10 and region = 'cs';
select distinct col1,col2 from t1;
select distinct col1 from t1;
select * from t1 limit 5;
-- output the top-k rows (here k = 5); a single reducer guarantees a total order
set mapred.reduce.tasks=1;
select * from sales sort by amount desc limit 5;
-- partition-based queries
select p.*
  from page_view p
  where p.date >= '20140212' and p.date <= '20140212';
-- Hive does not support HAVING (in older versions); the same effect can be achieved with a subquery
select col1 from t1 group by col1 having sum(col2) > 10;
select col1 from (
  select col1, sum(col2) as col2sum from t1 group by col1
) t2
where t2.col2sum > 10;
-- write query results to a directory
insert overwrite [local] directory '/user/output'
select a.* from invites a where a.ds = '20120101';
-- group by
from invites a
insert overwrite table events
select a.bar, count(*) where a.foo > 0 group by a.bar;
-- equivalent standalone form:
insert overwrite table events
select a.bar, count(*) from invites a where a.foo > 0 group by a.bar;
-- join
from pokes t1 join invites t2 on (t1.bar = t2.bar)
insert overwrite table events
select t1.bar, t1.foo, t2.foo;
-- multi-table insert
from src
insert overwrite table dest1 select src.* where src.key < 100
insert overwrite table dest2 select src.key,src.value where src.key > 100 and src.key < 200
...;
-- streaming: TRANSFORM pipes rows through an external script ('/bin/cat' simply echoes its input)
from invites a
insert overwrite table events
select transform(a.foo, a.bar) using '/bin/cat' as (oof, rab) where a.ds = '20120212';
-- Hive SQL worked example
create table u_data(
  userid int,
  movieid int,
  rating int,
  unixtime string
)
row format delimited
fields terminated by '\t'
stored as textfile;
load data local inpath 'u.data'
overwrite into table u_data;
select count(1) from u_data;
mapper.py
import sys
import datetime

# read tab-separated rows from stdin and replace the unix timestamp
# with the ISO weekday number
for line in sys.stdin:
    line = line.strip()
    userid, movieid, rating, unixtime = line.split('\t')
    weekday = datetime.datetime.fromtimestamp(float(unixtime)).isoweekday()
    print('\t'.join([userid, movieid, rating, str(weekday)]))
-- target table for the transformed data
create table u_data_new(
  userid int,
  movieid int,
  rating int,
  weekday int
)
row format delimited
fields terminated by '\t';
-- add the script to Hive's distributed cache so TRANSFORM can use it
add file mapper.py;
insert overwrite table u_data_new
select transform(userid,movieid,rating,unixtime) using 'python mapper.py' as (userid,movieid,rating,weekday)
from u_data;
select weekday,count(*) from u_data_new group by weekday;
-- Apache access logs
add jar ...hive_contrib.jar;  -- add the jar to Hive's classpath
create table apachelog(
  host string,
  identity string,
  user string,
  time string,
  request string,
  status string,
  size string,
  referer string,
  agent string
)
row format serde '.....RegexSerDe'
with serdeproperties(
  '...'='...'
)
stored as textfile;
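-- a minimal sketch of what the elided SerDe settings typically look like:
-- the contrib class is org.apache.hadoop.hive.contrib.serde2.RegexSerDe, and
-- its key properties are 'input.regex' (one capture group per column) and
-- 'output.format.string'; the table name and regex below are simplified
-- illustrations, not a full combined-log pattern
create table apachelog_sketch (
  host string,
  request string,
  status string
)
row format serde 'org.apache.hadoop.hive.contrib.serde2.RegexSerDe'
with serdeproperties (
  'input.regex' = '(\\S+) "([^"]*)" (\\d+).*',
  'output.format.string' = '%1$s %2$s %3$s'
)
stored as textfile;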
Hive web interface
hive --service hwi    -- start the service
http://master:9999/hwi
Hive JDBC operations
1. Start the listener service:
hive --service hiveserver
Reposted from: https://www.cnblogs.com/jsunday/p/3872755.html