1. DDL Operations
Copy tables and create views:
create table chjf_test_20bil as select billing_duration, cell_id from lbh_400000w;
create view student_view (id, name_length) as select id, length(name) from student;
create table lbh_16bil like lbh_80000w;
Load HDFS files into a table:
load data inpath '/user/hive/warehouse/lbh_80000w/part-m-00000_copy_6' into table cs.lbh_400000w;
load data inpath '/user/hive/warehouse/lbh_80000w/part-m-00000_copy_2_copy_1' into table cs.lbh_400000w;
load data inpath '/user/hive/warehouse/lbh_80000w/part-m-00000_copy_3' into table cs.lbh_400000w;
load data inpath '/user/hive/warehouse/lbh_80000w/part-m-00000_copy_5' into table cs.lbh_400000w;
load data inpath '/user/hive/warehouse/lbh_80000w/part-m-00000' into table cs.lbh_400000w;
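A quick sanity check after a batch of loads like these (a minimal sketch, reusing the target table above):
select count(1) from cs.lbh_400000w;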
Create a table:
hive> CREATE TABLE pokes (foo INT, bar STRING);
Create a table partitioned by column ds:
hive> CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING);
Show all tables:
hive> SHOW TABLES;
Show tables matching a regular expression:
hive> SHOW TABLES '.*s';
Add a column to a table:
hive> ALTER TABLE pokes ADD COLUMNS (new_col INT);
Add a column with a comment:
hive> ALTER TABLE invites ADD COLUMNS (new_col2 INT COMMENT 'a comment');
Rename a table:
hive> ALTER TABLE events RENAME TO 3koobecaf;
Drop a table:
hive> DROP TABLE pokes;
2. Loading Data:
Load data from a local file into a table:
hive> LOAD DATA LOCAL INPATH './examples/files/kv1.txt' OVERWRITE INTO TABLE pokes;
Load local data, specifying the partition:
hive> LOAD DATA LOCAL INPATH './examples/files/kv2.txt' OVERWRITE INTO TABLE invites PARTITION (ds='2008-08-15');
Load HDFS data, specifying the partition:
hive> LOAD DATA INPATH '/user/myname/kv2.txt' OVERWRITE INTO TABLE invites PARTITION (ds='2008-08-15');
The above command will load data from an HDFS file/directory to the table. Note that loading data from HDFS will result in moving the file/directory. As a result, the operation is almost instantaneous.
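Because the load moves rather than copies, the source path can be checked before and after from the Hive CLI (a minimal sketch reusing the path above; dfs runs HDFS shell commands without leaving Hive):
hive> dfs -ls /user/myname/;
hive> LOAD DATA INPATH '/user/myname/kv2.txt' OVERWRITE INTO TABLE invites PARTITION (ds='2008-08-15');
hive> dfs -ls /user/myname/;
The second listing no longer shows kv2.txt; the file has been moved under the invites partition directory in the warehouse.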
3. SQL Operations
Query with a filter condition:
hive> SELECT a.foo FROM invites a WHERE a.ds='<DATE>';
Write query results to an HDFS directory:
hive> INSERT OVERWRITE DIRECTORY '/tmp/hdfs_out' SELECT a.* FROM invites a WHERE a.ds='<DATE>';
Write query results to a local directory:
hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/local_out' SELECT a.* FROM pokes a;
More examples of writing query results to tables and directories:
hive> INSERT OVERWRITE TABLE events SELECT a.* FROM profiles a;
hive> INSERT OVERWRITE TABLE events SELECT a.* FROM profiles a WHERE a.key < 100;
hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/reg_3' SELECT a.* FROM events a;
hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_4' select a.invites, a.pokes FROM profiles a;
hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_5' SELECT COUNT(1) FROM invites a WHERE a.ds='<DATE>';
hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_5' SELECT a.foo, a.bar FROM invites a;
hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/sum' SELECT SUM(a.pc) FROM pc1 a;
Insert aggregated results from one table into another (two equivalent forms):
hive> FROM invites a INSERT OVERWRITE TABLE events SELECT a.bar, count(1) WHERE a.foo > 0 GROUP BY a.bar;
hive> INSERT OVERWRITE TABLE events SELECT a.bar, count(1) FROM invites a WHERE a.foo > 0 GROUP BY a.bar;
JOIN
hive> FROM pokes t1 JOIN invites t2 ON (t1.bar = t2.bar) INSERT OVERWRITE TABLE events SELECT t1.bar, t1.foo, t2.foo;
Multi-insert: write one source table into several destinations in a single query:
FROM src
INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
INSERT OVERWRITE LOCAL DIRECTORY '/tmp/dest4.out' SELECT src.value WHERE src.key >= 300;
Stream data through an external script with TRANSFORM:
hive> FROM invites a INSERT OVERWRITE TABLE events SELECT TRANSFORM(a.foo, a.bar) AS (oof, rab) USING '/bin/cat' WHERE a.ds > '2008-08-09';
This streams the data in the map phase through the script /bin/cat (like Hadoop Streaming). Streaming can similarly be used on the reduce side; see the Hive tutorial for details, or the sketch below.
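A minimal sketch of the reduce-side pattern (the subquery alias map_output and the reuse of /bin/cat as a stand-in reducer are assumptions, not part of the original notes); CLUSTER BY routes the mapped rows to reducers before the second TRANSFORM runs:
FROM (
  FROM invites a
  SELECT TRANSFORM(a.foo, a.bar) USING '/bin/cat' AS (oof, rab)
  CLUSTER BY oof
) map_output
INSERT OVERWRITE TABLE events
SELECT TRANSFORM(map_output.oof, map_output.rab) USING '/bin/cat' AS (oof, rab);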
4. Worked Examples:
Create a table:
CREATE TABLE u_data (
userid INT,
movieid INT,
rating INT,
unixtime STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE;
Download the sample data file and unpack it:
wget http://www.grouplens.org/system/files/ml-data.tar__0.gz
tar xvzf ml-data.tar__0.gz
Load the data into the table:
LOAD DATA LOCAL INPATH 'ml-data/u.data'
OVERWRITE INTO TABLE u_data;
Count the total number of rows:
SELECT COUNT(1) FROM u_data;
Now for some more complex analysis. Create a file weekday_mapper.py that converts each record's unix timestamp into a weekday:
import sys
import datetime

for line in sys.stdin:
    line = line.strip()
    userid, movieid, rating, unixtime = line.split('\t')
    # derive the weekday from the unix timestamp
    weekday = datetime.datetime.fromtimestamp(float(unixtime)).isoweekday()
    print('\t'.join([userid, movieid, rating, str(weekday)]))
Use the mapping script. First create the target table, with fields split on the delimiter:
CREATE TABLE u_data_new (
userid INT,
movieid INT,
rating INT,
weekday INT)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t';
Register the Python script with Hive:
add FILE weekday_mapper.py;
Transform the data, replacing unixtime with the weekday:
INSERT OVERWRITE TABLE u_data_new
SELECT
TRANSFORM (userid, movieid, rating, unixtime)
USING 'python weekday_mapper.py'
AS (userid, movieid, rating, weekday)
FROM u_data;
SELECT weekday, COUNT(1)
FROM u_data_new
GROUP BY weekday;
Processing Apache weblog data
Parse each web log line with a regular expression and load the extracted fields into the table's columns:
add jar ../build/contrib/hive_contrib.jar;
CREATE TABLE apachelog (
host STRING,
identity STRING,
user STRING,
time STRING,
request STRING,
status STRING,
size STRING,
referer STRING,
agent STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.contrib.serde2.RegexSerDe'
WITH SERDEPROPERTIES (
"input.regex" = "([^ ]*) ([^ ]*) ([^ ]*) (-|//[[^//]]*//]) ([^ /"]*|/"[^/"]*/") (-|[0-9]*) (-|[0-9]*)(?: ([^ /"]*|/"[^/"]*/") ([^ /"]*|/"[^/"]*/"))?",
"output.format.string" = "%1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s"
)
STORED AS TEXTFILE;
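A possible usage sketch for the table above (the local file name access_log.txt is an assumption, not part of the original notes):
LOAD DATA LOCAL INPATH 'access_log.txt' INTO TABLE apachelog;
SELECT status, COUNT(1) FROM apachelog GROUP BY status;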
create table partition_test
(member_id string,
name string
)
partitioned by (
stat_date string,
province string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
alter table test_partition drop partition (dt='2012-03-06');
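The companion commands for adding and listing partitions (a minimal sketch against the same test_partition table; the dt value is illustrative):
alter table test_partition add partition (dt='2012-03-07');
show partitions test_partition;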
create table hive_test1 (
myid double,
source_id string,
prod_inst_id string,
acc_nbr string,
mcall_serv_area_code string,
mcall_serv_visit_code string,
mcall_serv_operator string,
opp_nbr string,
mcall_opp_area_code string,
mcall_opp_visit_code string,
mcall_opp_operator string,
mcall_ip_flag string,
ip_num string,
begin_time string,
end_time string,
mcalltime_duration string,
mcall_user_type string,
mcall_roam_type string,
mcall_call_type string,
mcall_call_flag string,
duration string,
billing_duration string,
ori_charge string,
disct_charge string,
imsi string,
cell_id string,
lac_id string,
source_table string,
source_sys string,
load_time string,
row_cnt string,
local_code string,
month_no string,
id double
) partitioned by (month string);
create table hive_test2 (
myid double,
source_id string,
prod_inst_id string,
acc_nbr string,
mcall_serv_area_code string,
mcall_serv_visit_code string,
mcall_serv_operator string,
opp_nbr string,
mcall_opp_area_code string,
mcall_opp_visit_code string,
mcall_opp_operator string,
mcall_ip_flag string,
ip_num string,
begin_time string,
end_time string,
mcalltime_duration string,
mcall_user_type string,
mcall_roam_type string,
mcall_call_type string,
mcall_call_flag string,
duration string,
billing_duration string,
ori_charge string,
disct_charge string,
imsi string,
cell_id string,
lac_id string,
source_table string,
source_sys string,
load_time string,
row_cnt string,
local_code string,
month_no string,
id double
) partitioned by (month string,mcall_serv string);
create table hive_test3 (
myid double,
source_id string,
prod_inst_id string,
acc_nbr string,
mcall_serv_area_code string,
mcall_serv_visit_code string,
mcall_serv_operator string,
opp_nbr string,
mcall_opp_area_code string,
mcall_opp_visit_code string,
mcall_opp_operator string,
mcall_ip_flag string,
ip_num string,
begin_time string,
end_time string,
mcalltime_duration string,
mcall_user_type string,
mcall_roam_type string,
mcall_call_type string,
mcall_call_flag string,
duration string,
billing_duration string,
ori_charge string,
disct_charge string,
imsi string,
cell_id string,
lac_id string,
source_table string,
source_sys string,
load_time string,
row_cnt string,
local_code string,
month_no string,
id double
) partitioned by (month string,mcall_serv string,mcall_visit string);
insert overwrite local directory '/tmp/tongji' select distinct billing_duration from yidongtest2w;
214 yidongtest regular expression performance:
3441721433 in 1 hour 10 minutes 47 seconds
4075640528 in 2 hours 32 minutes 24 seconds
Quality assessment:
job_201303221620_0517
create table hive_zd (aa TINYINT,ab SMALLINT,ac int,ad bigint,ae boolean,af float,ag double,ah string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
load data local inpath './aa.txt' overwrite into table chjf_test3 partition (fenqu_1ji='20110740',fenqu_2ji='zhejiang',fenqu_3ji='north');
load data local inpath './zd.txt' overwrite into table hive_zd;
create table chjf_gg01(id int ,curtime timestamp, curdate timestamp,name string,price float) row format delimited fields terminated by ',';
create table chjf_test1(dqcode string,dqname string,sfcode string,dqorder string,recflag int) partitioned by (code string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' STORED AS TEXTFILE;
load data local inpath '/home/test/chjftest/a.txt' overwrite into table chjf_test1 partition (code='gdyd');
CREATE EXTERNAL TABLE yidongtest1w (
myid double,
source_id string,
prod_inst_id string,
acc_nbr string,
mcall_serv_area_code string,
mcall_serv_visit_code string,
mcall_serv_operator string,
opp_nbr string,
mcall_opp_area_code string,
mcall_opp_visit_code string,
mcall_opp_operator string,
mcall_ip_flag string,
ip_num string,
begin_time string,
end_time string,
mcalltime_duration string,
mcall_user_type string,
mcall_roam_type string,
mcall_call_type string,
mcall_call_flag string,
duration string,
billing_duration string,
ori_charge string,
disct_charge string,
imsi string,
cell_id string,
lac_id string,
source_table string,
source_sys string,
load_time string,
row_cnt string,
local_code string,
month_no string,
id float )
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\001'
LINES TERMINATED BY '\n'
STORED AS TEXTFILE
LOCATION '/user/test/yidongtestw'
;
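Since yidongtest1w is EXTERNAL, dropping it removes only the metadata; the files under the LOCATION stay in HDFS (a minimal check, reusing the table above):
drop table yidongtest1w;
dfs -ls /user/test/yidongtestw;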