-- Static-partitioned employee table; data files are tab-delimited text.
-- Partition columns (day, hour) become directory levels, not data columns.
CREATE TABLE emp_part1 (
    empno    INT,
    empname  STRING,
    empjob   STRING,
    mgrno    INT,
    birthday STRING,
    salary   FLOAT,
    bonus    FLOAT,
    deptno   INT
)
PARTITIONED BY (day STRING, hour STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-- Manually register an empty partition (creates the day=20170306/hour=0
-- directory and its metadata entry), then drop it again -- demonstrates
-- partition DDL on a static-partitioned table.
alter table emp_part1 add partition (day='20170306',hour='0');
-- Dropping removes the partition's metadata and, for a managed table, its data.
alter table emp_part1 drop partition (day='20170306',hour='0');
-- Load the same local file into several static partitions; each LOAD copies
-- the file into that partition's directory (day=.../hour=...) and registers
-- the partition if it does not yet exist.
load data local inpath '/home/user01/emp.txt' into table emp_part1 partition (day='20170308',hour='9');
load data local inpath '/home/user01/emp.txt' into table emp_part1 partition (day='20170308',hour='10');
load data local inpath '/home/user01/emp.txt' into table emp_part1 partition (day='20170308',hour='14');
load data local inpath '/home/user01/emp.txt' into table emp_part1 partition (day='20170309',hour='10');
-- Filter on the partition column so Hive prunes to matching directories
-- instead of scanning the whole table.
-- (Fixed: the statement was missing its terminating semicolon.)
select * from emp_part1 where day='20170308';
-- Narrow further with the sub-partition column.
select * from emp_part1 where day='20170308' and hour='14';
-- List every partition registered for the table.
-- (Fixed typo: "partitons" -> "partitions".)
show partitions emp_part1;
-- 分区可以理解为分类,通过分类把不同类型,时间,地域的数据放到不同的目录下。
-- 分类的标准就是分区字段,可以一个,也可以多个。
-- 分区表的意义在于优化查询。查询时尽量利用分区字段。如果不使用分区字段,就会全表扫描。
--
-- 动态分区表:用于多维度数据处理及查询。
--   严格模式:partitioned by (county string,states string) 中至少有一个分区必须是静态分区
--   非严格模式:partitioned by (county string,states string) 中所有分区字段都可以是动态分区
-- 需要设置以下参数 (是否开启动态分区功能, 0.13版本默认开启):
-- Enable dynamic partitioning (on by default since Hive 0.13).
set hive.exec.dynamic.partition=true;
-- Dynamic-partition mode: the default "strict" requires at least one static
-- partition column; "nonstrict" allows every partition column to be dynamic.
-- (Fixed invalid value: Hive rejects "nostrict"; the setting is "nonstrict".)
set hive.exec.dynamic.partition.mode=nonstrict;
-- Table whose data files are ';'-delimited. The semicolon must be escaped
-- ('\;') in the DDL so the Hive CLI does not treat it as a statement end.
create table dypart(
id int,
name string
)
partitioned by (addr string)
row format delimited fields terminated by '\;';
-- Dynamic-partition inserts run through MapReduce, so the data cannot be
-- brought in with LOAD DATA; it must be written via INSERT ... SELECT.
-- The last SELECT column (addr) feeds the dynamic partition column.
-- (Fixed comment syntax: Hive scripts use "--", not "//".)
insert into table dypart partition (addr) select deptno,deptname,addr as addr from dept;
-- External, two-level partitioned table stored at a fixed HDFS location.
create external table dypart2(
empno int,
empname string,
empjob string,
mgrno int,
birthday string,
salary int,
bonus float,
deptno int
)
partitioned by (country string,province string)
row format delimited fields terminated by '\t'
location '/hive/dynamic/dypart2';
-- The LOCATION directory need not exist beforehand (CREATE TABLE creates it),
-- but for an external table the directory and its data usually already exist.
-- (Fixed comment syntax: Hive scripts use "--", not "//".)
-- Strict mode: the leading (major) partition column must be static;
-- trailing partition columns may be dynamic.
-- (Fixed: the SET statement was missing its terminating semicolon.)
set hive.exec.dynamic.partition.mode=strict;
-- country is static ('usa'); province comes from the last SELECT column.
-- NOTE(review): column names mgno/depno differ from emp_part1's mgrno/deptno --
-- confirm they match the actual schema of the emp table.
insert into table dypart2 partition (country='usa',province) select empno,empname,empjob,mgno,birthday,salary,bonus,depno,depno as province from emp;
insert into table dypart2 partition (country='usa',province) select c.empno,c.empname,c.empjob,c.mgno,c.birthday,c.salary,c.bonus,c.depno,c.deptname as province from (select * from emp a join dept b on a.depno=b.deptno) c;
-- Overwrite import: OVERWRITE replaces existing partition data; it cannot
-- be combined with INTO in the same statement.
insert overwrite table dypart2 partition (country='china',province) select a.empno,a.empname,a.empjob,a.mgno,a.birthday,a.salary,a.bonus,a.depno,b.deptname as province from emp a join dept b on a.depno=b.deptno;
-- 桶表: 将内部表,外部表和分区表进一步组织成桶表; 可以将表的列通过Hash算法进一步分解成不同的文件存储。
-- Bucketed table: rows are hashed on id into 4 files per table directory.
-- The '|' delimiter is escaped ('\|') in the DDL.
create table test_bucket_table(
id int,
name string,
addr string
)
clustered by (id) into 4 buckets
row format delimited fields terminated by '\|';
-- Force Hive to honor the declared bucket count when writing.
-- (Fixed: stray backtick and "//" comment syntax replaced with "--".)
set hive.enforce.bucketing=true;
insert overwrite table test_bucket_table select * from dept;
-- Without hive.enforce.bucketing, set the reducer count equal to the bucket
-- count and add CLUSTER BY to the SELECT so rows land in the right buckets.
set mapred.reduce.tasks=4;
insert into table test_bucket_table select * from dept cluster by deptno;