A partitioned table in Hive corresponds to a separate directory on HDFS, and that directory holds all of the data files for the partition. A partition in Hive is simply a sub-directory: a large data set is split into smaller data sets according to business needs. At query time, selecting only the required partitions through an expression in the WHERE clause makes the query much more efficient.
/user/hive/warehouse/log_partition/20170702/20170702.log
/user/hive/warehouse/log_partition/20170703/20170703.log
/user/hive/warehouse/log_partition/20170704/20170704.log
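To make the mapping concrete, here is a minimal sketch (the log_partition table and its day partition column are illustrative only, not part of the original example). Note that when Hive itself creates the partition directories it names them in col=value form, e.g. day=20170702, and a WHERE filter on the partition column lets Hive scan only the matching directory:
hive (default)> create table log_partition(line string)
                partitioned by (day string)
                row format delimited fields terminated by '\t';
hive (default)> select * from log_partition where day='20170702';  -- only the day=20170702 directory is read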
Create a partition table
hive (default)> create table dept_partition(
deptno int, dname string, loc string
)
partitioned by (month string)
row format delimited fields terminated by '\t';
Note: the partition column must not be a column that already exists in the table; it can be regarded as a pseudo-column of the table.
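Because the partition column behaves like a pseudo-column, it can be selected and filtered just like an ordinary column once data has been loaded (see the next step). A quick illustration, not part of the original walkthrough:
hive (default)> select deptno, dname, loc, month from dept_partition where month='201709';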
3. Load data into the partition table
hive (default)> load data local inpath '/opt/module/datas/dept.txt' into table default.dept_partition partition(month='201709');
hive (default)> load data local inpath '/opt/module/datas/dept.txt' into table default.dept_partition partition(month='201708');
hive (default)> load data local inpath '/opt/module/datas/dept.txt' into table default.dept_partition partition(month='201707');
Note: when loading data into a partitioned table, the partition must be specified.
Query a single partition
hive (default)> select * from dept_partition where month='201709';
Union query across multiple partitions
hive (default)> select * from dept_partition where month='201709'
union
select * from dept_partition where month='201708'
union
select * from dept_partition where month='201707';
_u3.deptno _u3.dname _u3.loc _u3.month
10 ACCOUNTING NEW YORK 201707
10 ACCOUNTING NEW YORK 201708
10 ACCOUNTING NEW YORK 201709
20 RESEARCH DALLAS 201707
20 RESEARCH DALLAS 201708
20 RESEARCH DALLAS 201709
30 SALES CHICAGO 201707
30 SALES CHICAGO 201708
30 SALES CHICAGO 201709
40 OPERATIONS BOSTON 201707
40 OPERATIONS BOSTON 201708
40 OPERATIONS BOSTON 201709
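As a side note (not in the original), the same multi-partition result can usually be written more compactly by filtering the partition column with IN; Hive still applies partition pruning to this predicate:
hive (default)> select * from dept_partition where month in ('201707', '201708', '201709');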
Add a single partition
hive (default)> alter table dept_partition add partition(month='201706');
Create multiple partitions at once (the partition specs are separated by spaces)
hive (default)> alter table dept_partition add partition(month='201705') partition(month='201704');
Drop a single partition
hive (default)> alter table dept_partition drop partition (month='201704');
Drop multiple partitions at once (here the partition specs are separated by commas, unlike add partition)
hive (default)> alter table dept_partition drop partition (month='201705'), partition (month='201706');
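dept_partition is a managed (internal) table, so dropping a partition also removes that partition's directory and data on HDFS. As an optional check, added here for illustration:
hive (default)> dfs -ls /user/hive/warehouse/dept_partition;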
Show how many partitions the table has
hive> show partitions dept_partition;
View the structure of the partition table
hive> desc formatted dept_partition;
# Partition Information
# col_name data_type comment
month string
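Hive can also list only the partitions that match a given partition spec, which is convenient once a table has many partitions (an extra example, not in the original):
hive (default)> show partitions dept_partition partition(month='201709');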
Create a secondary (two-level) partition table
hive (default)> create table dept_partition2(
deptno int, dname string, loc string
)
partitioned by (month string, day string)
row format delimited fields terminated by '\t';
Load data into the secondary partition table
hive (default)> load data local inpath '/opt/module/datas/dept.txt' into table default.dept_partition2 partition(month='201709', day='13');
Query the partition data
hive (default)> select * from dept_partition2 where month='201709' and day='13';
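Filtering on only the first-level partition key also works; as an extra illustration (not in the original), Hive then reads every day sub-directory under month=201709:
hive (default)> select * from dept_partition2 where month='201709';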
There are three ways to make data that is uploaded directly to a partition directory visible to the partition table.
Method 1: upload the data, then repair the table
Upload the data
hive (default)> dfs -mkdir -p /user/hive/warehouse/dept_partition2/month=201709/day=12;
hive (default)> dfs -put /opt/module/datas/dept.txt /user/hive/warehouse/dept_partition2/month=201709/day=12;
Query the data (the newly uploaded data is not returned)
hive (default)> select * from dept_partition2 where month='201709' and day='12';
Run the repair command
hive> msck repair table dept_partition2;
Query the data again
hive (default)> select * from dept_partition2 where month='201709' and day='12';
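To confirm that msck repair registered the new partition in the metastore, the partition list can be checked (a verification step added here for illustration):
hive (default)> show partitions dept_partition2;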
Method 2: upload the data, then add the partition
Upload the data
hive (default)> dfs -mkdir -p /user/hive/warehouse/dept_partition2/month=201709/day=11;
hive (default)> dfs -put /opt/module/datas/dept.txt /user/hive/warehouse/dept_partition2/month=201709/day=11;
Add the partition
hive (default)> alter table dept_partition2 add partition(month='201709',day='11');
Query the data
hive (default)> select * from dept_partition2 where month='201709' and day='11';
Method 3: create the directory, then load the data into the partition
Create the directory
hive (default)> dfs -mkdir -p /user/hive/warehouse/dept_partition2/month=201709/day=10;
Load the data
hive (default)> load data local inpath '/opt/module/datas/dept.txt' into table dept_partition2 partition(month='201709',day='10');
Query the data
hive (default)> select * from dept_partition2 where month='201709' and day='10';
简书:https://www.jianshu.com/u/0278602aea1d
CSDN:https://blog.csdn.net/u012387141