create database hivetest;
Hive comes with a built-in database named default; if you do not specify a database, all tables are created there.
hive> show databases;
default
hivetest
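To have new tables land in hivetest rather than default, switch the current database first; a minimal sketch (the comment only notes the effect, nothing here is specific to this dataset):
use hivetest;
-- tables created after this statement go into hivetest instead of default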
The CREATE TABLE syntax is largely the same as in MySQL:
create table querylog (time string,userid string,keyword string,pagerank int,clickorder int,url string) ;
If you plan to load file data into the table, you often need to declare the field delimiter used in the file.
Fields separated by '\t':
create table querylog (time string,userid string,keyword string,pagerank int,clickorder int,url string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ;
Sometimes you need to specify not only the delimiter but also the storage format.
Fields separated by '\t', stored as TEXTFILE:
create table querylogtext (time string,userid string,keyword string,pagerank int,clickorder int,url string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
Load the data into the table created above.
If the file is on the local Linux filesystem, add LOCAL; if it is already on HDFS, omit it:
load data local inpath '/home/hadoop/app/data/sogou.200w.utf8' into table querylog;
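A quick way to confirm the load worked is to sample a few rows and count them (the count should match the source file; the name sogou.200w.utf8 suggests 2,000,000 rows, but that is an assumption):
select * from querylog limit 3;
select count(*) from querylog;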
TEXTFILE is the default format: data is stored uncompressed, so both disk usage and parsing overhead are high.
create table querylogtext (time string,userid string,keyword string,pagerank int,clickorder int,url string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
SequenceFile is a binary file format provided by the Hadoop API; it is easy to use, splittable, and compressible.
create table querylogseq (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS SEQUENCEFILE;
Parquet is a columnar storage format.
create table querylogparquet (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS PARQUET;
RCFile combines row and column storage. Data is first split into row groups, so a single record always sits in one block and reading a record never touches multiple blocks. Within each row group the data is stored column-wise, which helps compression and fast column access.
In practice RCFile currently shows no performance advantage; it only saves about 10% of storage space.
create table querylogrcf (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS RCFILE;
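To verify which storage format a table actually ended up with, DESCRIBE FORMATTED lists its InputFormat, OutputFormat, and SerDe; for example:
describe formatted querylogrcf;
-- check the InputFormat / OutputFormat / SerDe Library rows in the output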
Raw data size: 219 MB. Below, the same dataset is stored in each format and the same SQL query is run against it.
Table creation statement:
create table querylogtext (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS TEXTFILE;
Space used on HDFS: 218.98 MB
hadoop fs -du /user/hive/warehouse/querylogtext|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 60.275 seconds
select * from ( select url,count(*) as c from querylogtext group by url having c>1 ) a order by c desc limit 100 ;
Table creation statement:
create table querylogseq (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS SEQUENCEFILE;
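The original notes do not show how the non-text tables were populated; presumably each one was filled from the text table with an INSERT ... SELECT (this is the "data import time" the summary at the end refers to). A sketch under that assumption:
insert overwrite table querylogseq select * from querylog;
-- querylogparquet, querylogparquetsnappy, querylogrcf and querylogorc would be filled the same way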
Space used on HDFS: 244.682 MB
hadoop fs -du /user/hive/warehouse/querylogseq|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 65.491 seconds
select * from ( select url,count(*) as c from querylogseq group by url having c>1 ) a order by c desc limit 100 ;
Table creation statement:
create table querylogparquet (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS PARQUET;
Space used on HDFS: 218.18 MB
hadoop fs -du /user/hive/warehouse/querylogparquet|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 65.785 seconds
select * from ( select url,count(*) as c from querylogparquet group by url having c>1 ) a order by c desc limit 100 ;
Table creation statement (with Snappy compression enabled for Parquet):
set parquet.compression=snappy;
create table querylogparquetsnappy (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS PARQUET;
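Instead of the session-level set parquet.compression=snappy, newer Hive versions also let you pin the codec to the table itself via a table property, analogous to the ORC table further down; a hedged alternative to the two statements above:
create table querylogparquetsnappy (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS PARQUET TBLPROPERTIES ("parquet.compression"="SNAPPY");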
Space used on HDFS: 131.237 MB
hadoop fs -du /user/hive/warehouse/querylogparquetsnappy|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 66.315 seconds
select * from ( select url,count(*) as c from querylogparquetsnappy group by url having c>1 ) a order by c desc limit 100 ;
Compared with uncompressed Parquet, storage drops from about 218 MB to 131 MB, while SQL execution time barely increases.
Table creation statement:
create table querylogrcf (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS RCFILE;
Space used on HDFS: 211.499 MB
hadoop fs -du /user/hive/warehouse/querylogrcf|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 60.729 seconds
select * from ( select url,count(*) as c from querylogrcf group by url having c>1 ) a order by c desc limit 100 ;
Table creation statement (ORC with Snappy compression):
create table querylogorc (time string,userid string,keyword string,pagerank int,clickorder int,url string) STORED AS ORC TBLPROPERTIES ("orc.compress"="SNAPPY");
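To double-check that the compression setting was stored on the table, the table properties can be listed with SHOW TBLPROPERTIES (standard HiveQL; shown here only as a verification step, not part of the original benchmark):
show tblproperties querylogorc;
show tblproperties querylogorc("orc.compress");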
Space used on HDFS: 85.6077 MB
hadoop fs -du /user/hive/warehouse/querylogorc|awk '{ SUM += $1 } END { print SUM/(1024*1024)}'
SQL execution time: Time taken: 60.295 seconds
select * from ( select url,count(*) as c from querylogorc group by url having c>1 ) a order by c desc limit 100 ;
In summary: for this dataset, ORC + Snappy gives the best result in terms of both compression ratio and SQL query time, but compressing into ORC takes longer on load: importing the data took 28.319 seconds for Parquet + Snappy versus 47.014 seconds for ORC + Snappy.
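For reference, the sizes and query times reported above, side by side (raw data: 219 MB; all numbers taken from the measurements in this post):
Format              HDFS size      Query time
TEXTFILE            218.98 MB      60.275 s
SEQUENCEFILE        244.682 MB     65.491 s
PARQUET             218.18 MB      65.785 s
PARQUET + Snappy    131.237 MB     66.315 s
RCFILE              211.499 MB     60.729 s
ORC + Snappy        85.6077 MB     60.295 s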