Official site: hive.apache.org
Architecture overview:
I. Hive's underlying execution engines: MapReduce, Tez, Spark
Hive on MapReduce
Hive on Tez
Hive on Spark
Compression codecs: GZIP, LZO, Snappy, BZIP2, etc.
Storage formats: TextFile, SequenceFile, RCFile, ORC, Parquet (a sketch pairing a format with a codec follows this list)
UDF: user-defined functions
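A minimal sketch of how a storage format and a compression codec pair up; the table name and columns are made up, and ORC + Snappy is just one common combination:
CREATE TABLE page_views_orc (
  url   STRING,
  views INT
)
STORED AS ORC                              -- columnar storage format
TBLPROPERTIES ("orc.compress"="SNAPPY");   -- Snappy-compressed ORC stripes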
II. Hive environment setup
1) Download Hive: http://archive.cloudera.com/cdh5/cdh/5/
wget http://archive.cloudera.com/cdh5/cdh/5/hive-1.1.0-cdh5.7.0.tar.gz
2) Extract
tar -zxvf hive-1.1.0-cdh5.7.0.tar.gz -C ~/app/
3) Configure
System environment variables (~/.bash_profile):
export HIVE_HOME=/home/hadoop/app/hive-1.1.0-cdh5.7.0
export PATH=$HIVE_HOME/bin:$PATH
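After editing the profile, reload it so the new PATH takes effect (a routine step, not spelled out in the original notes):
source ~/.bash_profile
echo $HIVE_HOME   # should print /home/hadoop/app/hive-1.1.0-cdh5.7.0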
First install MySQL (it will hold Hive's metastore): yum install xxx (sketch below)
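The notes leave the package name as xxx; on CentOS 6, which CDH 5.7 commonly ran on (an assumption), the sequence would look roughly like:
yum install -y mysql-server          # package name assumed for CentOS 6
service mysqld start
mysqladmin -u root password 'root'   # match the root/root credentials in hive-site.xml below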
In hive-env.sh, point HADOOP_HOME at the actual Hadoop installation path:
HADOOP_HOME=/home/hadoop/app/hadoop-2.6.0-cdh5.7.0
hive-site.xml:
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/sparksql?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>root</value>
  </property>
</configuration>
4) Copy the MySQL JDBC driver jar into $HIVE_HOME/lib/ (example below)
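The exact jar name depends on the driver version downloaded; assuming a 5.1.x connector (version hypothetical):
cp mysql-connector-java-5.1.27-bin.jar $HIVE_HOME/lib/   # jar name/version assumed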
5) Start Hive: $HIVE_HOME/bin/hive
6) Create a table
CREATE TABLE table_name
[(col_name data_type [COMMENT col_comment])]
create table hive_wordcount(context string);
7) Load data into a Hive table
LOAD DATA LOCAL INPATH 'filepath' INTO TABLE tablename
load data local inpath '/home/hadoop/data/hello.txt' into table hive_wordcount;
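A quick sanity check (my addition): every row of the table should now hold one full line of hello.txt:
select * from hive_wordcount;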
8) Query
select word, count(1) from hive_wordcount lateral view explode(split(context,'\t')) wc as word group by word;
lateral view explode(): split() breaks each line into an array of words on the given delimiter, and explode() expands that array into one row per word (see the sketch below)
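A minimal sketch of the expansion, assuming hello.txt holds tab-separated words:
-- given a row whose context is 'hello\tworld':
--   split(context, '\t')  -> ["hello", "world"]
--   explode() then yields one row per element
select explode(split(context, '\t')) as word from hive_wordcount;
-- output:
--   hello
--   world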
Once submitted, a Hive QL statement is compiled into MapReduce jobs that run on YARN.
create table emp(
  empno int,
  ename string,
  job string,
  mgr int,
  hiredate string,
  sal double,
  comm double,
  deptno int
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';

create table dept(
  deptno int,
  dname string,
  location string
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
load data local inpath '/home/hadoop/data/emp.txt' into table emp;
load data local inpath '/home/hadoop/data/dept.txt' into table dept;
Count the number of employees in each department:
select deptno, count(1) from emp group by deptno;
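The dept table loaded above is untouched by this query; a natural follow-up (my addition, not in the original notes) joins it in to report by department name:
select d.dname, count(1)
from emp e join dept d on e.deptno = d.deptno
group by d.dname;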