1. Install MySQL to act as the Hive metastore database
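The MySQL install itself is outside these notes; below is a minimal sketch assuming CentOS 7 with MariaDB from the base repos. The JDBC URL in step 3 uses createDatabaseIfNotExist=true, so only the account and its privileges need to exist up front:
sudo yum install -y mariadb-server
sudo systemctl enable --now mariadb
# Allow the metastore to connect as root from master (password matches hive-site.xml in step 3)
mysql -uroot -p -e "GRANT ALL PRIVILEGES ON hive.* TO 'root'@'master' IDENTIFIED BY 'WY@KW9OQpj5Spztd'; FLUSH PRIVILEGES;"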
2. Upload the Hive installation package
sudo mkdir /usr/local/src/hive
sudo chown -R ucmed:ucmed /usr/local/src/hive
cp /tmp/apache-hive-3.1.2-bin.tar.gz /usr/local/src/hive
cd /usr/local/src/hive
tar -xzvf apache-hive-3.1.2-bin.tar.gz
3. Edit the configuration files
cd /usr/local/src/hive/apache-hive-3.1.2-bin/conf
cp hive-exec-log4j2.properties.template hive-exec-log4j2.properties
cp hive-log4j2.properties.template hive-log4j2.properties
cp hive-default.xml.template hive-default.xml
cp hive-env.sh.template hive-env.sh
vim hive-env.sh
export HADOOP_HOME=/usr/local/src/hadoop/hadoop-3.3.1/
export HIVE_CONF_DIR=/usr/local/src/hive/apache-hive-3.1.2-bin/conf/
vim hive-site.xml
Populate it with the following properties. Note that the & in the JDBC URL must be escaped as &amp; inside XML:
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>WY@KW9OQpj5Spztd</value>
  </property>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
  </property>
  <property>
    <name>hive.exec.scratchdir</name>
    <value>/user/hive/tmp</value>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/user/hive/log</value>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://master:9083</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>10000</value>
  </property>
  <property>
    <name>hive.server2.thrift.bind.host</name>
    <value>0.0.0.0</value>
  </property>
  <property>
    <name>hive.server2.webui.host</name>
    <value>0.0.0.0</value>
  </property>
  <property>
    <name>hive.server2.webui.port</name>
    <value>10002</value>
  </property>
  <property>
    <name>hive.server2.long.polling.timeout</name>
    <value>5000</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>false</value>
  </property>
  <property>
    <name>datanucleus.autoCreateSchema</name>
    <value>false</value>
  </property>
  <property>
    <name>datanucleus.fixedDatastore</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.metastore.event.db.notification.api.auth</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.metastore.sasl.enabled</name>
    <value>false</value>
    <description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>NONE</value>
  </property>
  <property>
    <name>hive.execution.engine</name>
    <value>mr</value>
  </property>
  <property>
    <name>hive.stats.autogather</name>
    <value>false</value>
  </property>
</configuration>
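Because an unescaped & would make the file invalid XML, a quick well-formedness check is worthwhile; this assumes xmllint (from libxml2) is available:
xmllint --noout hive-site.xml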
Edit Hadoop's core-site.xml and append the following properties. The proxyuser entries let the ucmed user that runs the Hive services impersonate connecting users; dfs.permissions.enabled is an HDFS setting that conventionally lives in hdfs-site.xml:
vim /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
<property>
  <name>dfs.permissions.enabled</name>
  <value>false</value>
</property>
<property>
  <name>hadoop.proxyuser.ucmed.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.ucmed.groups</name>
  <value>*</value>
</property>
# Sync the configuration file to the slaves
scp /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml slave1:/usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
scp /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml slave2:/usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
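The effective value can be spot-checked on any node with hdfs getconf, which reads the local configuration files directly:
hdfs getconf -confKey hadoop.proxyuser.ucmed.hosts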
4. Copy the JDBC driver Hive needs
Copy it into /usr/local/src/hive/apache-hive-3.1.2-bin/lib:
cp /tmp/mysql-connector-java-5.1.39.jar /usr/local/src/hive/apache-hive-3.1.2-bin/lib
5. Configure the Hive environment variables
sudo vim /etc/profile
export HIVE_HOME=/usr/local/src/hive/apache-hive-3.1.2-bin
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin
source /etc/profile
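A quick check that the new PATH took effect:
hive --version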
6. Restart HDFS and YARN so they pick up the core-site.xml changes
On slave1:
/usr/local/src/hadoop/hadoop-3.3.1/sbin/stop-yarn.sh
/usr/local/src/hadoop/hadoop-3.3.1/sbin/start-yarn.sh
On master:
/usr/local/src/hadoop/hadoop-3.3.1/sbin/stop-dfs.sh
/usr/local/src/hadoop/hadoop-3.3.1/sbin/start-dfs.sh
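After the restart, jps should show NameNode/DataNode and ResourceManager/NodeManager on the relevant nodes. With HDFS back up, the directories referenced in hive-site.xml can be pre-created (a sketch; Hive will usually create them on first use):
jps
hdfs dfs -mkdir -p /user/hive/warehouse /user/hive/tmp /user/hive/log
hdfs dfs -chmod g+w /user/hive/warehouse /user/hive/tmp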
7. Initialize Hive's metastore database
schematool -initSchema -dbType mysql
This generates the corresponding tables in the target database.
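Note: Hive 3.1.2 bundles guava 19 while Hadoop 3.3.1 bundles guava 27, so schematool may abort with a java.lang.NoSuchMethodError mentioning com.google.common.base.Preconditions. A common workaround is to replace Hive's guava jar with Hadoop's (a sketch; verify the exact jar file names in your lib directories):
rm /usr/local/src/hive/apache-hive-3.1.2-bin/lib/guava-19.0.jar
cp /usr/local/src/hadoop/hadoop-3.3.1/share/hadoop/common/lib/guava-27.0-jre.jar /usr/local/src/hive/apache-hive-3.1.2-bin/lib/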
8. Start and test Hive
# Start the metastore service
sudo mkdir -p /opt/logs/hive/
sudo chown -R ucmed:ucmed /opt/logs/hive/
nohup hive --service metastore >> /opt/logs/hive/metastore.log 2>&1 &
# Start HiveServer2
nohup hive --service hiveserver2 >> /opt/logs/hive/hiveserver2.log 2>&1 &
# Open the Hive CLI
hive
Run the following SQL statements to verify basic functionality:
show databases;
create database db_doit;
use db_doit;
create table if not exists tb_user (id int, name string);
show tables;
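The same checks can be run against HiveServer2 through beeline, which exercises the Thrift port end to end (-n passes the OS user; ucmed assumed here):
beeline -u jdbc:hive2://master:10000/default -n ucmed -e 'show databases;'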
Connect to Hive from a third-party tool
DBeaver
Connection URL: jdbc:hive2://192.168.3.184:10000/default
Hive Web UI
http://192.168.3.184:10002/