@羲凡: just to live a better life
Prerequisites: install Java 8, a MySQL client, and Docker.
hostname | FE-Follower | FE-Observer | BE | mysql |
---|---|---|---|---|
10.218.223.96 | √ | | √ | √ |
10.218.223.97 | | √ | √ | |
10.218.223.98 | | | √ | |
Work on 10.218.223.96. I use the root user; if you do not have root, use an account with sudo privileges.
docker pull apachedoris/doris-dev:build-env-1.2
# docker run -it -v /your/local/.m2:/root/.m2 -v /your/local/incubator-doris-DORIS-x.x.x-release/:/root/incubator-doris-DORIS-x.x.x-release/ apachedoris/doris-dev:build-env
docker run -it -v /opt/modules/complie-doris/.m2:/root/.m2 -v /opt/modules/complie-doris/incubator-doris-DORIS-0.13.0-release/:/root/incubator-doris-DORIS-0.13.0-release/ apachedoris/doris-dev:build-env-1.2
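The two -v options bind host directories into the build container (a Maven repository cache and the directory the source will live in). Creating them beforehand keeps their ownership and contents predictable; a minimal sketch, using the paths from the command above:
mkdir -p /opt/modules/complie-doris/.m2
mkdir -p /opt/modules/complie-doris/incubator-doris-DORIS-0.13.0-release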
cd incubator-doris-DORIS-0.13.0-release
git clone https://github.com/apache/incubator-doris.git
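git clone checks out the repository's default branch. If you specifically want the 0.13.0 release that the directory name refers to, switch to the matching release branch or tag after cloning; a sketch, assuming the branch is named DORIS-0.13.0-release (verify the exact name first):
git -C incubator-doris branch -a | grep -i 0.13    # list candidate release branches
git -C incubator-doris checkout DORIS-0.13.0-release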
cd /root/incubator-doris-DORIS-0.13.0-release/incubator-doris
sh build.sh
cd /root/incubator-doris-DORIS-0.13.0-release/incubator-doris/fs_brokers/apache_hdfs_broker/
sh build.sh
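Before copying anything out, it helps to confirm the build artifacts exist. A quick check (the exact layout can vary by version; in particular the broker artifacts may sit under fs_brokers/apache_hdfs_broker/output rather than the top-level output):
ls /root/incubator-doris-DORIS-0.13.0-release/incubator-doris/output
# expect at least fe/ and be/ here, plus the apache_hdfs_broker output produced by the broker build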
Copy the fe folder from the build output directory to the deployment path /opt/doris on 10.218.223.96.
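For example, mirroring the scp commands used for the BE below (run from the incubator-doris source directory on the build host; adjust if you copy the files some other way):
scp -r output/fe/ 10.218.223.96:/opt/doris/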
(Use ip a to check this machine's network segment; priority_networks below must match it.)
cd /opt/doris/fe
mkdir /opt/doris/fe/doris-meta #### this path must, must, must be created in advance
vim conf/fe.conf
############### add the following two lines ###############
#################### begin ####################
meta_dir = /opt/doris/fe/doris-meta
priority_networks = 10.218.223.96/22
#################### end ####################
cd /opt/doris/fe
sh bin/start_fe.sh --daemon
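To confirm the FE is up, one option is to connect with the mysql client (9030 is the default query_port) and list the frontends; a quick sketch:
mysql -h 10.218.223.96 -P 9030 -uroot
SHOW PROC '/frontends';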
Copy the be folder from the build output directory to the deployment path /opt/doris on all three machines.
scp -r output/be/ 10.218.223.96:/opt/doris/
scp -r output/be/ 10.218.223.97:/opt/doris/
scp -r output/be/ 10.218.223.98:/opt/doris/
(Use ip a to check each machine's network segment; priority_networks below must match it.)
cd /opt/doris/be
mkdir -p /opt/doris/be/storage #### this path must, must, must be created in advance
vim conf/be.conf
############### add the following two lines ###############
#################### begin ####################
storage_root_path = /opt/doris/be/storage
priority_networks = 10.218.223.96/22
#################### end ####################
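These two be.conf lines are needed on every BE machine, and priority_networks should match each machine's own address. For example, on 10.218.223.97 they would read:
storage_root_path = /opt/doris/be/storage
priority_networks = 10.218.223.97/22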
# mysql -h host -P port -uroot
mysql -h 10.218.223.96 -P 9030 -uroot
# ALTER SYSTEM ADD BACKEND "host:port";
ALTER SYSTEM ADD BACKEND "10.218.223.96:9050" ;
ALTER SYSTEM ADD BACKEND "10.218.223.97:9050" ;
ALTER SYSTEM ADD BACKEND "10.218.223.98:9050" ;
# on each of the three BE machines:
cd /opt/doris/be
sh bin/start_be.sh --daemon
mysql -h 10.218.223.96 -P 9030 -uroot
SHOW PROC '/backends';  -- all three BEs should report Alive: true once their heartbeats succeed
Copy the apache_hdfs_broker folder from the build output directory to the deployment path /opt/doris on all three machines.
scp -r output/apache_hdfs_broker/ 10.218.223.96:/opt/doris/
scp -r output/apache_hdfs_broker/ 10.218.223.97:/opt/doris/
scp -r output/apache_hdfs_broker/ 10.218.223.98:/opt/doris/
# on each of the three machines:
cd /opt/doris/apache_hdfs_broker
rm -rf conf/hdfs-site.xml
cp /etc/hadoop/conf.cloudera.hdfs/hdfs-site.xml conf/
sh bin/start_broker.sh --daemon
# mysql -h host -P port -uroot
mysql -h 10.218.223.96 -P 9030 -uroot
# ALTER SYSTEM ADD BROKER broker_name "host1:port1","host2:port2",...;
ALTER SYSTEM ADD BROKER broker_name "10.218.223.96:8000","10.218.223.97:8000","10.218.223.98:8000";
SHOW PROC '/brokers';  -- the three brokers should report Alive: true
Copy the fe folder from the build output directory to the deployment path /opt/doris on 10.218.223.97.
(Use ip a to check this machine's network segment; priority_networks below must match it.)
cd /opt/doris/fe
mkdir /opt/doris/fe/doris-meta #### this path must, must, must be created in advance
vim conf/fe.conf
############### add the following two lines ###############
#################### begin ####################
meta_dir = /opt/doris/fe/doris-meta
priority_networks = 10.218.223.97/22
#################### end ####################
cd /opt/doris/fe
# sh bin/start_fe.sh --helper host:port --daemon
sh bin/start_fe.sh --helper 10.218.223.96:9010 --daemon
# mysql -h host -P port -uroot
mysql -h 10.218.223.96 -P 9030 -uroot
# ALTER SYSTEM ADD OBSERVER "host:port";
ALTER SYSTEM ADD OBSERVER "10.218.223.97:9010";
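Once the observer has been added and started, it should appear in the frontend list; one way to check (a sketch):
SHOW PROC '/frontends';  -- the 10.218.223.97 entry should show Role OBSERVER and Alive true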
# test data (save it to a file named table1_data; the load commands below read that file)
1,1,jim,2
2,1,grace,2
3,2,tom,2
4,3,bush,3
5,3,helen,3
mysql -h 10.218.223.96 -P 9030 -uroot
# change the root password
SET PASSWORD FOR 'root' = PASSWORD('123456');
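Note that any later root connection will need this password, for example:
mysql -h 10.218.223.96 -P 9030 -uroot -p123456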
# create a database
CREATE DATABASE example_db;
USE example_db;
# create a table
CREATE TABLE table1
(
siteid INT DEFAULT '10',
citycode SMALLINT,
username VARCHAR(32) DEFAULT '',
pv BIGINT SUM DEFAULT '0'
)
AGGREGATE KEY(siteid, citycode, username)
DISTRIBUTED BY HASH(siteid) BUCKETS 10
PROPERTIES("replication_num" = "3");
# if the data file sits on an FE node, use the form below; the main difference is the port (FE http_port 8030)
curl --location-trusted -u root:123456 -H "label:table1_20201216" -H "column_separator:," -T table1_data http://10.218.223.96:8030/api/example_db/table1/_stream_load
# if the data file sits on a BE node, use the form below; again the main difference is the port (BE webserver port 8040)
curl --location-trusted -u root:123456 -H "label:table1_20201217" -H "column_separator:," -T table1_data http://10.218.223.98:8040/api/example_db/table1/_stream_load;
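To verify the loads landed, a simple check (a sketch):
SELECT * FROM table1;
SELECT siteid, SUM(pv) FROM table1 GROUP BY siteid;  -- pv is the SUM aggregate column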
SHOW PROC '/brokers';  -- check what your broker name is; as added above, here it is broker_name
HELP BROKER LOAD;
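The broker load below reads the test file from hdfs://ns/tmp/table1_data; one way to put it there, assuming an HDFS client configured for the ns nameservice:
hdfs dfs -put table1_data /tmp/table1_data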
LOAD LABEL table1_20201212
(
DATA INFILE("hdfs://ns/tmp/table1_data")
INTO TABLE table1
COLUMNS TERMINATED BY ","
)
WITH BROKER broker_name
(
"hadoop.security.authentication"="simple",
"username"="hdfs",
"password"="hdfs",
"dfs.nameservices" = "ns",
"dfs.ha.namenodes.ns" = "namenode30, namenode55",
"dfs.namenode.rpc-address.ns.namenode30" = "yc-nsg-h2:8020",
"dfs.namenode.rpc-address.ns.namenode55" = "yc-nsg-h3:8020",
"dfs.client.failover.proxy.provider" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
)
PROPERTIES
(
"timeout"="3600",
"max_filter_ratio"="0.1",
"timezone"="Asia/Shanghai"
);
-- check whether the load succeeded
show load where label = 'table1_20201212' order by createtime desc limit 3;
====================================================================
@羲凡: just to live a better life
If you have any questions about this post, feel free to leave a comment.