| | Master | Slave | Slave |
|---|---|---|---|
| Hostname | anshun112 | anshun113 | anshun114 |
| IP address | 192.168.153.112 | 192.168.153.113 | 192.168.153.114 |
| Username | zhangyong | zhangyong | zhangyong |
| Disk layout | /boot 200M, /swap 2048M, / remaining space | /boot 200M, /swap 2048M, / remaining space | /boot 200M, /swap 2048M, / remaining space |
| Firewall | service iptables stop; chkconfig iptables off (disabled at boot) | service iptables stop; chkconfig iptables off | service iptables stop; chkconfig iptables off |
| Network config | ONBOOT=yes BOOTPROTO=static IPADDR=192.168.153.112 GATEWAY=192.168.153.2 NETMASK=255.255.255.0 DNS1=114.114.114.114 DNS2=8.8.8.8 | ONBOOT=yes BOOTPROTO=static IPADDR=192.168.153.113 GATEWAY=192.168.153.2 NETMASK=255.255.255.0 DNS1=114.114.114.114 DNS2=8.8.8.8 | ONBOOT=yes BOOTPROTO=static IPADDR=192.168.153.114 GATEWAY=192.168.153.2 NETMASK=255.255.255.0 DNS1=114.114.114.114 DNS2=8.8.8.8 |
| Passwordless SSH | Configured | Configured | Configured |
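For the hostnames above to resolve on every machine, each node also needs matching name resolution; a minimal /etc/hosts sketch (taken from the IPs and hostnames in the table, to be appended on all three nodes):

192.168.153.112 anshun112
192.168.153.113 anshun113
192.168.153.114 anshun114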
| anshun112 | anshun113 | anshun114 |
|---|---|---|
| NN1 | NN2 | |
| ZKFC | ZKFC | |
| JN | JN | JN |
| ZK | ZK | ZK |
| DN | DN | DN |
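The abbreviations map to the daemon names each host will run: NN = NameNode (the nn1/nn2 pair), ZKFC = DFSZKFailoverController, JN = JournalNode, ZK = QuorumPeerMain (the ZooKeeper server), DN = DataNode. ResourceManager placement is handled separately in the YARN configuration and startup steps below.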
#JAVA_HOME
export JAVA_HOME=/home/software/jdk1.8.0_144
#HADOOP_HOME
export HADOOP_HOME=/home/software/hadoop-2.9.2
#ZK_HOME
export ZK_HOME=/home/software/zookeeper-3.6.0
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin
chmod +x xsync
#!/bin/bash
#1 Get the number of arguments; exit immediately if none were given
pcount=$#
if ((pcount==0)); then
echo no args;
exit;
fi
#2 Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname
#3 Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir
#4 Get the current user name
user=`whoami`
#5 Loop over hosts anshun112 to anshun114 and sync the file to each
for((host=112; host<115; host++)); do
echo ------------------- anshun$host --------------
rsync -av $pdir/$fname $user@anshun$host:$pdir
done
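The script syncs the given file or directory to the same absolute path on anshun112 through anshun114 via rsync. Because the next two commands run it through sudo, root must also be able to find it on its PATH; one option (an assumption, not part of the original setup) is to install the script under /usr/local/bin:

sudo cp xsync /usr/local/bin/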
sudo xsync /home/software
sudo xsync /etc/profile
source /etc/profile
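After sourcing the profile on each node, a quick sanity check (assuming the versions installed above) confirms that the new PATH entries took effect:

java -version        # should report 1.8.0_144
hadoop version       # should report Hadoop 2.9.2
which zkServer.sh    # should resolve under /home/software/zookeeper-3.6.0/bin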
export JAVA_HOME=/home/software/jdk1.8.0_144
export HADOOP_CONF_DIR=/home/software/hadoop-2.9.2/etc/hadoop
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/software/hadoop-2.9.2/tmp</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>anshun112:2181,anshun113:2181,anshun114:2181</value>
  </property>
</configuration>
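Because fs.defaultFS points at the logical nameservice ns rather than a single host, clients always address hdfs://ns and the failover proxy (configured in hdfs-site.xml below) routes them to the active NameNode. Pre-creating hadoop.tmp.dir on all three nodes is optional but avoids permission surprises when running as a non-root user; a hedged example:

mkdir -p /home/software/hadoop-2.9.2/tmp
# once the cluster is running (see the startup steps below), the nameservice can be addressed directly:
hadoop fs -ls hdfs://ns/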
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>ns</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.ns</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns.nn1</name>
    <value>anshun112:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns.nn1</name>
    <value>anshun112:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns.nn2</name>
    <value>anshun113:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns.nn2</name>
    <value>anshun113:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://anshun112:8485;anshun113:8485;anshun114:8485/ns</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/software/hadoop-2.9.2/tmp/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.ns</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///home/software/hadoop-2.9.2/tmp/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///home/software/hadoop-2.9.2/tmp/hdfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>
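Note that dfs.ha.fencing.ssh.private-key-files points at /root/.ssh/id_rsa, so sshfence needs root-to-root passwordless SSH between the two NameNode hosts, in addition to the zhangyong keys already configured. A minimal sketch (assuming no root key exists yet), run as root on anshun112 and anshun113:

ssh-keygen -t rsa -f /root/.ssh/id_rsa -N ""
ssh-copy-id root@anshun112
ssh-copy-id root@anshun113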
<configuration>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>anshun112</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>anshun114</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>anshun112:2181,anshun113:2181,anshun114:2181</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>ns-yarn</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>anshun114</value>
  </property>
</configuration>
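With yarn.resourcemanager.store.class set to ZKRMStateStore, the two ResourceManagers (rm1 on anshun112, rm2 on anshun114) share their state through the same ZooKeeper quorum. Once YARN is up (steps x and xi below), the HA state can be checked with:

yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2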
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
anshun112
anshun113
anshun114
xsync /home/software/hadoop-2.9.2/etc
i. Start ZooKeeper on all three servers:
zkServer.sh start
ii. On the ZooKeeper leader node, run the ZKFC format command:
hdfs zkfc -formatZK
iii. Start the JournalNode on all three servers:
hadoop-daemon.sh start journalnode
iv. Format the NameNode on the first node:
hadoop namenode -format
v. Start the NameNode on the first node:
hadoop-daemon.sh start namenode
vi. On the second node, bootstrap the standby NameNode (copies metadata from the first NameNode):
hdfs namenode -bootstrapStandby
vii. Start the NameNode on the second node:
hadoop-daemon.sh start namenode
viii. Start the DataNode on all three servers:
hadoop-daemon.sh start datanode
ix. Start the ZKFC on the first and second nodes:
hadoop-daemon.sh start zkfc
x. Start YARN on the first node:
start-yarn.sh
xi. Start the ResourceManager on the third node:
yarn-daemon.sh start resourcemanager
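After the first full startup (steps i through xi), the HA state can be verified before relying on start-all.sh/stop-all.sh; a hedged set of checks:

hdfs haadmin -getServiceState nn1     # expect active or standby
hdfs haadmin -getServiceState nn2     # expect the opposite state
zkCli.sh -server anshun112:2181       # then run: ls /hadoop-ha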
For subsequent runs:
Start: start-all.sh
Stop: stop-all.sh
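As a final smoke test of the whole stack (HDFS HA, YARN and MapReduce), the bundled example job can be run from any node; the jar path below assumes the standard Hadoop 2.9.2 layout under /home/software:

hadoop jar /home/software/hadoop-2.9.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.2.jar pi 2 10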