After passwordless SSH is set up between the Linux nodes, first upload hadoop-2.5.1-x64.tar.gz from the local Windows machine to node1;
1. tar -zxvf hadoop-2.5.1-x64.tar.gz;
2. ln -sf /root/hadoop-2.5.1 /home/hadoop-2.5 (create a soft link)
3. cd /home/hadoop-2.5 -> cd etc/hadoop/ -> vi hadoop-env.sh
Replace ${JAVA_HOME} in the line export JAVA_HOME=${JAVA_HOME} with the JDK install path; mine is: export JAVA_HOME=/usr/java/jdk1.7.0_79;
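Before saving, it is worth confirming the JDK path actually exists; a quick check, using the same install path as above:
[root@node1 hadoop]# /usr/java/jdk1.7.0_79/bin/java -version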
4. vi hdfs-site.xml
<configuration>
  <property><name>dfs.nameservices</name><value>cclbs</value></property>
  <property><name>dfs.ha.namenodes.cclbs</name><value>nn1,nn2</value></property>
  <property><name>dfs.namenode.rpc-address.cclbs.nn1</name><value>node1:8020</value></property>
  <property><name>dfs.namenode.rpc-address.cclbs.nn2</name><value>node2:8020</value></property>
  <property><name>dfs.namenode.http-address.cclbs.nn1</name><value>node1:50070</value></property>
  <property><name>dfs.namenode.http-address.cclbs.nn2</name><value>node2:50070</value></property>
  <property><name>dfs.namenode.shared.edits.dir</name><value>qjournal://node2:8485;node3:8485;node4:8485/cclbs</value></property>
  <property><name>dfs.client.failover.proxy.provider.cclbs</name><value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value></property>
  <property><name>dfs.ha.fencing.methods</name><value>sshfence</value></property>
  <property><name>dfs.ha.fencing.ssh.private-key-files</name><value>/home/exampleuser/.ssh/id_dsa</value></property>
  <property><name>dfs.journalnode.edits.dir</name><value>/opt/jn/data</value></property>
  <property><name>dfs.ha.automatic-failover.enabled</name><value>true</value></property>
</configuration>
Save and exit;
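Once the file is saved, hdfs getconf (part of Hadoop 2.x) gives a quick sanity check that the nameservice and both NameNodes are picked up; the two commands should print cclbs and nn1,nn2 respectively:
[root@node1 hadoop-2.5]# bin/hdfs getconf -confKey dfs.nameservices
[root@node1 hadoop-2.5]# bin/hdfs getconf -confKey dfs.ha.namenodes.cclbs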
5. vi core-site.xml
<configuration>
  <property><name>fs.defaultFS</name><value>hdfs://cclbs</value></property>
  <property><name>ha.zookeeper.quorum</name><value>node1:2181,node2:2181,node3:2181</value></property>
  <property><name>hadoop.tmp.dir</name><value>/opt/hadoop2</value></property>
</configuration>
Save and exit;
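A malformed tag in either file will make every daemon die at startup with a cryptic parse error, so a syntax check is cheap insurance (assuming xmllint from libxml2 is installed):
[root@node1 hadoop]# xmllint --noout core-site.xml hdfs-site.xml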
6. tar -zxvf zookeeper-3.4.6.tar.gz -> ln -sf /root/zookeeper-3.4.6 /home/zk ->
cd /home/zk/conf -> cp -a zoo_sample.cfg zoo.cfg -> vi zoo.cfg
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper
...
#autopurge.purgeInterval=1
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
Save and exit;
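In the server.N lines, port 2888 is used by followers to connect to the leader and 3888 is used for leader election; the N must match the myid written on each node in the next step. To review the effective settings with comments stripped:
[root@node1 conf]# grep -Ev '^#|^$' zoo.cfg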
7. mkdir /opt/zookeeper -> cd /opt/zookeeper -> vi myid (write '1' into it) and save;
[root@node1 opt]# scp -r zookeeper/ root@node2:/opt
[root@node1 opt]# scp -r zookeeper/ root@node3:/opt
[root@node2 .ssh]# vi /opt/zookeeper/myid
2
:wq
[root@node3 ~]# vi /opt/zookeeper/myid
3
:wq
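With all three myid files written, a one-liner from node1 confirms each node's id matches its server.N entry (a sketch, relying on the passwordless SSH set up earlier):
[root@node1 ~]# for h in node1 node2 node3; do echo -n "$h: "; ssh $h cat /opt/zookeeper/myid; done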
8. scp -r zookeeper-3.4.6 root@node2:~/ -> [root@node2 ~]# ln -sf /root/zookeeper-3.4.6/ /home/zk
scp -r zookeeper-3.4.6 root@node3:~/ -> [root@node3 ~]# ln -sf /root/zookeeper-3.4.6/ /home/zk
9. [root@node1 ~]# cd /home/zk/ -> cd bin/ -> vi /etc/profile (append the ZooKeeper bin directory to PATH)
export PATH=$PATH:/home/zk/bin
:wq
[root@node1 bin]# source /etc/profile
[root@node1 bin]# scp /etc/profile root@node2:/etc
[root@node1 bin]# scp /etc/profile root@node3:/etc
[root@node2 ~]# source /etc/profile
[root@node3 ~]# source /etc/profile
10. service iptables stop (node1-node4) -> zkServer.sh start (node1-node3)
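If the quorum formed, each node reports its role; exactly one of the three should say leader and the other two follower. jps (from the JDK) should also show a QuorumPeerMain process on node1-node3:
[root@node1 ~]# zkServer.sh status
[root@node1 ~]# jps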
11. cd /home/hadoop-2.5/etc/hadoop/ -> vi slaves (list the DataNode hosts)
node2
node3
node4
:wq
12.[root@node1 ~]# scp hadoop-2.5.1-x64.tar.gz root@node2:~/
[root@node1 ~]# scp hadoop-2.5.1-x64.tar.gz root@node3:~/
[root@node1 ~]# scp hadoop-2.5.1-x64.tar.gz root@node4:~/
Unpack and install on each node with tar -zxvf ... (see the sketch below, which also recreates the /home/hadoop-2.5 soft link)
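The /home/hadoop-2.5 soft link from step 2 must also exist on node2-node4, otherwise the scp destinations in step 13 will not resolve; a one-pass sketch run from node1:
[root@node1 ~]# for h in node2 node3 node4; do ssh $h 'tar -zxvf hadoop-2.5.1-x64.tar.gz && ln -sf /root/hadoop-2.5.1 /home/hadoop-2.5'; done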
13.[root@node1 ~]# cd /home/hadoop-2.5/etc/hadoop -> scp ./* root@node2:/home/hadoop-2.5/etc/hadoop/
scp ./* root@node3:/home/hadoop-2.5/etc/hadoop/
scp ./* root@node4:/home/hadoop-2.5/etc/hadoop/
14.[root@node2 hadoop-2.5]# cd sbin -> ./hadoop-daemon.sh start journalnode (node2-node4)
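All three JournalNodes need to be up before the format in the next step, because -format writes the shared edit log to a quorum of them; a quick check from node1 (assuming jps is on the PATH for non-interactive SSH):
[root@node1 ~]# for h in node2 node3 node4; do echo -n "$h: "; ssh $h jps | grep JournalNode; done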
15.[root@node1 hadoop-2.5]# cd bin -> ./hdfs namenode -format
[root@node1 opt]# cd hadoop2
[root@node1 hadoop2]# ls
dfs
[root@node1 hadoop2]# cd dfs
[root@node1 dfs]# ls
name
[root@node1 dfs]# cd name
[root@node1 name]# ls
current
[root@node1 name]# cd current/
[root@node1 current]# ls
fsimage_0000000000000000000 seen_txid
fsimage_0000000000000000000.md5 VERSION
[root@node1 opt]# cd /home/hadoop-2.5/
[root@node1 hadoop-2.5]# cd sbin/
[root@node1 sbin]# ./hadoop-daemon.sh start namenode
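If the daemon came up cleanly, jps on node1 lists a NameNode process, and the web UI configured in dfs.namenode.http-address.cclbs.nn1 answers on port 50070:
[root@node1 sbin]# jps
[root@node1 sbin]# curl -s -o /dev/null -w '%{http_code}\n' http://node1:50070    # expect 200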
16.[root@node2 hadoop-2.5]# cd bin
[root@node2 bin]# ./hdfs namenode -bootstrapStandby
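bootstrapStandby copies the metadata just formatted on nn1 over to nn2, rather than formatting a second, incompatible namespace; afterwards the same name directory seen on node1 above should exist on node2:
[root@node2 bin]# ls /opt/hadoop2/dfs/name/current    # same fsimage_*, seen_txid and VERSION files as on node1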