# 1. 查看 20 台服务器环境。
# 发现没有 hadoop 用户，hosts 已经更改。
# 2. 下载 hadoop-2.5.2。查看 zookeeper 版本，发现是 zookeeper 3.4.6，和线上不一致。
# Work from the deployment staging directory on the admin node.
# (The original line carried a pasted shell prompt, which is not valid shell.)
cd /data/hadoop/data1/usr/local/setupSpark/
vim newslaves  # list of target servers to deploy, one IP per line
# Fetch the ZooKeeper and Hadoop tarballs from the existing cluster node.
# The originals were line-wrapped; scp and its arguments must be one command.
scp [email protected]:/usr/local/houzhizhen/zookeeper/zookeeper-3.4.6.tar.gz .
scp [email protected]:/usr/local/hadoop/hadoop-2.5.2.tar.gz .
# Derive one hostname per IP in newslaves, of the form a-b-c-d (dots
# replaced by dashes). A single tr pass replaces the original pipeline,
# which spawned four python interpreters per address via backticks.
tr '.' '-' < newslaves > newslavehostname
# Build hosts-file entries (ip<TAB>hostname), append the admin node's
# existing /etc/hosts, and push the merged file to every slave as /etc/hosts.
paste newslaves newslavehostname > newhosts
cat /etc/hosts >> newhosts
./upgrade.sh distribute newslaves newhosts /etc/hosts
# Allow sshd connections from the two admin/NameNode hosts on every slave.
./upgrade.sh common newslaves "echo 'sshd:10.140.60.85' >> /etc/hosts.allow"
./upgrade.sh common newslaves "echo 'sshd:10.140.60.86' >> /etc/hosts.allow"
# Stage the Hadoop, JDK and lzo tarballs under /tmp on every slave.
./upgrade.sh distribute newslaves hadoop-2.5.2.tar.gz /tmp
./upgrade.sh distribute newslaves jdk1.7.0_51.tar.gz /tmp
./upgrade.sh distribute newslaves lzo-2.06.tar.gz /tmp
# Edit the per-host bootstrap scripts locally, then push and run them everywhere.
vim init.sh  # edit before distributing: remove the old ssh key
vim setup.sh  # edit before distributing: set the hadoop version to 2.5.2
./upgrade.sh distribute newslaves init.sh /tmp
./upgrade.sh distribute newslaves setup.sh /tmp
./upgrade.sh common newslaves sh /tmp/init.sh
./upgrade.sh common newslaves sh /tmp/setup.sh
# Append the hadoop user's public key to authorized_keys on every slave.
# NOTE: an authorized_keys entry must be a single line ("ssh-rsa <base64> comment");
# the original wrapped the key comment onto a second line inside the quoted
# string, which writes a corrupt (newline-split) entry.
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqzzxrHJ4O5AS6GnaWpX6XnAcxsC/IqffRGyPRZp9pDHwS5/f9ng5eeKFaFlIXq5NZF8RghEVU8kqr4MaQtHeyRO7dE26RAPeb61maIbz9NCuUWORPZvOP5a9JH88/fIKctUvxjY/ruYKVVJDPQZ1Vb9ZB3wttcxJ73IaUOoZNqzCKxWfxpFiUqXZIMtdhnCEwJbn/EofxFCiBimRKCeDbZdHompq+7cH0XWB7wD4AvVHLDqvq0fawRNVyuJhp3gpQgob2TKKFjCj1gvzqCk2RvrSyVViiciB5QGPoZowrnU7hfRjXXMQq/2ASpUw0+7PlCHuOxz6gzZZOZhr5K0CRw== [email protected]" >> /home/hadoop/.ssh/authorized_keys'
# Hosts that will run the ZooKeeper ensemble.
printf '%s\n' 10.140.60.85 10.140.60.86 10.140.60.87 > zookeeper_hosts
# JournalNodes are co-located on the same three hosts.
printf '%s\n' 10.140.60.85 10.140.60.86 10.140.60.87 > journal_hosts
# Unpack ZooKeeper, edit its config, and repack so every host receives the
# same pre-configured tarball.
tar -xzf zookeeper-3.4.6.tar.gz
vim zookeeper-3.4.6/conf/zoo.cfg
rm zookeeper-3.4.6.tar.gz
tar -czf zookeeper-3.4.6.tar.gz zookeeper-3.4.6
./upgrade.sh distribute zookeeper_hosts zookeeper-3.4.6.tar.gz /tmp
./upgrade.sh distribute zookeeper_hosts zookeeper_setup.sh /tmp
./upgrade.sh common zookeeper_hosts sh /tmp/zookeeper_setup.sh
# Take the production zoo.cfg as a template, adapt it, and push it out.
scp 10.140.60.73:/usr/local/zookeeper/conf/zoo.cfg zookeeper-3.4.6/conf/
vim zookeeper-3.4.6/conf/zoo.cfg  # modify the data dir location and (comment truncated in the original notes — presumably the server list; confirm)
./upgrade.sh distribute zookeeper_hosts zookeeper-3.4.6/conf/zoo.cfg /usr/local/zookeeper/conf/
# On 10.140.60.85 and 10.140.60.86, generate ssh keys for both the root and hadoop users.
# Append the root and hadoop public keys from both admin hosts to every slave.
# NOTE(review): the "???" runs are redactions/truncations in these notes —
# substitute the real, complete public keys before running these commands,
# and keep each authorized_keys entry on a single line.
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAtGZ4NtllaSR??? >> /root/.ssh/authorized_keys'
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAxzkhDycm0jx8Ok???5" >> /home/hadoop/.ssh/authorized_keys'
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAA???" >> /root/.ssh/authorized_keys'
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAs+jHIfFTqaKRTYxyixc???" >> /home/hadoop/.ssh/authorized_keys'
# Continue as the hadoop user (interactive step in the original session).
su - hadoop
cd ~/setupSpark/
# Create the ZooKeeper data directory on every ensemble host.
# (The original repeated this mkdir a second time after the myid steps;
# the duplicate was removed — mkdir -p is idempotent anyway.)
./upgrade.sh common zookeeper_hosts "mkdir -p /data/hadoop/data1/zookeeper"
# Each server needs a unique myid matching its server.N entry in zoo.cfg.
ssh 10.140.60.85 "echo 1 > /data/hadoop/data1/zookeeper/myid"
ssh 10.140.60.86 "echo 2 > /data/hadoop/data1/zookeeper/myid"
ssh 10.140.60.87 "echo 3 > /data/hadoop/data1/zookeeper/myid"
./upgrade.sh common zookeeper_hosts "/usr/local/zookeeper/bin/zkServer.sh start"
# Verify that ZooKeeper started correctly on all three hosts.
# Unpack Hadoop 2.5.2 locally and seed its config from the existing cluster.
tar -xzf hadoop-2.5.2.tar.gz
cp /usr/local/hadoop/conf/core-site.xml hadoop-2.5.2/etc/hadoop/
cp /usr/local/hadoop/conf/hdfs-site.xml hadoop-2.5.2/etc/hadoop/
vim hadoop-2.5.2/etc/hadoop/core-site.xml #modify cluster name and zookeeper address
vim hadoop-2.5.2/etc/hadoop/hdfs-site.xml #modify namenode address and journal edit dir to /data/hadoop/data2/journal_node/
./upgrade.sh distribute newslaves hadoop-2.5.2/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/
./upgrade.sh distribute newslaves hadoop-2.5.2/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/
# Create the JournalNode edits directory and start the JournalNodes; they
# must be up before the NameNode is formatted.
./upgrade.sh common journal_hosts "mkdir -p /data/hadoop/data2/journal_node/"
./upgrade.sh common journal_hosts "/usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode"
#check the journalnode state on all journal hosts before proceeding
# The two HA NameNode hosts.
printf '%s\n' 10.140.60.85 10.140.60.86 > namenode_hosts
# Tune JVM heap/GC settings for NameNode and DataNode.
vim hadoop-2.5.2/etc/hadoop/hadoop-env.sh
# Add the following two lines at the top of the body of hadoop-env.sh.
# (They are reproduced here as the content to paste into that file, not as
# commands intended to run in this session.)
export HADOOP_NAMENODE_OPTS="-Xmx22g -Xms5g -Xmn4g $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/nn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/nn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/nn_error.hprof"
export HADOOP_DATANODE_OPTS="-Xmx3g -Xms2g -Xmn1g $JVM_OPTS -XX:ErrorFile=$HADOOP_LOG_DIR/dn_error_gc.log -Xloggc:$HADOOP_LOG_DIR/dn_gc.log -XX:HeapDumpPath=$HADOOP_LOG_DIR/dn_error.hprof "
./upgrade.sh distribute newslaves hadoop-2.5.2/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/
# Create the NameNode metadata directories on both NameNode hosts.
./upgrade.sh common namenode_hosts "mkdir -p /data/hadoop/data1/dfs/name;mkdir -p /data/hadoop/data2/dfs/name"
# Initialize HA state in ZooKeeper, then format the primary NameNode
# (cluster id "zzg"). Using "hdfs namenode" instead of the deprecated
# "hadoop namenode" entry point, consistent with the other hdfs calls.
ssh 10.140.60.85 "/usr/local/hadoop/bin/hdfs zkfc -formatZK"
ssh 10.140.60.85 "/usr/local/hadoop/bin/hdfs namenode -format zzg"
# Start the failover controllers on both NameNode hosts.
ssh 10.140.60.85 "/usr/local/hadoop/sbin/hadoop-daemon.sh start zkfc"
ssh 10.140.60.86 "/usr/local/hadoop/sbin/hadoop-daemon.sh start zkfc"
ssh 10.140.60.85 "/usr/local/hadoop/sbin/hadoop-daemon.sh start namenode"
# Bootstrap the standby by copying the namespace from the active NameNode,
# THEN start its daemon. bootstrapStandby is a one-shot foreground command
# ("hdfs namenode -bootstrapStandby"), not an argument to
# "hadoop-daemon.sh start namenode" — the original form left no standby
# daemon running for the failover below.
ssh 10.140.60.86 "/usr/local/hadoop/bin/hdfs namenode -bootstrapStandby"
ssh 10.140.60.86 "/usr/local/hadoop/sbin/hadoop-daemon.sh start namenode"
ssh 10.140.60.86 "/usr/local/hadoop/bin/hdfs haadmin -failover nn1 nn2"
# Distribute the slaves file and start a DataNode on every slave.
cp newslaves hadoop-2.5.2/etc/hadoop/slaves
./upgrade.sh distribute newslaves hadoop-2.5.2/etc/hadoop/slaves /usr/local/hadoop/etc/hadoop/
./upgrade.sh common newslaves "/usr/local/hadoop/sbin/hadoop-daemon.sh start datanode"