hadoop完全分布式一键安装、启动、停止脚本

 hadoop完全分布式一键安装脚本

#!/bin/bash
# Hadoop fully-distributed one-click installer: ask for an install dir,
# create it if missing, and unpack the hadoop-*.tar.gz found next to this script.
# NOTE: if the script was written on Windows, run: vim script.sh -> :set ff=unix

echo -e "请输入hadoop的安装目录,不存在脚本自动创建,最后一个/不要写 /bigdata/install"
read esinstallpath

# Create the Hadoop install directory if it does not exist.
if [ ! -d "$esinstallpath" ]; then
   mkdir -p "$esinstallpath"
fi
if [ ! -d "$esinstallpath" ]; then
  # Fixed typo: original message interpolated $esinstallpat (undefined variable).
  echo "创建目录$esinstallpath失败!请检查目录是否有权限"
  exit 1
fi

# Locate the tarball in the SCRIPT's directory (the original grepped the current
# working directory with `ls` but untarred from $currentdir — a mismatch when the
# script is invoked from elsewhere).
currentdir=$(cd "$(dirname "$0")" && pwd)
tarball=$(ls "$currentdir" | grep 'hadoop-.*[gz]$')
if [ -z "$tarball" ]; then
   # No hadoop tarball next to the script.
   echo "在$currentdir下没有发现hadoop-*.tar.gz,请自行上传!"
   exit 1
else
   # Extract into the chosen install directory.
   tar -zxvf "$currentdir/$tarball" -C "$esinstallpath"
fi

# Detect the unpacked hadoop directory name (e.g. hadoop-2.7.3).
esbanben=$(ls "$esinstallpath" | grep 'hadoop-.*')

# Append HADOOP_HOME / PATH exports to ~/.bash_profile.
echo "">>~/.bash_profile
echo '#HADOOP'>>~/.bash_profile
echo "export HADOOP_HOME=$esinstallpath/$esbanben">>~/.bash_profile
echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>~/.bash_profile
source ~/.bash_profile

hadooppath=$esinstallpath/$esbanben
confpath=$esinstallpath/$esbanben/etc/hadoop

# Replace the JAVA_HOME placeholder in hadoop-env.sh with this machine's real
# JAVA_HOME ('!' as sed delimiter because the paths contain '/').
javahome=$JAVA_HOME
bak_dir='export JAVA_HOME=${JAVA_HOME}'
new_dir="export JAVA_HOME=$javahome"
sed -i "s!${bak_dir}!${new_dir}!g" "$confpath/hadoop-env.sh"


#修改core-site.xml
# Each property is inserted line-by-line before </configuration>.
# NOTE(review): the original post lost all XML tags to HTML extraction
# (the sed commands were left with empty `i\` bodies); the standard
# <property>/<name>/<value> markup is reconstructed here.
echo -e "请输入hadoop集群服务器名称 例如 mycluster"
read mycluster
# fs.defaultFS — the logical HDFS nameservice URI.
sed -i '/<\/configuration>/i\<property>' $confpath/core-site.xml
sed -i '/<\/configuration>/i\  <name>fs.defaultFS</name>' $confpath/core-site.xml
sed -i "/<\/configuration>/i\  <value>hdfs://$mycluster</value>" $confpath/core-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/core-site.xml

echo -e "请输入hadoop运行时产生文件的存储目录 不需要自己创建目录 集群自己会创建 指定一个空间大的 用来保存hadoop数据 例如 /bigdata/hadoop"
read hadooptmpdir
# hadoop.tmp.dir — base directory for runtime data.
sed -i '/<\/configuration>/i\<property>' $confpath/core-site.xml
sed -i '/<\/configuration>/i\  <name>hadoop.tmp.dir</name>' $confpath/core-site.xml
sed -i "/<\/configuration>/i\  <value>$hadooptmpdir</value>" $confpath/core-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/core-site.xml

echo -e "请输入zk集群 例如 cdh01:2181,cdh02:2181,cdh03:2181"
read zkhosts
# ha.zookeeper.quorum — ZooKeeper ensemble used for automatic NN failover.
sed -i '/<\/configuration>/i\<property>' $confpath/core-site.xml
sed -i '/<\/configuration>/i\  <name>ha.zookeeper.quorum</name>' $confpath/core-site.xml
sed -i "/<\/configuration>/i\  <value>$zkhosts</value>" $confpath/core-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/core-site.xml

echo "core-site.xml 配置如下"

cat $confpath/core-site.xml

echo "core-site.xml 配置完成"
sleep 1
sleep 1
#开始配置hdfs-site.xml
# NOTE(review): the XML tags were stripped by HTML extraction in the original
# post; the <property>/<name>/<value> markup below is reconstructed to match
# the standard HDFS HA (QJM) configuration.
sed -i '/<\/configuration>/i\<property>' $confpath/hdfs-site.xml
sed -i '/<\/configuration>/i\  <name>dfs.nameservices</name>' $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>$mycluster</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

sed -i '/<\/configuration>/i\<property>' $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.ha.namenodes.$mycluster</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>nn1,nn2</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml


echo  "请输入两个nameNode的地址,按照空格分开 例如 cdh01 cdh02"
read nameNodes
array=($nameNodes)

for i in $(seq 0 $((${#array[@]}-1)))
do
 # rpc/http addresses for namenode nn$((i+1)).
 sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\  <name>dfs.namenode.rpc-address.$mycluster.nn$((i+1))</name>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\  <value>${array[$i]}:8020</value>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

 sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\  <name>dfs.namenode.http-address.$mycluster.nn$((i+1))</name>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\  <value>${array[$i]}:50070</value>" $confpath/hdfs-site.xml
 sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

 #配置yarn的环境 — ResourceManager HA runs on the same two hosts (rm1/rm2).
 sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.hostname.rm$((i+1))</name>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\  <value>${array[$i]}</value>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

 sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.webapp.address.rm$((i+1))</name>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\  <value>${array[$i]}:8088</value>" $confpath/yarn-site.xml
 sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml
done

echo  "指定journalnode的配置,所有机器 例如 cdh01:8485;cdh02:8485;cdh03:8485;cdh04:8485;cdh05:8485"
read journalnode
# Shared edits directory on the JournalNode quorum.
sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.namenode.shared.edits.dir</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>qjournal://$journalnode/$mycluster</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

# Client-side proxy that resolves which NN is active.
sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.client.failover.proxy.provider.$mycluster</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

# Fencing: try sshfence, fall back to always-succeed shell so failover proceeds.
sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.ha.fencing.methods</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  sshfence" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  shell(/bin/true)" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  </value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

# SSH key used by sshfence. The original hard-coded /home/.ssh/id_rsa, which
# is not a valid home path; use the invoking user's $HOME instead.
sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.ha.fencing.ssh.private-key-files</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>$HOME/.ssh/id_rsa</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.journalnode.edits.dir</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>$hadooptmpdir/journal</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.ha.automatic-failover.enabled</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>true</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

sed -i "/<\/configuration>/i\<property>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <name>dfs.replication</name>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\  <value>3</value>" $confpath/hdfs-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/hdfs-site.xml

echo "hdfs-site.xml 配置如下"
cat $confpath/hdfs-site.xml
echo "hdfs-site.xml 配置完成"
sleep 1

#开始配置mapred-site.xml
# Copy the shipped template, then declare YARN as the MapReduce framework.
# NOTE(review): XML tags reconstructed — they were stripped from the original post.
cp $confpath/mapred-site.xml.template $confpath/mapred-site.xml
sed -i "/<\/configuration>/i\<property>" $confpath/mapred-site.xml
sed -i "/<\/configuration>/i\  <name>mapreduce.framework.name</name>" $confpath/mapred-site.xml
sed -i "/<\/configuration>/i\  <value>yarn</value>" $confpath/mapred-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/mapred-site.xml

echo "mapred-site.xml 配置如下"
cat $confpath/mapred-site.xml
echo "mapred-site.xml 配置完成"
sleep 1


#开始配置yarn-site.xml
# NOTE(review): XML tags reconstructed — they were stripped from the original post.
# Shuffle service for MapReduce.
sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <name>yarn.nodemanager.aux-services</name>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <value>mapreduce_shuffle</value>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

# Enable ResourceManager HA.
sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.ha.enabled</name>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <value>true</value>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

# Logical ids of the two RMs (hosts were filled in earlier per-namenode loop).
sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.ha.rm-ids</name>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <value>rm1,rm2</value>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

# ZooKeeper ensemble backing RM state/leader election.
sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.zk-address</name>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <value>$zkhosts</value>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

sed -i "/<\/configuration>/i\<property>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <name>yarn.resourcemanager.cluster-id</name>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\  <value>mycluster1</value>" $confpath/yarn-site.xml
sed -i "/<\/configuration>/i\</property>" $confpath/yarn-site.xml

echo "yarn-site.xml 配置如下"
cat $confpath/yarn-site.xml
echo "yarn-site.xml 配置完成"
sleep 1
 
 #开始配置slaves

echo  "请输入所有的datanode,按照空格分开 例如 cdh01 cdh02 cdh03"
read datanodes
array=($datanodes)
# Truncate first: the distribution ships a slaves file containing "localhost";
# the original `touch` + `>>` kept that stale content and duplicated entries
# on re-runs.
> $confpath/slaves
for datanode in "${array[@]}"
do
 echo "$datanode" >> $confpath/slaves
done

echo "slaves 配置如下"
cat $confpath/slaves
echo "slaves 配置完成"
sleep 1

#分发hadoop安装文件
# Fan out the installed tree and environment variables to every other node.
# Requires passwordless SSH to each node.
echo "以下输入的节点必须做免密登录"
echo  "请输入所有的hadoop节点,按照空格分开,当前机器不用输入(当前机器是cdh01) 例如 cdh02 cdh03 cdh04 cdh05"
read allnodes
user=$(whoami)
# Word-splitting on whitespace is intentional here (the original piped through
# a no-op `tr ' ' ' '`, removed).
array=($allnodes)
for allnode in "${array[@]}"
do
 echo ======= $allnode  =======
 # Append the same HADOOP env exports on the remote node.
 ssh $allnode "echo ''>>~/.bash_profile"
 ssh $allnode "echo '#HADOOP'>>~/.bash_profile"
 ssh $allnode "echo 'export HADOOP_HOME=$hadooppath'>>~/.bash_profile"
 ssh $allnode 'echo "export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin">>~/.bash_profile'
 ssh $allnode "source ~/.bash_profile"
 # Recreate the install dir remotely, then copy the whole tree.
 ssh $allnode "rm -rf $hadooppath"
 ssh $allnode "mkdir -p $hadooppath"
 scp -r $hadooppath/* ${user}@$allnode:$hadooppath/
 echo ======= $allnode 复制完成  =======
done

for allnode in "${array[@]}"
do
 echo ======= 在 $allnode 手动执行 source ~/.bash_profile 在通过 hadoop version 查看是否安装成功 =======
done

 

hadoop完全分布式一键启动脚本

#!/bin/bash
#启动所有的hadoop
slaveNode='cdh02'
source ~/.bash_profile
start-all.sh
sleep 2
#启动另一台机器的resourcemanager(RM HA 下 start-all.sh 只启动本机的 RM)
# NOTE(review): the heredoc was truncated in the original post ("ssh -T ... <");
# reconstructed with the standard yarn-daemon command.
ssh -T $slaveNode <<EOF
source ~/.bash_profile
yarn-daemon.sh start resourcemanager
EOF

 

hadoop完全分布式一键停止脚本

#!/bin/bash
#停止hadoop
standbyNode='cdh02'
source ~/.bash_profile
stop-all.sh
#停止另一台机器的resourcemanager
# NOTE(review): the heredoc was truncated in the original post ("ssh -T ... <");
# reconstructed with the standard yarn-daemon command.
ssh -T $standbyNode <<EOF
source ~/.bash_profile
yarn-daemon.sh stop resourcemanager
EOF

 

 

你可能感兴趣的:(hadoop完全分布式一键安装、启动、停止脚本)