[root@VM1 ~]# vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=master60
[root@VM2 ~]# vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=slave61
[root@VM3 ~]# vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=slave62
[root@VM4 ~]# vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=slave63
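The new HOSTNAME in /etc/sysconfig/network only takes effect after a reboot; to change the running system's hostname right away, the hostname command can be run on each node as well (shown here for VM1, adjust per node):
# hostname master60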
# vim /etc/hosts
192.168.9.60 master60
192.168.9.61 slave61
192.168.9.62 slave62
192.168.9.63 slave63
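Before moving on, it is worth confirming that every node resolves the new names; one quick check, run from any node, is a loop like:
# for h in master60 slave61 slave62 slave63; do ping -c 1 $h; done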
# service iptables stop && chkconfig iptables off && chkconfig --list | grep iptables
# vi /etc/selinux/config
SELINUX=disabled
# echo " " >> /etc/modprobe.d/dist.conf
# echo "alias net-pf-10 off" >> /etc/modprobe.d/dist.conf
# echo "alias ipv6 off" >> /etc/modprobe.d/dist.conf
# reboot
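After the reboot, it can be verified that SELinux and IPv6 are really off, for example:
# getenforce             # should print Disabled
# lsmod | grep ipv6      # should print nothing if the module was not loaded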
# ssh-keygen
# ssh-copy-id -i master60
# ssh-copy-id -i slave61
# ssh-copy-id -i slave62
# ssh-copy-id -i slave63
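To confirm passwordless SSH works, a loop like the following should print each hostname without asking for a password. Note that whichever user later runs start-dfs.sh / start-yarn.sh (the hadoop user created below) also needs passwordless SSH to the slaves, so these ssh-keygen / ssh-copy-id steps may have to be repeated for that user:
# for h in master60 slave61 slave62 slave63; do ssh $h hostname; done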
# service ntpd start && chkconfig ntpd on && chkconfig --list | grep ntpd
# ntpdate -u ntp.sjtu.edu.cn
# hwclock --localtime
# hwclock --localtime -w
# vi /etc/sysconfig/ntpd
SYNC_HWCLOCK=yes
# vi /etc/sysconfig/ntpdate
SYNC_HWCLOCK=yes
# run on slave61, slave62 and slave63 so the slaves sync their clocks from the master:
# ntpdate -u master60
# service ntpd restart && service crond restart
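Sync status can then be checked on any node, e.g.:
# ntpq -p    # lists the peers ntpd is synchronizing against
# date       # quick sanity check of the wall clock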
# vi /etc/security/limits.conf
# soft: soft limit, exceeding it only produces a warning
# hard: hard limit, enforced immediately
* soft nofile 32728
* hard nofile 1024567
* soft nproc 65535
* hard nproc unlimited
* soft memlock unlimited
* hard memlock unlimited
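These limits apply only to new login sessions; after logging in again they can be verified with ulimit:
# ulimit -n    # open files (nofile)
# ulimit -u    # max user processes (nproc)
# ulimit -l    # max locked memory (memlock)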
# create the group first, then the user with it as primary group
# groupadd hadoop
# useradd -g hadoop hadoop
# passwd hadoop
# mkdir /apps && cd /apps && mkdir lib logs run sh sharedstorage svr
# chown -R hadoop:hadoop /apps/*
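A quick check that everything under /apps is now owned by hadoop:
# ls -ld /apps/*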
###################### Run the following steps as the hadoop user ######################
$ mkdir -p /apps/svr/java/
$ tar -zxvf ~/jdk-8u172-linux-x64.tar.gz -C /apps/svr/java/
$ scp -r /apps/svr/java/jdk1.8.0_172/ slave61:/apps/svr/java/
$ scp -r /apps/svr/java/jdk1.8.0_172/ slave62:/apps/svr/java/
$ scp -r /apps/svr/java/jdk1.8.0_172/ slave63:/apps/svr/java/
$ vim ~/.bash_profile
# JAVA_HOME
export JAVA_HOME=/apps/svr/java/jdk1.8.0_172
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export PATH=$PATH:$JAVA_HOME/bin
$ source ~/.bash_profile
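Verify the JDK is picked up from the new profile:
$ echo $JAVA_HOME
$ java -version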
a. Create the Hadoop working directories, then upload and extract the Hadoop tarball
$ mkdir -p /apps/svr/hadoop/
$ cd /apps/svr/hadoop/
$ mkdir conf data1 data2 lib logs run
$ mkdir -p /apps/svr/hadoop/data1/dfs/dn
$ mkdir -p /apps/svr/hadoop/data2/dfs/dn
$ mkdir -p /apps/svr/hadoop/data1/dfs/nn
$ mkdir -p /apps/svr/hadoop/data2/dfs/nn
$ tar -zxvf ~/hadoop-2.7.3.tar.gz -C /apps/svr/hadoop/
b. Configure hadoop-env.sh
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/apps/svr/java/jdk1.8.0_172/
a. Configure core-site.xml
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/core-site.xml
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://master60:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/apps/svr/hadoop/run/</value>
</property>
<property>
  <name>hadoop.proxyuser.hadoop.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.hadoop.groups</name>
  <value>*</value>
</property>
b. Configure hdfs-site.xml
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/hdfs-site.xml
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/apps/svr/hadoop/data1/dfs/dn,/apps/svr/hadoop/data2/dfs/dn</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/apps/svr/hadoop/data1/dfs/nn,/apps/svr/hadoop/data2/dfs/nn</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
<property>
  <name>dfs.permissions</name>
  <value>true</value>
</property>
c. Configure slaves to list the hosts that run DataNodes
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/slaves
slave61
slave62
slave63
a. Configure yarn-site.xml
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/yarn-site.xml
<property>
  <name>yarn.resourcemanager.address</name>
  <value>master60:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>master60:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>master60:8031</value>
</property>
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>master60:8033</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>master60:8038</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
b. Configure mapred-site.xml (the distribution only ships mapred-site.xml.template, so copy it to mapred-site.xml first)
$ vim /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
$ scp -r /apps/svr/hadoop/ slave61:/apps/svr/
$ scp -r /apps/svr/hadoop/ slave62:/apps/svr/
$ scp -r /apps/svr/hadoop/ slave63:/apps/svr/
$ vim ~/.bash_profile
# HADOOP_HOME
export HADOOP_HOME=/apps/svr/hadoop/hadoop-2.7.3
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_HOME/share/hadoop/tools/lib/*
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
$ source ~/.bash_profile
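Verify the Hadoop commands are now on the PATH (and, if the slaves should have the same shell environment, ~/.bash_profile can be copied to them too, e.g. scp ~/.bash_profile slave61:~/):
$ hadoop version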
a. Format the NameNode
$ hdfs namenode -format
b. Start HDFS and YARN
$ start-dfs.sh
$ start-yarn.sh
c. Check that the daemons started
$ yarn node -list
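Beyond yarn node -list, the Java processes and the HDFS report are worth a look; jps on master60 should show NameNode and ResourceManager, and on the slaves DataNode and NodeManager:
$ jps
$ hdfs dfsadmin -report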
d. Verify via the web UI
HDFS : http://192.168.9.60:50070
YARN : http://192.168.9.60:8038
a. Configure mapred-site.xml
$ cd /apps/svr/hadoop/hadoop-2.7.3/etc/hadoop/
$ vim mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>192.168.9.62:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>192.168.9.62:19888</value>
</property>
<property>
  <name>mapreduce.jobhistory.done-dir</name>
  <value>${yarn.app.mapreduce.am.staging-dir}/history/done</value>
</property>
<property>
  <name>mapreduce.jobhistory.intermediate-done-dir</name>
  <value>${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate</value>
</property>
<property>
  <name>yarn.app.mapreduce.am.staging-dir</name>
  <value>/tmp/hadoop-yarn/staging</value>
</property>
b. Start the JobHistory server (on slave62 / 192.168.9.62, where mapreduce.jobhistory.address points)
$ mr-jobhistory-daemon.sh start historyserver
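The daemon can be confirmed with jps on the node where it was started:
$ jps | grep JobHistoryServer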
c. Verify via the web UI
JobHistory : http://192.168.9.62:19888
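As an end-to-end smoke test, the example job bundled with the distribution can be submitted; assuming the install path above, something like the following should complete and then show up in the JobHistory UI:
$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 10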