The installation steps are as follows (the host network adapter address can be customized to your environment).
Install a Linux virtual machine
hostnamectl set-hostname hadoop101
vi /etc/hosts
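In the hosts file, map the hostname to the VM's IP address. A one-line example, assuming the static IP 192.168.19.101 that this guide uses later:
192.168.19.101 hadoop101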
ssh-keygen -t rsa -P ""
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
ssh-copy-id -i .ssh/id_rsa.pub -p22 root@hadoop101
#chmod 600 /root/.ssh/authorized_keys
ssh -p 22 root@192.168.19.101
exit
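If the key-based login was set up correctly, the ssh command above connects without prompting for a password; exit returns to the original shell.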
mkdir /opt/soft
tar -zxvf jdk-8u221-linux-x64.tar.gz -C /opt/soft
tar -zxvf hadoop-2.6.0-cdh5.14.2.tar.gz -C /opt/soft
tar -xvf hadoop-native-64-2.6.0.tar -C /opt/soft
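Before cleaning up, it is worth confirming the archives extracted to the right place; the directory names below follow from the tarball versions above:
ls /opt/soft
# expect hadoop-2.6.0-cdh5.14.2, jdk1.8.0_221, and the extracted lib* native library files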
rm -rf *   # delete the downloaded tarballs; run this only inside the directory that contains them
cd /opt/soft
mv hadoop-2.6.0-cdh5.14.2/ hadoop260
mv jdk1.8.0_221/ java8
mv lib* /opt/soft/hadoop260/lib/native/
cp /opt/soft/hadoop260/lib/native/lib* /opt/soft/hadoop260/lib/
vi /etc/profile
export JAVA_HOME=/opt/soft/java8
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JRE_HOME/lib/rt.jar:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export HADOOP_HOME=/opt/soft/hadoop260
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
source /etc/profile
java -version
javac
echo $JAVA_HOME
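With the profile loaded, the Hadoop binaries should also be on the PATH. A quick sanity check; hadoop checknative reports whether the native libraries copied earlier are being loaded:
hadoop version
hadoop checknative -a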
cd /opt/soft/hadoop260/etc/hadoop   # all of the following config files live here
vi hadoop-env.sh
export JAVA_HOME=/opt/soft/java8
vi core-site.xml
<configuration>
<!-- The default file system name: usually the NameNode URI, including host and port -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.19.101:9000</value>
</property>
<!-- Parent directory for other temporary directories -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/soft/hadoop260/tmp</value>
</property>
<!-- Allow the root proxy user to connect from any host -->
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<!-- Allow the root proxy user to impersonate users in any group -->
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
</configuration>
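hadoop.tmp.dir points at a directory that does not exist yet; Hadoop creates it when the NameNode is formatted, but you can also create it up front:
mkdir -p /opt/soft/hadoop260/tmp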
vi hdfs-site.xml
<configuration>
<!-- Number of block replicas; 1 is enough for a single-node setup -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<!-- Whether to enable HDFS permission checking; disabled here to simplify a single-node test setup -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
</configuration>
mv mapred-site.xml.template mapred-site.xml
vi mapred-site.xml
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
Note: to use the JobHistory server, also add the following:
<!-- JobHistory server address -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop101:10020</value>
</property>
<!-- Web UI address for the JobHistory server -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop101:19888</value>
</property>
vi yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<!-- How reducers fetch data: via the mapreduce shuffle service -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Hostname of the YARN ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop101</value>
</property>
</configuration>
vi slaves
hadoop101
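For a multi-node cluster, list every DataNode hostname in slaves, one per line; this single-node setup runs all daemons on hadoop101.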
hdfs namenode -format
Note: if no error is reported, the format succeeded; if an error appears, fix the offending configuration file and run the format again.
start-all.sh
mr-jobhistory-daemon.sh start historyserver
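If everything started cleanly, jps lists one process per daemon, and the web UIs answer on the Hadoop 2.x default ports plus the JobHistory port configured above:
jps
# expect: NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager, JobHistoryServer
# HDFS NameNode UI: http://192.168.19.101:50070
# YARN ResourceManager UI: http://hadoop101:8088
# JobHistory UI: http://hadoop101:19888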
Option 1 (stop the cluster gracefully):
stop-all.sh
mr-jobhistory-daemon.sh stop historyserver
Option 2 (kill the processes directly; not recommended, as data can easily be lost):
kill -9 [pid]
poweroff
#or
#shutdown
Note:
Hadoop cluster, ZooKeeper, HBase, and Hive setup (on CentOS 7.0).