Big Data Development Environment Setup 4 -- Installing a Hadoop HA Cluster

4. Installing the Hadoop HA Cluster

  • Download
http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
  • Install Hadoop under /usr/local/hadoop
tar -xvzf hadoop-2.7.3.tar.gz -C /usr/local
mv /usr/local/hadoop-2.7.3 /usr/local/hadoop
  • Edit /etc/profile and append the following at the end (then reload it, as shown below):
# Hadoop
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
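For the new PATH entries to take effect in the current shell, the profile has to be reloaded; a quick check then confirms the hadoop command resolves:

# reload the environment and confirm the hadoop binaries are on PATH
source /etc/profile
hadoop version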
  • Edit {HADOOP_HOME}/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_121
  • Edit {HADOOP_HOME}/etc/hadoop/core-site.xml (a quick check follows the file)

<configuration>
    <!-- Default filesystem: the logical HA nameservice -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://bigdata</value>
    </property>
    <!-- Base directory for Hadoop temporary data -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop</value>
    </property>
    <!-- ZooKeeper quorum used for automatic failover -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>node1:2181,node2:2181,node3:2181</value>
    </property>
</configuration>
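As a sanity check that the file is well-formed and the nameservice is picked up, the values can be read back with hdfs getconf once the configuration is in place:

# read configured values back from core-site.xml
hdfs getconf -confKey fs.defaultFS          # expect hdfs://bigdata
hdfs getconf -confKey ha.zookeeper.quorum   # expect node1:2181,node2:2181,node3:2181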
  • Edit {HADOOP_HOME}/etc/hadoop/hdfs-site.xml (see the fencing note after the file)

<configuration>
    <!-- Nameservice ID; must match fs.defaultFS in core-site.xml -->
    <property>
        <name>dfs.nameservices</name>
        <value>bigdata</value>
    </property>
    <!-- The two NameNodes in the nameservice -->
    <property>
        <name>dfs.ha.namenodes.bigdata</name>
        <value>nn1,nn2</value>
    </property>
    <!-- RPC address of NameNode nn1 -->
    <property>
        <name>dfs.namenode.rpc-address.bigdata.nn1</name>
        <value>node1:8020</value>
    </property>
    <!-- RPC address of NameNode nn2 -->
    <property>
        <name>dfs.namenode.rpc-address.bigdata.nn2</name>
        <value>node2:8020</value>
    </property>
    <!-- Web UI address of NameNode nn1 -->
    <property>
        <name>dfs.namenode.http-address.bigdata.nn1</name>
        <value>node1:50070</value>
    </property>
    <!-- Web UI address of NameNode nn2 -->
    <property>
        <name>dfs.namenode.http-address.bigdata.nn2</name>
        <value>node2:50070</value>
    </property>
    <!-- JournalNodes that store the shared edit log -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://node4:8485;node5:8485;node6:8485/bigdata</value>
    </property>
    <!-- Local directory where each JournalNode keeps its data -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/tmp/hadoop/journaldata</value>
    </property>
    <!-- Client-side class that determines which NameNode is active -->
    <property>
        <name>dfs.client.failover.proxy.provider.bigdata</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fence the failed NameNode over SSH during failover -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
    </property>
    <!-- Private key used by sshfence -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_dsa</value>
    </property>
    <!-- Enable automatic failover via ZKFC -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- Disable HDFS permission checking (convenient for a test cluster) -->
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
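The sshfence method only works if each NameNode host can reach the other over SSH as root with the private key referenced above. A minimal sketch, assuming node1 and node2 are the NameNode hosts and the DSA key does not exist yet:

# on node1 and on node2: create the key referenced by dfs.ha.fencing.ssh.private-key-files
ssh-keygen -t dsa -P '' -f /root/.ssh/id_dsa
# allow each NameNode host to log into the other (and itself) without a password
ssh-copy-id -i /root/.ssh/id_dsa.pub root@node1
ssh-copy-id -i /root/.ssh/id_dsa.pub root@node2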
  • Edit {HADOOP_HOME}/etc/hadoop/mapred-site.xml

# copy the template first, then edit the copy
cp mapred-site.xml.template mapred-site.xml

<configuration>
    <!-- Run MapReduce jobs on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
  • Edit {HADOOP_HOME}/etc/hadoop/yarn-site.xml (see the check after the file)

<configuration>
    <!-- Enable ResourceManager HA -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <!-- Logical ID of the ResourceManager cluster -->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yrc</value>
    </property>
    <!-- IDs of the two ResourceManagers -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <!-- Host of each ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>node1</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>node3</value>
    </property>
    <!-- ZooKeeper quorum used for ResourceManager state and leader election -->
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>node1:2181,node2:2181,node3:2181</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
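Once both ResourceManagers are running (they are started in the initialization steps further down), their HA state can be checked against the rm-ids defined above:

# query ResourceManager HA state (run after YARN has been started)
yarn rmadmin -getServiceState rm1   # one should report active
yarn rmadmin -getServiceState rm2   # the other standby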
  • Edit {HADOOP_HOME}/etc/hadoop/slaves (the hostnames must resolve on every node; see the note after this list)
node2
node3
node4
node5
node6
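All of the node1 through node6 hostnames used in this guide must resolve on every machine. If no DNS is available, /etc/hosts needs matching entries on each node; the 192.168.1.x addresses below are placeholders, substitute your own:

# /etc/hosts on every node (example addresses only)
192.168.1.101 node1
192.168.1.102 node2
192.168.1.103 node3
192.168.1.104 node4
192.168.1.105 node5
192.168.1.106 node6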
  • Copy to the other nodes (see the note below about /etc/profile)
scp -r /usr/local/hadoop/ root@node2:/usr/local
scp -r /usr/local/hadoop/ root@node3:/usr/local
scp -r /usr/local/hadoop/ root@node4:/usr/local
scp -r /usr/local/hadoop/ root@node5:/usr/local
scp -r /usr/local/hadoop/ root@node6:/usr/local
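The environment settings from /etc/profile are also needed on the other nodes. Assuming the same file is used everywhere, it can be distributed the same way:

# distribute the environment settings (assumes /etc/profile is identical on all nodes)
for n in node2 node3 node4 node5 node6; do
    scp /etc/profile root@$n:/etc/profile
done
# remember to run 'source /etc/profile' on each node afterwards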
  • Initialize Hadoop (verification commands follow these steps)
# 1. Start ZooKeeper (node1, node2, node3)
zkServer.sh start

# 2. Start the JournalNodes (node4, node5, node6)
hadoop-daemon.sh start journalnode

# 3. Format HDFS (node1, node2)
# Run on node1
hdfs namenode -format
hadoop-daemon.sh start namenode
# Run on node2
hdfs namenode -bootstrapStandby

# 4. Format the ZKFC state in ZooKeeper (node1)
hdfs zkfc -formatZK

# 5. Start HDFS (node1)
start-dfs.sh

# 6. Start YARN (node1, node3)
# Run on node1
start-yarn.sh
# Run on node3
yarn-daemon.sh start resourcemanager
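After these steps, each node can be checked with jps, and the NameNode HA state queried with hdfs haadmin; one NameNode should report active and the other standby:

# verify the daemons and the HA state of the two NameNodes
jps                                 # run on each node to list the Hadoop processes
hdfs haadmin -getServiceState nn1   # expect active (or standby)
hdfs haadmin -getServiceState nn2   # expect standby (or active)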
  • Full startup
start-all.sh
  • Verify via the web UIs (a sample job is sketched below)
http://node1:50070

# Run a job and watch it in the ResourceManager UI
http://node1:8088
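To have something to look at on the :8088 page, the example jar bundled with Hadoop can be used to run a small job; a sketch assuming the install path used in this guide:

# run the bundled pi example to generate a visible YARN application
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 10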
