Spark in Production (7): Hadoop Cluster Installation on a 5-Node Distributed Cluster


1. Download Hadoop.

Download page: http://hadoop.apache.org/releases.html

Version to download: Hadoop 2.6.5 (the 2.6.x line is relatively stable).
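As a sketch, the same tarball can also be fetched directly on the master node, for example from the Apache archive (the mirror path below is assumed to follow the standard archive layout):

# Download Hadoop 2.6.5 from the Apache archive (mirror URL assumed)
wget https://archive.apache.org/dist/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
# Check that the file arrived intact before unpacking
ls -lh hadoop-2.6.5.tar.gz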


2. Upload the tarball to the master node with WinSCP.

Check:
[root@master rhzf_spark_setupTools]# ls
 hadoop-2.6.5.tar.gz  jdk-8u121-linux-x64.tar.gz  scala-2.11.8.zip
[root@master rhzf_spark_setupTools]# 

3. Unpack and install Hadoop.


[root@master rhzf_spark_setupTools]# tar  -zxvf hadoop-2.6.5.tar.gz

[root@master hadoop-2.6.5]# vi /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_121
export SCALA_HOME=/usr/local/scala-2.11.8
export HADOOP_HOME=/usr/local/hadoop-2.6.5


export PATH=.:$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin


Run source /etc/profile on the command line so that the HADOOP_HOME and PATH changes just made take effect:

[root@master hadoop-2.6.5]# source /etc/profile
[root@master hadoop-2.6.5]# 
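A quick sanity check (a minimal sketch) confirms that the new variables and the hadoop launcher are visible in the current shell:

# Verify the environment after sourcing /etc/profile
echo $HADOOP_HOME      # expect /usr/local/hadoop-2.6.5
hadoop version         # should report Hadoop 2.6.5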


4. Edit the Hadoop core-site.xml configuration file.

[root@master hadoop]# cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

   <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/local/hadoop-2.6.5/tmp</value>
        <description>hadoop.tmp.dir</description>
   </property>

   <property>
        <name>fs.defaultFS</name>
        <value>hdfs://Master:9000</value>
   </property>

   <property>
       <name>hadoop.native.lib</name>
       <value>false</value>
       <description>no use native hadoop libraries</description>
   </property>

</configuration>

[root@master hadoop]#



5. Edit the Hadoop hdfs-site.xml configuration file.


[root@master hadoop]# cat  hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

   <property>
        <name>dfs.replication</name>
        <value>3</value>
   </property>

   <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/local/hadoop-2.6.5/tmp/dfs/name</value>
   </property>

   <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/local/hadoop-2.6.5/tmp/dfs/data</value>
   </property>

</configuration>

[root@master hadoop]#
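The NameNode format and the DataNode startup create these directories on demand, but they can also be created up front; a minimal sketch using the paths from the configuration above:

# Optional: pre-create the metadata and block storage directories on master
mkdir -p /usr/local/hadoop-2.6.5/tmp/dfs/name
mkdir -p /usr/local/hadoop-2.6.5/tmp/dfs/data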

6. Edit the Hadoop hadoop-env.sh configuration file (an explicit JAVA_HOME is appended at the end).

[root@master hadoop]# cat hadoop-env.sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Set Hadoop-specific environment variables here.


# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.


# The java implementation to use.
export JAVA_HOME=${JAVA_HOME}


# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol.  Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}


export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}


# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done


# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""


# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"


# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"


export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"


export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"


# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"


# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol.  This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}


# Where log files are stored.  $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER


# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}


###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""


###
# Advanced Users Only!
###


# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by 
#       the user that will run the hadoop daemons.  Otherwise there is the
#       potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}


# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER


export JAVA_HOME=/usr/local/jdk1.8.0_121


[root@master hadoop]# 


7. Edit the Hadoop slaves configuration file on the master.

master acts both as the master node and as a data-processing node.

[root@master hadoop]# cat slaves
master
worker01
worker02
worker03
worker04

[root@master hadoop]#
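Hostname resolution for master/Master and worker01–worker04 is assumed to have been configured earlier in this series (see rhzf_hosts_scp.sh). For reference only, a hypothetical /etc/hosts consistent with the addresses used by the later scripts would look like this (the mapping is an assumption, not taken from the original article):

# assumed cluster address plan; adjust to the real environment
10.100.100.237   master    Master
10.100.100.238   worker01
10.100.100.239   worker02
10.100.100.240   worker03
10.100.100.241   worker04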


This completes the minimal Hadoop configuration.

8. Write a script to distribute the Hadoop files to the worker nodes.

[root@master local]# cd rhzf_setup_scripts 
[root@master rhzf_setup_scripts]# ls
rhzf_hosts_scp.sh  rhzf_scala.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# vi rhzf_hadoop.sh


#!/bin/sh
for i in  238 239 240 241
do
scp   -rq /usr/local/hadoop-2.6.5  root@10.100.100.$i:/usr/local/hadoop-2.6.5
scp   -rq /etc/profile  root@10.100.100.$i:/etc/profile
ssh   root@10.100.100.$i source /etc/profile
done


Run the script:

[root@master rhzf_setup_scripts]# chmod u+x rhzf_hadoop.sh
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh  rhzf_hosts_scp.sh  rhzf_scala.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# ./rhzf_hadoop.sh
[root@master rhzf_setup_scripts]# 
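As a quick check (a sketch; hostnames assume the worker aliases configured earlier), confirm that the copy landed on every worker:

# Verify the Hadoop directory now exists on each worker node
for h in worker01 worker02 worker03 worker04
do
  ssh root@$h "ls -d /usr/local/hadoop-2.6.5"
done

Note that the ssh ... source /etc/profile line in the distribution script only affects that single remote shell; interactive login sessions on the workers read /etc/profile on their own.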


9. Check the worker nodes.

Last login: Wed Apr 19 10:12:34 2017 from 132.150.75.19
[root@worker01 ~]# cd /usr/local
[root@worker01 local]# ls
bin  etc  games  hadoop-2.6.5  include  lib  lib64  libexec  sbin  scala-2.11.8  share  src
[root@worker01 local]# 


Last login: Wed Apr 19 10:12:39 2017 from 132.150.75.19
[root@worker02 ~]# cd /usr/local
[root@worker02 local]# ls
bin  etc  games  hadoop-2.6.5  include  lib  lib64  libexec  sbin  scala-2.11.8  share  src
[root@worker02 local]#



Last login: Wed Apr 19 10:12:44 2017 from 132.150.75.19
[root@worker03 ~]# cd /usr/local
[root@worker03 local]# ls
bin  etc  games  hadoop-2.6.5  include  lib  lib64  libexec  sbin  scala-2.11.8  share  src
[root@worker03 local]# 


Last login: Wed Apr 19 10:12:49 2017 from 132.150.75.19
[root@worker04 ~]# cd /usr/local
[root@worker04 local]# ls
bin  etc  games  hadoop-2.6.5  include  lib  lib64  libexec  sbin  scala-2.11.8  share  src
[root@worker04 local]# 

10. Format the Hadoop cluster file system.


[root@master hadoop-2.6.5]# cd bin
[root@master bin]# ls
container-executor  hadoop  hadoop.cmd  hdfs  hdfs.cmd  mapred  mapred.cmd  rcc  test-container-executor  yarn  yarn.cmd
[root@master bin]# hdfs namenode -format
17/04/19 15:21:05 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/10.100.100.237
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.6.5
STARTUP_MSG:   classpath = /usr/local/hadoop-2.6.5/etc/hadoop:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/usr/local/hadoop-
...... (output omitted)
STARTUP_MSG:   build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z
STARTUP_MSG:   java = 1.8.0_121
************************************************************/
17/04/19 15:21:05 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
17/04/19 15:21:05 INFO namenode.NameNode: createNameNode [-format]
17/04/19 15:21:05 WARN common.Util: Path /usr/local/hadoop-2.6.5/tmp/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
17/04/19 15:21:05 WARN common.Util: Path /usr/local/hadoop-2.6.5/tmp/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
Formatting using clusterid: CID-d0ca7040-2c7d-419b-85be-24323b923f2f
17/04/19 15:21:06 INFO namenode.FSNamesystem: No KeyProvider found.
17/04/19 15:21:06 INFO namenode.FSNamesystem: fsLock is fair:true
17/04/19 15:21:06 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
17/04/19 15:21:06 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
17/04/19 15:21:06 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
17/04/19 15:21:06 INFO blockmanagement.BlockManager: The block deletion will start around 2017 Apr 19 15:21:06
17/04/19 15:21:06 INFO util.GSet: Computing capacity for map BlocksMap
17/04/19 15:21:06 INFO util.GSet: VM type       = 64-bit
17/04/19 15:21:06 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB
17/04/19 15:21:06 INFO util.GSet: capacity      = 2^21 = 2097152 entries
17/04/19 15:21:06 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
17/04/19 15:21:06 INFO blockmanagement.BlockManager: defaultReplication         = 3
17/04/19 15:21:06 INFO blockmanagement.BlockManager: maxReplication             = 512
17/04/19 15:21:06 INFO blockmanagement.BlockManager: minReplication             = 1
17/04/19 15:21:06 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
17/04/19 15:21:06 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
17/04/19 15:21:06 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
17/04/19 15:21:06 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
17/04/19 15:21:06 INFO namenode.FSNamesystem: fsOwner             = root (auth:SIMPLE)
17/04/19 15:21:06 INFO namenode.FSNamesystem: supergroup          = supergroup
17/04/19 15:21:06 INFO namenode.FSNamesystem: isPermissionEnabled = true
17/04/19 15:21:06 INFO namenode.FSNamesystem: HA Enabled: false
17/04/19 15:21:06 INFO namenode.FSNamesystem: Append Enabled: true
17/04/19 15:21:06 INFO util.GSet: Computing capacity for map INodeMap
17/04/19 15:21:06 INFO util.GSet: VM type       = 64-bit
17/04/19 15:21:06 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB
17/04/19 15:21:06 INFO util.GSet: capacity      = 2^20 = 1048576 entries
17/04/19 15:21:06 INFO namenode.NameNode: Caching file names occuring more than 10 times
17/04/19 15:21:06 INFO util.GSet: Computing capacity for map cachedBlocks
17/04/19 15:21:06 INFO util.GSet: VM type       = 64-bit
17/04/19 15:21:06 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB
17/04/19 15:21:06 INFO util.GSet: capacity      = 2^18 = 262144 entries
17/04/19 15:21:06 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
17/04/19 15:21:06 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
17/04/19 15:21:06 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension     = 30000
17/04/19 15:21:06 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
17/04/19 15:21:06 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
17/04/19 15:21:06 INFO util.GSet: Computing capacity for map NameNodeRetryCache
17/04/19 15:21:06 INFO util.GSet: VM type       = 64-bit
17/04/19 15:21:06 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB
17/04/19 15:21:06 INFO util.GSet: capacity      = 2^15 = 32768 entries
17/04/19 15:21:06 INFO namenode.NNConf: ACLs enabled? false
17/04/19 15:21:06 INFO namenode.NNConf: XAttrs enabled? true
17/04/19 15:21:06 INFO namenode.NNConf: Maximum size of an xattr: 16384
17/04/19 15:21:06 INFO namenode.FSImage: Allocated new BlockPoolId: BP-1333219187-10.100.100.237-1492586466692
17/04/19 15:21:06 INFO common.Storage: Storage directory /usr/local/hadoop-2.6.5/tmp/dfs/name has been successfully formatted.
17/04/19 15:21:06 INFO namenode.FSImageFormatProtobuf: Saving image file /usr/local/hadoop-2.6.5/tmp/dfs/name/current/fsimage.ckpt_0000000000000000000 using no compression
17/04/19 15:21:06 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/hadoop-2.6.5/tmp/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds.
17/04/19 15:21:06 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
17/04/19 15:21:06 INFO util.ExitUtil: Exiting with status 0
17/04/19 15:21:06 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/10.100.100.237
************************************************************/
[root@master bin]# 
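The format exits with status 0 above; as an optional sanity check (a minimal sketch), the freshly created metadata directory can be listed:

# After a successful format, current/ holds fsimage, VERSION and seen_txid
ls /usr/local/hadoop-2.6.5/tmp/dfs/name/current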


11. Start the Hadoop cluster.



[root@master sbin]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [Master]
Master: Warning: Permanently added the ECDSA host key for IP address '10.100.100.237' to the list of known hosts.
Master: starting namenode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-namenode-master.out
worker04: Warning: Permanently added 'worker04' (ECDSA) to the list of known hosts.
master: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-master.out
worker04: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker04.out
worker03: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker03.out
worker01: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker01.out
worker02: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker02.out
worker04: /usr/local/hadoop-2.6.5/bin/hdfs: line 276: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker03: /usr/local/hadoop-2.6.5/bin/hdfs: line 276: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker01: /usr/local/hadoop-2.6.5/bin/hdfs: line 276: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker02: /usr/local/hadoop-2.6.5/bin/hdfs: line 276: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
Starting secondary namenodes [0.0.0.0]
0.0.0.0: Warning: Permanently added '0.0.0.0' (ECDSA) to the list of known hosts.
0.0.0.0: starting secondarynamenode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-resourcemanager-master.out
worker04: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker04.out
worker02: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker02.out
master: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-master.out
worker01: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker01.out
worker03: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker03.out
worker04: /usr/local/hadoop-2.6.5/bin/yarn: line 284: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker02: /usr/local/hadoop-2.6.5/bin/yarn: line 284: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker01: /usr/local/hadoop-2.6.5/bin/yarn: line 284: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
worker03: /usr/local/hadoop-2.6.5/bin/yarn: line 284: /usr/local/jdk1.8.0_121/bin/java: No such file or directory
[root@master sbin]# 
[root@master sbin]# 


[root@master sbin]# jps
20609 SecondaryNameNode
20420 DataNode
20789 ResourceManager
20903 NodeManager
21225 Jps
20266 NameNode
[root@master sbin]# 


Java was upgraded earlier on the master node, but the four worker nodes are still on the old Java, which is why the startup above fails on the workers with "No such file or directory" for /usr/local/jdk1.8.0_121/bin/java:

[root@worker04 local]# java -version
java version "1.7.0_51"
OpenJDK Runtime Environment (rhel-2.4.5.5.el7-x86_64 u51-b31)
OpenJDK 64-Bit Server VM (build 24.51-b03, mixed mode)
[root@worker04 local]# jps
bash: jps: command not found...
[root@worker04 local]# 
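To confirm the diagnosis on all four workers at once, a hedged one-liner (a sketch; hostnames assumed as above) checks whether the expected JDK path exists:

# Each worker should report the path as missing until JDK 8 is installed
for h in worker01 worker02 worker03 worker04
do
  ssh root@$h "ls /usr/local/jdk1.8.0_121/bin/java"
done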


12. Install Java 8 on the worker nodes.

[root@master rhzf_setup_scripts]# cat  rhzf_jdk.sh 
#!/bin/sh
for i in  238 239 240 241
do
ssh   root@10.*.*.$i rpm -e --nodeps   java-1.7.0-openjdk-1.7.0.51-2.4.5.5.el7.x86_64
ssh   root@10.*.*.$i rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.51-2.4.5.5.el7.x86_64
scp   -rq /usr/local/jdk1.8.0_121  root@10.*.*.$i:/usr/local/jdk1.8.0_121
done
[root@master rhzf_setup_scripts]# ./rhzf_jdk.sh 
-bash: ./rhzf_jdk.sh: Permission denied
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh  rhzf_hosts_scp.sh  rhzf_jdk.sh  rhzf_scala.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# chmod u+x  rhzf_jdk.sh
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh  rhzf_hosts_scp.sh  rhzf_jdk.sh  rhzf_scala.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# ./rhzf_jdk.sh

Refresh the environment:

 # source /etc/profile


Verify that the JDK installation is complete on all four worker nodes:

[root@worker01 local]# java -version
java version "1.8.0_121"
Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
[root@worker01 local]# 



[root@worker02 bin]# java -version
java version "1.8.0_121"
Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
[root@worker02 bin]# 



[root@worker03 local]# source /etc/profile
[root@worker03 local]# java -version
java version "1.8.0_121"
Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
[root@worker03 local]# 




[root@worker04 local]# source /etc/profile
[root@worker04 local]# java -version
java version "1.8.0_121"
Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
[root@worker04 local]# 


13. Stop the Hadoop cluster.


[root@master sbin]#  stop-all.sh 
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [Master]
Master: stopping namenode
worker04: no datanode to stop
master: stopping datanode
worker02: no datanode to stop
worker01: no datanode to stop
worker03: no datanode to stop
Stopping secondary namenodes [0.0.0.0]
0.0.0.0: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
worker04: no nodemanager to stop
worker03: no nodemanager to stop
master: stopping nodemanager
worker01: no nodemanager to stop
worker02: no nodemanager to stop
no proxyserver to stop
[root@master sbin]# 


14. Restart the Hadoop cluster; everything comes up normally this time.


[root@master sbin]# start-all.sh    
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [Master]
Master: starting namenode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-namenode-master.out
worker04: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker04.out
master: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-master.out
worker03: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker03.out
worker01: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker01.out
worker02: starting datanode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-datanode-worker02.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/hadoop-2.6.5/logs/hadoop-root-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-resourcemanager-master.out
worker04: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker04.out
worker02: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker02.out
master: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-master.out
worker01: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker01.out
worker03: starting nodemanager, logging to /usr/local/hadoop-2.6.5/logs/yarn-root-nodemanager-worker03.out
[root@master sbin]# jps
22869 ResourceManager
22330 NameNode
22490 DataNode
23323 Jps
22684 SecondaryNameNode
22990 NodeManager
[root@master sbin]# 

Check the four worker nodes:

[root@worker01 local]# jps
20752 DataNode
20884 NodeManager
21001 Jps
[root@worker01 local]# 


[root@worker02 bin]# jps
20771 DataNode
21019 Jps
20895 NodeManager
[root@worker02 bin]# 


[root@worker03 local]# 
[root@worker03 local]# jps
20528 DataNode
20658 NodeManager
20775 Jps
[root@worker03 local]# 



[root@worker04 local]# jps
20624 NodeManager
20500 DataNode
20748 Jps
[root@worker04 local]# 



15. The web UI page does not open: 10.*.*237:50070.

Re-formatting the file system (hdfs namenode -format) was tried again, without success.
Restarting the Hadoop cluster did not help either.
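Before touching the configuration, it helps to check on the master whether the NameNode HTTP port is listening at all, and on which address (a sketch; tool availability varies by distribution):

# Is anything listening on the NameNode web UI port, and bound to which address?
netstat -lntp | grep 50070     # or: ss -lntp | grep 50070
# Probe the page locally on master, bypassing any external firewall
curl -I http://127.0.0.1:50070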

16. Modify the configuration: add dfs.http.address to hdfs-site.xml.

[root@worker03 hadoop]# cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

   <property>
        <name>dfs.replication</name>
        <value>3</value>
   </property>

   <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/local/hadoop-2.6.5/tmp/dfs/name</value>
   </property>

   <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/local/hadoop-2.6.5/tmp/dfs/data</value>
   </property>

   <property>
        <name>dfs.http.address</name>
        <value>10.100.100.237:50070</value>
   </property>

</configuration>

[root@worker03 hadoop]#
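After the updated hdfs-site.xml is in place on all nodes, restarting HDFS and probing the port (a minimal sketch) should confirm that the web UI now binds to the master's address:

# Restart HDFS so the new dfs.http.address takes effect, then probe the UI port
cd /usr/local/hadoop-2.6.5/sbin
./stop-dfs.sh
./start-dfs.sh
curl -I http://10.100.100.237:50070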

