Hadoop 2.7.3 Installation, Configuration, and Testing

1. Overview

Hadoop is a distributed-systems infrastructure developed by the Apache Software Foundation. It lets users write distributed programs without having to understand the low-level details of distribution. Hadoop supports three installation modes: standalone, pseudo-distributed, and fully distributed. Since real production deployments are fully distributed, this post uses the fully distributed mode as well.

2. Environment

Hostname  IP Address     Purpose
master    192.168.0.128  Hadoop NameNode
slave1    192.168.0.11   Hadoop DataNode 1
slave2    192.168.0.12   Hadoop DataNode 2
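The configuration below refers to the nodes by hostname, so each of the three servers must be able to resolve these names. Assuming no DNS, the mapping presumably goes into /etc/hosts on every node (the same mapping is added on the Windows client in section 7):

192.168.0.128 master
192.168.0.11  slave1
192.168.0.12  slave2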

3. Software

Software  Version       Download
JDK       1.8           http://download.oracle.com/otn-pub/java/jdk/8u121-b13/e9e7ea248e2c4826b92b3f075a80e441/jdk-8u121-linux-x64.tar.gz
RedHat    6.4 (64-bit)  -
HADOOP    2.7.3         http://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
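If the master has direct internet access, the Hadoop archive can be fetched straight from the mirror listed above (assuming wget is installed):

[root@master local]# cd /usr/local
[root@master local]# wget http://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz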

4. JDK Installation
See the earlier post "Installing the JDK on a Linux System" (《linux系统安装JDK详解》).

5. SSH Configuration
See the earlier post "Configuring Passwordless SSH Login" (《SSH免密码登陆配置》).

6. Hadoop Installation
Perform the following steps on the master server.
# Copy the downloaded archive to /usr/local and extract it
[root@master local]# tar -zxvf hadoop-2.7.3.tar.gz 
# Rename the extracted directory to drop the version number
[root@master local]# mv hadoop-2.7.3 hadoop
# Verify the renamed directory
[root@master local]# ll | grep hadoop
drwxr-xr-x. 9 root root      4096 Aug 17  2016 hadoop
-rw-r--r--. 1 root root 214092195 Mar 30  2017 hadoop-2.7.3.tar.gz
# Configure the Hadoop environment variables
[root@master local]# vim /home/hadoop/.bashrc
# Append the following environment variables to the end of .bashrc:
export JAVA_HOME=/usr/local/java/jdk1.8.0
export JRE_HOME=/usr/local/java/jdk1.8.0/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
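These variables take effect for the hadoop user at its next login; to apply them in an existing session, reload the file and sanity-check (assuming the JDK from step 4 is installed at /usr/local/java/jdk1.8.0):

[hadoop@master ~]$ source ~/.bashrc
[hadoop@master ~]$ echo $HADOOP_HOME     # should print /usr/local/hadoop
[hadoop@master ~]$ java -version         # should report 1.8.0_121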
# Add the following environment variable to <HADOOP_HOME>/etc/hadoop/hadoop-env.sh:
[root@master hadoop]# cd /usr/local/hadoop/etc/hadoop/
[root@master hadoop]# vim hadoop-env.sh 
export JAVA_HOME=/usr/local/java/jdk1.8.0

# Add the following environment variable after "# some Java parameters" in <HADOOP_HOME>/etc/hadoop/yarn-env.sh:
[root@master hadoop]# cd /usr/local/hadoop/etc/hadoop/
[root@master hadoop]# vim yarn-env.sh 
export JAVA_HOME=/usr/local/java/jdk1.8.0

# Add the following to <HADOOP_HOME>/etc/hadoop/slaves:
slave1
slave2


# Add the following to <HADOOP_HOME>/etc/hadoop/core-site.xml:

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://master:9000</value>
</property>
<property>
  <name>io.file.buffer.size</name>
  <value>131072</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>file:/usr/local/hadoop/tmp</value>
  <description>A base for other temporary directories.</description>
</property>
<property>
  <name>hadoop.proxyuser.spark.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.spark.groups</name>
  <value>*</value>
</property>
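Malformed XML is one of the most common configuration mistakes (see note (1) in section 10). If xmllint is available on the system, each file can be checked for well-formedness right after editing, for example:

[root@master hadoop]# xmllint --noout core-site.xml && echo "core-site.xml OK"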

# Add the following to <HADOOP_HOME>/etc/hadoop/hdfs-site.xml:
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>master:9001</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:/usr/local/hadoop/dfs/name</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:/usr/local/hadoop/dfs/data</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
</property>
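The two dfs.* directories above (/usr/local/hadoop/dfs/name and /usr/local/hadoop/dfs/data) are created automatically when the namenode is formatted, but they can also be created up front with the right ownership, mirroring the tmp directory step below:

[root@master hadoop]# mkdir -p /usr/local/hadoop/dfs/name /usr/local/hadoop/dfs/data
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/hadoop/dfs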


# Copy <HADOOP_HOME>/etc/hadoop/mapred-site.xml.template to mapred-site.xml (the command is shown after this block), then add the following:
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>master:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>master:19888</value>
</property>
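For reference, the template copy mentioned above is a single command:

[root@master hadoop]# cp mapred-site.xml.template mapred-site.xml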

# Add the following to <HADOOP_HOME>/etc/hadoop/yarn-site.xml:
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
  <name>yarn.resourcemanager.address</name>
  <value>master:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>master:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>master:8035</value>
</property>
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>master:8033</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>master:8088</value>
</property>

# Assign ownership of the hadoop directory to the hadoop user
[root@master hadoop]# cd /usr/local/
[root@master local]# chown -R hadoop:hadoop ./hadoop
[root@master local]# ll | grep hadoop
drwxr-xr-x. 9 hadoop hadoop      4096 Aug 17  2016 hadoop

# Create a tmp directory under <HADOOP_HOME> (matching hadoop.tmp.dir in core-site.xml) and assign ownership to the hadoop user
[root@master hadoop]# cd /usr/local/hadoop/
[root@master hadoop]# mkdir tmp
[root@master hadoop]# chown -R hadoop:hadoop tmp
[root@master hadoop]# ll | grep tmp
drwxr-xr-x. 2 hadoop hadoop  4096 Mar 30 21:21 tmp


--------------------------------------------------------------------------------------------------------------------------------

# At this point, configuration on the master node is complete

--------------------------------------------------------------------------------------------------------------------------------


# Copy the hadoop directory from master to /usr/local on slave1 and slave2; the path on the datanodes must match the path on the namenode (run these on the master node):

[root@master local]# pwd
/usr/local   
[root@master local]# scp -r hadoop root@slave1:/usr/local/
[root@master local]# scp -r hadoop root@slave2:/usr/local/


# Log in to slave1 and slave2 (via ssh or directly) and change ownership of /usr/local/hadoop to the hadoop user:
[root@master local]# ssh slave1
root@slave1's password: 
Last login: Thu Mar 30 21:35:28 2017 from 192.168.0.128
[root@slave1 ~]# cd /usr/local/
[root@slave1 local]# chown -R hadoop:hadoop hadoop
[root@slave1 local]# ll | grep hadoop
drwxr-xr-x. 10 hadoop hadoop 4096 Mar 30 21:32 hadoop
[root@slave1 local]# exit
logout
Connection to slave1 closed.

[root@master local]# ssh slave2
root@slave2's password: 
Last login: Thu Mar 30 21:35:28 2017 from 192.168.0.128
[root@slave2 ~]# cd /usr/local/
[root@slave2 local]# chown -R hadoop:hadoop hadoop
[root@slave2 local]# ll | grep hadoop
drwxr-xr-x. 10 hadoop hadoop 4096 Mar 30 21:32 hadoop
[root@slave2 local]# exit
logout
Connection to slave2 closed.


# Switch to the hadoop user and format the namenode
[root@master local]# su hadoop
[hadoop@master local]$ cd /usr/local/hadoop/etc/hadoop/
[hadoop@master hadoop]$ hdfs namenode -format
17/03/30 21:52:20 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/192.168.0.128
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.7.3
STARTUP_MSG:   classpath = /usr/local/hadoop/etc/hadoop:... (long classpath listing omitted for brevity)
STARTUP_MSG:   build = https://git-wip-us.apache.org/repos/asf/hadoop.git -r baa91f7c6bc9cb92be5982de4719c1c8af91ccff; compiled by 'root' on 2016-08-18T01:41Z
STARTUP_MSG:   java = 1.8.0_121
************************************************************/
17/03/30 21:52:21 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
17/03/30 21:52:21 INFO namenode.NameNode: createNameNode [-format]
Formatting using clusterid: CID-c2708722-264a-44d6-9d0e-56e51eb82445
17/03/30 21:52:24 INFO namenode.FSNamesystem: No KeyProvider found.
17/03/30 21:52:24 INFO namenode.FSNamesystem: fsLock is fair:true
17/03/30 21:52:24 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
17/03/30 21:52:24 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
17/03/30 21:52:24 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
17/03/30 21:52:24 INFO blockmanagement.BlockManager: The block deletion will start around 2017 Mar 30 21:52:24
17/03/30 21:52:24 INFO util.GSet: Computing capacity for map BlocksMap
17/03/30 21:52:24 INFO util.GSet: VM type       = 64-bit
17/03/30 21:52:24 INFO util.GSet: 2.0% max memory 966.7 MB = 19.3 MB
17/03/30 21:52:24 INFO util.GSet: capacity      = 2^21 = 2097152 entries
17/03/30 21:52:24 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
17/03/30 21:52:24 INFO blockmanagement.BlockManager: defaultReplication         = 2
17/03/30 21:52:24 INFO blockmanagement.BlockManager: maxReplication             = 512
17/03/30 21:52:24 INFO blockmanagement.BlockManager: minReplication             = 1
17/03/30 21:52:24 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
17/03/30 21:52:24 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
17/03/30 21:52:24 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
17/03/30 21:52:24 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
17/03/30 21:52:24 INFO namenode.FSNamesystem: fsOwner             = hadoop (auth:SIMPLE)
17/03/30 21:52:24 INFO namenode.FSNamesystem: supergroup          = supergroup
17/03/30 21:52:24 INFO namenode.FSNamesystem: isPermissionEnabled = true
17/03/30 21:52:24 INFO namenode.FSNamesystem: HA Enabled: false
17/03/30 21:52:24 INFO namenode.FSNamesystem: Append Enabled: true
17/03/30 21:52:26 INFO util.GSet: Computing capacity for map INodeMap
17/03/30 21:52:26 INFO util.GSet: VM type       = 64-bit
17/03/30 21:52:26 INFO util.GSet: 1.0% max memory 966.7 MB = 9.7 MB
17/03/30 21:52:26 INFO util.GSet: capacity      = 2^20 = 1048576 entries
17/03/30 21:52:26 INFO namenode.FSDirectory: ACLs enabled? false
17/03/30 21:52:26 INFO namenode.FSDirectory: XAttrs enabled? true
17/03/30 21:52:26 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
17/03/30 21:52:26 INFO namenode.NameNode: Caching file names occuring more than 10 times
17/03/30 21:52:26 INFO util.GSet: Computing capacity for map cachedBlocks
17/03/30 21:52:26 INFO util.GSet: VM type       = 64-bit
17/03/30 21:52:26 INFO util.GSet: 0.25% max memory 966.7 MB = 2.4 MB
17/03/30 21:52:26 INFO util.GSet: capacity      = 2^18 = 262144 entries
17/03/30 21:52:26 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
17/03/30 21:52:26 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
17/03/30 21:52:26 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension     = 30000
17/03/30 21:52:27 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
17/03/30 21:52:27 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
17/03/30 21:52:27 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
17/03/30 21:52:27 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
17/03/30 21:52:27 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
17/03/30 21:52:27 INFO util.GSet: Computing capacity for map NameNodeRetryCache
17/03/30 21:52:27 INFO util.GSet: VM type       = 64-bit
17/03/30 21:52:27 INFO util.GSet: 0.029999999329447746% max memory 966.7 MB = 297.0 KB
17/03/30 21:52:27 INFO util.GSet: capacity      = 2^15 = 32768 entries
17/03/30 21:52:28 INFO namenode.FSImage: Allocated new BlockPoolId: BP-1963452554-192.168.0.128-1490935948358
17/03/30 21:52:28 INFO common.Storage: Storage directory /usr/local/hadoop/dfs/name has been successfully formatted.
17/03/30 21:52:29 INFO namenode.FSImageFormatProtobuf: Saving image file /usr/local/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 using no compression
17/03/30 21:52:30 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 353 bytes saved in 1 seconds.
17/03/30 21:52:30 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
17/03/30 21:52:30 INFO util.ExitUtil: Exiting with status 0
17/03/30 21:52:30 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.0.128
************************************************************/
[hadoop@master hadoop]$ 
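One caution before moving on: formatting assigns a new clusterID, which the datanodes record under /usr/local/hadoop/dfs/data on their first start. If the namenode is ever re-formatted later, the datanodes will refuse to register because of the ID mismatch; in that case the old data has to be cleared on all nodes first, along the lines of:

[hadoop@master ~]$ rm -rf /usr/local/hadoop/dfs/name/* /usr/local/hadoop/tmp/*
# and on slave1/slave2:
[hadoop@slave1 ~]$ rm -rf /usr/local/hadoop/dfs/data/* /usr/local/hadoop/tmp/*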

# Start the Hadoop services
[hadoop@master hadoop]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/hadoop/logs/hadoop-hadoop-namenode-master.out
slave1: starting datanode, logging to /usr/local/hadoop/logs/hadoop-hadoop-datanode-slave1.out
slave2: starting datanode, logging to /usr/local/hadoop/logs/hadoop-hadoop-datanode-slave2.out
Starting secondary namenodes [master]
master: starting secondarynamenode, logging to /usr/local/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/hadoop/logs/yarn-hadoop-resourcemanager-master.out
slave2: starting nodemanager, logging to /usr/local/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
slave1: starting nodemanager, logging to /usr/local/hadoop/logs/yarn-hadoop-nodemanager-slave1.out


# Check the service status on the master node
[hadoop@master hadoop]$ jps
38801 ResourceManager
38870 Jps
38457 NameNode
38621 SecondaryNameNode
36287 NodeManager

# Check the service status on the slave nodes
[hadoop@master hadoop]$ ssh slave1
Last login: Fri Mar 31 00:36:07 2017 from master
[hadoop@slave1 ~]$ jps
6710 DataNode
6808 NodeManager
6926 Jps
[hadoop@slave1 ~]$ exit
logout
Connection to slave1 closed.
[hadoop@master hadoop]$ ssh slave2
Last login: Thu Mar 30 22:32:21 2017 from master
[hadoop@slave2 ~]$ jps
6338 Jps
6121 DataNode
6219 NodeManager
[hadoop@slave2 ~]$ exit
logout
Connection to slave2 closed.
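Besides jps, the cluster daemons themselves can confirm that both slaves have registered (with the firewalls down; see note (2) in section 10):

[hadoop@master hadoop]$ hdfs dfsadmin -report    # should list two live datanodes
[hadoop@master hadoop]$ yarn node -list          # should list slave1 and slave2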

7. Hadoop Testing

# To access the cluster by hostname from a local Windows machine, add the hostname-to-IP mappings to C:\Windows\System32\drivers\etc\hosts:
192.168.0.128 master
192.168.0.11 slave1
192.168.0.12 slave2

# Browse to http://master:50070/ to view the status of the Hadoop cluster
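Since dfs.webhdfs.enabled was set to true in hdfs-site.xml, HDFS can also be queried over plain HTTP on the same port, for example (assuming curl is installed):

[hadoop@master ~]$ curl "http://master:50070/webhdfs/v1/?op=LISTSTATUS"    # lists the HDFS root directory as JSON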
8. Word-Count Example

# As the root user, create three text files in /usr/local with the following contents:
[hadoop@master local]$ su root
Password: 
[root@master local]# vim word1.txt  // contents: hello world
[root@master local]# vim word2.txt  // contents: hello hadoop
[root@master local]# vim word3.txt  // contents: hello china

# Switch back to the hadoop user, create an /input directory on the distributed filesystem with hdfs dfs, and upload the three local files to HDFS:
[hadoop@master hadoop]$ hdfs dfs -ls /
[hadoop@master hadoop]$ hdfs dfs -mkdir /input
[hadoop@master hadoop]$ hdfs dfs -ls /
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2017-03-31 03:31 /input
[hadoop@master hadoop]$ 
[hadoop@master hadoop]$ cd /usr/local/
[hadoop@master local]$ ll | grep word
-rw-r--r--.  1 root   root          12 Mar 31 01:53 word1.txt
-rw-r--r--.  1 root   root          13 Mar 31 01:54 word2.txt
-rw-r--r--.  1 root   root          12 Mar 31 01:54 word3.txt
[hadoop@master local]$ hdfs dfs -put word*.txt /input
[hadoop@master local]$ hdfs dfs -ls /input
Found 3 items
-rw-r--r--   2 hadoop supergroup         12 2017-03-31 03:34 /input/word1.txt
-rw-r--r--   2 hadoop supergroup         13 2017-03-31 03:34 /input/word2.txt
-rw-r--r--   2 hadoop supergroup         12 2017-03-31 03:34 /input/word3.txt
[hadoop@master local]$ 

# Run wordcount over all files in /input, writing the results to /out
[hadoop@master local]$ hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount /input /out
(output omitted for brevity)

# View the output files
[hadoop@master local]$ hdfs dfs -ls /out
Found 2 items
-rw-r--r--   2 hadoop supergroup          0 2017-03-31 03:40 /out/_SUCCESS
-rw-r--r--   2 hadoop supergroup         33 2017-03-31 03:40 /out/part-r-00000
[hadoop@master local]$  hdfs dfs -cat /out/part-r-00000
china   1
hadoop  1
hello   3
world   1
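Note that MapReduce refuses to start a job whose output directory already exists, so /out must be removed before re-running wordcount:

[hadoop@master local]$ hdfs dfs -rm -r /out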

9. Stopping the Services

# Run stop-all.sh to stop the Hadoop services:
[hadoop@master local]$ stop-all.sh
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
master: stopping namenode
slave2: stopping datanode
slave1: stopping datanode
Stopping secondary namenodes [master]
master: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
slave2: stopping nodemanager
slave1: stopping nodemanager
no proxyserver to stop
[hadoop@master local]$ 

10. Notes
(1) Be meticulous when editing the configuration files; a typo in any of the XML files will keep the cluster from starting.
(2) Before starting the cluster, make sure the firewalls on master, slave1, and slave2 are disabled (see the commands after the report below), otherwise inter-node communication is blocked and, for example, hdfs dfsadmin -report shows an empty cluster:
[hadoop@master dfs]$ hdfs dfsadmin -report
Configured Capacity: 0 (0 B)
Present Capacity: 0 (0 B)
DFS Remaining: 0 (0 B)
DFS Used: 0 (0 B)
DFS Used%: NaN%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
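On RedHat 6.x, the firewall can be stopped immediately and kept off across reboots with something like the following, run as root on all three nodes (adjust to your site's security policy):

[root@master ~]# service iptables stop
[root@master ~]# chkconfig iptables off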
