sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
sudo vi /etc/profile
#set java environment
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
source /etc/profile
验证java环境是否配好
java -version
主机名 | ip | 角色 |
---|---|---|
Master | 10.5.0.196 | NameNode、ResourceManager、SecondaryNameNode |
Slave1 | 10.5.0.231 | DataNode、NodeManager |
Slave2 | 10.5.0.232 | DataNode、NodeManager |
Slave3 | 10.5.0.233 | DataNode、NodeManager |
配置Master
修改 /etc/hostname 内容为Master (Slave1为Slave1…)
/etc/hosts 添加内容行
10.5.0.196 Master
10.5.0.231 Slave1
10.5.0.232 Slave2
10.5.0.233 Slave3
useradd hadoop
passwd hadoop
设置管理员权限
sudo visudo   # 使用 visudo 编辑 /etc/sudoers,保存时会做语法检查,避免改坏后无法 sudo
# User privilege specification
root ALL=(ALL:ALL) ALL
hadoop ALL=(ALL:ALL) ALL
sudo apt-get install openssh-server
sudo su hadoop
cd /home/hadoop
ssh-keygen -t rsa   # 一路回车,生成密钥
cd .ssh
cp id_rsa.pub authorized_keys
或者 cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
将公钥拷到Slave上
ssh-copy-id -i $HOME/.ssh/id_rsa.pub hadoop@Slave1
#或者 scp authorized_keys hadoop@Slave1:/home/hadoop/.ssh/
wget http://archive.cloudera.com/cdh5/cdh/5/hadoop-2.6.0-cdh5.4.1.tar.gz
sudo mkdir -p /usr/local/cloudera && tar -zxvf hadoop-2.6.0-cdh5.4.1.tar.gz -C /usr/local/cloudera
# 注意:压缩包内已含 hadoop-2.6.0-cdh5.4.1 顶层目录,-C 只需指定父目录,否则会多嵌套一层
sudo chown hadoop:hadoop -R /usr/local/cloudera
sudo mkdir -p /usr/local/cloudera/hadoop_tmp/hdfs/namenode
sudo mkdir -p /usr/local/cloudera/hadoop_tmp/hdfs/datanode
sudo chown hadoop:hadoop -R /usr/local/cloudera/hadoop_tmp
配置 ~/.bashrc
sudo vi $HOME/.bashrc
添加下列内容到文件尾部
# HADOOP VARIABLES START
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export HADOOP_HOME=/usr/local/cloudera/hadoop-2.6.0-cdh5.4.1
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"
# HADOOP VARIABLES END
source ~/.bashrc
cd $HADOOP_HOME/etc/hadoop
sudo vi hadoop-env.sh
修改JAVA_HOME的值
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
sudo vi slaves
Slave1
Slave2
Slave3
sudo vi masters
Master
core-site.xml、hdfs-site.xml、yarn-site.xml、mapred-site.xml
<configuration>
<!-- file system properties -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://Master:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
</configuration>
/hdfs/namenode、/hdfs/datanode 目录需要自己新建
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/cloudera/hadoop_tmp/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/cloudera/hadoop_tmp/hdfs/datanode</value>
</property>
</configuration>
<!-- Site specific YARN configuration properties -->
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>Master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>Master:8025</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>Master:8035</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>Master:8050</value>
</property>
</configuration>
<configuration>
<property>
<name>mapreduce.job.tracker</name>
<value>Master:5431</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
基本项配置完毕
sudo apt-get install rsync
sudo rsync -avxP /usr/local/cloudera/ hadoop@Slave1:/usr/local/cloudera/
sudo rsync -avxP /usr/local/cloudera/ hadoop@Slave2:/usr/local/cloudera/
sudo rsync -avxP /usr/local/cloudera/ hadoop@Slave3:/usr/local/cloudera/
hadoop namenode -format
最后显示如下内容表示成功
15/05/15 22:48:01 INFO util.ExitUtil: Exiting with status 0
15/05/15 22:48:01 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at Master/10.5.0.196
************************************************************/
./sbin/start-dfs.sh
./sbin/start-yarn.sh
访问http://Master:50070
因为 hadoop-2.6.0-cdh5.4.1/lib/native 的静态库文件不存在
运行hadoop命令的时候会出现如下警告信息
15/05/17 10:46:49 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
开启调试模式
export HADOOP_ROOT_LOGGER=DEBUG,console
在执行hadoop命令的时候出现如下信息
15/05/17 16:46:48 DEBUG util.NativeCodeLoader: Trying to load the custom-built native-hadoop library...
15/05/17 16:46:48 DEBUG util.NativeCodeLoader: Failed to load native-hadoop with error: java.lang.UnsatisfiedLinkError: no hadoop in java.library.path
15/05/17 16:46:48 DEBUG util.NativeCodeLoader: java.library.path=/usr/local/cloudera/hadoop-2.6.0-cdh5.4.1/lib/native
15/05/17 16:46:48 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
15/05/17 16:46:48 DEBUG util.PerformanceAdvisory: Falling back to shell based
cd /usr/local/cloudera/hadoop-2.6.0-cdh5.4.1/lib/native
发现hadoop的本地库文件不存在
运行$HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.0-cdh5.4.1.jar 这个jar包里的WordCount程序,作用是统计单词的个数。
mkdir file
cp *.txt ./file  # 将找到的txt文件都复制到 file 目录,稍后上传到 HDFS 的 /input
hadoop fs -mkdir /input
hadoop fs -put ./file/*.txt /input/
hadoop fs -ls /input/
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.0-cdh5.4.1.jar wordcount /input/ /output/
hadoop fs -ls /output
-rw-r--r-- 1 hadoop supergroup 0 2015-05-17 12:27 /output/_SUCCESS
-rw-r--r-- 1 hadoop supergroup 9190 2015-05-17 12:27 /output/part-r-00000
hadoop fs -cat /output/part-r-00000