HDFS File System
Distributed Storage Environment
Installing and Deploying HDFS
1. Preparation
Prepare 3 machines and set up their hosts files, for example as shown below.
One machine serves as the NameNode: cc-staging-session2, named master.
The other two serve as DataNodes: cc-staging-front, named slave1, and cc-staging-imcenter, named slave2.
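A minimal /etc/hosts layout for the three machines might look like the following sketch (the IP addresses are placeholders; substitute your own):
# /etc/hosts on all 3 machines (example IPs)
192.168.1.10  cc-staging-session2  master
192.168.1.11  cc-staging-front     slave1
192.168.1.12  cc-staging-imcenter  slave2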
# Create a hadoop user on all 3 machines
useradd hadoop
passwd hadoop
# Install the JDK and set JAVA_HOME and PATH
# Download and install JDK 1.7
http://www.oracle.com/technetwork/java/javase/downloads/index.html
tar zxvf jdk-7u21-linux-x64.gz -C /usr/local/
# Add the environment variables to /etc/profile
pathmunge /usr/local/jdk1.7.0_21/bin
export JAVA_HOME=/usr/local/jdk1.7.0_21/
export JRE_HOME=/usr/local/jdk1.7.0_21/jre
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
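As a quick sanity check (assuming the paths above), reload the profile and confirm the JDK is picked up:
# Verify the JDK installation and environment variables
source /etc/profile
java -version        # should report java version "1.7.0_21"
echo $JAVA_HOME      # should print /usr/local/jdk1.7.0_21/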
2. Download and install Hadoop
# Download the Hadoop (CDH3) tarballs
Download page: https://ccp.cloudera.com/display/SUPPORT/CDH3+Downloadable+Tarballs
wget http://archive.cloudera.com/cdh/3/hadoop-0.20.2-cdh3u6.tar.gz
wget http://archive.cloudera.com/cdh/3/hbase-0.90.6-cdh3u6.tar.gz
wget http://archive.cloudera.com/cdh/3/hive-0.7.1-cdh3u6.tar.gz
# Create the same directory layout on all 3 machines. The name directory is only used on the master and must have 755 permissions, otherwise the format step later on will fail
mkdir -p /hadoop/{install,name,data1,data2,tmp}
# Extract the Hadoop tarball into /hadoop/install
tar zxvf hadoop-0.20.2-cdh3u6.tar.gz -C /hadoop/install/
# Change the owner to the hadoop user
chown -R hadoop:hadoop /hadoop
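Before moving on, it may be worth confirming ownership and the 755 mode on the name directory mentioned above (a sketch; the chmod is only needed if the mode is not already correct):
# Check ownership and permissions, especially on /hadoop/name
ls -ld /hadoop /hadoop/name /hadoop/data1 /hadoop/data2
chmod 755 /hadoop/name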
3. Set up SSH trust for the hadoop account
# Run these steps on the master machine
su - hadoop
ssh-keygen
ssh-copy-id -i .ssh/id_rsa.pub hadoop@cc-staging-front
ssh-copy-id -i .ssh/id_rsa.pub hadoop@cc-staging-imcenter
ssh-copy-id -i .ssh/id_rsa.pub hadoop@cc-staging-session2
# Test the logins; each one should succeed without a password
ssh hadoop@master
ssh hadoop@slave1
ssh hadoop@slave2
4. Edit the HDFS configuration files; keep them identical on all nodes
cd /hadoop/install/hadoop-0.20.2-cdh3u6/conf
# core-site.xml: core configuration
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://master:9000</value>
</property>
</configuration>
# hdfs-site.xml: site-level HDFS parameters
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.name.dir</name>
<value>/hadoop/name</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>/hadoop/data1,/hadoop/data2</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/hadoop/tmp</value>
</property>
</configuration>
# Set the JAVA_HOME variable in hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.7.0_21/
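With every node holding the same configuration, a typical next step (sketched here using the paths above) is to copy the conf directory from master to the slaves, then format the NameNode as the hadoop user and start HDFS; this is the format step that the 755 note in step 2 refers to:
# On master, as the hadoop user: push the conf directory to the DataNodes
scp /hadoop/install/hadoop-0.20.2-cdh3u6/conf/* hadoop@slave1:/hadoop/install/hadoop-0.20.2-cdh3u6/conf/
scp /hadoop/install/hadoop-0.20.2-cdh3u6/conf/* hadoop@slave2:/hadoop/install/hadoop-0.20.2-cdh3u6/conf/
# Format the NameNode, then start HDFS
/hadoop/install/hadoop-0.20.2-cdh3u6/bin/hadoop namenode -format
/hadoop/install/hadoop-0.20.2-cdh3u6/bin/start-dfs.sh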