Hadoop Installation and Deployment

Step 1: Preparation (a command sketch follows this list)
1. Get the JDK environment ready
2. Disable the firewall
3. Disable SELinux
4. Install SSH
5. Edit the hosts file
6. Edit the hostname
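A minimal sketch of steps 2-6 on a RHEL/CentOS 6-style host; the service names, the hadoop user, and the 192.168.1.101 address are assumptions, so adjust them for your distribution and network:

# 2. Disable the firewall
service iptables stop
chkconfig iptables off
# 3. Disable SELinux: set SELINUX=disabled in /etc/selinux/config, then
setenforce 0                                  # turn enforcement off for the current session
# 4. Passwordless SSH for the user that will run Hadoop
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
# 5. Map the hostname used throughout the configs below
echo "192.168.1.101 hadoop-001" >> /etc/hosts  # IP is a placeholder
# 6. Set the hostname (persist it in /etc/sysconfig/network as well)
hostname hadoop-001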

Step 2: Download CDH
http://archive.cloudera.com/cdh4/cdh/4/
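For example, to fetch and unpack the Hadoop tarball matching the HADOOP_HOME used below (the exact file name on the archive is an assumption derived from that path):

wget http://archive.cloudera.com/cdh4/cdh/4/hadoop-2.0.0-cdh4.4.0.tar.gz
mkdir -p /home/hadoop/cdh4.4.0
tar -zxf hadoop-2.0.0-cdh4.4.0.tar.gz -C /home/hadoop/cdh4.4.0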

Step 3: Configure environment variables

export JAVA_HOME=/home/bigdata/jdk1.7.0_45  
export HADOOP_HOME=/home/hadoop/cdh4.4.0/hadoop-2.0.0-cdh4.4.0  
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME  
export HADOOP_MAPRED_HOME=$HADOOP_HOME  
export HADOOP_YARN_HOME=$HADOOP_HOME  
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop  
export HDFS_CONF_DIR=$HADOOP_HOME/etc/hadoop  
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop  
export HADOOP_LIB=$HADOOP_HOME/lib  
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native  

export PATH=$PATH:/etc/haproxy/sbin/:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$HADOOP_LIB/native/libhadoop.so  
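These exports typically go into the Hadoop user's ~/.bashrc (or /etc/profile). Reload the file and check that the binaries resolve:

source ~/.bashrc
java -version       # should report JDK 1.7.0_45
hadoop version      # should report Hadoop 2.0.0-cdh4.4.0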

Step 4: Configuration files
core-site.xml

<configuration>
  <property>
      <name>fs.default.name</name>
      <value>hdfs://hadoop-001:8020</value>
  </property>
  <property>
      <name>hadoop.tmp.dir</name>
      <value>/hadoop/tmp</value>
  </property>
  <property>
      <name>fs.trash.interval</name>
      <value>10080</value>
  </property>
  <property>
      <name>fs.trash.checkpoint.interval</name>
      <value>10080</value>
  </property>
  <!--
  <property>
      <name>io.compression.codecs</name>
      <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
  <property>
      <name>io.compression.codec.lzo.class</name>
      <value>com.hadoop.compression.lzo.LzoCodec</value>
  </property>
  -->
  <!-- OOZIE -->
  <property>
      <name>hadoop.proxyuser.hadoop.hosts</name>
      <value>hadoop-001</value>
  </property>
  <property>
      <name>hadoop.proxyuser.hadoop.groups</name>
      <value>hadoop</value>
  </property>
</configuration>
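Note: fs.default.name is the legacy Hadoop 1 key; CDH4 still accepts it (with a deprecation warning), but Hadoop 2 prefers fs.defaultFS. The two hadoop.proxyuser.* properties let Oozie, running as the hadoop user on hadoop-001, impersonate other users.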

hdfs-site.xml

<configuration>
  <property>
      <name>dfs.replication</name>
      <value>2</value>
  </property>
  <!--
  <property>
      <name>hadoop.tmp.dir</name>
      <value>/hadoop/tmp</value>
  </property>
  -->
  <property>
      <name>dfs.namenode.name.dir</name>
      <value>file:/hadoop/name</value>
      <final>true</final>
  </property>
  <property>
      <name>dfs.datanode.data.dir</name>
      <value>file:/hadoop/data</value>
      <final>true</final>
  </property>
  <property>
      <name>dfs.permissions</name>
      <value>false</value>
  </property>
  <property>
      <name>dfs.namenode.http-address</name>
      <value>hadoop-001:50070</value>
  </property>
  <property>
      <name>dfs.secondary.http.address</name>
      <value>hadoop-001:50090</value>
  </property>
  <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
  </property>
  <!-- for Impala
  <property>
      <name>dfs.client.read.shortcircuit</name>
      <value>true</value>
  </property>
  <property>
      <name>dfs.domain.socket.path</name>
      <value>/var/run/hadoop-hdfs/dn._PORT</value>
  </property>
  <property>
      <name>dfs.client.file-block-storage-locations.timeout</name>
      <value>3000</value>
  </property>
  <property>
      <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
      <value>true</value>
  </property>
  -->
</configuration>

yarn-site.xml

<configuration>
  <property>
      <name>yarn.resourcemanager.resource-tracker.address</name>
      <value>hadoop-001:18025</value>
  </property>
  <property>
      <name>yarn.resourcemanager.address</name>
      <value>hadoop-001:18040</value>
  </property>
  <property>
      <name>yarn.resourcemanager.scheduler.address</name>
      <value>hadoop-001:18030</value>
  </property>
  <property>
      <name>yarn.resourcemanager.admin.address</name>
      <value>hadoop-001:18141</value>
  </property>
  <property>
      <name>yarn.resourcemanager.webapp.address</name>
      <value>hadoop-001:8088</value>
  </property>
  <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce.shuffle</value>
  </property>
  <property>
      <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
      <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
      <name>yarn.application.classpath</name>
      <value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
  </property>
</configuration>

mapred-site.xml

<configuration>    
   <property>    
      <name>mapreduce.framework.name</name>    
      <value>yarn</value>    
  </property>    
  <property>    
      <name>mapreduce.jobhistory.address</name>    
      <value>hadoop-001:10020</value>    
  </property>    
  <property>    
      <name>mapreduce.jobhistory.webapp.address</name>    
      <value>hadoop-001:19888</value>    
  </property>    
  <property>    
      <name>mapreduce.job.tracker</name>    
      <value>hadoop-001:8021</value>    
      <final>true</final>
  </property>    
  <property>    
      <name>mapred.system.dir</name>    
      <value>file:/hadoop/mapred/system</value>    
      <final>true</final>
  </property>    
  <property>    
       <name>mapred.local.dir</name>    
       <value>file:/hadoop/mapred/local</value>    
       <final>true</final>
  </property>    
  <property>      
      <name>mapred.child.env</name>      
      <value>LD_LIBRARY_PATH=/usr/local/lib</value>      
  </property>     
  <!--<property>    
      <name>mapreduce.map.output.compress</name>    
      <value>true</value>    
  </property>    
  <property>    
      <name>mapreduce.map.output.compress.codec</name>    
      <value>com.hadoop.compression.lzo.LzoCodec</value>    
  </property>-->    
</configuration>

Step 5: Create the directories referenced in the config files above (see the commands below)
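Collecting the paths used above (hadoop.tmp.dir, dfs.namenode.name.dir, dfs.datanode.data.dir, mapred.system.dir, mapred.local.dir), run something like the following as root; the hadoop user and group are assumptions:

mkdir -p /hadoop/tmp /hadoop/name /hadoop/data /hadoop/mapred/system /hadoop/mapred/local
chown -R hadoop:hadoop /hadoop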

Step 6: Format the NameNode
hadoop namenode -format
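On Hadoop 2 / CDH4 this command still works but prints a deprecation warning; the current form is hdfs namenode -format. Either one initializes the directory set in dfs.namenode.name.dir (/hadoop/name above).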

Step 7: Start the services (commands sketched below)
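With the tarball install above, the stock scripts in $HADOOP_HOME/sbin can start everything, and jps is a quick way to confirm the daemons are up; the history server is optional but matches the mapreduce.jobhistory.* settings:

start-dfs.sh                                   # NameNode, SecondaryNameNode, DataNodes
start-yarn.sh                                  # ResourceManager, NodeManagers
mr-jobhistory-daemon.sh start historyserver    # serves hadoop-001:19888
jps                                            # list the running Java daemons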
