0-1 伪分布hadoop和hbase搭建

Hadoop-2.2.0伪分布式和hbase-0.96.0-hadoop2伪分布式搭建

1. 部署

(1)主机名

gedit /etc/hostname  

master1

sudo gedit /etc/hosts 

127.0.0.1 master1

(2)SSH

sudo apt-get install ssh

mkdir /home/hadoop/.ssh

ssh-keygen -t rsa  

cat id_rsa.pub >> authorized_keys

ssh localhost

ssh master1

(3)环境变量

export ANT_HOME=/opt/apache-ant-1.9.3

export MAVEN_HOME=/opt/apache-maven-3.0.5

export FINDBUGS_HOME=/opt/findbugs-2.0.2

export PATH=${ANT_HOME}/bin:${MAVEN_HOME}/bin:${FINDBUGS_HOME}/bin:$PATH

#jdk

export JAVA_HOME=/opt/jdk1.7.0_45

export JRE_HOME=/opt/jdk1.7.0_45/jre

export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH

export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH

#hadoop

export HADOOP_HOME=/opt/hadoop-2.2.0

export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

#hbase

export HBASE_HOME=/opt/hbase-0.96.0-hadoop2

export PATH=$PATH:$HBASE_HOME/bin

(4)放置

/opt/jdk1.7.0_45

/opt/hadoop-2.2.0

/opt/hbase-0.96.0-hadoop2

(5)本地目录创建(根据下面的配置文件)

mkdir -p /data/tmp_hadoop /data/hdfs/nn /data/hdfs/dn /data/tmp_hbase /data/yarn/local /data/yarn/logs /data/log/hadoop-hdfs /data/log/hadoop-yarn /data/log/hadoop-mapred

2. Hadoop-2.2.0配置

 cd  /opt/hadoop-2.2.0/etc/hadoop

(1)        core-site.xml

      hadoop.tmp.dir

      /data/tmp_hadoop

        

            

                                           

 fs.defaultFS                          

 hdfs://localhost:9000

     fs.trash.interval

     1440

        

     fs.trash.checkpoint.interval

     1440

        

         io.file.buffer.size

         131072

        

       dfs.blocksize

       67108864

       

(2)hadoop-env.sh

export JAVA_HOME=/opt/jdk1.7.0_45

export HADOOP_LOG_DIR=/data/log/hadoop-hdfs

export YARN_LOG_DIR=/data/log/hadoop-yarn

export HADOOP_MAPRED_LOG_DIR=/data/log/hadoop-mapred

(3) hdfs-site.xml

                                           

      dfs.namenode.name.dir                          

       /data/hdfs/nn

             

             

                                         

       dfs.datanode.data.dir                   

        /data/hdfs/dn

             

             

    

   dfs.namenode.secondary.http-address

   localhost:9001            

 

       dfs.replication 

       1 

      

(4) mapred-site.xml

        mapreduce.framework.name   

        yarn

         

       mapreduce.shuffle.port

       13562

    mapreduce.jobhistory.address   

    localhost:10020                       

    mapreduce.jobhistory.webapp.address

    localhost:19888                                        

(5) masters

localhost

(6) slaves

localhost

(7) yarn-env.sh

export JAVA_HOME=/opt/jdk1.7.0_45

(8) yarn-site.xml

      yarn.resourcemanager.webapp.address

       localhost:8088

             

      yarn.resourcemanager.admin.address

      localhost:8033

         

       yarn.resourcemanager.address

       localhost:8032

             

       yarn.resourcemanager.scheduler.address

       localhost:8030

             

      yarn.resourcemanager.resource-tracker.address

     localhost:8031

       yarn.nodemanager.aux-services

       mapreduce_shuffle

      yarn.nodemanager.aux-services.mapreduce.shuffle.class

      org.apache.hadoop.mapred.ShuffleHandler

(9) 启动hadoop

sudo chown -R hadoop:hadoop hadoop-2.2.0/

hadoop namenode -format

start-dfs.sh

jps一下

NameNode

DataNode

SecondaryNameNode

localhost:50070

start-yarn.sh

jps一下:

ResourceManager

NodeManager

localhost:8088

mr-jobhistory-daemon.sh  start historyserver

jps一下:

JobHistoryServer

localhost:19888

3. Hbase-0.96.0-hadoop2配置

(1)hbase-env.sh

export JAVA_HOME=/opt/jdk1.7.0_45

export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH

export HBASE_MANAGES_ZK=true

export HBASE_CLASSPATH=/opt/hadoop-2.2.0/etc/hadoop

export  HBASE_LOG_DIR=/data/hbase/logs

(2)hbase-site.xml

    hbase.rootdir

    hdfs://localhost:9000/hbase

   hbase.cluster.distributed

   true

   hbase.tmp.dir

   /data/tmp_hbase

    hbase.zookeeper.property.dataDir

    /data/hbase/zookeeper

   

    hbase.zookeeper.property.clientPort

     2181

   hbase.zookeeper.quorum

    localhost

(3)regionservers

localhost

(4)启动hbase

sudo chown -R hadoop:hadoop hbase-0.96.0-hadoop2/

start-hbase.sh

jps一下:

HQuorumPeer

HRegionServer

HMaster

 访问:localhost:60010

你可能感兴趣的:(hadoop集群)