How to install HBase 2.0.1 on CentOS 7 (on Hadoop 2.7.6 and JDK 1.8)

This depends on an existing Hadoop environment; the version used here is hadoop-2.7.6.

1. When choosing the HBase 2.0.1 release, check the official site for the Hadoop versions it supports.

2. Extract the tarball and rename the directory:

tar -zxvf hbase-2.0.1-bin.tar.gz
mv hbase-2.0.1 hbase

3. chown -R hadoop:hadoop hbase  # grant ownership to the hadoop user

On each host, edit /etc/hosts and comment out the 127.0.0.1 line:

[hadoop3@master ~]$ cat /etc/hosts
#127.0.0.1  core  localhost.localdomain localhost
::1    localhost
192.168.145.200  master
192.168.145.201  slave1
192.168.145.202  slave2
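
A minimal sanity check that the names now resolve as intended; run it on each node:

getent hosts master
getent hosts slave1
getent hosts slave2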

Contents of zookeeper/conf/zoo.cfg:

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/home/hadoop3/data/zookeeper/zkdata
dataLogDir=/home/hadoop3/data/zookeeper/zkdatalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
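
Each server.N entry above must be matched by a myid file in dataDir on that host, containing only that node's ID. A sketch, run once per node with the paths configured above:

# on master
echo 1 > /home/hadoop3/data/zookeeper/zkdata/myid
# on slave1
echo 2 > /home/hadoop3/data/zookeeper/zkdata/myid
# on slave2
echo 3 > /home/hadoop3/data/zookeeper/zkdata/myid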

hadoop-2.7.6 etc/hadoop/core-site.xml


    
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://mycluster</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop3/data/tmp</value>
    </property>
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>master:2181,slave1:2181,slave2:2181</value>
    </property>
</configuration>

hdfs-site.xml


    
<configuration>
    <property>
        <name>dfs.nameservices</name>
        <value>mycluster</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.ha.namenodes.mycluster</name>
        <value>nn1,nn2</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.mycluster.nn1</name>
        <value>master:8020</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.mycluster.nn2</name>
        <value>slave1:8020</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.mycluster.nn1</name>
        <value>master:50070</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.mycluster.nn2</name>
        <value>slave1:50070</value>
    </property>
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://master:8485;slave1:8485/mycluster</value>
    </property>
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/home/hadoop3/data/journaldata/jn</value>
    </property>
    <property>
        <name>dfs.client.failover.proxy.provider.mycluster</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>
            sshfence
            shell(/bin/true)
        </value>
    </property>
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/home/hadoop3/.ssh/id_rsa</value>
    </property>
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>10000</value>
    </property>
    <property>
        <name>dfs.namenode.handler.count</name>
        <value>100</value>
    </property>
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.datanode.max.xcievers</name>
        <value>8192</value>
    </property>
</configuration>

mapred-site.xml


    
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>
            /home/hadoop3/app/hadoop/etc/hadoop,
            /home/hadoop3/app/hadoop/share/hadoop/common/*,
            /home/hadoop3/app/hadoop/share/hadoop/common/lib/*,
            /home/hadoop3/app/hadoop/share/hadoop/hdfs/*,
            /home/hadoop3/app/hadoop/share/hadoop/hdfs/lib/*,
            /home/hadoop3/app/hadoop/share/hadoop/mapreduce/*,
            /home/hadoop3/app/hadoop/share/hadoop/mapreduce/lib/*,
            /home/hadoop3/app/hadoop/share/hadoop/yarn/*,
            /home/hadoop3/app/hadoop/share/hadoop/yarn/lib/*
        </value>
    </property>
</configuration>

yarn-site.xml




	
<configuration>
    <property>
        <name>yarn.resourcemanager.connect.retry-interval.ms</name>
        <value>2000</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yarn-rm-cluster</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>master</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>slave1</value>
    </property>
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>
    <property>
        <description>The class to use as the persistent store.</description>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    <property>
        <name>yarn.resourcemanager.zk.state-store.address</name>
        <value>master:2181,slave1:2181,slave2:2181</value>
    </property>
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>master:2181,slave1:2181,slave2:2181</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address.rm1</name>
        <value>master:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm1</name>
        <value>master:8034</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address.rm1</name>
        <value>master:8088</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address.rm2</name>
        <value>slave1:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm2</name>
        <value>slave1:8034</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address.rm2</name>
        <value>slave1:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
</configuration>

slaves

master
slave1
slave2
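
Once ZooKeeper and Hadoop are up, it is worth confirming the HA roles before layering HBase on top. A quick check, using the nn1/nn2 and rm1/rm2 IDs defined above:

hdfs haadmin -getServiceState nn1   # expect active or standby
hdfs haadmin -getServiceState nn2
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2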

4. Add the environment variables:

vi /etc/profile

export HBASE_HOME=/home/hadoop3/app/hbase
export PATH=$PATH:$HBASE_HOME/bin

source /etc/profile
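
To confirm the variables took effect:

echo $HBASE_HOME
hbase version   # should report HBase 2.0.1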

5. Configure hbase/conf/hbase-env.sh

export JAVA_HOME=/home/hadoop3/app/jdk
export HBASE_HOME=/home/hadoop3/app/hbase
export HBASE_CONF_DIR=/home/hadoop3/app/hbase/conf
#export HBASE_CLASSPATH=/home/hadoop3/app/hbase/conf
export HBASE_CLASSPATH=/home/hadoop3/app/hadoop/etc/hadoop
export HBASE_MANAGES_ZK=false  # do not use HBase's bundled ZooKeeper
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export HBASE_PID_DIR=/home/hadoop3/data/hbase/pids
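
The daemons write PIDs and logs to the directories configured above, so it does no harm to create them up front on every node (paths as configured):

mkdir -p /home/hadoop3/data/hbase/pids
mkdir -p /home/hadoop3/app/hbase/logs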

6. Configure hbase/conf/hbase-site.xml

When the OS user is root, hbase.tmp.dir=/hbase/tmp is writable.
When the OS user is hadoop3, use hbase.tmp.dir=/home/hadoop3/data/hbase/tmp.


	
<configuration>
    <property>
        <name>hbase.tmp.dir</name>
        <value>/home/hadoop3/data/hbase/tmp</value>
    </property>
    <property>
        <name>hbase.rootdir</name>
        <value>hdfs://mycluster/hbase</value>
    </property>
    <property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
    </property>
    <property>
        <name>hbase.master</name>
        <value>60000</value>
        <description>Port of the HBase cluster master node.</description>
    </property>
    <property>
        <name>hbase.master.info.port</name>
        <value>16010</value>
    </property>
    <property>
        <name>hbase.regionserver.info.port</name>
        <value>16030</value>
    </property>
    <property>
        <name>hbase.zookeeper.quorum</name>
        <value>master,slave1,slave2</value>
        <description>Standalone ZooKeeper ensemble; list exactly the hosts on which you deployed ZooKeeper.</description>
    </property>
    <property>
        <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
        <description>Port used to connect to ZooKeeper; the default is 2181.</description>
    </property>
    <property>
        <name>hbase.zookeeper.property.dataDir</name>
        <value>/home/hadoop3/data/hbase/zookeeper</value>
    </property>
    <property>
        <name>zookeeper.session.timeout</name>
        <value>60000000</value>
    </property>
    <property>
        <name>hbase.regionserver.restart.on.zk.expire</name>
        <value>true</value>
        <description>
            A ZooKeeper session expiry normally forces the regionserver to exit;
            enabling this makes the regionserver restart instead.
        </description>
    </property>
</configuration>

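Because hbase.rootdir points at the HA nameservice mycluster rather than a host:port, HBase must be able to find the Hadoop client configs; the HBASE_CLASSPATH setting above covers this. A common alternative is to link the configs into hbase/conf, sketched here with this setup's paths:

ln -s /home/hadoop3/app/hadoop/etc/hadoop/core-site.xml /home/hadoop3/app/hbase/conf/core-site.xml
ln -s /home/hadoop3/app/hadoop/etc/hadoop/hdfs-site.xml /home/hadoop3/app/hbase/conf/hdfs-site.xml
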
hbase/conf/regionservers

master
slave1
slave2

hbase/conf/backup-masters

slave1
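
The same configuration must be present on every node. A sketch of pushing the configured directory out, assuming the hadoop3 user and the paths used throughout this setup:

scp -r /home/hadoop3/app/hbase hadoop3@slave1:/home/hadoop3/app/
scp -r /home/hadoop3/app/hbase hadoop3@slave2:/home/hadoop3/app/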

7. Start HBase

Start order: ZooKeeper first, then Hadoop, then HBase. Stop order: stop HBase first, then Hadoop.

bin/start-hbase.sh
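
To confirm the daemons came up, jps on each node should show the expected processes (the master also runs a regionserver here, since it is listed in regionservers):

jps
# master: HMaster, HRegionServer, QuorumPeerMain, plus the Hadoop daemons
# slave1: HMaster (backup), HRegionServer, QuorumPeerMain, ...
# slave2: HRegionServer, QuorumPeerMain, ...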

8. Inspect the hbase directory in HDFS

hadoop fs -ls /hbase

9. Enter the HBase shell

hbase shell
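
A short smoke test inside the shell; the table name 'test' and column family 'cf' are just examples:

status
create 'test', 'cf'
put 'test', 'row1', 'cf:a', 'value1'
scan 'test'
disable 'test'
drop 'test'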

10. Stop HBase

bin/stop-hbase.sh

Startup error: org.apache.hadoop.hbase.PleaseHoldException: Master is initializing
Fix:
master> runRemoteCmd.sh "hbase-daemon.sh start regionserver" all
master> runRemoteCmd.sh "hbase-daemon.sh start master" master
That resolves it: start the regionservers first, then the HMaster.
On each regionserver host: ./hbase-daemon.sh start regionserver
On the master host, run: ./hbase-daemon.sh start master
Also make sure that hbase/lib/zookeeper-3.4.12.jar matches the version your ZooKeeper cluster is running.
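
One way to compare the two versions, a minimal check assuming nc is installed (the stat four-letter command is enabled by default in ZooKeeper 3.4):

ls /home/hadoop3/app/hbase/lib | grep zookeeper    # jar bundled with HBase
echo stat | nc master 2181 | grep -i version       # version the ensemble is running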
If stale state is the cause, clear the local HBase data directory and delete the HBase znode in ZooKeeper, then restart:

rm -rf /home/hadoop3/data/hbase

zkCli.sh
rmr /hbase
