hbase-2.1.0 + zookeeper-3.4.10 + hadoop-3.1.1 + elasticsearch-6.4.2 + jdk1.8.0_191 complete from-scratch deployment notes (very detailed)

Tips: this environment is a single-node development environment used for big data product development; the document walks through the entire from-scratch deployment process in detail.

##################### Environment ##########################

# OS version

[troll@standalone softs]$ cat /etc/centos-release
CentOS Linux release 7.5.1804 (Core)

# Software versions

hadoop-3.1.1
hbase-2.1.0
jdk1.8.0_191
scala-2.12.7
zookeeper-3.4.10
elasticsearch-6.4.2

###############################################

Install zookeeper

# Unpack the archive

# unpack zookeeper
tar -zxvf zookeeper-3.4.10.tar.gz -C ~/softs/

# Configuration file
vi conf/zoo.cfg

################ start ################
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/home/troll/data/tmp/zookeeper/zk1/
# the port at which the clients will connect
clientPort=2181
################ end ################

# Create the data directory

# create the zookeeper data directory
mkdir -p /home/troll/data/tmp/zookeeper/zk1/

# Start / stop the service

# start the service
bin/zkServer.sh start
# stop the service
bin/zkServer.sh stop
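
A quick way to confirm that ZooKeeper is actually serving requests (a minimal check against the standalone setup above):

# should report Mode: standalone
bin/zkServer.sh status

# connect with the bundled CLI and list the root znodes
bin/zkCli.sh -server localhost:2181
ls /
quit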

Install hadoop

# Unpack the archive

tar -zxvf hadoop-3.1.1.tar.gz -C ~/softs/

# Configuration files
vi core-site.xml

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/troll/data/hadoop/tmp</value>
    </property>
</configuration>

vi hdfs-site.xml

<configuration>
    <property>
        <name>dfs.name.dir</name>
        <value>/home/troll/data/hadoop/hdfs/name</value>
        <description>where the namenode stores the hdfs namespace metadata</description>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/home/troll/data/hadoop/hdfs/data</value>
        <description>physical storage location of data blocks on the datanode</description>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>

vi mapred-site.xml

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>

vi yarn-site.xml

<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>

vi hadoop-env.sh

# set environment variables
export JAVA_HOME=/home/troll/softs/jdk1.8.0_191

# Create directories

mkdir -p /home/troll/data/hadoop/hdfs/data
mkdir -p /home/troll/data/hadoop/hdfs/name
mkdir -p /home/troll/data/hadoop/tmp

# Set up passwordless SSH (rsa keys are used here; DSA keys are rejected by
# default by the OpenSSH version shipped with CentOS 7)

ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
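
Passwordless login is worth verifying before moving on, since start-dfs.sh will fail later if ssh still prompts for a password (a quick check, no extra assumptions):

# should log in and exit without prompting for a password
ssh localhost exit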

# Format hdfs

cd /home/troll/softs/hadoop-3.1.1
./bin/hdfs namenode -format
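
If the format succeeded, the name directory configured in hdfs-site.xml should now contain a current/ subdirectory (a quick sanity check against the paths used above):

ls /home/troll/data/hadoop/hdfs/name/current
# expected contents include VERSION, seen_txid and an fsimage file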

# Service management

# start the hdfs service
cd /home/troll/softs/hadoop-3.1.1
./sbin/start-dfs.sh

# open the web UI  tips: the first startup can take quite a while; for Hadoop versions below 3.0 the UI port is 50070 instead of 9870
http://standalone.hadoop:9870/dfshealth.html#tab-overview
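
Once the namenode is up, a small read/write round trip confirms HDFS works end to end (a minimal smoke test; the /user/troll path is only an example):

cd /home/troll/softs/hadoop-3.1.1
./bin/hdfs dfs -mkdir -p /user/troll
./bin/hdfs dfs -put etc/hadoop/core-site.xml /user/troll/
./bin/hdfs dfs -ls /user/troll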


# stop the hdfs service
cd /home/troll/softs/hadoop-3.1.1
./sbin/stop-dfs.sh



# start the yarn service
cd /home/troll/softs/hadoop-3.1.1
./sbin/start-yarn.sh

# open the web UI
http://standalone.hadoop:8088/cluster
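
To check that YARN can actually run jobs, the example jar shipped with Hadoop can be submitted (a sketch; if it fails with a classpath error, Hadoop 3 may additionally need mapreduce.application.classpath set in mapred-site.xml):

cd /home/troll/softs/hadoop-3.1.1
./bin/yarn jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar pi 2 10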

# stop the yarn service
cd /home/troll/softs/hadoop-3.1.1
./sbin/stop-yarn.sh



# Individual daemons can also be started and stopped with the daemon scripts
# (jobtracker/tasktracker are Hadoop 1.x daemons and no longer exist in Hadoop 3;
# their YARN counterparts are resourcemanager and nodemanager)

./hadoop-daemon.sh start namenode
./hadoop-daemon.sh start secondarynamenode
./hadoop-daemon.sh start datanode
./yarn-daemon.sh start resourcemanager
./yarn-daemon.sh start nodemanager


./hadoop-daemon.sh stop namenode
./hadoop-daemon.sh stop secondarynamenode
./hadoop-daemon.sh stop datanode
./yarn-daemon.sh stop resourcemanager
./yarn-daemon.sh stop nodemanager
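
Whichever way the services are started, jps from the JDK configured above shows which daemons are actually running:

jps
# with hdfs and yarn both up, expect NameNode, DataNode, SecondaryNameNode,
# ResourceManager and NodeManager in the output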

Install hbase

# Unpack the archive

tar -zxvf hbase-2.1.0-bin.tar.gz -C ~/softs/

# Edit the configuration
cd ~/softs/hbase-2.1.0/
vi conf/hbase-site.xml

<configuration>
    <property>
        <name>hbase.rootdir</name>
        <value>file:///home/troll/data/hbase</value>
    </property>

    <property>
        <name>hbase.zookeeper.quorum</name>
        <value>localhost</value>
    </property>

    <property>
        <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
    </property>

    <property>
        <name>hbase.zookeeper.property.dataDir</name>
        <value>/home/troll/data/tmp/zookeeper/zk1/</value>
    </property>

    <property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
    </property>

    <property>
        <name>zookeeper.znode.parent</name>
        <value>/hbase</value>
    </property>
</configuration>

vi hbase-env.sh

export JAVA_HOME=/home/troll/softs/jdk1.8.0_191
export HBASE_MANAGES_ZK=true

# Create the data directory

mkdir -p /home/troll/data/hbase

# Start the service

 bin/start-hbase.sh 

# Open the web UI

http://standalone.hadoop.com:16010/master-status
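
A short session in the HBase shell verifies that the master and region server are healthy (a minimal smoke test; the table name smoke_test is arbitrary):

bin/hbase shell
create 'smoke_test', 'cf'
put 'smoke_test', 'row1', 'cf:a', 'value1'
scan 'smoke_test'
disable 'smoke_test'
drop 'smoke_test'
exit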

Install elasticsearch

# Unpack the archive

tar -zxvf elasticsearch-6.4.2.tar.gz -C ~/softs/

# Configuration file
vi config/elasticsearch.yml

cluster.name: my-standalone-cluster
node.name: standalone-node-1
node.attr.rack: r1
path.data: /home/troll/data/data/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: false
network.host: 0.0.0.0
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
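
Because network.host is set to a non-loopback address, elasticsearch 6.x enforces its bootstrap checks on startup; on a default CentOS 7 install that usually means raising vm.max_map_count and the open-file limit first (the values below meet the documented minimums and are shown as a sketch; the limits change only takes effect after logging in again):

sudo sysctl -w vm.max_map_count=262144
sudo bash -c 'echo "troll - nofile 65536" >> /etc/security/limits.conf'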

# Create the data and log directories

mkdir -p /home/troll/data/data/elasticsearch

# /var/log is owned by root, so the log directory has to be created with sudo
# and handed over to the user that runs elasticsearch
sudo mkdir -p /var/log/elasticsearch
sudo chown -R troll /var/log/elasticsearch

# Start the service

bin/elasticsearch
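
Once the node is up, a couple of curl calls against the HTTP port configured above confirm it is reachable and healthy:

curl http://localhost:9200/
curl 'http://localhost:9200/_cluster/health?pretty'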
