Hadoop 2.7.7 installation and clustering (also applies to Hadoop 3.1.2 and Docker containers)

Prepare three machines: hadoop2 (master), hadoop3 and hadoop4.

  1. vi /etc/profile.d/hadoop.sh

    
    export JAVA_HOME=/usr/local/src/jdk1.8.0_92
    export JRE_HOME=${JAVA_HOME}/jre
    export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
    export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
    export PATH=$PATH:${JAVA_PATH}
    
    
    export HADOOP_HOME=/usr/local/src/hadoop-2.7.7
    export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
    
    export HDFS_DATANODE_USER=root
    export HDFS_DATANODE_SECURE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export HDFS_NAMENODE_USER=root
    export YARN_RESOURCEMANAGER_USER=root
    export YARN_NODEMANAGER_USER=root 

    At least one of mapred-env.sh, hadoop-env.sh and yarn-env.sh must set JAVA_HOME.
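    A minimal sketch to apply the new profile and pin JAVA_HOME for the Hadoop scripts (paths taken from the exports above; adjust if your layout differs):

    source /etc/profile.d/hadoop.sh
    java -version && hadoop version
    # Daemons started over ssh may not read /etc/profile, so set JAVA_HOME in hadoop-env.sh as well
    echo 'export JAVA_HOME=/usr/local/src/jdk1.8.0_92' >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh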

  2. core-site.xml: configure the HDFS address and port, and the temporary file directory

    For more options, see the core-site.xml reference.

    <configuration>
        <property>
            <name>fs.default.name</name>
            <value>hdfs://hadoop2:9091</value>
        </property>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>/data/docker/hadoop/tmp</value>
        </property>
    </configuration>
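    Note that fs.default.name is the deprecated alias of fs.defaultFS; it still works but logs a warning. A quick sanity check once the file is in place:

    hdfs getconf -confKey fs.defaultFS    # should print hdfs://hadoop2:9091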
    
    
  3. hdfs-site.xml: configure HDFS properties such as the replication factor and the data/metadata storage paths

    For more options, see the hdfs-site.xml reference.

    dfs.namenode.name.dir and dfs.datanode.data.dir are not configured beyond a single directory here; the values suggested on the official site are higher-end settings aimed at larger clusters.

    Note: these directories are local to each machine; do not put them on the shared volume mounted with --volumes-from data_docker.

    Run this on all three machines (a sketch for doing it over SSH in one pass follows the command below):

    mkdir -p /opt/hadoop/tmp && mkdir -p /opt/hadoop/dfs/data && mkdir -p /opt/hadoop/dfs/name
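    Once the passwordless SSH from step 6 is set up, a small loop run on hadoop2 can create the same directories on the other two nodes (hostnames as used throughout this guide):

    for h in hadoop3 hadoop4; do
        ssh root@$h 'mkdir -p /opt/hadoop/tmp /opt/hadoop/dfs/data /opt/hadoop/dfs/name'
    done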

    
        
    <configuration>
        <property>
            <name>dfs.namenode.http-address</name>
            <value>hadoop2:9092</value>
        </property>
        <property>
            <name>dfs.replication</name>
            <value>2</value>
        </property>
        <property>
            <name>dfs.namenode.name.dir</name>
            <value>file:/opt/hadoop/dfs/name</value>
        </property>
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>file:/opt/hadoop/dfs/data</value>
        </property>
        <property>
            <name>dfs.namenode.handler.count</name>
            <value>100</value>
        </property>
    </configuration>
    
    
  4. mapred-site.xml: configure MapReduce jobs to run on the YARN framework

    For more options, see the mapred-site.xml reference.
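    In the 2.7.x line this file is not present by default and has to be created from the bundled template (3.x already ships mapred-site.xml); the mapreduce.application.classpath entries below use the 3.1.2 paths and are mainly needed on Hadoop 3:

    cd $HADOOP_HOME/etc/hadoop
    cp mapred-site.xml.template mapred-site.xml    # needed on 2.7.x only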

    
          
    <configuration>
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
        <property>
            <name>mapreduce.jobhistory.address</name>
            <value>hadoop2:9094</value>
        </property>
        <property>
            <name>mapreduce.jobhistory.webapp.address</name>
            <value>hadoop2:9095</value>
        </property>
        <property>
            <name>mapreduce.application.classpath</name>
            <value>
                /usr/local/src/hadoop-3.1.2/etc/hadoop,
                /usr/local/src/hadoop-3.1.2/share/hadoop/common/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/common/lib/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/hdfs/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/hdfs/lib/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/mapreduce/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/mapreduce/lib/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/yarn/*,
                /usr/local/src/hadoop-3.1.2/share/hadoop/yarn/lib/*
            </value>
        </property>
    </configuration>
            
        
    
  5. yarn-site.xml
    For more options, see the yarn-site.xml reference.

    
      
    <configuration>
        <property>
            <name>yarn.resourcemanager.hostname</name>
            <value>hadoop2</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
        <property>
            <name>yarn.resourcemanager.webapp.address</name>
            <value>hadoop2:9093</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
            <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>
    </configuration>
      
    
    
    
  6. Configure passwordless SSH login

    yum -y install openssh-server openssh-clients
    
    ssh-keygen -q -t rsa -b 2048 -f /etc/ssh/ssh_host_rsa_key -N ''  
    ssh-keygen -q -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
    ssh-keygen -q -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
    ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa     # -P '' and -f avoid any interactive prompts
    
    # Go into ~/.ssh
    cp id_rsa.pub authorized_keys
    cp authorized_keys /data/docker/hadoop/    # copy to the shared volume
    
    # On the other Docker containers:
    # 1. Complete the steps above (1-4) in order
    # 2. On hadoop3 and hadoop4:
    cp /data/docker/hadoop/authorized_keys  ~/.ssh
    cat id_rsa.pub >> authorized_keys
    cp authorized_keys /data/docker/hadoop/authorized_keys  # overwrite the shared copy
    
    # Back on the hadoop2 container:
    cp /data/docker/hadoop/authorized_keys authorized_keys   # overwrite with the shared copy, which now contains all three public keys
    
    # Test
    # Start sshd on hadoop3 and hadoop4 first
    /usr/sbin/sshd
    
    ssh root@hadoop3
    ssh root@hadoop4
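    As an alternative to shuffling authorized_keys through the shared volume, ssh-copy-id (shipped with openssh-clients) can push each node's key directly, assuming sshd is already running and root has a password on every container:

    for h in hadoop2 hadoop3 hadoop4; do
        ssh-copy-id root@$h    # prompts once for that node's root password
    done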
  7. Configure hosts

    172.17.0.9    hadoop2
    172.17.0.10    hadoop3
    172.17.0.11    hadoop4

    These entries must match the --add-host mappings used in the docker run commands of step 9; Docker assigns container IPs in start order, so verify the actual addresses with docker inspect.
  8. Configure workers to define the worker nodes

    vi /usr/local/src/hadoop-3.1.2/etc/hadoop/workers (in the 2.7 line the file is named slaves instead)

    hadoop2                     # can act as both namenode and datanode; no need to waste a machine
    hadoop3                     # datanode only
    hadoop4                     # datanode only
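    A one-liner to write the file, sketched for the 2.7.7 layout (use etc/hadoop/workers instead on 3.x):

    printf '%s\n' hadoop2 hadoop3 hadoop4 > /usr/local/src/hadoop-2.7.7/etc/hadoop/slaves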
  9. Stop the Docker container and create an image from it

    172.17.0.0/24: 256 addresses in total, usable host IPs 172.17.0.1-172.17.0.254, netmask 255.255.255.0

    172.17.0.0/16: 65536 addresses in total, usable host IPs 172.17.0.1-172.17.255.254, netmask 255.255.0.0

docker commit hadoop2 centos:hadoop    # commit as the centos:hadoop image used by the run commands below
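Before re-running containers under the same names, any existing ones have to be stopped and removed first (a sketch; errors for containers that do not exist yet can be ignored):

for c in hadoop2 hadoop3 hadoop4; do
    docker stop $c 2>/dev/null
    docker rm $c 2>/dev/null
done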

docker run --privileged -tdi --volumes-from data_docker --name hadoop2 --hostname hadoop2 --add-host hadoop2:172.17.0.8 --add-host hadoop3:172.17.0.9 --add-host hadoop4:172.17.0.10 --link mysqlcontainer:mysqlcontainer  -p 5002:22 -p 8088:8088 -p 9090:9090 -p 9091:9091  -p 9092:9092  -p 9093:9093  -p 9094:9094  -p 9095:9095  -p 9096:9096  -p 9097:9097  -p 9098:9098  -p 9099:9099 centos:hadoop /bin/bash 

docker run --privileged -tdi --volumes-from data_docker --name hadoop3 --hostname hadoop3 --add-host hadoop2:172.17.0.8 --add-host hadoop3:172.17.0.9 --add-host hadoop4:172.17.0.10 --link mysqlcontainer:mysqlcontainer  -p 5003:22  centos:hadoop /bin/bash 

docker run --privileged -tdi --volumes-from data_docker --name hadoop4 --hostname hadoop4 --add-host hadoop2:172.17.0.8 --add-host hadoop3:172.17.0.9 --add-host hadoop4:172.17.0.10 --link mysqlcontainer:mysqlcontainer  -p 5004:22  centos:hadoop /bin/bash 
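The containers are started with /bin/bash, so sshd does not come up on its own; start it in each container once they are running (a sketch using docker exec):

for c in hadoop2 hadoop3 hadoop4; do
    docker exec $c /usr/sbin/sshd
done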
  10. Start the cluster

    For the first start only, format the NameNode on hadoop2: hdfs namenode -format

    Near the end of the output you should see: util.ExitUtil: Exiting with status 0

    start-all.sh prints "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh", so start the daemons with start-dfs.sh and start-yarn.sh.
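    A typical start sequence on hadoop2, including the JobHistory server behind the mapreduce.jobhistory.* addresses from step 4 (script names as found in the 2.7.7 sbin directory):

    start-dfs.sh
    start-yarn.sh
    mr-jobhistory-daemon.sh start historyserver    # serves hadoop2:9094 / hadoop2:9095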

#start-dfs.sh----------------------
# jps on the master should show:
5252 DataNode
5126 NameNode
5547 Jps
5423 SecondaryNameNode

# jps on a slave should show:
1131 Jps
1052 DataNode
# start-yarn.sh------------------
# jps on the master should show:
5890 NodeManager
5252 DataNode
5126 NameNode
6009 Jps
5423 SecondaryNameNode
5615 ResourceManager

# jps on a slave should show:
1177 NodeManager
1052 DataNode
1309 Jps
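With everything running, the standard YARN CLI gives a quick cross-check of the cluster:

yarn node -list    # should list three running NodeManagers (hadoop2, hadoop3, hadoop4)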

Access the web UIs:

http://hadoop2:9093 (YARN ResourceManager, yarn.resourcemanager.webapp.address)

http://hadoop2:9092 (HDFS NameNode, dfs.namenode.http-address)

Trying out Hadoop

Prepare a test file:

cat test.txt 

hadoop mapreduce hive
hbase spark storm
sqoop hadoop hive
spark hadoop


# hdfs dfs            # run without arguments to see the help
# Create a directory in HDFS
hadoop fs -mkdir /input
hadoop fs -ls /
# Upload the file
hadoop fs -put test.txt /input
hadoop fs -ls /input
# Run the wordcount example that ships with Hadoop
# hadoop-mapreduce-examples-2.7.7.jar contains many small example programs
yarn jar /usr/local/src/hadoop-2.7.7/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar wordcount /input/test.txt /output 
hadoop fs -ls /output
-rw-r--r--   2 root supergroup          0 2019-06-03 01:28 /output/_SUCCESS
-rw-r--r--   2 root supergroup         60 2019-06-03 01:28 /output/part-r-00000
# View the result
hadoop fs -cat /output/part-r-00000
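For the test.txt above, the result should look like this (counts computed from the four input lines):

hadoop	3
hbase	1
hive	2
mapreduce	1
spark	2
sqoop	1
storm	1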

# List the other built-in example programs
hadoop jar /usr/local/src/hadoop-2.7.7/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar 
# Shows the usage of the grep example
hadoop jar /usr/local/src/hadoop-2.7.7/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar grep  

Job information can be seen at http://hadoop2:9093.

Other Hadoop commands

# Check HDFS capacity
hadoop fs -df -h
Filesystem              Size   Used  Available  Use%
hdfs://hadoop2:9091  150.1 G  412 K    129.9 G    0%
# Check the status of each node
hdfs dfsadmin -report

This article was originally produced by Hichannel (http://www.hichannel.net); please credit the source when reposting.
For more technical documents and Hichannel products, visit the official Hichannel website.
