hadoop的五个配置文件,安装时候用

//core-site.xml

    
     
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/home/hadoop/hadoop-2.7.1/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
</configuration>
     


//hdfs-site.xml

    
     
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:9001</value>
    </property>
    <!-- 原文为 dfs.namenode.rcp-address,属拼写错误,正确键名是 rpc-address -->
    <property>
        <name>dfs.namenode.rpc-address</name>
        <value>master:9000</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/hadoop/hadoop-2.7.1/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/hadoop/hadoop-2.7.1/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
     


//yarn-site.xml

      
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <property>
        <name>yarn.nodemanager.remote-app-log-dir</name>
        <value>/logs</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8035</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
    </property>
    <property>
        <name>yarn.web-proxy.address</name>
        <value>master:8888</value>
    </property>
</configuration>
      
    


//mapred-site.xml

    
      
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
    <property>
        <name>mapreduce.job.ubertask.enable</name>
        <value>true</value>
    </property>
    <property>
        <name>mapreduce.job.ubertask.maxmaps</name>
        <value>9</value>
    </property>
    <property>
        <name>mapreduce.job.ubertask.maxreduces</name>
        <value>1</value>
    </property>
</configuration>
    



//hadoop-env.sh
export JAVA_HOME=/home/hadoop/jdk1.7.0_80
//slaves指定datanode运行的节点
slave1
slave2


//********************重点配置讲解********//


//mapred-site.xml 设置jobhistory
    
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
     

//mapred-site.xml 启动uber模式,适用于小作业,运行起来更快
    
    <property>
        <name>mapreduce.job.ubertask.enable</name>
        <value>true</value>
    </property>
    <property>
        <name>mapreduce.job.ubertask.maxmaps</name>
        <value>9</value>
    </property>
    <property>
        <name>mapreduce.job.ubertask.maxreduces</name>
        <value>1</value>
    </property>
    

//yarn-site.xml 启动日志,日志会放到dfs上
    
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <property>
        <name>yarn.nodemanager.remote-app-log-dir</name>
        <value>/logs</value>
    </property>
          
    //设置second-namenode的地址
     
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:9001</value>
    </property>
     
    //namenode的webui的端口,文件系统监控界面
     
    <property>
        <name>dfs.namenode.http-address</name>
        <value>master:50070</value>
    </property>
     


    //yarn-site.xml 设置yarn-daemon.sh代理
     
    <property>
        <name>yarn.web-proxy.address</name>
        <value>master:8888</value>
    </property>
     



你可能感兴趣的:(hadoop的五个配置文件,安装时候用)