Hadoop: Explanation of the Hadoop Configuration Files

Please credit the source when reposting: http://blog.csdn.net/l1028386804/article/details/51372663

1. core-site.xml

<configuration>

  <!-- Default file system URI: points at the logical HA nameservice, not a single NameNode host -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://cluster1</value>
  </property>

  <!-- Base directory for Hadoop's temporary and working files -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/tom/yarn/yarn_data/tmp</value>
  </property>

  <!-- ZooKeeper quorum used by the ZKFC for automatic NameNode failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop1:2181,hadoop2:2181,hadoop3:2181,hadoop4:2181,hadoop5:2181</value>
  </property>

</configuration>
2. hdfs-site.xml (the key file)

<configuration>

  <!-- Number of block replicas to keep -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>

  <!-- Disable HDFS permission checking (old and new property names) -->
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>

  <!-- Logical name of the HA nameservice -->
  <property>
    <name>dfs.nameservices</name>
    <value>cluster1</value>
  </property>

  <!-- The two NameNodes that make up the cluster1 nameservice -->
  <property>
    <name>dfs.ha.namenodes.cluster1</name>
    <value>hadoop1,hadoop2</value>
  </property>

  <!-- RPC and HTTP addresses of each NameNode -->
  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop1</name>
    <value>hadoop1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop1</name>
    <value>hadoop1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop2</name>
    <value>hadoop2:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop2</name>
    <value>hadoop2:50070</value>
  </property>

  <!-- Separate service RPC addresses for internal (DataNode/ZKFC) traffic -->
  <property>
    <name>dfs.namenode.servicerpc-address.cluster1.hadoop1</name>
    <value>hadoop1:53310</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.cluster1.hadoop2</name>
    <value>hadoop2:53310</value>
  </property>

  <!-- Enable automatic failover for cluster1 -->
  <property>
    <name>dfs.ha.automatic-failover.enabled.cluster1</name>
    <value>true</value>
  </property>

  <!-- JournalNode quorum that stores the shared edit log -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485;hadoop4:8485;hadoop5:8485/cluster1</value>
  </property>

  <!-- Proxy class clients use to find the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.cluster1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

  <!-- Local directory where each JournalNode stores its edits -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/tom/yarn/yarn_data/tmp/journal</value>
  </property>

  <!-- Fence the failed NameNode over SSH during a failover -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/tom/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>10000</value>
  </property>

  <!-- Number of NameNode RPC handler threads -->
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>

</configuration>
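With the nameservice and the failover proxy provider in place, client code never needs to know which NameNode is currently active. A minimal sketch, assuming the two XML files above are on the classpath; the class name ListCluster1 is just a placeholder:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListCluster1 {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "cluster1" is the logical nameservice from dfs.nameservices;
        // ConfiguredFailoverProxyProvider resolves it to whichever of
        // hadoop1/hadoop2 is currently the active NameNode.
        FileSystem fs = FileSystem.get(URI.create("hdfs://cluster1"), conf);
        for (FileStatus status : fs.listStatus(new Path("/"))) {
            System.out.println(status.getPath());
        }
    }
}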

3. mapred-site.xml

<configuration>
  <!-- Run MapReduce jobs on YARN instead of the local job runner -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>

4. yarn-site.xml

<configuration>
  <!-- Host that runs the ResourceManager -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop1</value>
  </property>
  <!-- Auxiliary shuffle service for MapReduce.
       Note: Hadoop 2.2 and later expect the value mapreduce_shuffle (underscore). -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce.shuffle</value>
  </property>
</configuration>
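With mapreduce.framework.name set to yarn and the ResourceManager on hadoop1, any job submitted through the standard MapReduce API runs on the cluster rather than locally. A minimal driver sketch, assuming the XML files above are on the classpath; the class name SubmitIdentityJob and the /tmp/demo-in and /tmp/demo-out paths are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SubmitIdentityJob {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // mapreduce.framework.name=yarn makes this submit to the
        // ResourceManager on hadoop1 instead of the local job runner.
        Job job = Job.getInstance(conf, "identity-pass-through");
        job.setJarByClass(SubmitIdentityJob.class);
        // Mapper and Reducer are left at the identity defaults for brevity.
        FileInputFormat.addInputPath(job, new Path("/tmp/demo-in"));
        FileOutputFormat.setOutputPath(job, new Path("/tmp/demo-out"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}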
