Hadoop 2.7 Configuration

core-site.xml




<configuration>

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://bigdata/</value>
</property>

<property>
    <name>ha.zookeeper.quorum</name>
    <value>192.168.56.70:2181,192.168.56.71:2181,192.168.56.72:2181</value>
</property>

<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/data/hadoop/tmp</value>
</property>

<property>
    <name>fs.trash.interval</name>
    <value>1440</value>
</property>

<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
</property>

<property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
</property>

<property>
    <name>net.topology.script.file.name</name>
    <value>/export/common/hadoop/conf/topo.sh</value>
</property>

<property>
    <name>net.topology.script.number.args</name>
    <value>1</value>
</property>

<property>
    <name>ha.health-monitor.rpc-timeout.ms</name>
    <value>180000</value>
</property>

<property>
    <name>hadoop.security.authorization</name>
    <value>true</value>
</property>

<property>
    <name>hadoop.security.authentication</name>
    <value>kerberos</value>
</property>

<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
</property>

<property>
    <name>ipc.maximum.data.length</name>
    <value>268435456</value>
</property>

<property>
    <name>hadoop.proxyuser.httpfs.hosts</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.httpfs.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hdfs.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hdfs.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hue.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hue.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hive.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hive.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.spark.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.spark.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.dwetl.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.dwetl.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hbase.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hbase.groups</name>
    <value>*</value>
</property>

</configuration>
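
core-site.xml points net.topology.script.file.name at /export/common/hadoop/conf/topo.sh, but that script is not reproduced in this post. As a reference only, a minimal rack-awareness script might look like the sketch below (the IP-to-rack mapping is purely illustrative; with net.topology.script.number.args set to 1 the NameNode passes one address per invocation):

#!/bin/bash
# Hypothetical topo.sh sketch: print a rack path for each host/IP argument.
# Hosts not listed fall back to /default-rack.
while [ $# -gt 0 ]; do
  case "$1" in
    192.168.56.70|192.168.56.71) echo "/rack01" ;;
    192.168.56.72)               echo "/rack02" ;;
    *)                           echo "/default-rack" ;;
  esac
  shift
done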

hadoop-env.sh

export JAVA_HOME=/export/java
export HADOOP_COMMON_LIB_NATIVE_DIR="${HADOOP_HOME}/lib/native"
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native/"

for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=2048
export HADOOP_HEAPSIZE=4096

# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Xmx85g -Xms85g -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"

#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_DATANODE_OPTS="-server -XX:+UseConcMarkSweepGC -XX:SurvivorRatio=3 -XX:MaxTenuringThreshold=10 -XX:CMSInitiatingOccupancyFraction=80 -XX:+ExplicitGCInvokesConcurrent -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
#export HADOOP_DATANODE_OPTS="-Xmx8g -Xms8g ${HADOOP_DATANODE_OPTS}"
export HADOOP_DATANODE_OPTS="-Xmx16g -Xms16g ${HADOOP_DATANODE_OPTS}"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"

export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

export HADOOP_PID_DIR=/export/hadoop/pids
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER

export LD_LIBRARY_PATH=/export/hadoop/lib
export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/export/hadoop/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/export/hadoop/lib/*

TEZ_CONF_DIR=/export/common/hadoop/conf/tez-site.xml
TEZ_JARS=/export/tez
export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${TEZ_CONF_DIR}:${TEZ_JARS}/*:${TEZ_JARS}/lib/*
export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:/export/hadoop/lib/native/"
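
After editing hadoop-env.sh it is worth confirming that the native libraries pointed to by HADOOP_COMMON_LIB_NATIVE_DIR and java.library.path are actually loaded. Assuming $HADOOP_HOME/bin is on the PATH, a quick check is:

# reports whether the hadoop, zlib, snappy, lz4, bzip2 and openssl native bindings were found
hadoop checknative -a
hadoop version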

HDFS

hdfs-site.xml




<configuration>

<property>
    <name>dfs.nameservices</name>
    <value>bigdata</value>
</property>

<property>
    <name>dfs.ha.namenodes.bigdata</name>
    <value>nn1,nn2</value>
</property>

<property>
    <name>dfs.namenode.rpc-address.bigdata.nn1</name>
    <value>192.168.56.71:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.bigdata.nn2</name>
    <value>192.168.56.72:8020</value>
</property>
<property>
    <name>dfs.namenode.http-address.bigdata.nn1</name>
    <value>192.168.56.71:50070</value>
</property>
<property>
    <name>dfs.namenode.http-address.bigdata.nn2</name>
    <value>192.168.56.72:50070</value>
</property>
<property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://192.168.56.71:8485;192.168.56.72:8485;192.168.56.70:8485/bigdata</value>
</property>
<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/export/data/hadoop/journal</value>
</property>
<property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.client.failover.proxy.provider.bigdata</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
    <name>dfs.namenode.name.dir</name>
    <value>/export/data/hadoop/namenode</value>
</property>

<property>
    <name>dfs.datanode.data.dir</name>
    <value>/export/grid/01/hadoop/hdfs/data,/export/grid/02/hadoop/hdfs/data</value>
</property>
<property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:50010</value>
</property>

<property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:50075</value>
</property>

<property>
    <name>dfs.datanode.ipc.address</name>
    <value>0.0.0.0:50020</value>
</property>

<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>

<property>
    <name>dfs.permissions</name>
    <value>true</value>
</property>

<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

<property>
    <name>dfs.datanode.balance.bandwidthPerSec</name>
    <value>41943040</value>
</property>

<property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>8192</value>
</property>

<property>
    <name>dfs.namenode.handler.count</name>
    <value>200</value>
</property>

<property>
    <name>dfs.datanode.handler.count</name>
    <value>100</value>
</property>

<property>
    <name>dfs.datanode.max.xcievers</name>
    <value>65535</value>
</property>

<property>
    <name>dfs.namenode.name.dir.restore</name>
    <value>false</value>
</property>

<property>
    <name>dfs.namenode.checkpoint.period</name>
    <value>6000</value>
</property>

<property>
    <name>dfs.hosts</name>
    <value>/export/common/hadoop/conf/allowed_hosts</value>
</property>

<property>
    <name>dfs.hosts.exclude</name>
    <value>/export/common/hadoop/conf/exclude_datanode_hosts</value>
</property>

<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.qjournal.write-txns.timeout.ms</name>
    <value>60000</value>
</property>

<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.ha.fencing.methods</name>
    <value>
        shell(/bin/true)
    </value>
</property>

<property>
    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
    <value>true</value>
</property>

<property>
    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
    <value>DEFAULT</value>
</property>

<property>
    <name>dfs.block.access.token.enable</name>
    <value>true</value>
</property>

<property>
    <name>dfs.namenode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.namenode.kerberos.principal</name>
    <value>hdfs/[email protected]</value>
</property>

<property>
    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
    <value>HTTP/[email protected]</value>
</property>

<property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
</property>

<property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:2828</value>
</property>

<property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:2829</value>
</property>

<property>
    <name>dfs.datanode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>hdfs/[email protected]</value>
</property>

<property>
    <name>dfs.journalnode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.journalnode.kerberos.principal</name>
    <value>hdfs/[email protected]</value>
</property>

<property>
    <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
    <value>HTTP/[email protected]</value>
</property>

<property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/[email protected]</value>
</property>

<property>
    <name>dfs.web.authentication.kerberos.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.http.policy</name>
    <value>HTTPS_ONLY</value>
</property>

<property>
    <name>dfs.data.transfer.protection</name>
    <value>integrity</value>
</property>

<property>
    <name>dfs.encrypt.data.transfer</name>
    <value>true</value>
</property>

<property>
    <name>mapreduce.jobtracker.handler.count</name>
    <value>40</value>
</property>

<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>
<property>
    <name>jdjr.hadoop.path.permission.enable</name>
    <value>true</value>
</property>
<property>
    <name>dfs.namenode.inode.attributes.provider.class</name>
    <value>com.jdjr.flowyed.hadoop.permission.JdjrHdfsAuthorizer</value>
</property>
<property>
    <name>jdjr.hadoop.path.permission.file.path</name>
    <value>/export/common/hadoop/conf/hdfs-policies.json</value>
</property>
<property>
    <name>jdjr.hadoop.cluster.name</name>
    <value>agent-hadoop-dev</value>
</property>

</configuration>
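
With hadoop.security.authentication set to kerberos, the daemons will not start unless the keytab referenced above (/export/common/hadoop/conf/hdfs.keytab) really contains the hdfs/ and HTTP/ principals for the local host. A quick sanity check on each node (substitute the realm your KDC is actually configured with for <REALM>):

klist -kt /export/common/hadoop/conf/hdfs.keytab
kinit -kt /export/common/hadoop/conf/hdfs.keytab hdfs/$(hostname -f)@<REALM>
klist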

allowed_hosts

(DataNode IP addresses)

192.168.56.70
192.168.56.71
192.168.56.72
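
Changes to allowed_hosts (dfs.hosts) and exclude_datanode_hosts (dfs.hosts.exclude) do not need a NameNode restart; after editing either file, have the NameNode re-read them with:

hdfs dfsadmin -refreshNodes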

YARN

yarn-site.xml




<configuration>

<property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>bigdata</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>192.168.56.71</value>
</property>

<property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>192.168.56.72</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>192.168.56.71:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>192.168.56.72:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>192.168.56.71:2181,192.168.56.72:2181,192.168.56.70:2181</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
    <value>/hadoop-yarn-ha</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>192.168.56.71:8132</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>192.168.56.72:8132</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>192.168.56.71:8130</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>192.168.56.72:8130</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>192.168.56.71:8131</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>192.168.56.72:8131</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>192.168.56.71:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>192.168.56.72:8088</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>

<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>78848</value>
</property>

<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>10</value>
</property>

<property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>

<property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>78848</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>4096</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Xmx3584M</value>
</property>

<property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>76</value>
</property>

<property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/export/grid/01/hadoop/yarn/log,/export/grid/02/hadoop/yarn/log,/export/grid/03/hadoop/yarn/log,/export/grid/04/hadoop/yarn/log,/export/grid/05/hadoop/yarn/log,/export/grid/06/hadoop/yarn/log,/export/grid/07/hadoop/yarn/log,/export/grid/08/hadoop/yarn/log,/export/grid/09/hadoop/yarn/log,/export/grid/10/hadoop/yarn/log,/export/grid/11/hadoop/yarn/log,/export/grid/12/hadoop/yarn/log</value>
</property>

<property>
    <name>yarn.acl.enable</name>
    <value>false</value>
</property>
<property>
    <name>yarn.admin.acl</name>
    <value>*</value>
</property>

<property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/export/grid/01/hadoop/yarn/local,/export/grid/02/hadoop/yarn/local</value>
</property>
<property>
    <name>yarn.log.server.url</name>
    <value>http://192.168.56.70:19888/jobhistory/logs</value>
</property>
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<property>
    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
    <value>95</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
    <name>yarn.scheduler.fair.preemption</name>
    <value>true</value>
</property>
<property>
    <name>yarn.nodemanager.address</name>
    <value>${yarn.nodemanager.hostname}:65033</value>
</property>

<property>
    <name>yarn.resourcemanager.nodes.exclude-path</name>
    <value>/export/common/hadoop/conf/exclude_nodemanager_hosts</value>
</property>

<property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
</property>

<property>
    <name>yarn.resourcemanager.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>yarn.resourcemanager.principal</name>
    <value>hdfs/[email protected]</value>
</property>

<property>
    <name>yarn.nodemanager.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>
<property>
    <name>yarn.nodemanager.principal</name>
    <value>hdfs/[email protected]</value>
</property>

<property>
    <name>yarn.nodemanager.linux-container-executor.group</name>
    <value>hadoop</value>
</property>

<property>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>spark_shuffle,mapreduce_shuffle</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
</property>

<property>
    <name>yarn.scheduler.fair.user-as-default-queue</name>
    <value>false</value>
</property>

<property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
    <value>10000</value>
</property>

</configuration>
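
Because yarn-site.xml switches the NodeManagers to LinuxContainerExecutor, each NodeManager also needs a container-executor.cfg next to the Hadoop configuration, plus the setuid container-executor binary owned by root with group hadoop. A minimal sketch consistent with the group configured above; the banned users and minimum UID below are illustrative values, not taken from this cluster:

yarn.nodemanager.linux-container-executor.group=hadoop
banned.users=bin,daemon
min.user.id=500
allowed.system.users=hdfs

Similarly, after editing /export/common/hadoop/conf/exclude_nodemanager_hosts, the ResourceManager re-reads it with yarn rmadmin -refreshNodes rather than a restart.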

fair-scheduler.xml


<allocations>
    <userMaxAppsDefault>100</userMaxAppsDefault>
    <fairSharePreemptionTimeout>3000</fairSharePreemptionTimeout>
    <queue name="root">
        <aclSubmitApps>hdfs</aclSubmitApps>
        <aclAdministerApps>hdfs</aclAdministerApps>
        <queue name="default">
            <maxResources>249311 mb, 147 vcores</maxResources>
            <minResources>1024 mb, 1 vcores</minResources>
            <maxRunningApps>1000</maxRunningApps>
            <weight>1.0</weight>
            <aclSubmitApps>hbase,hive</aclSubmitApps>
            <aclAdministerApps>hbase,hive</aclAdministerApps>
        </queue>
        <queue name="etl">
            <maxResources>598346 mb, 352 vcores</maxResources>
            <minResources>1024 mb, 1 vcores</minResources>
            <maxRunningApps>1000</maxRunningApps>
            <weight>1.0</weight>
            <aclSubmitApps>*</aclSubmitApps>
            <aclAdministerApps>hdfs</aclAdministerApps>
        </queue>
        <queue name="personal">
            <maxResources>149586 mb, 88 vcores</maxResources>
            <minResources>1024 mb, 1 vcores</minResources>
            <maxRunningApps>50</maxRunningApps>
            <weight>1.0</weight>
            <aclSubmitApps>*</aclSubmitApps>
            <aclAdministerApps>hdfs</aclAdministerApps>
        </queue>
    </queue>
</allocations>
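
The FairScheduler only applies this allocation file if it can find it: by default it looks for a file named fair-scheduler.xml in the Hadoop configuration directory, and the location can also be set explicitly in yarn-site.xml (the path below assumes the conf directory used throughout this post):

<property>
    <name>yarn.scheduler.fair.allocation.file</name>
    <value>/export/common/hadoop/conf/fair-scheduler.xml</value>
</property>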

mapred-site.xml




<configuration>

<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

<property>
    <name>mapreduce.jobhistory.address</name>
    <value>192.168.56.70:10020</value>
</property>

<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>192.168.56.70:19888</value>
</property>

<property>
    <name>mapreduce.map.memory.mb</name>
    <value>2048</value>
</property>

<property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>4096</value>
</property>

<property>
    <name>mapred.child.java.opts</name>
    <value>-Xmx4096M</value>
</property>
<property>
    <name>mapreduce.map.java.opts</name>
    <value>-Xmx1536M</value>
</property>
<property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-Xmx3276M</value>
</property>

<property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
</property>

<property>
    <name>mapreduce.task.io.sort.mb</name>
    <value>200</value>
</property>

<property>
    <name>mapreduce.task.io.sort.factor</name>
    <value>50</value>
</property>

<property>
    <name>mapreduce.reduce.shuffle.parallelcopies</name>
    <value>50</value>
</property>

<property>
    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
    <value>0.3</value>
</property>

<property>
    <name>mapred.job.reuse.jvm.num.tasks</name>
    <value>5</value>
</property>

<property>
    <name>mapreduce.job.counters.group.name.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.counter.name.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.groups.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.jobhistory.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>mapreduce.jobhistory.principal</name>
    <value>hdfs/_HOST@{KDC_REALM}</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.tasktracker.map.tasks.maximum</name>
    <value>34</value>
</property>

<property>
    <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
    <value>18</value>
</property>

</configuration>
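
The history endpoints above (mapreduce.jobhistory.address, mapreduce.jobhistory.webapp.address, and yarn.log.server.url in yarn-site.xml) all point at 192.168.56.70, and the MapReduce JobHistory server is not started by the HDFS or YARN scripts. On that host, start it with:

$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver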

log4j.properties

hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log

log4j.rootLogger=${hadoop.root.logger}, EventCounter

log4j.threshold=ALL

log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}

log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n

hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12

log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}

log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd

hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}

mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}


log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR

log4j.logger.com.amazonaws=ERROR
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN

log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log

yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}

log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
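
Note that hdfs.audit.logger defaults to INFO,NullAppender here, so the RFAAUDIT appender defined above writes nothing until the audit logger is switched on. One way, matching the ${HDFS_AUDIT_LOGGER:-INFO,NullAppender} hook in hadoop-env.sh, is to export the variable before starting the NameNode:

export HDFS_AUDIT_LOGGER=INFO,RFAAUDIT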
