hadoop core-default / hdfs-default default configuration

 

dfs.replication.interval=3
fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem
dfs.safemode.extension=30000
ipc.server.tcpnodelay=false
dfs.web.ugi=webuser,webgroup
fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary
dfs.permissions.supergroup=supergroup
dfs.datanode.http.address=0.0.0.0:50075
dfs.replication.min=1
dfs.https.address=0.0.0.0:50470
dfs.datanode.dns.nameserver=default
dfs.http.address=0.0.0.0:50070
io.bytes.per.checksum=512
dfs.blockreport.intervalMsec=3600000
hadoop.util.hash.type=murmur
dfs.data.dir=${hadoop.tmp.dir}/dfs/data
fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem
fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem
dfs.block.size=512
fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem
fs.checkpoint.period=3600
dfs.https.client.keystore.resource=ssl-client.xml
hadoop.logfile.count=10
dfs.support.append=false
ipc.client.connection.maxidletime=10000
io.seqfile.lazydecompress=true
dfs.datanode.dns.interface=default
fs.checkpoint.size=67108864
dfs.max.objects=0
local.cache.size=10737418240
fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem
fs.file.impl=org.apache.hadoop.fs.LocalFileSystem
fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem
fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
dfs.client.block.write.retries=3
ipc.client.kill.max=10
dfs.datanode.du.reserved=0
hadoop.security.authorization=false
dfs.replication.max=512
dfs.balance.bandwidthPerSec=1048576
fs.s3.sleepTimeSeconds=10
fs.default.name=hdfs://10.0.18.105:54310
hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
dfs.datanode.address=0.0.0.0:50010
dfs.access.time.precision=3600000
dfs.heartbeat.interval=3
dfs.replication.considerLoad=true
dfs.default.chunk.view.size=32768
io.file.buffer.size=4096
dfs.https.need.client.auth=false
dfs.datanode.ipc.address=0.0.0.0:50020
dfs.blockreport.initialDelay=0
fs.har.impl.disable.cache=true
hadoop.native.lib=true
fs.s3.block.size=67108864
dfs.replication=2
io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec
dfs.https.enable=false
io.seqfile.compress.blocksize=1000000
fs.har.impl=org.apache.hadoop.fs.HarFileSystem
io.mapfile.bloom.error.rate=0.005
dfs.namenode.decommission.interval=30
io.skip.checksum.errors=false
fs.s3.maxRetries=4
ipc.server.listen.queue.size=128
fs.trash.interval=0
fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem
io.seqfile.sorter.recordlimit=1000000
io.mapfile.bloom.size=1048576
dfs.namenode.startup=FORMAT
dfs.namenode.decommission.nodes.per.interval=5
webinterface.private.actions=false
dfs.name.edits.dir=${dfs.name.dir}
hadoop.tmp.dir=/home/dikar/hadoop/tmp
fs.checkpoint.edits.dir=${fs.checkpoint.dir}
dfs.safemode.threshold.pct=0.999f
ipc.client.idlethreshold=4000
dfs.permissions=true
dfs.namenode.handler.count=10
hadoop.logfile.size=10000000
dfs.namenode.logging.level=info
dfs.datanode.https.address=0.0.0.0:50475
dfs.secondary.http.address=0.0.0.0:50090
topology.script.number.args=100
dfs.https.server.keystore.resource=ssl-server.xml
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem
dfs.name.dir=${hadoop.tmp.dir}/dfs/name
io.serializations=org.apache.hadoop.io.serializer.WritableSerialization
ipc.client.connect.max.retries=10
ipc.client.tcpnodelay=false
dfs.datanode.handler.count=3
dfs.df.interval=60000
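
A dump like the one above takes only a few lines of Java, since org.apache.hadoop.conf.Configuration is iterable over its key/value pairs. The sketch below is written against the 0.20-era API; the explicit addResource call for hdfs-default.xml is an assumption (the HDFS classes normally register that resource themselves when first loaded).

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class DumpConfiguration {
    public static void main(String[] args) {
        // new Configuration() loads core-default.xml plus any core-site.xml overrides
        Configuration conf = new Configuration();
        // added by hand here (assumption) so the HDFS defaults show up in the dump
        conf.addResource("hdfs-default.xml");

        // iteration yields the raw, unexpanded values, which is why
        // ${hadoop.tmp.dir} appears literally in the listing above
        for (Map.Entry<String, String> entry : conf) {
            System.out.println(entry.getKey() + "=" + entry.getValue());
        }
    }
}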
 

   Looking at this dump, you quickly notice how important one setting is:

hadoop.tmp.dir

    dfs.name.dir, dfs.data.dir, fs.checkpoint.dir, and fs.s3.buffer.dir all default to paths under ${hadoop.tmp.dir}, so the NameNode image and the DataNode blocks end up wherever it points. The stock default is /tmp/hadoop-${user.name}, a location the OS is free to wipe on reboot; here it has sensibly been moved to /home/dikar/hadoop/tmp. In short, tmp really is not temp *_<
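
To see the substitution at work, here is a minimal sketch of how Configuration expands ${...} references (the /data/hadoop/tmp path is a made-up example): get() substitutes variables at read time, so pointing hadoop.tmp.dir somewhere persistent, typically via core-site.xml, relocates every derived path in one stroke.

import org.apache.hadoop.conf.Configuration;

public class TmpDirExpansion {
    public static void main(String[] args) {
        // start from an empty Configuration so only the values set here are in play
        Configuration conf = new Configuration(false);
        conf.set("hadoop.tmp.dir", "/data/hadoop/tmp"); // example path, not a shipped default
        conf.set("dfs.name.dir", "${hadoop.tmp.dir}/dfs/name");
        conf.set("dfs.data.dir", "${hadoop.tmp.dir}/dfs/data");

        // get() expands ${...}, so both paths track hadoop.tmp.dir
        System.out.println(conf.get("dfs.name.dir")); // /data/hadoop/tmp/dfs/name
        System.out.println(conf.get("dfs.data.dir")); // /data/hadoop/tmp/dfs/data
    }
}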
