The three cluster hosts (this list also forms the Hadoop workers file, one hostname per line, and recurs below as the ZooKeeper quorum):

bdp01
bdp02
bdp03
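Every node needs identical configuration, so the edited files are usually pushed out from one node to the rest. A minimal sketch, assuming passwordless SSH between the hosts and the /opt/hadoop install path used throughout this guide:

# From bdp01, sync the Hadoop config directory to the other two nodes
for host in bdp02 bdp03; do
    scp -r /opt/hadoop/etc/hadoop $host:/opt/hadoop/etc/
done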
core-site.xml:

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://vmcluster</value>
</property>
<property>
    <name>ha.zookeeper.quorum</name>
    <value>bdp01:2181,bdp02:2181,bdp03:2181</value>
</property>
<property>
    <name>hadoop.http.staticuser.user</name>
    <value>bigdata</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop/data</value>
</property>
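A quick way to confirm that clients resolve the HA nameservice rather than a single host (assuming the Hadoop binaries are already on PATH):

# Should print hdfs://vmcluster
hdfs getconf -confKey fs.defaultFS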
hdfs-site.xml:

<property>
    <name>dfs.nameservices</name>
    <value>vmcluster</value>
</property>
<property>
    <name>dfs.ha.namenodes.vmcluster</name>
    <value>nn1,nn2,nn3</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.vmcluster.nn1</name>
    <value>bdp01:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.vmcluster.nn2</name>
    <value>bdp02:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.vmcluster.nn3</name>
    <value>bdp03:8020</value>
</property>
<property>
    <name>dfs.namenode.http-address.vmcluster.nn1</name>
    <value>bdp01:9870</value>
</property>
<property>
    <name>dfs.namenode.http-address.vmcluster.nn2</name>
    <value>bdp02:9870</value>
</property>
<property>
    <name>dfs.namenode.http-address.vmcluster.nn3</name>
    <value>bdp03:9870</value>
</property>
<property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://bdp01:8485;bdp02:8485;bdp03:8485/vmcluster</value>
</property>
<property>
    <name>dfs.client.failover.proxy.provider.vmcluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
</property>
<property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/bigdata/.ssh/id_rsa</value>
</property>
<property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
</property>
<property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
</property>
<property>
    <!-- current name of the deprecated dfs.safemode.threshold.pct -->
    <name>dfs.namenode.safemode.threshold-pct</name>
    <value>1</value>
</property>
<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/hadoop/data/jn</value>
</property>
<property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file://${hadoop.tmp.dir}/dfs/nn</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file://${hadoop.tmp.dir}/dfs/dn</value>
</property>
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
</property>
<property>
    <name>dfs.blocksize</name>
    <value>67108864</value>
</property>
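With core-site.xml and hdfs-site.xml in place, first-time HA initialization has to happen in a specific order. A minimal sketch, assuming a fresh cluster with ZooKeeper already running on all three nodes:

# 1. Start a JournalNode on each of bdp01/bdp02/bdp03
hdfs --daemon start journalnode

# 2. Format HDFS on one NameNode only (e.g. bdp01), then start it
hdfs namenode -format
hdfs --daemon start namenode

# 3. On the other NameNodes (bdp02, bdp03), copy the metadata over
hdfs namenode -bootstrapStandby

# 4. Initialize the failover znode in ZooKeeper (once, from any NameNode)
hdfs zkfc -formatZK

# 5. Start everything and check which NameNode became active
start-dfs.sh
hdfs haadmin -getServiceState nn1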
yarn-site.xml:

<property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yarnCluster</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
    <value>2000</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>bdp01</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>bdp02</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>bdp01:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>bdp02:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>bdp01:8032</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>bdp02:8032</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>bdp01:8030</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>bdp02:8030</value>
</property>
<property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>bdp01:2181,bdp02:2181,bdp03:2181</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<property>
    <name>yarn.log.server.url</name>
    <value>http://bdp03:19888/jobhistory/logs</value>
</property>
<property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
    <name>yarn.resourcemanager.zk.state-store.address</name>
    <value>bdp01:2181,bdp02:2181,bdp03:2181</value>
</property>
<property>
    <name>yarn.application.classpath</name>
    <value>
        $HADOOP_CONF_DIR,
        $HADOOP_COMMON_HOME/share/hadoop/common/*,
        $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
        $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
        $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
        $HADOOP_YARN_HOME/share/hadoop/yarn/*,
        $HADOOP_YARN_HOME/share/hadoop/yarn/lib/*
    </value>
</property>
<property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
</property>
<property>
    <name>yarn.resourcemanager.am.max-attempts</name>
    <value>4</value>
    <description>The maximum number of application master execution attempts.</description>
</property>
<property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>2048</value>
</property>
<property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>204800</value>
</property>
<property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>32</value>
</property>
<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>102400</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>3.0</value>
</property>
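Once both ResourceManagers are up, their HA roles can be verified with rmadmin (a quick sanity check after start-yarn.sh; one should report active, the other standby):

yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2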
hadoop-env.sh (Hadoop 3 requires declaring which OS user runs each daemon):

export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.181-7.b13.el7.x86_64/jre
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_JOURNALNODE_USER=root
export HDFS_ZKFC_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
mapred-site.xml:

<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>bdp03:10020</value>
</property>
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>bdp03:19888</value>
</property>
<property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/hadoop</value>
</property>
<property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/hadoop</value>
</property>
<property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/hadoop</value>
</property>
<property>
    <name>mapreduce.application.classpath</name>
    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
</property>
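The JobHistory server referenced above must be started by hand on bdp03, and the bundled pi example makes a convenient end-to-end smoke test. A sketch, assuming the standard Hadoop 3 layout under $HADOOP_HOME:

# On bdp03, start the MapReduce JobHistory server
mapred --daemon start historyserver

# End-to-end smoke test: submit the bundled pi example to YARN
yarn jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10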
Once this step's configuration is done, remember to apply it with:

source /etc/profile

The complete /etc/profile (including the custom exports appended at the end) is shown below for reference:
# /etc/profile
# System wide environment and startup programs, for login setup
# Functions and aliases go in /etc/bashrc
# It's NOT a good idea to change this file unless you know what you
# are doing. It's much better to create a custom.sh shell script in
# /etc/profile.d/ to make custom changes to your environment, as this
# will prevent the need for merging in future updates.
pathmunge () {
    case ":${PATH}:" in
        *:"$1":*)
            ;;
        *)
            if [ "$2" = "after" ] ; then
                PATH=$PATH:$1
            else
                PATH=$1:$PATH
            fi
    esac
}

if [ -x /usr/bin/id ]; then
    if [ -z "$EUID" ]; then
        # ksh workaround
        EUID=`/usr/bin/id -u`
        UID=`/usr/bin/id -ru`
    fi
    USER="`/usr/bin/id -un`"
    LOGNAME=$USER
    MAIL="/var/spool/mail/$USER"
fi

# Path manipulation
if [ "$EUID" = "0" ]; then
    pathmunge /usr/sbin
    pathmunge /usr/local/sbin
else
    pathmunge /usr/local/sbin after
    pathmunge /usr/sbin after
fi

HOSTNAME=`/usr/bin/hostname 2>/dev/null`
HISTSIZE=1000
if [ "$HISTCONTROL" = "ignorespace" ] ; then
    export HISTCONTROL=ignoreboth
else
    export HISTCONTROL=ignoredups
fi

export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL

# By default, we want umask to get set. This sets it for login shell
# Current threshold for system reserved uid/gids is 200
# You could check uidgid reservation validity in
# /usr/share/doc/setup-*/uidgid file
if [ $UID -gt 199 ] && [ "`/usr/bin/id -gn`" = "`/usr/bin/id -un`" ]; then
    umask 002
else
    umask 022
fi

for i in /etc/profile.d/*.sh /etc/profile.d/sh.local ; do
    if [ -r "$i" ]; then
        if [ "${-#*i}" != "$-" ]; then
            . "$i"
        else
            . "$i" >/dev/null
        fi
    fi
done

unset i
unset -f pathmunge
# Custom additions for the big data stack
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.181-7.b13.el7.x86_64/jre
export HADOOP_HOME=/opt/hadoop
export FLINK_HOME=/opt/flink
export HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop
# "hadoop classpath" prints the full runtime classpath; the JVM does not
# expand a literal "*.jar" glob in classpath entries, so don't use one here
export HADOOP_CLASSPATH=$($HADOOP_HOME/bin/hadoop classpath)
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$FLINK_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
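A quick way to confirm the environment after sourcing the file (assuming /etc/profile was copied to every node):

source /etc/profile
java -version           # should report OpenJDK 1.8.0_181
hadoop version          # confirms HADOOP_HOME and PATH are picked up
echo $HADOOP_CLASSPATH  # should print the expanded Hadoop classpath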