vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=nn21021
|
vim /etc/hosts
127.0.0.1 localhost
#::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.17.210.21 nn21021
172.17.210.22 snn21022
172.17.210.23 dn21023
172.17.210.24 dn21024
172.17.210.25 dn21025
|
service iptables status
service iptables stop
chkconfig iptables off
|
groupadd hadoop
useradd -g hadoop hdfs
useradd -g hadoop yarn
useradd -g hadoop mapred
|
passwd hdfs
passwd yarn
passwd mapred
|
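The Kerberos packages have to be installed before /etc/krb5.conf and the KDC can be configured; a minimal sketch, assuming the stock CentOS yum packages (krb5-server is only needed on the KDC host nn21021):
yum install -y krb5-server krb5-libs krb5-workstation
|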
vim /etc/krb5.conf
[logging]
default = FILE:/data/logs/krb5/krb5libs.log
kdc = FILE:/data/logs/krb5/krb5kdc.log
admin_server = FILE:/data/logs/krb5/kadmind.log
[libdefaults]
default_realm = WONHIGH.CN
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
[realms]
WONHIGH.CN = {
kdc = nn21021:88
admin_server = nn21021:749
}
[domain_realm]
.wonhigh.cn = WONHIGH.CN
wonhigh.cn = WONHIGH.CN
[kdc]
profile=/var/kerberos/krb5kdc/kdc.conf
|
vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
WONHIGH.CN = {
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
|
vim /var/kerberos/krb5kdc/kadm5.acl
*/[email protected] *
|
mkdir -p /data/logs/krb5
mkdir -p /data/hadoop/hdfs/name
mkdir -p /data/hadoop/hdfs/data
mkdir -p /data/hadoop/hdfs/tmp
mkdir -p /etc/hadoop/conf
|
|
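The notes do not show creating the KDC database or starting the KDC itself; a sketch of what is normally run on nn21021 before kadmin is started (realm taken from krb5.conf; -s writes a stash file so the KDC can start unattended):
kdb5_util create -r WONHIGH.CN -s
service krb5kdc start
chkconfig krb5kdc on
chkconfig kadmin on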
service kadmin start
|
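The commands that created the principals and exported hdfs.keytab, yarn.keytab and mapred.keytab are missing here; a hedged sketch with kadmin.local on nn21021 (the exact principal names are assumptions, and the same addprinc/xst pattern would be repeated for snn21022, dn21023, dn21024 and dn21025):
kadmin.local -q "addprinc root/admin"
kadmin.local -q "addprinc -randkey hdfs/nn21021"
kadmin.local -q "addprinc -randkey HTTP/nn21021"
kadmin.local -q "addprinc -randkey yarn/nn21021"
kadmin.local -q "addprinc -randkey mapred/nn21021"
kadmin.local -q "xst -k hdfs.keytab hdfs/nn21021 HTTP/nn21021"
kadmin.local -q "xst -k yarn.keytab yarn/nn21021 HTTP/nn21021"
kadmin.local -q "xst -k mapred.keytab mapred/nn21021 HTTP/nn21021"
|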
scp hdfs.keytab yarn.keytab mapred.keytab nn21021:/etc/hadoop/conf
chown -R hdfs:hadoop /etc/hadoop/conf/hdfs.keytab
chown -R yarn:hadoop /etc/hadoop/conf/yarn.keytab
chown -R mapred:hadoop /etc/hadoop/conf/mapred.keytab
scp hdfs.keytab yarn.keytab mapred.keytab dn21024:/etc/hadoop/conf
chown -R hdfs:hadoop /etc/hadoop/conf/hdfs.keytab
chown -R yarn:hadoop /etc/hadoop/conf/yarn.keytab
chown -R mapred:hadoop /etc/hadoop/conf/mapred.keytab
|
kinit root/admin
kinit -k -t /etc/hadoop/conf/hdfs.keytab hdfs/ [email protected]
klist
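To check what a keytab actually contains (principals, timestamps and encryption types, which must match kdc.conf), klist can read it directly:
klist -ket /etc/hadoop/conf/hdfs.keytab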
|
vim $HADOOP_HOME/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/java/java-1.7.0_67
|
vim $HADOOP_HOME/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://nn21021:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop/hdfs/tmp</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
<description>The superuser root can connect from any host (a comma-separated list of host names such as 'host1,host2' can also be set here) to impersonate a user</description>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
<description>Allow the superuser root to impersonate members of any group</description>
</property>
<property>
<name>hadoop.security.authentication</name>
<value>kerberos</value>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>true</value>
</property>
</configuration>
|
vim $HADOOP_HOME/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>nn21021:9001</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/data/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/data/hadoop/hdfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.block.access.token.enable</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>700</value>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/hadoop/conf/hdfs.keytab</value>
</property>
<property>
<name>dfs.namenode.kerberos.principal</name>
<value>hdfs/[email protected]</value>
</property>
<property>
<name>dfs.namenode.kerberos.https.principal</name>
<value>HTTP/[email protected]</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50070</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50010</value>
</property>
<property>
<name>dfs.datanode.keytab.file</name>
<value>/etc/hadoop/conf/hdfs.keytab</value>
</property>
<property>
<name>dfs.datanode.kerberos.principal</name>
<value>hdfs/[email protected]</value>
</property>
<property>
<name>dfs.datanode.kerberos.https.principal</name>
<value>HTTP/[email protected]</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/[email protected]</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/hadoop/conf/hdfs.keytab</value>
</property>
<property>
<name>ignore.secure.ports.for.testing</name>
<value>true</value>
</property>
</configuration>
|
scp core-site.xml hdfs-site.xml dn21024:/usr/local/hadoop-2.5.0/etc/hadoop/
|
chown -R hdfs:hadoop $HADOOP_HOME/logs/
chown -R hdfs:hadoop /data/hadoop/hdfs/
|
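The "NameNode is not formatted" error further down shows the format step is easy to forget: the NameNode must be formatted once, as the hdfs user, before the first start (a sketch; $HADOOP_HOME is expanded by the root shell before su switches user):
su hdfs -c "$HADOOP_HOME/bin/hdfs namenode -format"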
./start-dfs.sh
jps
|
vim $HADOOP_HOME/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>nn21021</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>nn21021:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>nn21021:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>nn21021:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>nn21021:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>nn21021:8088</value>
</property>
<property>
<name>mapreduce.app-submission.cross-platform</name>
<value>true</value>
</property>
<!-- Set the container executor to LinuxContainerExecutor -->
<property>
<description>Who will execute (launch) the containers.</description>
<name>yarn.nodemanager.container-executor.class</name>
<value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
<!--
<value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-->
</property>
<property>
<description>The class which should help the LCE handle resources.</description>
<name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
<value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
</property>
<property>
<description>The cgroups hierarchy under which to place YARN processes (cannot contain commas).
If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have
been pre-configured), then this cgroups hierarchy must already exist and be writable by the
NodeManager user, otherwise the NodeManager may fail.
Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.
</description>
<name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
<value>/hadoop-yarn</value>
</property>
<property>
<description>Whether the LCE should attempt to mount cgroups if not found.
Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.
</description>
<name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
<value>true</value>
</property>
<property>
<description>Where the LCE should attempt to mount cgroups if not found. Common locations include
/sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux distribution in use.
This path must exist before the NodeManager is launched.
Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and
yarn.nodemanager.linux-container-executor.cgroups.mount is true.
</description>
<name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
<value>/cgroup</value>
</property>
<property>
<name>yarn.nodemanager.linux-container-executor.group</name>
<value>hadoop</value>
<description>Your user group here; should match container-executor.cfg</description>
</property>
<property>
<name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
<value>*</value>
<description>Users who can submit jobs</description>
</property>
<property>
<name>hadoop.security.authentication</name>
<value>kerberos</value>
</property>
<property>
<name>yarn.resourcemanager.keytab</name>
<value>/etc/hadoop/conf/yarn.keytab</value>
</property>
<property>
<name>yarn.resourcemanager.principal</name>
<value>yarn/[email protected]</value>
</property>
<property>
<name>yarn.nodemanager.keytab</name>
<value>/etc/hadoop/conf/yarn.keytab</value>
</property>
<property>
<name>yarn.nodemanager.principal</name>
<value>yarn/[email protected]</value>
</property>
</configuration>
|
vim $HADOOP_HOME/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>nn21021:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>nn21021:19888</value>
</property>
<property>
<name>mapreduce.app-submission.cross-platform</name>
<value>true</value>
</property>
<property>
<name>mapreduce.jobtracker.staging.root.dir</name>
<value>/user</value>
</property>
<property>
<name>mapreduce.jobhistory.keytab</name>
<value>/etc/hadoop/conf/mapred.keytab</value>
</property>
<property>
<name>mapreduce.jobhistory.principal</name>
<value>mapred/[email protected]</value>
</property>
</configuration>
|
chmod 771 $HADOOP_HOME/logs
chown root:hadoop $HADOOP_HOME/bin/container-executor
chmod 6050 $HADOOP_HOME/bin/container-executor
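The yarn-site.xml description above says the executor group "should match container-executor.cfg", but that file is not shown; a hedged sketch of a matching $HADOOP_HOME/etc/hadoop/container-executor.cfg (the banned.users and min.user.id values are assumptions for CentOS 6):
vim $HADOOP_HOME/etc/hadoop/container-executor.cfg
yarn.nodemanager.linux-container-executor.group=hadoop
banned.users=hdfs,bin
min.user.id=500
allowed.system.users=yarn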
|
yum install libcgroup
mkdir /cgroup
service cgconfig start
mkdir /cgroup/cpu/hadoop-yarn
chown -R root:hadoop /cgroup/cpu/hadoop-yarn
chmod 771 /cgroup/cpu
chown -R yarn:hadoop /cgroup/cpu/hadoop-yarn/
|
Check status: service cgconfig status
Start now: service cgconfig start
Stop now: service cgconfig stop
|
|
$HADOOP_HOME/bin/yarn jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.5.0.jar pi 1 10
|
java.io.FileNotFoundException: /data/hadoop/hdfs/name/current/VERSION (Permission denied)
at java.io.RandomAccessFile.open(Native Method)
at java.io.RandomAccessFile.<init>(RandomAccessFile.java:241)
at org.apache.hadoop.hdfs.server.common.StorageInfo.readPropertiesFile(StorageInfo.java:241)
at org.apache.hadoop.hdfs.server.namenode.NNStorage.readProperties(NNStorage.java:627)
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverStorageDirs(FSImage.java:325)
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:202)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1020)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:739)
at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:536)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:595)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:762)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:746)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1438)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1504)
2015-01-04 20:00:43,242 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
2015-01-04 20:00:43,244 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
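This usually means the NameNode is running as a user that cannot read the name directory, e.g. it was formatted as root; the fix is the same ownership change used before start-dfs.sh above:
chown -R hdfs:hadoop /data/hadoop/hdfs/
ls -l /data/hadoop/hdfs/name/current/VERSION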
|
java.io.IOException: NameNode is not formatted.
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:212)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1020)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:739)
at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:536)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:595)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:762)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:746)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1438)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1504)
2015-01-04 19:57:54,898 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
2015-01-04 19:57:54,901 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
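Here the name directory exists but has never been formatted; format it once as the hdfs user and start HDFS again (a sketch, same command as suggested earlier):
su hdfs -c "$HADOOP_HOME/bin/hdfs namenode -format"
./start-dfs.sh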
|
[root@dn21024 ~]# kinit -k -t /etc/hadoop/conf/hdfs.keytab hdfs/ [email protected]
kinit: Cannot contact any KDC for realm 'WONHIGH.CN' while getting initial credentials
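On dn21024 this usually means /etc/krb5.conf was never copied over from nn21021, the krb5kdc service is not running, or port 88 is blocked; a few hedged checks:
scp nn21021:/etc/krb5.conf /etc/krb5.conf
service krb5kdc status    # run on nn21021
telnet nn21021 88
|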
2015-01-05 00:11:19,033 FATAL org.apache.hadoop.hdfs.server.namenode.NameNode: Exception in namenode join
org.apache.hadoop.hdfs.server.common.IncorrectVersionException: Unexpected version of storage directory /data/hadoop/hdfs/name. Reported: -60. Expecting = -57.
at org.apache.hadoop.hdfs.server.common.StorageInfo.setLayoutVersion(StorageInfo.java:178)
at org.apache.hadoop.hdfs.server.common.StorageInfo.setFieldsFromProperties(StorageInfo.java:131)
at org.apache.hadoop.hdfs.server.namenode.NNStorage.setFieldsFromProperties(NNStorage.java:608)
at org.apache.hadoop.hdfs.server.common.StorageInfo.readProperties(StorageInfo.java:228)
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverStorageDirs(FSImage.java:323)
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:202)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:955)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:700)
at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:529)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:585)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:751)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:735)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1407)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1473)
2015-01-05 00:11:19,035 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
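Layout version -60 vs. -57 means the name directory was formatted by a newer Hadoop release than the 2.5.0 binaries now starting; for a test cluster with nothing worth keeping, a hedged fix is to wipe the directory and reformat with the current version:
rm -rf /data/hadoop/hdfs/name/*
su hdfs -c "$HADOOP_HOME/bin/hdfs namenode -format"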
|
2015-01-05 10:26:17,411 FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Exception in secureMain
java.lang.RuntimeException: Cannot start secure cluster without privileged resources.
at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:737)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:292)
at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1895)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1782)
at org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode(DataNode.java:1829)
at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.java:2005)
at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2029)
2015-01-05 10:26:17,414 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
|
<!-- Add the following property to hdfs-site.xml: it skips the secure (privileged) port check on Linux, so the DataNode can start without the jsvc service -->
<property>
<name>ignore.secure.ports.for.testing</name>
<value>true</value>
</property>
|
starting yarn daemons
chown: changing ownership of `/usr/local/hadoop-2.5.0/logs': Operation not permitted
starting resourcemanager, logging to /usr/local/hadoop-2.5.0/logs/yarn-yarn-resourcemanager-nn21021.out
/usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 124: /usr/local/hadoop-2.5.0/logs/yarn-yarn-resourcemanager-nn21021.out: Permission denied
head: cannot open `/usr/local/hadoop-2.5.0/logs/yarn-yarn-resourcemanager-nn21021.out' for reading: No such file or directory
/usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 129: /usr/local/hadoop-2.5.0/logs/yarn-yarn-resourcemanager-nn21021.out: Permission denied
/usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 130: /usr/local/hadoop-2.5.0/logs/yarn-yarn-resourcemanager-nn21021.out: Permission denied
The authenticity of host 'dn21024 (172.17.210.24)' can't be established.
RSA key fingerprint is b0:a5:86:df:a2:45:09:ca:fb:f9:fb:2d:5c:d5:8a:f1.
Are you sure you want to continue connecting (yes/no)? yes
dn21024: Warning: Permanently added 'dn21024,172.17.210.24' (RSA) to the list of known hosts.
yarn@dn21024's password:
dn21024: chown: changing ownership of `/usr/local/hadoop-2.5.0/logs': Operation not permitted
dn21024: starting nodemanager, logging to /usr/local/hadoop-2.5.0/logs/yarn-yarn-nodemanager-dn21024.out
dn21024: /usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 124: /usr/local/hadoop-2.5.0/logs/yarn-yarn-nodemanager-dn21024.out: Permission denied
dn21024: head: cannot open `/usr/local/hadoop-2.5.0/logs/yarn-yarn-nodemanager-dn21024.out' for reading: No such file or directory
dn21024: /usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 129: /usr/local/hadoop-2.5.0/logs/yarn-yarn-nodemanager-dn21024.out: Permission denied
dn21024: /usr/local/hadoop-2.5.0/sbin/yarn-daemon.sh: line 130: /usr/local/hadoop-2.5.0/logs/yarn-yarn-nodemanager-dn21024.out: Permission denied
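The yarn user cannot write to $HADOOP_HOME/logs because the directory was chowned to hdfs before start-dfs.sh; opening it to the hadoop group (the chmod 771 step above) on every node clears this, for example:
chown -R hdfs:hadoop /usr/local/hadoop-2.5.0/logs
chmod 771 /usr/local/hadoop-2.5.0/logs
|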
2015-01-05 12:14:48,452 INFO org.apache.hadoop.service.AbstractService: Service NodeManager failed in state INITED; cause: org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to initialize container executor
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to initialize container executor
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceInit(NodeManager.java:192)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:425)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:472)
Caused by: java.io.IOException: Cannot run program "/usr/local/hadoop-2.5.0/bin/container-executor": error=13, Permission denied
at java.lang.ProcessBuilder.start(ProcessBuilder.java:1047)
at org.apache.hadoop.util.Shell.runCommand(Shell.java:485)
at org.apache.hadoop.util.Shell.run(Shell.java:455)
at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:702)
at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.init(LinuxContainerExecutor.java:169)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceInit(NodeManager.java:190)
... 3 more
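error=13 on container-executor means the binary is missing the setuid-root permissions required by the LinuxContainerExecutor; repeat the ownership/permission step from above on every NodeManager host and verify it (a sketch):
chown root:hadoop $HADOOP_HOME/bin/container-executor
chmod 6050 $HADOOP_HOME/bin/container-executor
$HADOOP_HOME/bin/container-executor --checksetup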
|
[yarn@nn21021 sbin]$ $HADOOP_HOME/bin/yarn jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.5.0.jar pi 1 10
Error creating temp dir in hadoop.tmp.dir /data/hadoop/hdfs/tmp due to Permission denied
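hadoop.tmp.dir points at /data/hadoop/hdfs/tmp, which was chowned to hdfs:hadoop, so yarn cannot write there; a hedged fix is to open the directory to the hadoop group:
chown -R hdfs:hadoop /data/hadoop/hdfs/tmp
chmod -R 775 /data/hadoop/hdfs/tmp
|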
org.apache.hadoop.security.AccessControlException: Permission denied: user=yarn, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x
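The yarn user has no home directory in HDFS and /user is owned by hdfs; with an hdfs ticket (kinit with the hdfs keytab first), create and hand over the directory:
$HADOOP_HOME/bin/hdfs dfs -mkdir -p /user/yarn
$HADOOP_HOME/bin/hdfs dfs -chown yarn:hadoop /user/yarn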
|
1/1 local-dirs turned bad: /data/hadoop/hdfs/tmp/nm-local-dir;
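The NodeManager marks its local dir bad when yarn cannot write to it (it lives under the same hadoop.tmp.dir); giving the nm-local-dir to yarn and restarting the NodeManager clears it:
chown -R yarn:hadoop /data/hadoop/hdfs/tmp/nm-local-dir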
|