Download the source (src) release: http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-2.6.2/
The installation below is based on 64-bit CentOS. In this setup host129 is the master and host130 and host131 are the slaves; all three hostnames need to be configured in /etc/hosts.
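A minimal sketch of the corresponding /etc/hosts entries on every node (the IP addresses are placeholders for illustration; use the real addresses of your machines):

# hypothetical addresses, for illustration only
192.168.1.129   host129
192.168.1.130   host130
192.168.1.131   host131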
Install the build dependencies:

yum install autoconf automake libtool cmake ncurses-devel openssl-devel gcc*

(The JDK installation itself is omitted; a JDK is assumed to already be present, e.g. /usr/java/jdk1.7.0_72 as used below.)
Install Maven 3.2.5:

[grid@hadoop4 ~]$ wget http://apache.fayea.com/maven/maven-3/3.2.5/binaries/apache-maven-3.2.5-bin.tar.gz
[grid@hadoop4 ~]$ tar -zxvf ./apache-maven-3.2.5-bin.tar.gz
[root@hadoop4 grid]# mv apache-maven-3.2.5 /usr/local

Set the environment variables:

[grid@hadoop4 ~]$ vi .bash_profile
# User specific environment and startup programs
PATH=$PATH:$HOME/bin:/usr/local/apache-maven-3.2.5/bin
JAVA_HOME=/usr/java/jdk1.7.0_72
export JAVA_HOME
export PATH
[grid@hadoop4 ~]$ source .bash_profile
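A quick check that Maven is now on the PATH (a sketch; the exact output depends on your environment):

mvn -version    # should report Apache Maven 3.2.5 and the configured JAVA_HOME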
Install protobuf 2.5.0, which the Hadoop 2.6 native build requires:

[root@hadoop4 grid]# wget https://protobuf.googlecode.com/files/protobuf-2.5.0.tar.gz
[root@hadoop4 grid]# tar zxvf protobuf-2.5.0.tar.gz
[root@hadoop4 grid]# cd protobuf-2.5.0
[root@hadoop4 protobuf-2.5.0]# ./configure
[root@hadoop4 protobuf-2.5.0]# make
[root@hadoop4 protobuf-2.5.0]# make check
[root@hadoop4 protobuf-2.5.0]# make install
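After make install it is worth confirming that the protoc the build will pick up is the 2.5.0 one (a sketch; if the shared library is not found, running ldconfig may also be necessary):

protoc --version    # should print: libprotoc 2.5.0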
Switch to the extracted Hadoop source (src) directory and build the distribution:

mvn package -Pdist,native -DskipTests -Dtar
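If the build succeeds, the compiled distribution normally ends up under hadoop-dist/target (the exact path is an assumption based on the standard Hadoop source layout):

ls hadoop-dist/target/hadoop-2.6.2.tar.gz    # binary tarball produced by -Dtar
ls -d hadoop-dist/target/hadoop-2.6.2        # unpacked distribution with the native libraries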
Set the hostname (hostnames must not contain underscores; hyphens are allowed). Run:
(1) hostname host129
(2) vi /etc/sysconfig/network
    NETWORKING=yes
    HOSTNAME=host129
Note: the hostname set here must match the one used in the Hadoop configuration files. Verify with: hadoop fs -ls hdfs://host129:8020/ (the port must match fs.defaultFS in core-site.xml).
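A quick check, assuming /etc/hosts has been populated as sketched earlier, that the names are set and resolve on every node:

hostname              # should print host129 on the master
ping -c 1 host130     # each slave hostname should resolve and respond
ping -c 1 host131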
Set up passwordless SSH:
1. Generate a key pair: run ssh-keygen -t rsa
2. Append your own public key to the authorized list:
   cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
   Note: for a non-root user the file must have restricted permissions: chmod 600 authorized_keys
   Perform the two steps above on every node, and make sure each node can SSH to itself before moving on.
3. On the master, pull in each slave's public key (repeat for every slave; host130 and host131 in this setup):
   ssh host130 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
4. Distribute the merged authorized_keys to all other nodes:
   scp ~/.ssh/authorized_keys host130:~/.ssh/authorized_keys
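A sketch of how to confirm from the master that passwordless login works (no password prompt should appear):

ssh host130 hostname   # should print host130 without prompting for a password
ssh host131 hostname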
Add the Hadoop environment variables to /etc/profile:

#HADOOP VARIABLES START
export HADOOP_INSTALL=/opt/hadoop/hadoop-2.6.2
export PATH=$PATH:$HADOOP_INSTALL/bin
export PATH=$PATH:$HADOOP_INSTALL/sbin
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_HOME=$HADOOP_INSTALL
export HADOOP_HDFS_HOME=$HADOOP_INSTALL
export YARN_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib:$HADOOP_INSTALL/lib/native"
#HADOOP VARIABLES END

Make the configuration take effect:

source /etc/profile
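With the variables loaded, the hadoop command should resolve from the new installation; a quick sanity check (a sketch, output details depend on your build):

hadoop version          # should report Hadoop 2.6.2
hadoop checknative -a   # shows whether the freshly built native libraries are being used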
Also set JAVA_HOME explicitly for Hadoop (typically in etc/hadoop/hadoop-env.sh):

export JAVA_HOME=/usr/local/jdk
etc/hadoop/core-site.xml:

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://host129:8020</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop/hadoop-2.6.2/data</value>
    </property>
</configuration>
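Since hadoop.tmp.dir points at a directory inside the installation tree, it can be created up front on every node (a sketch; grid is the user shown in the prompts above, adjust to whatever user runs Hadoop):

mkdir -p /opt/hadoop/hadoop-2.6.2/data
chown -R grid:grid /opt/hadoop/hadoop-2.6.2/data   # assumed user/group; adjust as needed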
etc/hadoop/yarn-site.xml:

<configuration>
<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>host129</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>host129:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>host129:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>host129:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>host129:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>host129:8088</value>
    </property>
    <property>
        <name>yarn.scheduler.fair.user-as-default-queue</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.resourcemanager.zk-timeout-ms</name>
        <value>120000</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>3072</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>128</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>3072</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>1</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-vcores</name>
        <value>1</value>
    </property>
    <property>
        <name>yarn.nodemanager.container-monitor.interval-ms</name>
        <value>300000</value>
    </property>
</configuration>
etc/hadoop/mapred-site.xml:

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
        <final>true</final>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>host129:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>host129:19888</value>
    </property>
</configuration>
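The two jobhistory addresses only answer if the MapReduce JobHistory server is running; it is not launched by start-all.sh, so as a sketch it can be started separately on host129 once the cluster is up:

./sbin/mr-jobhistory-daemon.sh start historyserver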
etc/hadoop/hdfs-site.xml:

<configuration>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>host129:50090</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>${hadoop.tmp.dir}/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>${hadoop.tmp.dir}/dfs/data</value>
    </property>
    <property>
        <name>dfs.datanode.socket.write.timeout</name>
        <value>600000</value>
    </property>
    <property>
        <name>dfs.client.socket-timeout</name>
        <value>300000</value>
    </property>
    <property>
        <name>dfs.datanode.max.xcievers</name>
        <value>4096</value>
    </property>
</configuration>
etc/hadoop/slaves (one slave hostname per line):

host130
host131
Synchronize all of the configuration and files above to the other slave machines, keeping exactly the same installation paths.
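One way to do the sync (a sketch, assuming the installation lives at /opt/hadoop/hadoop-2.6.2 on every node and passwordless SSH is already in place):

scp -r /opt/hadoop/hadoop-2.6.2 host130:/opt/hadoop/
scp -r /opt/hadoop/hadoop-2.6.2 host131:/opt/hadoop/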
On the master, format the NameNode: bin/hdfs namenode -format
On the master, start the cluster: ./sbin/start-all.sh
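After start-all.sh finishes, a rough sanity check (a sketch): jps should show the expected daemons on each node, and the web UIs should answer on the ports configured above:

jps                 # on host129: expect NameNode, SecondaryNameNode, ResourceManager
ssh host130 jps     # on the slaves: expect DataNode, NodeManager
# web UIs: http://host129:50070 (HDFS NameNode) and http://host129:8088 (YARN ResourceManager)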