1. 部署环境
/etc/init.d/iptables stop
chkconfig iptables off
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
2. SSH配置
useradd hadoop
echo 123456 | passwd --stdin hadoop
su - hadoop
ssh-keygen -t rsa                    #生成密钥对
ssh-copy-id user@ip                  #将ssh公钥copy到指定的主机
cd .ssh                              #每台服务器本机也需要配置ssh免密码登录
cat id_rsa.pub >> authorized_keys
3. 部署hadoop
wget http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-1.2.1/hadoop-1.2.1.tar.gz
tar xf hadoop-1.2.1.tar.gz
1. conf/hadoop-env.sh
vi hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.7.0_67
2. conf/core-site.xml
<configuration>
    <property>
        <name>fs.default.name</name>                  <!-- 指定名称节点在哪里... -->
        <value>hdfs://hadoop1:9000</value>            <!-- 名称节点是hadoop1, 也可以写成ip地址. -->
    </property>
    <property>
        <name>hadoop.tmp.dir</name>                   <!-- 指定hadoop的临时文件目录,如果不指定会使用/tmp -->
        <value>/home/hadoop/hadoop-1.2.1/tmp</value>  <!-- 要记得创建目录 mkdir /home/hadoop-1.2.1/tmp -->
    </property>
    <property>
        <name>fs.trash.interval</name>                <!-- 指定回收站的清空间隔 -->
        <value>21600</value>                          <!-- 单位是分钟 -->
        <description>
            Number of minutes between trash checkpoints. If zero, the trash feature is disabled
        </description>
    </property>
</configuration>
3. conf/hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>       <!-- 定义hdfs的复制因子,保存多少副本数 -->
        <value>2</value>                   <!-- 如果是1的话,不复制.. -->
    </property>
    <property>
        <name>dfs.block.size</name>
        <value>67108864</value>
        <description>The default block size for new files.</description>
    </property>
</configuration>
4. conf/mapred-site.xml
<configuration>
    <property>
        <name>mapred.job.tracker</name>    <!-- 指定jobtracker的地址和监听端口 -->
        <value>hadoop1:9001</value>
    </property>
</configuration>
5. conf/masters
hadoop1
#在文件中指定充当master的主机名称
6. conf/slaves
hadoop2
#在文件中指定充当slave的主机名称
hadoop3
7. 配置/etc/hosts

vi /etc/hosts
192.168.188.210 hadoop1
192.168.188.220 hadoop2
192.168.188.230 hadoop3
8. 将hadoop目录scp到其他的设备上
cd /home/hadoop
scp -r hadoop-1.2.1 ip:/home/hadoop
9. 格式化名称节点
cd hadoop-1.2.1
./bin/hadoop namenode -format
如果最后面的信息有下面这样的表示成功...
14/08/30 11:37:05 INFO common.Storage: Storage directory /home/hadoop/hadoop-1.2.1/tmp/dfs/name has been successfully formatted.
4. 启动hadoop
./bin/start-all.sh
5. 启动后的检查
/usr/java/jdk1.7.0_67/bin/jps