Installing Hadoop 1.2.1 Step by Step

-- Basic host configuration
-- Set the hostnames (run each command on its own host)
hostname node1    # on node1
hostname node2    # on node2
hostname node3    # on node3

-- Check the IP address of each host
ifconfig
192.168.239.129    # node1
192.168.239.135    # node2
192.168.239.136    # node3



-- Configure hostname/IP mappings (on all 3 hosts)
vi /etc/hosts
192.168.239.129 node1
192.168.239.135 node2
192.168.239.136 node3
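
-- Optional check (a sketch; assumes the same /etc/hosts on every node):
for h in node1 node2 node3; do ping -c 1 $h; done    # each name should resolve and answer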

-- Configure the network (on all 3 hosts; the setup command also works)
vi /etc/sysconfig/network
HOSTNAME=node1    # node2/node3 use their own hostnames

vi /etc/sysconfig/network-scripts/ifcfg-eth0
IPADDR=192.168.239.129
NETMASK=255.255.255.0
GATEWAY=192.168.239.1
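
-- On node2/node3 the same two files differ only in the host-specific values (a sketch; interface name eth0 assumed):
# node2: HOSTNAME=node2, IPADDR=192.168.239.135
# node3: HOSTNAME=node3, IPADDR=192.168.239.136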


/sbin/service network restart  # restart the network service

-- Disable the firewall (on all 3 hosts)
service iptables stop
chkconfig iptables off
chkconfig | grep iptables
iptables        0:off   1:off   2:off   3:off   4:off   5:off   6:off
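
-- Optional check (CentOS/RHEL 6 assumed):
service iptables status    # should report that the firewall is not running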

The hostname change takes effect after a reboot.




-- Add the hadoop user (on all 3 hosts)
groupadd hadoop
useradd hadoop -g hadoop
passwd hadoop

-- Environment variables in /etc/profile (on all 3 hosts):
[hadoop@node1 ~]$ cat /etc/profile
export JAVA_HOME=/home/hadoop/jdk1.7.0_67
export HADOOP_HOME=/home/hadoop/hadoop-1.2.1
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin    # without this, the jps and hadoop commands used below will not resolve

source /etc/profile    # apply the profile once

-- Set up passwordless SSH

On node1, node2, and node3:
su - hadoop
ssh-keygen -q -t rsa -N "" -f /home/hadoop/.ssh/id_rsa
cd .ssh
cat id_rsa.pub >> authorized_keys
chmod go-wx  authorized_keys

On node1:
scp id_rsa.pub hadoop@node2:~
scp id_rsa.pub hadoop@node3:~

On node2 and node3 (once on each):
cat ~/id_rsa.pub >> ~/.ssh/authorized_keys

On node1, test the logins:
ssh node2
ssh node3
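
-- One-shot check from node1 (a sketch; assumes the hadoop user exists on every node):
for h in node1 node2 node3; do ssh $h hostname; done    # each hostname should print without a password prompt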

---- Test commands
/sbin/ifconfig
ping node1
ssh node1
jps
echo $JAVA_HOME
echo $HADOOP_HOME
hadoop



-- Upload and extract the tarball
tar -zxvf hadoop-1.2.1.tar.gz
cd /home/hadoop/hadoop-1.2.1/share/hadoop/templates/conf   -- example config templates


-----------------Basic Hadoop configuration------------------------
[hadoop@node1 conf]$ vi core-site.xml
[hadoop@node1 conf]$ vi hdfs-site.xml
[hadoop@node1 conf]$ vi slaves
[hadoop@node1 conf]$ vi masters
[hadoop@node1 conf]$ vi hadoop-env.sh       

scp conf/* hadoop@node2:~/hadoop-1.2.1/conf
scp conf/* hadoop@node3:~/hadoop-1.2.1/conf

-- Create the data directory matching hadoop.tmp.dir in core-site.xml
mkdir -p /home/hadoop/hadoop-1.2.1/data
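
-- The same directory is needed on the other nodes; a sketch from node1:
for h in node2 node3; do ssh $h mkdir -p /home/hadoop/hadoop-1.2.1/data; done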


-- Format the NameNode (run on node1; creates the namenode directory)
bin/hadoop namenode -format

-- Start Hadoop (in Hadoop 1.2.1 the daemon scripts live in bin/, not sbin/)
bin/hadoop-daemon.sh start namenode
bin/hadoop-daemon.sh start datanode   -- creates the datanode directory
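
-- Alternatively, the bundled script starts HDFS across the whole cluster from node1 (a sketch):
bin/start-dfs.sh    # starts the NameNode, the DataNodes listed in slaves, and the SecondaryNameNode from masters
jps                 # confirm the daemons are running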

-- Monitoring
http://node1:50070
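
-- HDFS status can also be checked from the command line:
bin/hadoop dfsadmin -report    # live DataNodes, capacity, and usage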



-- Configure MapReduce
[hadoop@node1 conf]$ vi mapred-site.xml

scp conf/* hadoop@node2:~/hadoop-1.2.1/conf
scp conf/* hadoop@node3:~/hadoop-1.2.1/conf
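
-- Start the MapReduce daemons from node1 (a sketch; assumes HDFS is already running):
bin/start-mapred.sh    # JobTracker on node1, a TaskTracker on each host in slaves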

-- Web UIs
http://node1:50070/dfshealth.jsp
http://node1:50030/jobtracker.jsp
http://node2:50060/tasktracker.jsp



-- Configure the IP mappings on the local Windows machine
C:\WINDOWS\system32\drivers\etc\hosts
192.168.239.129 node1
192.168.239.135 node2
192.168.239.136 node3

-- Open the web console
http://node1:50070
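
-- Optional smoke test with the bundled wordcount example (a sketch; run from $HADOOP_HOME on node1; /in and /out are arbitrary HDFS paths):
bin/hadoop fs -mkdir /in
bin/hadoop fs -put conf/*.xml /in
bin/hadoop jar hadoop-examples-1.2.1.jar wordcount /in /out
bin/hadoop fs -cat '/out/part-r-*'    # word counts from the config files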


---------------------------------------------------------------
-- Appendix: configuration files
---------------------------------------------------------------

[hadoop@node1 conf]$ vi core-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://node1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop/hadoop-1.2.1/data</value>
    </property>
</configuration>


[hadoop@node1 conf]$ vi hdfs-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
</configuration>



[hadoop@node1 conf]$ vi mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>node1:9001</value>
    </property>
</configuration>


[hadoop@node1 conf]$ vi hadoop-env.sh
export JAVA_HOME=/home/hadoop/jdk1.7.0_67

[hadoop@node1 conf]$ vi slaves
node1
node2
node3

[hadoop@node1 conf]$ vi masters    -- in Hadoop 1.x the masters file lists the SecondaryNameNode host
node2
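
-- Once everything is started, daemon placement can be checked per node (a sketch; jps is given by full path because a non-interactive ssh shell may not source /etc/profile):
for h in node1 node2 node3; do echo "== $h =="; ssh $h /home/hadoop/jdk1.7.0_67/bin/jps; done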
