vvvvvvvvvvvv config vvvvvvvvvvvvvvv
Install the JDK:
sudo -s ./jdk.bin
Set environment variables:
/etc/profile #global
~/.profile #personalize
#optional
sudo addgroup hadoopgrp
sudo adduser --ingroup hadoopgrp hadoop #password: hadoop
#switch to the hadoop user, and make the machine trust itself (localhost) for passwordless SSH
#This step lets the master SSH to the slaves without a password, so the master's RSA public key must be copied to each slave; do the reverse as well if the slaves also need to SSH to the master.
su - hadoop
ssh-keygen -t rsa -P ""
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
#optional: grant the hadoop user permission to modify files under /usr/local/hadoop-*.*/*.*
sudo chown -R hadoop:hadoopgrp hadoop-0.20.2
## test connect by no password
ssh localhost #if it reports that port 22 cannot be opened, sshd is probably not installed; install it with: sudo apt-get install openssh-server
#modify config files
#core-site.xml
<property>
<name>hadoop.tmp.dir</name>
<value>/your/path/to/hadoop/tmp/dir/hadoop-${user.name}</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:54310</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
the FileSystem implementation class. The uri's authority is used to
determine the host, port, etc. for a filesystem.</description>
</property>
#mapred-site.xml
<property>
<name>mapred.job.tracker</name>
<value>localhost:54311</value>
<description>The host and port that the MapReduce job tracker runs
at. If "local", then jobs are run in-process as a single map
and reduce task.
</description>
</property>
#hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>1</value>
<description>Default block replication.
The actual number of replications can be specified when the file is created.
The default is used if replication is not specified in create time.
</description>
</property>
hadoop namenode -format
bin/start-all.sh
#test run info
jps
sudo netstat -plten | grep java
#demo run command:
hadoop jar hadoop-0.20.2-examples.jar wordcount input/data-file output/wc
bin/stop-all.sh
^^^^^^^^^ config ^^^^^^^^^^^^
output:
input: 3 files (took 32s)
hadoop@leibnitz-laptop:~/hadoop/hadoop-0.20.2$ hadoop jar hadoop-0.20.2-examples.jar wordcount input output/wordcount
11/02/24 23:45:58 INFO input.FileInputFormat: Total input paths to process : 3
11/02/24 23:45:58 INFO mapred.JobClient: Running job: job_201102242334_0001
11/02/24 23:45:59 INFO mapred.JobClient: map 0% reduce 0%
11/02/24 23:46:13 INFO mapred.JobClient: map 66% reduce 0%
11/02/24 23:46:19 INFO mapred.JobClient: map 100% reduce 0%
11/02/24 23:46:22 INFO mapred.JobClient: map 100% reduce 33%
11/02/24 23:46:28 INFO mapred.JobClient: map 100% reduce 100%
11/02/24 23:46:30 INFO mapred.JobClient: Job complete: job_201102242334_0001
11/02/24 23:46:30 INFO mapred.JobClient: Counters: 17
11/02/24 23:46:30 INFO mapred.JobClient: Job Counters
11/02/24 23:46:30 INFO mapred.JobClient: Launched reduce tasks=1
11/02/24 23:46:30 INFO mapred.JobClient: Launched map tasks=3
11/02/24 23:46:30 INFO mapred.JobClient: Data-local map tasks=3
11/02/24 23:46:30 INFO mapred.JobClient: FileSystemCounters
11/02/24 23:46:30 INFO mapred.JobClient: FILE_BYTES_READ=2214725
11/02/24 23:46:30 INFO mapred.JobClient: HDFS_BYTES_READ=3671479
11/02/24 23:46:30 INFO mapred.JobClient: FILE_BYTES_WRITTEN=3689100
11/02/24 23:46:30 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=880802
11/02/24 23:46:30 INFO mapred.JobClient: Map-Reduce Framework
11/02/24 23:46:30 INFO mapred.JobClient: Reduce input groups=82331
11/02/24 23:46:30 INFO mapred.JobClient: Combine output records=102317
11/02/24 23:46:30 INFO mapred.JobClient: Map input records=77931
11/02/24 23:46:30 INFO mapred.JobClient: Reduce shuffle bytes=1474279
11/02/24 23:46:30 INFO mapred.JobClient: Reduce output records=82331
11/02/24 23:46:30 INFO mapred.JobClient: Spilled Records=255947
11/02/24 23:46:30 INFO mapred.JobClient: Map output bytes=6076039
11/02/24 23:46:30 INFO mapred.JobClient: Combine input records=629167
11/02/24 23:46:30 INFO mapred.JobClient: Map output records=629167
11/02/24 23:46:30 INFO mapred.JobClient: Reduce input records=102317