example.java source code:
import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
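
// Note: this example uses the classic org.apache.hadoop.mapred API; newer
// Hadoop versions also provide org.apache.hadoop.mapreduce, but the classic
// API shown here still works.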
/**
 * Description: the classic WordCount example, explained by Felix
 * @author Hadoop Dev Group
 */
public class example
{
    /**
     * MapReduceBase: a convenience base class for Mapper and Reducer
     * implementations (its methods are empty no-op implementations).
     * Mapper interface: maps input key/value pairs to intermediate key/value pairs.
     * WritableComparable interface: classes implementing it can be compared
     * with one another; every class used as a key should implement it
     * (a minimal custom-key sketch follows the Map class below).
     * Reporter can be used to report the progress of the whole application;
     * it is not used in this example.
     */
    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable>
    {
        /**
         * LongWritable, IntWritable and Text are Hadoop classes that wrap the
         * corresponding Java types. They all implement WritableComparable, so
         * they can be serialized for data exchange in a distributed
         * environment; think of them as stand-ins for long, int and String.
         */
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();
        /**
         * The map method of the Mapper interface:
         * void map(K1 key, V1 value, OutputCollector<K2, V2> output, Reporter reporter)
         * maps a single input k/v pair to an intermediate k/v pair.
         * The output pair does not need to have the same types as the input
         * pair; an input pair may map to zero or more output pairs.
         * OutputCollector: collects the pairs emitted by Mappers and Reducers.
         * Its collect(k, v) method adds a (k, v) pair to the output.
         */
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException
        {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens())
            {
                // Emit (word, 1) for every token on the line.
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }
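
    /**
     * A minimal sketch of what "classes used as keys implement
     * WritableComparable" (mentioned above) means in practice. This class is
     * illustrative only and is not used by the job: a WritableComparable must
     * know how to serialize itself (write/readFields) and how to order itself
     * (compareTo).
     */
    public static class WordKey implements
            org.apache.hadoop.io.WritableComparable<WordKey>
    {
        private Text word = new Text();

        public void write(java.io.DataOutput out) throws IOException
        {
            word.write(out); // serialize the wrapped Text
        }

        public void readFields(java.io.DataInput in) throws IOException
        {
            word.readFields(in); // deserialize in the same order as write()
        }

        public int compareTo(WordKey other)
        {
            return word.compareTo(other.word); // keys sort lexicographically
        }
    }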
    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable>
    {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException
        {
            // Sum the counts emitted for this word by all mappers (and combiners).
            int sum = 0;
            while (values.hasNext())
            {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }
    public static void main(String[] args) throws Exception
    {
        /**
         * JobConf: the job configuration class for map/reduce; it describes
         * to the Hadoop framework how the map-reduce job should be executed.
         * Constructors include JobConf(), JobConf(Class exampleClass),
         * JobConf(Configuration conf), and others.
         */
        JobConf conf = new JobConf(example.class);
        conf.setJobName("wordcount");                 // set a user-defined job name

        conf.setOutputKeyClass(Text.class);           // set the key class for the job's output data
        conf.setOutputValueClass(IntWritable.class);  // set the value class for the job's output data

        conf.setMapperClass(Map.class);               // set the Mapper class for the job
        conf.setCombinerClass(Reduce.class);          // set the Combiner class; reusing Reduce works
                                                      // here because summing counts is associative
        conf.setReducerClass(Reduce.class);           // set the Reducer class for the job

        conf.setInputFormat(TextInputFormat.class);   // set the InputFormat implementation for the job
        conf.setOutputFormat(TextOutputFormat.class); // set the OutputFormat implementation for the job
        /**
         * InputFormat describes how the input of a map-reduce job is defined.
         * setInputPaths()/addInputPath(): set or extend the list of input
         * paths for the map-reduce job.
         * setOutputPath(): set the output directory for the map-reduce job.
         */
        FileInputFormat.addInputPath(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);                       // run the job and wait for it to finish
    }
}
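
To try the job, package the class into a jar and run it with the input and
output locations as arguments (a sketch; the jar name and HDFS paths below are
placeholders):

hadoop jar wordcount.jar example /user/hadoop/input /user/hadoop/output

JobClient.runJob() submits the job and blocks until it completes, printing
progress to the console; note that the output directory must not already
exist, or the job will fail on startup.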