Hadoop Programming: Word Frequency Sorting


Original blog: http://blog.csdn.net/jdh99. Author: jdh. Please credit the source when reposting.


Environment:

Host: Ubuntu 10.04

Hadoop version: 1.2.1

Development tool: gedit


Notes:

This builds on the word-count (word frequency) example from the tutorial.

The tutorial's version does not handle symbols such as "," and ";", so "soft" and "soft," are counted as two different words. The mapper is modified here to strip those characters; a standalone sketch of the problem follows.
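To see the problem in isolation, here is a minimal standalone sketch (not from the original post; plain Java, no MapReduce) showing that StringTokenizer splits only on whitespace, so trailing punctuation stays attached to the token, and how stripping non-alphanumeric characters fixes it:

import java.util.StringTokenizer;

public class TokenizeDemo {
    public static void main(String[] args) {
        StringTokenizer itr = new StringTokenizer("soft soft, soft;");
        while (itr.hasMoreTokens()) {
            String raw = itr.nextToken();
            // Equivalent to the mapper's letter/digit filter below.
            String clean = raw.replaceAll("[^a-zA-Z0-9]", "");
            System.out.println(raw + " -> " + clean);
        }
        // Output:
        // soft -> soft
        // soft, -> soft
        // soft; -> soft
    }
}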


Source code:


TokenizerMapper.java:

package com.bazhangkeji.hadoop;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
    IntWritable one = new IntWritable(1);
    Text word = new Text();

    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        StringBuffer strTemp = new StringBuffer();
        StringBuffer strOut = new StringBuffer();

        while (itr.hasMoreTokens()) {
            strTemp.setLength(0);
            strOut.setLength(0);
            strTemp.append(itr.nextToken());
            // Keep only letters and digits so "soft" and "soft," count as the same word.
            for (int i = 0; i < strTemp.length(); i++) {
                char c = strTemp.charAt(i);
                if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) {
                    strOut.append(c);
                }
            }
            // Skip tokens that were pure punctuation; otherwise an empty key is emitted.
            if (strOut.length() > 0) {
                word.set(strOut.toString());
                context.write(word, one);
            }
        }
    }
}
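The character-by-character loop above works, but the same filter can be written more compactly with a regular expression. A sketch of an equivalent map body (my variant, not from the original post):

// Inside map(): strip everything except letters and digits.
String token = itr.nextToken().replaceAll("[^a-zA-Z0-9]", "");
if (token.length() > 0) {
    word.set(token);
    context.write(word, one);
}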


IntSumReducer.java:

package com.bazhangkeji.hadoop;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum all the counts emitted for this word.
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        result.set(sum);
        context.write(key, result);
    }
}
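Since addition is associative and commutative, this reducer can double as a combiner, pre-summing counts on the map side and shrinking the data shuffled to the reducers. The driver below does not set one, but it would be a single extra line (an optional tweak, not in the original post):

// In the driver, after setReducerClass:
job.setCombinerClass(IntSumReducer.class);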


WordCount.java:

package com.bazhangkeji.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "wordcount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
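To run the job, compile the three classes against hadoop-core-1.2.1.jar, package them into a jar, and submit it with something like: hadoop jar wordcount.jar com.bazhangkeji.hadoop.WordCount <in> <out>. The jar name and the HDFS input/output paths here are placeholders; adjust them to your setup.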


References:

1. 《从0开始学习hadoop》 (Learning Hadoop from Scratch)

2. 《hadoop实战》 (Hadoop in Action, Chinese edition)


