The MapReduce Top-K Problem

Mappers

With the default mapper configuration, each input split is processed by one mapper.

In each map task we compute the top K records of that task's input split. A TreeMap is used to hold the current top K because it is cheap to update: for every record in the split we insert it into the TreeMap, and whenever the map grows beyond K entries we remove the entry with the smallest key (firstKey()). The TreeMap therefore behaves like a size-K min-heap that retains the K largest values seen so far; once the whole split has been read it holds the local top K of that split. One thing to note: in a typical mapper we call context.write (or output.collect in the old API) once per input record. Here we do not. Instead, we wait until the entire split has been processed and emit the results in cleanup(), the hook that the framework calls once after all map() calls of the task have finished.
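As a minimal, Hadoop-free sketch of this bookkeeping (the sample values and the K below are purely illustrative), a TreeMap keeps its keys sorted, so evicting firstKey() whenever the map grows past K entries leaves exactly the K largest values behind:

import java.util.TreeMap;

public class TopKSketch {
    public static void main(String[] args) {
        final int K = 3;                        // illustrative; the job below uses K = 100
        int[] data = {5, 1, 9, 7, 3, 8, 2};     // made-up sample values
        TreeMap<Integer, Integer> topK = new TreeMap<Integer, Integer>();
        for (int v : data) {
            topK.put(v, v);                     // insert the new record
            if (topK.size() > K)
                topK.remove(topK.firstKey());   // drop the smallest -> keep the K largest
        }
        System.out.println(topK.keySet());      // prints [7, 8, 9]
    }
}

Note that records whose keys compare equal overwrite each other in a TreeMap; the job code below has the same limitation, which is usually acceptable for this kind of ranking.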

Reducers

As explained above, only one reducer is used here: it merges the local top-K lists emitted by all of the mappers and picks the overall top K from them, which gives us the final result.
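The job below simply relies on the framework default of a single reduce task. If you want to make that assumption explicit, one optional extra line in run() would do it, for example:

job.setNumReduceTasks(1); // force a single reducer so the global top K is computed in one place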

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.TreeMap;

// Use MapReduce to find the K largest values in a massive data set
public class Top_k_new extends Configured implements Tool {

    public static class MapClass extends Mapper<LongWritable, Text, NullWritable, Text> {
        public static final int K = 100;
        // Sorted by key, so firstKey() is always the smallest value currently kept.
        private TreeMap<Integer, Text> fatcats = new TreeMap<Integer, Text>();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input lines are comma-separated; field 8 holds the number we rank by.
            String[] str = value.toString().split(",", -1);
            int temp = Integer.parseInt(str[8]);
            // Copy the Text: Hadoop reuses the value object across map() calls.
            fatcats.put(temp, new Text(value));
            if (fatcats.size() > K)
                fatcats.remove(fatcats.firstKey()); // evict the smallest, keep the K largest
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // Emit the local top K of this split once, after all records have been processed.
            for (Text text : fatcats.values()) {
                context.write(NullWritable.get(), text);
            }
        }
    }
    }

    public static class Reduce extends Reducer<NullWritable, Text, NullWritable, Text> {
        public static final int K = 100;
        private TreeMap<Integer, Text> fatcats = new TreeMap<Integer, Text>();

        public void reduce(NullWritable key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            for (Text val : values) {
                // Each value is an original comma-separated line emitted by a mapper,
                // so parse it the same way as in map().
                String[] v = val.toString().split(",", -1);
                int weight = Integer.parseInt(v[8]);
                // Copy the Text: the reduce iterator reuses the same object.
                fatcats.put(weight, new Text(val));
                if (fatcats.size() > K)
                    fatcats.remove(fatcats.firstKey());
            }
            for (Text text : fatcats.values())
                context.write(NullWritable.get(), text);
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        Job job = new Job(conf, "TopKNum");
        job.setJarByClass(Top_k_new.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setMapperClass(MapClass.class);
       // job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);
        // Let ToolRunner/main handle the exit code instead of calling System.exit here.
        return job.waitForCompletion(true) ? 0 : 1;
    }
    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new Top_k_new(), args);
        System.exit(res);
    }

}
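Assuming the class is packaged into a jar (the jar name and the input/output paths below are hypothetical), the job can then be launched like any other Hadoop tool:

hadoop jar topk.jar Top_k_new /path/to/input /path/to/output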

Reposted from: http://www.cnblogs.com/hengli/archive/2012/12/04/2801619.html
