Solving the Top-K Problem in Hadoop

1. Problem description: in MapReduce, output the k most frequently occurring words.

   Input: <word, its frequency>

   Desired output: the 100 most frequent words (the example below uses k = 3 for illustration)

   For example, given the input:

   hello  3

   word  4

   a   4

   moring  5

  goog  10

  bye  5

 (Note: the two fields are separated by '\t'.)

  To get the 3 most frequent words, the expected result is:

 goog  10

 moring  5

 bye  5

2. Solution

   This can be solved with one map and one reduce. The mapper emits <frequency, word> pairs under a key type that sorts in descending order, all map output is sent to a single reducer, and the reducer writes out the 3 most frequent words (the actual output happens in the reducer's cleanup() method, for reasons explained below).

For details, see http://www.cnblogs.com/hengli/archive/2012/12/04/2801619.html (reference [2] below).
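
Note that this approach shuffles every map output record to the single reducer, which can be expensive on large inputs. A common refinement, along the lines of reference [1] below, is to keep a local top-k inside each map task and emit it only in the mapper's cleanup(), so each mapper sends at most k records across the network. A sketch of that variant (an illustration, not code from the referenced post; the TreeMap keeps only one word per frequency, so ties would need a composite key):

package sort;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Hypothetical mapper-side top-k variant: each map task keeps its own
 * k most frequent words and emits them only once, in cleanup().
 */
public class LocalTopKMapper extends Mapper<LongWritable, Text, DesIntWritable, Text> {

	private static final int K = 3;
	// sorted ascending by frequency, so firstKey() is the smallest kept entry
	private TreeMap<Integer, String> topK = new TreeMap<Integer, String>();

	public void map(LongWritable key, Text value, Context context)
	{
		String[] temp = value.toString().split("\t");
		if (temp.length == 2)
		{
			topK.put(Integer.parseInt(temp[1]), temp[0]);
			if (topK.size() > K)
				topK.remove(topK.firstKey());   // evict the lowest frequency
		}
	}

	protected void cleanup(Context context) throws IOException, InterruptedException
	{
		// emit this task's local top k; the shuffle merges and re-sorts them
		for (Map.Entry<Integer, String> e : topK.entrySet())
			context.write(new DesIntWritable(e.getKey()), new Text(e.getValue()));
	}
}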

 

3. Source code

(1) The wordSort driver class

package sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.*;

public class wordSort {

	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		Configuration conf = new Configuration();
		String[] otherArgs = new GenericOptionsParser(conf,args).getRemainingArgs();
		if(otherArgs.length != 2)
		{
			System.err.println("Usage: wordsort <in> <out>");
			System.exit(2);
		}
		Job job2 = new Job(conf, "word sort");

		job2.setJarByClass(wordSort.class);
		job2.setMapperClass(SortMapper.class);
		job2.setReducerClass(SortReducer.class);

		// map output: <frequency (sorted descending), word>
		job2.setMapOutputKeyClass(DesIntWritable.class);
		job2.setMapOutputValueClass(Text.class);

		job2.setNumReduceTasks(1);   // a single reducer sees all pairs in order

		// final output: one "frequency \t word" line per record
		job2.setOutputKeyClass(NullWritable.class);
		job2.setOutputValueClass(Text.class);
		FileInputFormat.addInputPath(job2, new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job2, new Path(otherArgs[1]));
		System.out.println("job2 start.....");
		job2.waitForCompletion(true);
		System.out.println("job2 done.");
		
	}

}
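
Assuming the classes below are packaged into a jar named wordsort.jar (the jar name and paths here are just examples), the job would be submitted with hadoop jar wordsort.jar sort.wordSort <in> <out>, and the top-3 list would appear in the part-r-00000 file under the output directory.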

 (2) The SortMapper class

package sort;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Swaps each "word \t frequency" line into a <frequency, word> pair;
 * the DesIntWritable key makes the shuffle sort by frequency, descending.
 * @author hx
 */
public class SortMapper extends Mapper<LongWritable,Text,DesIntWritable,Text> {

	private DesIntWritable result = new DesIntWritable();
	private Text word = new Text();
	
	@Override
	public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
	{
		// each input line is "word \t frequency"
		String[] temp = value.toString().split("\t");

		if (temp.length == 2)
		{
			result.set(Integer.parseInt(temp[1]));   // frequency becomes the key
			word.set(temp[0]);                       // word stays as the value
			context.write(result, word);
		}
	}

}
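
One caveat: Integer.parseInt() throws NumberFormatException on a malformed count field, which would fail the whole task. If the input may contain bad records, a more defensive map() could skip them instead (a sketch, not part of the original code):

	public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
	{
		String[] temp = value.toString().split("\t");
		if (temp.length == 2)
		{
			try {
				result.set(Integer.parseInt(temp[1].trim()));
				word.set(temp[0]);
				context.write(result, word);
			} catch (NumberFormatException e) {
				// malformed frequency field; skip this record
			}
		}
	}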

 (3) The SortReducer class

package sort;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Collects the top 3 words and their frequencies and writes them in cleanup().
 * @author hx
 */
public class SortReducer extends Reducer<DesIntWritable, Text, NullWritable, Text> {
	
	public static final int k = 3;
	private List<Text> words = new ArrayList<Text>();

	@Override
	public void reduce(DesIntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException
	{
		// keys arrive in descending frequency order, so the first k values
		// seen across all reduce() calls are the global top k
		for (Text val : values)
		{
			if (words.size() < k)
			{
				Text result = new Text();
				result.set(key + "\t" + val.toString());
				words.add(result);
			}
		}
	}

	@Override
	protected void cleanup(Context context) throws IOException, InterruptedException
	{
		// reduce() runs once per distinct frequency; the top-k list is only
		// complete after the last key, hence the output happens here
		for (Text text : words)
		{
			context.write(NullWritable.get(), text);
		}
	}
	
}
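
Because the keys reach the reducer already sorted, reduce() can also stop consuming values as soon as the list is full. An early-exit variant of the loop (a sketch, not from the original post):

	public void reduce(DesIntWritable key, Iterable<Text> values, Context context)
	{
		for (Text val : values)
		{
			if (words.size() == k)
				return;   // the top k are already collected; skip the rest
			words.add(new Text(key + "\t" + val.toString()));
		}
	}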

 (4) The DesIntWritable class

package sort;

import java.io.*;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

/** A WritableComparable for ints that sorts in descending order. */
public class DesIntWritable implements WritableComparable {
  private int value;

  public DesIntWritable() {}

  public DesIntWritable(int value) { set(value); }

  /** Set the value of this DesIntWritable. */
  public void set(int value) { this.value = value; }

  /** Return the value of this DesIntWritable. */
  public int get() { return value; }

  public void readFields(DataInput in) throws IOException {
    value = in.readInt();
  }

  public void write(DataOutput out) throws IOException {
    out.writeInt(value);
  }

  /** Returns true iff <code>o</code> is a DesIntWritable with the same value. */
  public boolean equals(Object o) {
    if (!(o instanceof DesIntWritable))
      return false;
    DesIntWritable other = (DesIntWritable)o;
    return this.value == other.value;
  }

  public int hashCode() {
    return value;
  }

  /** Compares two DesIntWritables in descending order. */
  public int compareTo(Object o) {
    int thisValue = this.value;
    int thatValue = ((DesIntWritable)o).value;
    // reversed relative to IntWritable so that larger values sort first,
    // consistent with the raw Comparator registered below
    return (thisValue > thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
  }

  public String toString() {
    return Integer.toString(value);
  }

  /** A raw Comparator for DesIntWritable; compares serialized ints in descending order. */
  public static class Comparator extends WritableComparator {
    public Comparator() {
      super(DesIntWritable.class);
    }

    public int compare(byte[] b1, int s1, int l1,
                       byte[] b2, int s2, int l2) {
      int thisValue = readInt(b1, s1);
      int thatValue = readInt(b2, s2);
      return (thisValue>thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
    }
  }

  static {                                        // register this comparator
    WritableComparator.define(DesIntWritable.class, new Comparator());
  }
}
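
A note on the design: instead of defining a whole new key type, the same descending sort can be obtained by keeping IntWritable as the map output key and reversing the sort order with Job.setSortComparatorClass(). A minimal sketch of such a comparator (an illustration, not code from the original post):

package sort;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

/** Hypothetical comparator that sorts IntWritable keys in descending order. */
public class DescendingIntComparator extends WritableComparator {

	public DescendingIntComparator() {
		super(IntWritable.class, true);   // true: instantiate keys for compare()
	}

	@SuppressWarnings("rawtypes")
	@Override
	public int compare(WritableComparable a, WritableComparable b) {
		// IntWritable.compareTo() is ascending; negate it for descending order
		return -a.compareTo(b);
	}
}

In the driver this would pair with job2.setMapOutputKeyClass(IntWritable.class) and job2.setSortComparatorClass(DescendingIntComparator.class), with SortMapper and SortReducer changed to use IntWritable keys.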

 

References:

[1] http://www.greenplum.com/blog/topics/hadoop/how-hadoop-mapreduce-can-transform-how-you-build-top-ten-lists

[2] http://www.cnblogs.com/hengli/archive/2012/12/04/2801619.html
