Hadoop from Beginner to Expert 32: Advanced MapReduce Features: the Combiner

1. What Is a Combiner?

In a MapReduce job, every Mapper may produce a large amount of output that has to be shipped to the Reducers. This puts heavy pressure on network bandwidth and on the Reducer side, and in severe cases it limits the computing capacity of the whole Hadoop cluster. The Combiner exists precisely to cut down the amount of data transferred between the Mappers and the Reducers, and adding a Combiner must never change the final result of the job.

MapReduce allows the user to perform a local merge on the Mapper output; this merge step is the Combiner. Its purpose is to shrink the Mapper output, saving network bandwidth and reducing the load on the Reducers. At its core, the Combiner merges values for the same key locally. In most cases the Combiner performs the same logic as the Reducer; it acts as a local Reducer, so the Reducer class is often reused directly as the Combiner. For example, if a Mapper emits (hello,1), (hello,1), (world,1), a summing Combiner collapses this to (hello,2), (world,1) before the data is shuffled to the Reducer.

Without a Combiner, the Mapper output is the Reducer input. With a Combiner inserted between them, the Mapper output becomes the Combiner input, and the Combiner output becomes the Reducer input. Because the Combiner is pluggable (the job must produce the same result whether or not it runs), its input and output key/value types must both match the Mapper's output types, which in turn are the Reducer's input types. In the WordCount example below, the Mapper emits <Text, LongWritable> and WordCountReducer both consumes and emits <Text, LongWritable>, which is exactly why it can double as the Combiner.

Not every computation is suited to a Combiner. Only operations whose partial results can be merged safely, that is, operations that are associative and commutative such as summation and taking a maximum or minimum, may set a Combiner. Operations such as the median or a plain average cannot, because merging partial medians or partial averages does not yield the correct overall result (see the sketch after this paragraph for how averaging can be restructured so that a Combiner becomes applicable).
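
A minimal sketch of such a restructuring (hypothetical code, not part of this tutorial's examples; the demo.avg package, the class names, and the single "avg" key are assumptions made for illustration): instead of emitting averages, the Mapper emits partial (sum, count) pairs, the Combiner only merges the pairs, and the division happens exactly once in the Reducer. Because merging (sum, count) pairs is associative and commutative, adding or removing the Combiner cannot change the result.

//AverageWithCombiner.java (illustrative sketch)
package demo.avg;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
public class AverageWithCombiner {
    //Mapper: each input line holds one number; emit it under a single key as the pair "number,1".
    public static class AvgMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key1, Text value1, Context context) throws IOException, InterruptedException {
            long n = Long.parseLong(value1.toString().trim());
            context.write(new Text("avg"), new Text(n + ",1"));
        }
    }
    //Combiner: merge (sum, count) pairs locally; note that it does NOT divide.
    public static class AvgCombiner extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text k2, Iterable<Text> v2, Context context) throws IOException, InterruptedException {
            long sum = 0, count = 0;
            for (Text v : v2) {
                String[] parts = v.toString().split(",");
                sum += Long.parseLong(parts[0]);
                count += Long.parseLong(parts[1]);
            }
            context.write(k2, new Text(sum + "," + count));
        }
    }
    //Reducer: merge the remaining pairs and perform the division exactly once.
    public static class AvgReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text k3, Iterable<Text> v3, Context context) throws IOException, InterruptedException {
            long sum = 0, count = 0;
            for (Text v : v3) {
                String[] parts = v.toString().split(",");
                sum += Long.parseLong(parts[0]);
                count += Long.parseLong(parts[1]);
            }
            context.write(k3, new Text(String.valueOf((double) sum / count)));
        }
    }
}

In the driver, the assumed configuration would be job.setMapperClass(AvgMapper.class), job.setCombinerClass(AvgCombiner.class), and job.setReducerClass(AvgReducer.class), with Text as both the map output key and value type.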

2. Using a Combiner in a Program

Example 1 (summation): rewrite the WordCount program, adding a Combiner between the Mapper and the Reducer.

//WordCountMapper.java
package demo.wc;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key1, Text value1, Context context) throws IOException, InterruptedException {
        String str = value1.toString();
        String[] words = str.split(" ");
        for(String w:words){
            context.write(new Text(w), new LongWritable(1));
        }
    }
}
//WordCountReducer.java
package demo.wc;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text k3, Iterable<LongWritable> v3, Context context) throws IOException, InterruptedException {
        long total = 0;
        for(LongWritable v:v3){
            total = total + v.get();
        }
        context.write(k3, new LongWritable(total));
    }
}
//WordCountMain.java
package demo.wc;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCountMain {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(WordCountMain.class);
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class); 
        job.setMapOutputValueClass(LongWritable.class); 
        //Specify the Combiner for the job: the Reducer is reused directly as the Combiner, which is safe because summation is associative and commutative and the Reducer's input and output types are identical.
        job.setCombinerClass(WordCountReducer.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class); 
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}

Package and run the program

  1. Package the demo.wc package into wc.jar, specifying WordCountMain as the main class
  2. Upload wc.jar to the server as /root/input/wc.jar
  3. Prepare the test data on HDFS: /input/data.txt
  4. Run the program: # hadoop jar /root/input/wc.jar /input/data.txt /output/wc
  5. List the output directory: # hdfs dfs -ls /output/wc
  6. View the result: # hdfs dfs -cat /output/wc/part-r-00000

# hdfs dfs -cat /input/data.txt
I love Beijing
I love China
Beijing is the capital of China

# hadoop jar /root/input/wc.jar /input/data.txt /output/wc
......
18/11/07 23:30:13 INFO mapreduce.Job: map 0% reduce 0%
18/11/07 23:30:17 INFO mapreduce.Job: map 100% reduce 0%
18/11/07 23:30:21 INFO mapreduce.Job: map 100% reduce 100%
18/11/07 23:30:22 INFO mapreduce.Job: Job job_1540913287698_0001 completed successfully
......
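
Although the detailed counters are elided above, the job's "Map-Reduce Framework" counter group includes "Combine input records" and "Combine output records"; checking that these are non-zero is a quick way to confirm that the Combiner actually ran and to see by how much it shrank the Mapper output.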

# hdfs dfs -ls /output/wc
Found 2 items
-rw-r--r-- 1 root supergroup 0 2018-11-07 23:30 /output/wc/_SUCCESS
-rw-r--r-- 1 root supergroup 55 2018-11-07 23:30 /output/wc/part-r-00000

# hdfs dfs -cat /output/wc/part-r-00000
Beijing 2
China 2
I 2
capital 1
is 1
love 2
of 1
the 1

Example 2 (maximum/minimum): use MapReduce to find the maximum (or minimum) of N numbers, with a Combiner added.

//MaxValueMapper.java
package demo.max;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class MaxValueMapper extends Mapper<LongWritable, Text, LongWritable, NullWritable> {
  private long max = Long.MIN_VALUE;
  @Override
  protected void map(LongWritable key1, Text value1, Context context) throws IOException, InterruptedException {
    String line = value1.toString();
    long tmp = Long.parseLong(line);
    if (tmp > max) {
      max = tmp;
    }
  }

  //cleanup() is called once after all map() calls for this task have finished; emit only the local maximum.
  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    context.write(new LongWritable(max), NullWritable.get());
  }
}
//MaxValueReducer.java
package demo.max;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;
public class MaxValueReducer extends Reducer<LongWritable, NullWritable, LongWritable, NullWritable> {
  private long max = Long.MIN_VALUE;
  @Override
  protected void reduce(LongWritable key3, Iterable<NullWritable> value3, Context context) throws IOException, InterruptedException {
    if (key3.get() > max) {
      max = key3.get();
    }
  }

  //cleanup() is called once after all reduce() calls for this task have finished; emit the overall maximum.
  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    context.write(new LongWritable(max), NullWritable.get());
  }
}
//MaxValueMain.java
package demo.max; 
import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.LongWritable; 
import org.apache.hadoop.io.NullWritable; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 
public class MaxValueMain { 
  public static void main(String[] args) throws Exception { 
    Job job = Job.getInstance(new Configuration()); 
    job.setJarByClass(MaxValueMain.class); 
    job.setMapperClass(MaxValueMapper.class); 
    job.setMapOutputKeyClass(LongWritable.class); 
    job.setMapOutputValueClass(NullWritable.class); 
    //Specify the Combiner for the job: the Reducer is reused directly as the Combiner, since taking a maximum is associative and commutative.
    job.setCombinerClass(MaxValueReducer.class); 
    job.setReducerClass(MaxValueReducer.class); 
    job.setOutputKeyClass(LongWritable.class); 
    job.setOutputValueClass(NullWritable.class); 
    FileInputFormat.setInputPaths(job, new Path(args[0])); 
    FileOutputFormat.setOutputPath(job, new Path(args[1])); 
    job.waitForCompletion(true); 
  } 
}
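
Note the pattern used here: both MaxValueMapper and MaxValueReducer only update a running maximum inside map()/reduce() and emit a single record from cleanup(), so each task forwards just one local maximum instead of every input value. Reusing MaxValueReducer as the Combiner is safe because taking a maximum is associative and commutative, and the class's input and output types are identical (a LongWritable key with a NullWritable value).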

Package and run the program

  1. Package the demo.max package into max.jar, specifying MaxValueMain as the main class
  2. Upload max.jar to the server as /root/input/max.jar
  3. Prepare the test data on HDFS: /input/numbers.txt
  4. Run the program: # hadoop jar /root/input/max.jar /input/numbers.txt /output/max
  5. List the output directory: # hdfs dfs -ls /output/max
  6. View the result: # hdfs dfs -cat /output/max/part-r-00000

# hdfs dfs -cat /input/numbers.txt
2
3
1
4
8
10
5
7
6
9

# hadoop jar /root/input/max.jar /input/numbers.txt /output/max
......
18/11/08 00:56:13 INFO mapreduce.Job: map 0% reduce 0%
18/11/08 00:56:17 INFO mapreduce.Job: map 100% reduce 0%
18/11/08 00:56:21 INFO mapreduce.Job: map 100% reduce 100%
18/11/08 00:56:22 INFO mapreduce.Job: Job job_1540913287563_0003 completed successfully
......

# hdfs dfs -ls /output/max
Found 2 items
-rw-r--r-- 1 root supergroup 0 2018-11-08 00:56 /output/max/_SUCCESS
-rw-r--r-- 1 root supergroup 2 2018-11-08 00:56 /output/max/part-r-00000

# hdfs dfs -cat /output/max/part-r-00000
10
