Steps to implement a custom Combiner
(a) Define a class that extends Reducer and put the local-aggregation logic in its reduce() method:
public class WordcountCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // 1 Aggregate: sum the counts for this key
        int count = 0;
        for (IntWritable v : values) {
            count += v.get();
        }
        // 2 Write out the partial sum
        context.write(key, new IntWritable(count));
    }
}
(b) Register the Combiner in the Job driver class. Since the Combiner runs between map and reduce, its input and output key/value types must both match the Mapper's output types:
job.setCombinerClass(WordcountCombiner.class);
Hands-on example
1. Requirement
During the word count, perform local aggregation on the output of each MapTask to reduce the amount of data shuffled across the network, i.e. use the Combiner feature.
2. Expectation:
The Combiner takes in many input records and, after local merging, outputs far fewer records.
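For instance (illustrative values, not from an actual run), if one MapTask emits
(hello, 1) (hello, 1) (hello, 1) (world, 1)
the Combiner merges these locally into
(hello, 3) (world, 1)
so only two records instead of four are shuffled to the Reducer.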
Approach 1
1) Add a WordcountCombiner class that extends Reducer
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* @author :HadwinLing
* @version 1.0
 * @description: Combiner for the WordCount job
 * @date 2020/11/12 11:18 AM
*/
public class WordcountCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        // 1 Sum the counts for this key
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);
        // 2 Write out the partial sum
        context.write(key, v);
    }
}
2) Specify the Combiner in the WordCountDriver driver class
// Specify that a Combiner should be used, and which class provides its logic
job.setCombinerClass(WordcountCombiner.class);
Approach 2
1) Register WordCountReducer itself as the Combiner in the WordCountDriver driver class. Because the Combiner logic above is identical to the Reducer logic, no extra class is needed:
// Specify that a Combiner should be used, and which class provides its logic
job.setCombinerClass(WordCountReducer.class);
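Reusing the Reducer as the Combiner is only safe when the aggregation is associative and commutative, as summation is here. Averaging, for example, would break: avg(avg(1, 2), 3) = avg(1.5, 3) = 2.25, whereas avg(1, 2, 3) = 2.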
The complete code for Approach 1 follows.
Mapper class
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
// Map phase
// KEYIN: type of the input key (byte offset of the line)
// VALUEIN: type of the input value (the line itself)
// KEYOUT: type of the output key, e.g. (atguigu,1), (ss,1)
// VALUEOUT: type of the output value
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    Text k = new Text();
    IntWritable v = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Debug output: byte offset of the current line
        System.out.println(key.toString());
        // 1 Get one line of input
        String line = value.toString();
        // 2 Split the line into words
        String[] words = line.split(" ");
        // 3 Emit (word, 1) for each word
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
Combiner class
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* @author :HadwinLing
* @version 1.0
 * @description: Combiner for the WordCount job
 * @date 2020/11/12 11:18 AM
*/
public class WordcountCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        // 1 Sum the counts for this key
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);
        // 2 Write out the partial sum
        context.write(key, v);
    }
}
Reducer class
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
// KEYIN, VALUEIN: the key and value types output by the map phase
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
                          Context context) throws IOException, InterruptedException {
        // Input for one key, e.g.: (atguigu,1), (atguigu,1)
        int sum = 0;
        // 1 Sum the counts
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);
        // 2 Write out the result, e.g. (atguigu, 2)
        context.write(key, v);
    }
}
Driver class
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; remove this line to pass paths on the command line
        args = new String[]{"/home/hadoop/MyTmp/mapreduceTest.txt", "/home/hadoop/workplace/Result/mapreduceTestReduce.txt"};
        Configuration conf = new Configuration();
        // Enable map-side output compression
        conf.setBoolean("mapreduce.map.output.compress", true);
        // Set the map-side output compression codec
        conf.setClass("mapreduce.map.output.compress.codec", BZip2Codec.class, CompressionCodec.class);
        // 1 Get the Job instance
        Job job = Job.getInstance(conf);
        // 2 Set the jar location via the driver class
        job.setJarByClass(WordCountDriver.class);
        // 3 Associate the Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4 Set the key and value types of the Mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5 Set the key and value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // If no InputFormat is set, TextInputFormat is used by default
        // job.setInputFormatClass(CombineTextInputFormat.class);
        // Set the virtual-storage split maximum to 4 MB
        // CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);
        // Set the virtual-storage split maximum to 20 MB
        // CombineTextInputFormat.setMaxInputSplitSize(job, 20971520);
        // job.setNumReduceTasks(2); // number of reduce tasks (partitions)
        job.setCombinerClass(WordcountCombiner.class); // Approach 1: specify WordcountCombiner in the driver
        // job.setCombinerClass(WordCountReducer.class); // Approach 2: register WordCountReducer as the Combiner
        // Enable compression of the reduce-side output
        FileOutputFormat.setCompressOutput(job, true);
        // Set the output compression codec
        // FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);
        FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7 Submit the job and wait for completion
        // job.submit();
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
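To verify that the Combiner is actually taking effect, one option is to read Hadoop's built-in task counters after the job finishes: with the Combiner enabled, combine input records should far exceed combine output records. A minimal sketch (added here for illustration, not part of the original code):
// Additional imports needed at the top of WordCountDriver:
// import org.apache.hadoop.mapreduce.Counters;
// import org.apache.hadoop.mapreduce.TaskCounter;

// After job.waitForCompletion(true) returns, before System.exit:
Counters counters = job.getCounters();
long combineIn = counters.findCounter(TaskCounter.COMBINE_INPUT_RECORDS).getValue();
long combineOut = counters.findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS).getValue();
System.out.println("Combine input records  = " + combineIn);
System.out.println("Combine output records = " + combineOut);
Since waitForCompletion(true) runs in verbose mode, these counters also appear in the console output of the job itself, so checking the log is an equally valid way to confirm the effect.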