3. Unit Testing MapReduce with MRUnit

1. Add the dependencies to pom.xml


<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.yinbodotcc</groupId>
  <artifactId>countwords</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>countwords</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <!-- Property names below are inferred; the original tags were lost in formatting -->
    <hadoop.version>3.0.3</hadoop.version>
    <hive.version>0.13.1</hive.version>
    <hbase.version>0.98.6-hadoop2</hbase.version>
  </properties>

  <dependencies>
    <!-- MRUnit (hadoop2 classifier) for MapReduce unit testing -->
    <dependency>
      <groupId>org.apache.mrunit</groupId>
      <artifactId>mrunit</artifactId>
      <version>1.1.0</version>
      <classifier>hadoop2</classifier>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>3.0.3</version>
    </dependency>

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.5.1</version>
    </dependency>

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.5.0</version>
    </dependency>
  </dependencies>
</project>
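The test class below uses JUnit 4 annotations (@Before, @Test), so a JUnit dependency is also required if it is not already on the classpath. A minimal sketch, assuming JUnit 4.x (the version shown is an assumption):

    <!-- assumption: JUnit 4.x for the @Before/@Test annotations used below -->
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
      <scope>test</scope>
    </dependency>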

2. The Java files

2.1 The test case

package chapter3;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.apache.hadoop.mrunit.types.Pair;
import org.junit.Before;
import org.junit.Test;

public class WordCountWithToolsTest {

    MapDriver<Object, Text, Text, IntWritable> mapDriver;
    ReduceDriver<Text, IntWritable, Text, IntWritable> reduceDriver;
    MapReduceDriver<Object, Text, Text, IntWritable, Text, IntWritable> mapReduceDriver;

    @Before
    public void setUp() {
        WordCountWithTools.TokenizerMapper mapper = new WordCountWithTools.TokenizerMapper();
        WordCountWithTools.IntSumReducer reducer = new WordCountWithTools.IntSumReducer();
        mapDriver = MapDriver.newMapDriver(mapper);
        reduceDriver = ReduceDriver.newReduceDriver(reducer);
        mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
    }
    
    @Test
    public void testWordCountMapper() throws IOException {
        IntWritable inKey = new IntWritable(0);
        mapDriver.withInput(inKey, new Text("Test Quick"));
        mapDriver.withInput(inKey, new Text("Test Quick"));
        
        mapDriver.withOutput(new Text("Test"), new IntWritable(1));
        mapDriver.withOutput(new Text("Quick"), new IntWritable(1));
        mapDriver.withOutput(new Text("Test"), new IntWritable(1));
        mapDriver.withOutput(new Text("Quick"), new IntWritable(1));
        mapDriver.runTest();
        
    }   
    @Test
    public void testWordCountReduce() throws IOException {
        
        ArrayList<IntWritable> reduceInList = new ArrayList<IntWritable>();
        reduceInList.add(new IntWritable(1));
        reduceInList.add(new IntWritable(2));

        reduceDriver.withInput(new Text("Quick"), reduceInList);
        reduceDriver.withInput(new Text("Test"), reduceInList);
        
        ArrayList<Pair<Text, IntWritable>> reduceOutList = new ArrayList<Pair<Text, IntWritable>>();
        reduceOutList.add(new Pair<Text, IntWritable>(new Text("Quick"), new IntWritable(3)));
        reduceOutList.add(new Pair<Text, IntWritable>(new Text("Test"), new IntWritable(3)));
        
        reduceDriver.withAllOutput(reduceOutList);
        reduceDriver.runTest();
    }
    
    @Test
    public void testWordCountMapReduce() throws IOException {
        
        IntWritable inKey = new IntWritable(0);
        mapReduceDriver.withInput(inKey, new Text("Test Quick"));
        mapReduceDriver.withInput(inKey, new Text("Test Quick"));
        
        ArrayList<Pair<Text, IntWritable>> reduceOutList = new ArrayList<Pair<Text, IntWritable>>();
        reduceOutList.add(new Pair<Text, IntWritable>(new Text("Quick"), new IntWritable(2)));
        reduceOutList.add(new Pair<Text, IntWritable>(new Text("Test"), new IntWritable(2)));

        mapReduceDriver.withAllOutput(reduceOutList);
        mapReduceDriver.runTest();
    }
}
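runTest() makes the driver assert the expected records itself. If you prefer explicit JUnit assertions, the MRUnit drivers also provide run(), which returns the emitted (key, value) pairs. A minimal sketch, assuming MRUnit 1.1.0; it would go inside the test class above and needs two extra imports, java.util.List and static org.junit.Assert.assertEquals (the method name is illustrative):

    @Test
    public void testWordCountMapperWithRun() throws IOException {
        mapDriver.withInput(new IntWritable(0), new Text("Test Quick"));
        // run() returns the emitted pairs instead of asserting them internally
        List<Pair<Text, IntWritable>> output = mapDriver.run();
        assertEquals(2, output.size());
        assertEquals(new Text("Test"), output.get(0).getFirst());
        assertEquals(new IntWritable(1), output.get(0).getSecond());
    }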

2.2 The MapReduce class under test

package chapter3;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountWithTools extends Configured implements Tool {
    /**
     * The mapper extends the org.apache.hadoop.mapreduce.Mapper class. When Hadoop runs,
     * it passes each line of the input files to the mapper. The map function tokenizes
     * the line and, for each token (word), emits (word, 1) as the output.
     */
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    /**
     * The reduce function receives all the values that have the same key as the input,
     * and outputs the key and the number of occurrences of the key as the output.
     */
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public int run(String[] args) throws Exception {
        if (args.length < 2) {
            // Placeholders restored; the original argument tags were lost in formatting
            System.out.println("chapter3.WordCountWithTools <input-path> <output-path>");
            ToolRunner.printGenericCommandUsage(System.out);
            System.out.println("");
            return -1;
        }
        String inputPath = args[0];
        String outPath = args[1];
        Job job = prepareJob(inputPath, outPath, getConf());
        job.waitForCompletion(true);
        return 0;
    }

    public Job prepareJob(String inputPath, String outPath, Configuration conf)
            throws IOException {
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCountWithTools.class);
        job.setMapperClass(TokenizerMapper.class);
        // Uncomment the following to use the reducer as a combiner:
        // job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outPath));
        return job;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new WordCountWithTools(), args);
        System.exit(res);
    }
}
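Once the project is packaged (mvn package), the job itself can be submitted in the usual way, for example hadoop jar countwords-0.0.1-SNAPSHOT.jar chapter3.WordCountWithTools <input-path> <output-path> (the jar name here is inferred from the POM above).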

3. Running the test case (Eclipse example)

In Eclipse, right-click the test class and choose Run As → JUnit Test.
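Alternatively, the tests can be run from the command line through Maven's Surefire plugin, for example mvn test to run the whole suite, or mvn test -Dtest=WordCountWithToolsTest to run just this class.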



Another example:

http://www.cnblogs.com/zimo-jing/p/8647113.html
https://www.cnblogs.com/zimo-jing/p/8650588.html
