The classic MapReduce example: wordcount

This example counts how often each word occurs in the input: the Mapper splits every line into words and emits a (word, 1) pair for each one, and the Reducer sums those 1s per word to produce the final counts.

Driver

package MapReducer.wordcount;



import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver: configures and submits the wordcount job.
 * @author HYT
 */
public class WordCountDriver {

    public static void main(String[] args) throws Exception {
        String inputPath = "/test_wordcount";
        String outPath = "/out1";                        // this directory must NOT already exist
        Job job = Job.getInstance();                     // get a Job instance
        job.setJarByClass(WordCountDriver.class);
        job.setJobName("wordcount");
        job.setMapperClass(WordCountMapper.class);       // the Mapper class
        job.setReducerClass(WordCountReducer.class);     // the Reducer class
        FileInputFormat.addInputPath(job, new Path(inputPath));    // HDFS input directory
        FileOutputFormat.setOutputPath(job, new Path(outPath));    // HDFS output directory of the job
        job.setMapOutputKeyClass(Text.class);            // key type of the map output
        job.setMapOutputValueClass(IntWritable.class);   // value type of the map output
        job.setOutputKeyClass(Text.class);               // key type of the reduce output
        job.setOutputValueClass(IntWritable.class);      // value type of the reduce output
        boolean completed = job.waitForCompletion(true); // submit and wait; true prints progress
        System.out.println(completed);
    }
}
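
As noted in the Driver, the job fails if the output directory already exists. A common convenience is to delete it before submitting. The helper below is a sketch of that idea using the standard Hadoop FileSystem API; the class and method names are made up here and are not part of the original code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: call deleteIfExists("/out1") from main() before submitting the job.
public class OutputDirCleaner {

    public static void deleteIfExists(String outPath) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // HDFS handle from the default configuration
        Path out = new Path(outPath);
        if (fs.exists(out)) {
            fs.delete(out, true);                            // true = delete recursively
        }
    }
}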

Mapper

package MapReducer.wordcount;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Map phase: split each input line into words and emit a (word, 1) pair per word.
 * @author HYT
 */
public class WordCountMapper extends Mapper<Object, Text, Text, IntWritable> {

    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();                    // value is one line of the input file
        if (StringUtils.isNotBlank(line)) {
            String[] words = line.split(",");              // split the line on commas
            if (words != null && words.length != 0) {
                for (String word : words) {
                    if (StringUtils.isNotBlank(word)) {
                        Text wordKey = new Text(word);
                        IntWritable one = new IntWritable(1);
                        context.write(wordKey, one);       // emit (word, 1)
                    }
                }
            }
        }
    }
}
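
To make the map-side behaviour concrete, here is a tiny standalone sketch (not part of the job) that applies the same comma-splitting logic to one made-up input line and prints the (word, 1) pairs the Mapper would emit via context.write:

// Standalone illustration only; the input line is a made-up example.
public class MapLogicDemo {

    public static void main(String[] args) {
        String line = "hello,world,hello";        // one input value (a line of text)
        for (String word : line.split(",")) {     // same split as WordCountMapper
            if (!word.trim().isEmpty()) {
                System.out.println(word + "\t1"); // stands in for context.write(new Text(word), new IntWritable(1))
            }
        }
        // Output:
        // hello   1
        // world   1
        // hello   1
    }
}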

Reducer

package MapReducer.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Reduce phase: sum the counts for each word and write the final (word, total) pair.
 * @author HYT
 */
public class WordCountReducer extends Reducer<Text,IntWritable,Text,IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable tmpNum : values) {        // all the 1s emitted for this word
            sum += tmpNum.get();
        }
        context.write(key, new IntWritable(sum));  // emit (word, total count)
    }
}
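
Between map and reduce, the framework's shuffle groups all values by key, so reduce() sees each word once together with all of its 1s. The standalone sketch below mimics that aggregation on made-up data:

import java.util.Arrays;
import java.util.List;

// Standalone illustration only; the grouped values are a made-up example of what
// the framework would pass to reduce() as Iterable<IntWritable> for the key "hello".
public class ReduceLogicDemo {

    public static void main(String[] args) {
        String key = "hello";
        List<Integer> values = Arrays.asList(1, 1); // two occurrences of "hello" from the map phase
        int sum = 0;
        for (int v : values) {
            sum += v;                               // same summation as WordCountReducer
        }
        System.out.println(key + "\t" + sum);       // prints "hello  2", i.e. context.write(key, new IntWritable(sum))
    }
}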
