Hadoop: Counting Words with MapReduce

Counting Words with MapReduce

Requirement: count the number of occurrences of each word, and display the result as: word, word length, word count.
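For illustration, suppose a hypothetical input file count.txt contains the following two lines:

hadoop spark hbase
hadoop hadoop spark

The expected three-column result would then be (tab-separated):

hadoop	6	3
hbase	5	1
spark	5	2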

Analysis: MapReduce can only transfer data as <Key,Value> pairs, i.e. two columns at a time. To transfer three columns, we can therefore take one of two approaches: string concatenation, or a custom data type (an encapsulating JavaBean).

Table of Contents

  • Counting Words with MapReduce
    • 1. String Concatenation
    • 2. Custom Data Type (Encapsulating JavaBean)
    • 3. Implementing the Comparator Interface on the Custom Data Type
    • 4. Custom Sorter

1. String Concatenation

We can concatenate each word with its corresponding length into a single column for transmission.
Code implementation:

package com.miao.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;

/**
 * @ClassName WordConcat
 * @Description Count the occurrences of each word, concatenating each word with its length
 * @Date 2021-04-27 20:08:50
 * @Create By     Miao
 */
public class WordConcat extends Configured implements Tool {
    public int run(String[] args) throws Exception {

        // Build the Job
        Job job = Job.getInstance(this.getConf(),"wordConcat");
        job.setJarByClass(WordConcat.class);

        // Configure the Job
        job.setInputFormatClass(TextInputFormat.class);
        // Specify the input source
        TextInputFormat.setInputPaths(job,new Path("D:\\Study\\idea\\MavenProject\\count.txt"));

        job.setMapperClass(WCMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setReducerClass(WCReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        // Specify the output path, deleting it first if it already exists
        Path outputPath = new Path("D:\\Study\\idea\\MavenProject\\output\\five");
        FileSystem fs = FileSystem.get(this.getConf());
        if(fs.exists(outputPath)){
            fs.delete(outputPath,true);
        }
        TextOutputFormat.setOutputPath(job,outputPath);
        // Submit the Job
        return job.waitForCompletion(true) ? 0 : -1;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int status = ToolRunner.run(conf, new WordConcat(), args);
        System.exit(status);
    }


    public static class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        // Output key (K2): the concatenated "word\tlength"
        Text outputKey = new Text();
        // Output value (V2): a count of 1 for each occurrence
        IntWritable outputValue = new IntWritable(1);

        /**
         * map is called once for each input KV pair
         * @param key: the byte offset of the line
         * @param value: the content of the line
         * @param context
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split the line into words (assuming whitespace-delimited input)
            String[] words = value.toString().split("\\s+");
            for (String word : words) {
                // Concatenate the word with its length as the output key
                this.outputKey.set(word + "\t" + word.length());
                context.write(this.outputKey, this.outputValue);
            }
        }
    }

    public static class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        // Output value: the total count for each "word\tlength" key
        IntWritable outputValue = new IntWritable();

        /**
         * reduce is called once for each unique key
         * @param key: the concatenated "word\tlength"
         * @param values: the counts collected for this key
         * @param context
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            this.outputValue.set(sum);
            context.write(key, this.outputValue);
        }
    }
}
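A note on the output format: TextOutputFormat writes each record as the key, a tab, and then the value (the separator can be changed via mapreduce.output.textoutputformat.separator), so the concatenated key "word\tlength" together with the summed count yields exactly the required three tab-separated columns, as in the hypothetical sample shown earlier.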
