Hadoop MapReduce Example: Inverted Index

1 Requirement
Given a large amount of text (documents, web pages), build a search index over it.
(1) Input data
aa.txt

hadoop spark
hadoop java
hadoop java
hadoop scala

bb.txt

hadoop spark
hadoop spark
spark scala
java scala

cc.txt

hadoop scala
hadoop spark
spark java
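
(2) Expected output

For every word, the final index lists each file that contains it together with the word's frequency in that file. Derived from the sample input above (the order of files within a line depends on shuffle order and may vary between runs):

hadoop	aa.txt-->4	bb.txt-->2	cc.txt-->2
java	aa.txt-->2	bb.txt-->1	cc.txt-->1
scala	aa.txt-->1	bb.txt-->2	cc.txt-->1
spark	aa.txt-->1	bb.txt-->3	cc.txt-->2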

2 Implementation
(1) First pass: the OneIndexMapper class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class OneIndexMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    Text k = new Text();
    String fileName;
    IntWritable v = new IntWritable(1);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {

        // setup() runs once per input split; resolve the source file name here.
        // The cast to FileSplit is valid for file-based input formats.
        FileSplit split = (FileSplit) context.getInputSplit();

        fileName = split.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        String line = value.toString();
        String[] fields = line.split(" ");

        // Emit (word--fileName, 1) for every word on the line
        for (String field : fields) {
            k.set(field + "--" + fileName);
            context.write(k, v);
        }
    }
}
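
The composite key word--fileName reduces the first pass to an ordinary word count over (word, file) pairs. For example, for the first line of aa.txt the mapper emits the following pairs (a hypothetical trace based on the sample input):

hadoop--aa.txt	1
spark--aa.txt	1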

(2) First pass: the OneIndexReducer class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class OneIndexReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {

        // Sum all occurrences of this word--fileName key
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }

        v.set(sum);
        context.write(key, v);
    }
}

(3) First pass: the OneIndexDriver class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OneIndexDriver {

    public static void main(String[] args) throws Exception {

        // Set the input and output paths to match the actual paths on your machine
        args = new String[] { "h:/input/inputoneindex", "h:/output" };

        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJarByClass(OneIndexDriver.class);

        job.setMapperClass(OneIndexMapper.class);
        job.setReducerClass(OneIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
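
Because OneIndexReducer only sums IntWritable counts, the operation is associative and the reducer can safely double as a combiner to cut shuffle traffic. An optional one-line addition to the driver above, using the standard Job API:

job.setCombinerClass(OneIndexReducer.class);

On the sample input, the first job's output file (h:/output/part-r-00000) should look like this:

hadoop--aa.txt	4
hadoop--bb.txt	2
hadoop--cc.txt	2
java--aa.txt	2
java--bb.txt	1
java--cc.txt	1
scala--aa.txt	1
scala--bb.txt	2
scala--cc.txt	1
spark--aa.txt	1
spark--bb.txt	3
spark--cc.txt	2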

3 Second pass
(1) Second pass: the TwoIndexMapper class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class TwoIndexMapper extends Mapper<LongWritable, Text, Text, Text> {

    Text k = new Text();
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // Each input line comes from the first job, e.g. "hadoop--aa.txt\t4"
        String line = value.toString();
        String[] fields = line.split("--");

        // fields[0] is the word; fields[1] keeps the tab and the count, e.g. "aa.txt\t4"
        k.set(fields[0]);
        v.set(fields[1]);

        context.write(k, v);
    }
}
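
If the intermediate data could contain lines without the "--" separator (stray blank lines, for instance), a defensive variant of map() can skip them. A minimal, hypothetical guard placed right after the split:

        // Hypothetical guard: ignore malformed lines from the first job's output
        if (fields.length < 2) {
            return;
        }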

(2) Second pass: the TwoIndexReducer class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class TwoIndexReducer extends Reducer<Text, Text, Text, Text> {

    Text v = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {

        // Input values for key "hadoop": aa.txt\t4, bb.txt\t2, cc.txt\t2
        // Desired output line:           hadoop  cc.txt-->2  bb.txt-->2  aa.txt-->4

        StringBuilder sb = new StringBuilder();

        // Rewrite each "file\tcount" value as "file-->count", joined by tabs
        for (Text value : values) {
            sb.append(value.toString().replace("\t", "-->")).append("\t");
        }

        v.set(sb.toString());

        context.write(key, v);
    }
}

(3) Second pass: the TwoIndexDriver class

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TwoIndexDriver {

    public static void main(String[] args) throws Exception {

        // Set these to real paths on your machine; the input directory must
        // contain the output of the first job (e.g. its part-r-00000 file)
        args = new String[] { "e:/input/inputtwoindex", "e:/output1" };

        Configuration config = new Configuration();
        Job job = Job.getInstance(config);

        job.setJarByClass(TwoIndexDriver.class);
        job.setMapperClass(TwoIndexMapper.class);
        job.setReducerClass(TwoIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
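
To avoid wiring the two jobs together by hand (running the first, then pointing the second at its output directory), both passes can also be chained in one driver. A minimal sketch, assuming the hypothetical class name IndexPipelineDriver and the local paths shown:

package com.jackyan.mapreduce.reverseindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexPipelineDriver {

    public static void main(String[] args) throws Exception {

        // Hypothetical paths: raw input, intermediate directory, final index
        Path input = new Path("h:/input/inputoneindex");
        Path intermediate = new Path("h:/output/oneindex");
        Path output = new Path("h:/output/twoindex");

        Configuration conf = new Configuration();

        // Job 1: count occurrences of each word--file pair
        Job job1 = Job.getInstance(conf, "one-index");
        job1.setJarByClass(OneIndexDriver.class);
        job1.setMapperClass(OneIndexMapper.class);
        job1.setReducerClass(OneIndexReducer.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job1, input);
        FileOutputFormat.setOutputPath(job1, intermediate);

        // Stop if the first pass fails
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }

        // Job 2: regroup by word and format the inverted index
        Job job2 = Job.getInstance(conf, "two-index");
        job2.setJarByClass(TwoIndexDriver.class);
        job2.setMapperClass(TwoIndexMapper.class);
        job2.setReducerClass(TwoIndexReducer.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job2, intermediate);
        FileOutputFormat.setOutputPath(job2, output);

        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}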
