MapReduce Programming Example: Join

Design approach:

Student information table, student_info.txt (fields are tab-separated):
Jenny 00001
Hardy 00002
Bradley 00003
Student course-selection table, student_class_info.txt:
00001 Chinese
00001 Math
00002 Music
00002 Math
00003 Physics

The expected result after the join:
Jenny Chinese
Jenny Math
Hardy Music
Hardy Math
Bradley Physics
Approach:
In the map phase, read both student_info.txt and student_class_info.txt, tag each record with a flag identifying its source file, and emit the join field (the student ID) as the map output key. In the reduce phase, all records sharing a student ID arrive together, and the reducer computes the Cartesian product of the left-side and right-side values, as illustrated below.
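For instance, all records that share the join key 00001 reach the same reduce call as tagged values (the trailing flag marks the source file):

00001 -> Jenny l
00001 -> Chinese r
00001 -> Math r

Pairing the single left-side value with each right-side value produces the output lines Jenny Chinese and Jenny Math.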

The Mapper class:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;


public class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {
    public static final String LEFT_FILENAME = "student_info.txt";
    public static final String RIGHT_FILENAME = "student_class_info.txt";
    public static final String LEFT_FILENAME_FLAG = "l";
    public static final String RIGHT_FILENAME_FLAG = "r";

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws java.io.IOException, InterruptedException {
        // Get the HDFS path of the file this record was read from
        String filePath = ((FileSplit) context.getInputSplit()).getPath().toString();
        String fileFlag;
        String joinKey;
        String joinValue;
        String[] fields = value.toString().split("\t");
        // Decide which table the record belongs to and parse it accordingly
        if (filePath.contains(LEFT_FILENAME)) {
            // student_info.txt: name <tab> student ID -- join on the ID
            fileFlag = LEFT_FILENAME_FLAG;
            joinKey = fields[1];
            joinValue = fields[0];
        } else if (filePath.contains(RIGHT_FILENAME)) {
            // student_class_info.txt: student ID <tab> class -- join on the ID
            fileFlag = RIGHT_FILENAME_FLAG;
            joinKey = fields[0];
            joinValue = fields[1];
        } else {
            return; // ignore records from unexpected files
        }
        // Emit the join key, tagging the value with its source-file flag
        context.write(new Text(joinKey), new Text(joinValue + "\t" + fileFlag));
    }
}
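The mapper above allocates two fresh Text objects for every record. A common Hadoop idiom is to reuse mutable Text instances across map() calls; the fragment below is an optional variant of the emit step, not part of the original code:

    // Reusable output objects, declared as fields of the mapper
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    // ... at the end of map(), instead of creating new Text objects:
    outKey.set(joinKey);
    outValue.set(joinValue + "\t" + fileFlag);
    context.write(outKey, outValue);

This avoids object churn on large inputs; the framework serializes each value as soon as write() is called, so reusing the objects is safe here.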

The Reducer class:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;


public class JoinReducer extends Reducer<Text, Text, Text, Text> {
    public static final String LEFT_FILENAME_FLAG = "l";
    public static final String RIGHT_FILENAME_FLAG = "r";

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws java.io.IOException, InterruptedException {
        List<String> studentClassNames = new ArrayList<String>();
        String studentName = "";
        for (Text value : values) {
            String[] infos = value.toString().split("\t");
            // Check which file the record came from and parse it accordingly
            if (infos[1].equals(LEFT_FILENAME_FLAG)) {
                studentName = infos[0];           // left side: the student's name
            } else if (infos[1].equals(RIGHT_FILENAME_FLAG)) {
                studentClassNames.add(infos[0]);  // right side: a class name
            }
        }
        // Cartesian product: pair the student's name with every class
        for (int i = 0; i < studentClassNames.size(); i++) {
            context.write(new Text(studentName), new Text(studentClassNames.get(i)));
        }
    }
}
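In this example the left table contributes at most one value per key, so the Cartesian product collapses to a single loop. If both tables could contain several rows per join key, both sides would have to be buffered while iterating over values (the iterable can only be traversed once) and then paired with a nested loop. A minimal sketch, assuming the left-side names were collected into a hypothetical studentNames list:

    // True Cartesian product of both buffered sides
    for (String name : studentNames) {
        for (String className : studentClassNames) {
            context.write(new Text(name), new Text(className));
        }
    }

Note that buffering an entire side in memory can become a bottleneck for heavily skewed keys.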

The main method:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;


public class JobRun {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "Join");
        job.setJarByClass(JobRun.class);
        // Both input files live under /input/join, so one input path covers them
        FileInputFormat.addInputPath(job, new Path("/input/join"));
        FileOutputFormat.setOutputPath(job, new Path("/output/join"));
        job.setMapperClass(JoinMapper.class);
        job.setReducerClass(JoinReducer.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Exit with 0 on success, 1 on failure
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
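To try the job, package the three classes into a jar, stage the input files, and submit it with the hadoop CLI. The jar name join.jar below is a placeholder; adjust it to your build:

hadoop fs -mkdir -p /input/join
hadoop fs -put student_info.txt student_class_info.txt /input/join
hadoop jar join.jar JobRun
hadoop fs -cat /output/join/part-r-00000

The job will fail if /output/join already exists; remove it first with hadoop fs -rm -r /output/join.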

Problems encountered:

Uploading the input files to HDFS failed because the NameNode was in safe mode; leaving safe mode solved the problem:
# take HDFS out of safe mode
hadoop dfsadmin -safemode leave
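On recent Hadoop versions the hdfs command is preferred, and the current safe mode status can be checked before retrying the upload:

hdfs dfsadmin -safemode get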

Result:
[Screenshot of the job output]
