Hadoop from Beginner to Pro 35: Multi-Table Queries with MapReduce

Case study: given an employee table (emp.csv) and a department table (dept.csv), output every department together with all of its employees. Format: department name, then the list of employee names.

How the multi-table query works (a reduce-side join): the join column, here the department number, is used as the map output key (key2), and each map output value (value2) is prefixed with a tag that records which table the record came from. All records sharing the same department number then meet in the same reduce call, where the tags let the reducer separate the department name from the employee names.
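
For example, with the sample data used below, every tagged record for department 10 reaches the same reduce call (the order of the values inside the iterable is not guaranteed):

key3 = 10
value3 = [ ENAME_MILLER, DNAME_ACCOUNTING, ENAME_KING, ENAME_CLARK ]

The reducer strips the tags, keeps the DNAME_ value as the department name, and concatenates the ENAME_ values into the employee list: ACCOUNTING MILLER;KING;CLARK;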

1. Source Code

//MultiTableMapper.java
package demo.multiTable;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class MultiTableMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
  @Override
  protected void map(LongWritable key1, Text value1, Context context)
    throws IOException, InterruptedException {
    //Each input line is either an employee record or a department record
    //Department record: 10,ACCOUNTING,NEW YORK
    //Employee record:   7782,CLARK,MANAGER,7839,1981/6/9,2450,,10
    String data = value1.toString();
    //Split the line into fields
    String[] words = data.split(",");
    //Use the field count to tell the two record types apart, and prefix the
    //output value with a tag: DNAME_ (department name) or ENAME_ (employee name)
    if(words.length == 3) {
      //Department record; emit: department number -> department name
      context.write(new LongWritable(Long.parseLong(words[0])), new Text("DNAME_"+words[1]));
    }else {
      //Employee record; emit: department number -> employee name
      context.write(new LongWritable(Long.parseLong(words[7])), new Text("ENAME_"+words[1]));
    }
  }
}
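
A quick sanity check of the length-based dispatch (a standalone sketch, not part of the job; the class name SplitCheck is made up for illustration). Note that Java's String.split(",") drops only trailing empty fields, so an employee line still yields 8 fields even when its bonus field is empty:

public class SplitCheck {
  public static void main(String[] args) {
    //A department line splits into 3 fields
    System.out.println("10,ACCOUNTING,NEW YORK".split(",").length);                    //prints 3
    //An employee line splits into 8 fields; the empty bonus field (index 6) is kept
    System.out.println("7782,CLARK,MANAGER,7839,1981/6/9,2450,,10".split(",").length); //prints 8
  }
}
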
//MultiTableReducer.java
package demo.multiTable;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class MultiTableReducer extends Reducer<LongWritable, Text, Text, Text> {
  @Override
  protected void reduce(LongWritable key3, Iterable<Text> value3, Context context)
    throws IOException, InterruptedException {
    //value3 mixes the department name (prefix DNAME_) and employee names (prefix ENAME_)
    String dname = "";
    String enameList = "";
    //Separate the department name from the employees that belong to this department
    for(Text v:value3) {
      String name = v.toString();
      if(name.startsWith("DNAME_")) {
        dname = name.substring(6);
      }else if(name.startsWith("ENAME_")) {
        enameList += name.substring(6)+";";
      }
    }
    context.write(new Text(dname), new Text(enameList));
  }
}
//MultiTableMain.java
package demo.multiTable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MultiTableMain {
  public static void main(String[] args) throws Exception {
    //Create the job
    Job job = Job.getInstance(new Configuration());
    //Set the jar entry point
    job.setJarByClass(MultiTableMain.class);
    //Set the Mapper and its output types
    job.setMapperClass(MultiTableMapper.class);
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(Text.class);
    //Set the Reducer and its output types
    job.setReducerClass(MultiTableReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    //Set the input and output directories (HDFS paths)
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job,new Path(args[1]));
    //Run the job and wait for it to finish
    job.waitForCompletion(true);
  }
}
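
One practical note: the job fails at submission if the output directory already exists. A minimal sketch of removing it in the driver first, assuming stale results may be discarded (it uses the standard org.apache.hadoop.fs.FileSystem API and is an addition, not part of the original program):

    //Inside main(), before FileOutputFormat.setOutputPath(...):
    Path output = new Path(args[1]);
    FileSystem fs = FileSystem.get(job.getConfiguration());
    if (fs.exists(output)) {
      fs.delete(output, true); //true = delete recursively
    }
    FileOutputFormat.setOutputPath(job, output);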

2. Package and Run

Place the employee table and the department table in the same HDFS directory:
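
If the two CSV files are still on the local filesystem, commands like the following would upload them first (the local filenames are assumptions based on this example):

# hdfs dfs -mkdir -p /input/multi-table
# hdfs dfs -put emp.csv dept.csv /input/multi-table

Either way, the directory should now contain both files: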

# hdfs dfs -ls /input/multi-table
Found 2 items
-rw-r--r-- 1 root supergroup 84 2018-09-26 00:04 /input/multi-table/dept.csv
-rw-r--r-- 1 root supergroup 617 2018-09-26 00:04 /input/multi-table/emp.csv

Employee table: emp.csv (employee number, name, job title, manager number, hire date, salary, bonus, department number)

# hdfs dfs -cat /input/multi-table/emp.csv
7369,SMITH,CLERK,7902,1980/12/17,800,,20
7499,ALLEN,SALESMAN,7698,1981/2/20,1600,300,30
7521,WARD,SALESMAN,7698,1981/2/22,1250,500,30
7566,JONES,MANAGER,7839,1981/4/2,2975,,20
7654,MARTIN,SALESMAN,7698,1981/9/28,1250,1400,30
7698,BLAKE,MANAGER,7839,1981/5/1,2850,,30
7782,CLARK,MANAGER,7839,1981/6/9,2450,,10
7788,SCOTT,ANALYST,7566,1987/4/19,3000,,20
7839,KING,PRESIDENT,,1981/11/17,5000,,10
7844,TURNER,SALESMAN,7698,1981/9/8,1500,0,30
7876,ADAMS,CLERK,7788,1987/5/23,1100,,20
7900,JAMES,CLERK,7698,1981/12/3,950,,30
7902,FORD,ANALYST,7566,1981/12/3,3000,,20
7934,MILLER,CLERK,7782,1982/1/23,1300,,10

Department table: dept.csv (department number, department name, location)

# hdfs dfs -cat /input/multi-table/dept.csv
10,ACCOUNTING,NEW YORK
20,RESEARCH,DALLAS
30,SALES,CHICAGO
40,OPERATIONS,BOSTON

Package the program as MultiTable.jar, upload it to the server, and run it:

# hadoop jar MultiTable.jar /input/multi-table /output/multi-table
……
18/11/18 10:44:50 INFO mapreduce.Job: map 0% reduce 0%
18/11/18 10:44:57 INFO mapreduce.Job: map 50% reduce 0%
18/11/18 10:44:58 INFO mapreduce.Job: map 100% reduce 0%
18/11/18 10:45:03 INFO mapreduce.Job: map 100% reduce 100%
18/11/18 10:45:04 INFO mapreduce.Job: Job job_1542506318955_0004 completed successfully
……

View the results. Department 40 (OPERATIONS) has no employees, so its employee list comes out empty:

# hdfs dfs -ls /output/multi-table
Found 2 items
-rw-r--r-- 1 root supergroup 0 2018-11-18 10:45 /output/multi-table/_SUCCESS
-rw-r--r-- 1 root supergroup 125 2018-11-18 10:45 /output/multi-table/part-r-00000

# hdfs dfs -cat /output/multi-table/part-r-00000
ACCOUNTING MILLER;KING;CLARK;
RESEARCH ADAMS;SCOTT;SMITH;JONES;FORD;
SALES TURNER;ALLEN;BLAKE;MARTIN;WARD;JAMES;
OPERATIONS
