Reposted from: https://www.cnblogs.com/shishanyuan/p/4190403.html
1) Compute the total salary of each department
2) Compute the head count and average salary of each department
3) Find the earliest-hired employee of each department
4) Compute the total salary of the employees in each city
5) List the employees who earn more than their manager, with their salaries
6) List the employees who earn more than the company-wide average salary, with their salaries
7) List the employees whose names start with "J", with their department names
8) List the top three earners, with their salaries
9) Sort all employees by total income (salary + commission) in descending order, listing name and total income
10) If each employee can communicate only with their direct manager, their direct reports, and colleagues in the same department, find the number of intermediate nodes a message must pass through between any two employees. Also comment on whether this problem is a good fit for map-reduce.
/**
dept table =====================
id, department name, location
=====================
10,ACCOUNTING,NEW YORK
20,RESEARCH,DALLAS
30,SALES,CHICAGO
40,OPERATIONS,BOSTON
emp table =====================
id, name, job title, manager id, hire date, salary, commission, dept id
=====================
7369,SMITH,CLERK,7902,17-12月-80,800,,20
7499,ALLEN,SALESMAN,7698,20-2月-81,1600,300,30
7521,WARD,SALESMAN,7698,22-2月-81,1250,500,30
7566,JONES,MANAGER,7839,02-4月-81,2975,,20
7654,MARTIN,SALESMAN,7698,28-9月-81,1250,1400,30
7698,BLAKE,MANAGER,7839,01-5月-81,2850,,30
7782,CLARK,MANAGER,7839,09-6月-81,2450,,10
7839,KING,PRESIDENT,,17-11月-81,5000,,10
7844,TURNER,SALESMAN,7698,08-9月-81,1500,0,30
7900,JAMES,CLERK,7698,03-12月-81,950,,30
7902,FORD,ANALYST,7566,03-12月-81,3000,,20
7934,MILLER,CLERK,7782,23-1月-82,1300,,10
*/
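Every mapper below addresses these fields by position after split(","). As a quick reference, here is a minimal standalone sketch (plain Java; the field names are assumptions taken from the table above) showing how one emp line maps to array indices:

public class EmpRecordDemo {
    public static void main(String[] args) {
        // A sample emp line; note the empty commission field between the two adjacent commas
        String line = "7369,SMITH,CLERK,7902,17-12月-80,800,,20";
        String[] kv = line.split(",");
        System.out.println("id         = " + kv[0]); // 7369
        System.out.println("name       = " + kv[1]); // SMITH
        System.out.println("job        = " + kv[2]); // CLERK
        System.out.println("managerId  = " + kv[3]); // 7902
        System.out.println("hireDate   = " + kv[4]); // 17-12月-80
        System.out.println("salary     = " + kv[5]); // 800
        System.out.println("commission = " + kv[6]); // "" (empty)
        System.out.println("deptId     = " + kv[7]); // 20
    }
}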
1) Compute the total salary of each department
package cn.xjw.mapreduce;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 1) Compute the total salary of each department
* @Date: 2019/3/1
*/
public class Q1SumDeptSalary extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
/** Cache for the data in the dept file */
private Map<String, String> deptMap = new HashMap<String, String>();
private String[] kv;
/** Runs once, before any calls to map() */
@Override
protected void setup(Context context) throws IOException, InterruptedException {
BufferedReader in = null;
try {
// Retrieve the files cached for this job
Path[] paths = DistributedCache.getLocalCacheFiles(context.getConfiguration());
String deptIdName = null;
for (Path path : paths) {
if (path.toString().contains("dept")) {
in = new BufferedReader(new FileReader(path.toString()));
while (null != (deptIdName = in.readLine())) {
// Split each dept line and cache it in deptMap:
// key = department id, value = department name
deptMap.put(deptIdName.split(",")[0], deptIdName.split(",")[1]);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (in != null) {
in.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
kv = value.toString().split(",");
// Map-side join: filter unneeded records here; emit key = department name, value = employee salary
if (deptMap.containsKey(kv[7])) {
if (null != kv[5] && !"".equals(kv[5])) {
context.write(new Text(deptMap.get(kv[7])), new Text(kv[5].trim()));
}
}
}
}
public static class Reduce extends Reducer<Text, Text, Text, LongWritable> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Sum the salaries of the employees in the same department
long sumSalary = 0;
for (Text val : values) {
sumSalary += Long.parseLong(val.toString());
}
// Emit key = department name, value = the department's total salary
context.write(key, new LongWritable(sumSalary));
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name, Mapper and Reducer classes
Job job = new Job(getConf(), "Q1SumDeptSalary");
job.setJobName("Q1SumDeptSalary");
job.setJarByClass(Q1SumDeptSalary.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and the key/value classes; the mapper emits Text/Text
// while the reducer emits Text/LongWritable, so both must be declared explicitly
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
// Argument 1: dept file path to cache; argument 2: employee data path; argument 3: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
DistributedCache.addCacheFile(new Path(otherArgs[0]).toUri(), job.getConfiguration());
FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q1SumDeptSalary(), args);
System.exit(res);
}
}
2) Compute the head count and average salary of each department
package cn.xjw.mapreduce;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 2) Compute the head count and average salary of each department
* @Date: 2019/3/1
*/
public class Q2DeptNumberAveSalary extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
/** Cache for the data in the dept file */
private Map<String, String> deptMap = new HashMap<String, String>();
private String[] kv;
/** Runs once, before any calls to map() */
@Override
protected void setup(Context context) throws IOException, InterruptedException {
BufferedReader in = null;
try {
// Retrieve the files cached for this job
Path[] paths = DistributedCache.getLocalCacheFiles(context.getConfiguration());
String deptIdName = null;
for (Path path : paths) {
if (path.toString().contains("dept")) {
in = new BufferedReader(new FileReader(path.toString()));
while (null != (deptIdName = in.readLine())) {
// Split each dept line and cache it in deptMap:
// key = department id, value = department name
deptMap.put(deptIdName.split(",")[0], deptIdName.split(",")[1]);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (in != null) {
in.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
kv = value.toString().split(",");
// Map-side join: filter unneeded records here; emit key = department name, value = employee salary
if (deptMap.containsKey(kv[7])) {
if (null != kv[5] && !"".equals(kv[5])) {
context.write(new Text(deptMap.get(kv[7])), new Text(kv[5].trim()));
}
}
}
}
public static class Reduce extends Reducer<Text, Text, Text, Text> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
long sumSalary = 0;
int deptNumber = 0;
// Sum the salaries of the employees in the same department, counting heads as we go
for (Text val : values) {
sumSalary += Long.parseLong(val.toString());
deptNumber++;
}
// Emit key = department name, value = head count and the department's average salary (integer division)
context.write(key, new Text("Dept Number:" + deptNumber + ", Ave Salary:" + sumSalary / deptNumber));
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name, Mapper and Reducer classes
Job job = new Job(getConf(), "Q2DeptNumberAveSalary");
job.setJobName("Q2DeptNumberAveSalary");
job.setJarByClass(Q2DeptNumberAveSalary.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and the key/value classes (Text/Text for both map and reduce output)
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: dept file path to cache; argument 2: employee data path; argument 3: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
DistributedCache.addCacheFile(new Path(otherArgs[0]).toUri(), job.getConfiguration());
FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q2DeptNumberAveSalary(), args);
System.exit(res);
}
}
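Note that sumSalary / deptNumber in the reducer above is integer division, so the average is truncated. If a fractional average is wanted, a minimal sketch of the change inside reduce() (same variables as above):

// Cast one operand before dividing so the fractional part of the average survives
double aveSalary = (double) sumSalary / deptNumber;
context.write(key, new Text("Dept Number:" + deptNumber + ", Ave Salary:" + aveSalary));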
3) Find the earliest-hired employee of each department
package cn.xjw.mapreduce;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 3) Find the earliest-hired employee of each department
* @Date: 2019/3/1
*/
public class Q3DeptEarliestEmp extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
/** Cache for the data in the dept file */
private Map<String, String> deptMap = new HashMap<String, String>();
private String[] kv;
/** Runs once, before any calls to map() */
@Override
protected void setup(Context context) throws IOException, InterruptedException {
BufferedReader in = null;
try {
// Retrieve the files cached for this job
Path[] paths = DistributedCache.getLocalCacheFiles(context.getConfiguration());
String deptIdName = null;
for (Path path : paths) {
if (path.toString().contains("dept")) {
in = new BufferedReader(new FileReader(path.toString()));
while (null != (deptIdName = in.readLine())) {
// Split each dept line and cache it in deptMap:
// key = department id, value = department name
deptMap.put(deptIdName.split(",")[0], deptIdName.split(",")[1]);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (in != null) {
in.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
kv = value.toString().split(",");
// Map-side join: filter unneeded records here;
// emit key = department name, value = employee name + "," + hire date
if (deptMap.containsKey(kv[7])) {
if (null != kv[4] && !"".equals(kv[4])) {
context.write(new Text(deptMap.get(kv[7])), new Text(kv[1].trim() + "," + kv[4].trim()));
}
}
}
}
public static class Reduce extends Reducer<Text, Text, Text, Text> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Employee name and hire date
String empName = null;
String empEnterDate = null;
// Date pattern matching the raw hire dates (e.g. 17-12月-80); earliestDate starts at
// "now", which works because every hire date in the data precedes it
DateFormat df = new SimpleDateFormat("dd-MM月-yy");
Date earliestDate = new Date();
String earliestEmp = null;
// Scan all employees of the department to find the earliest hire
for (Text val : values) {
empName = val.toString().split(",")[0];
empEnterDate = val.toString().split(",")[1].trim();
try {
Date enterDate = df.parse(empEnterDate);
if (enterDate.compareTo(earliestDate) < 0) {
earliestDate = enterDate;
earliestEmp = empName;
}
} catch (ParseException e) {
e.printStackTrace();
}
}
// Emit key = department name, value = the department's earliest hire
context.write(key, new Text("The earliest emp of dept:" + earliestEmp + ", Enter date:" + new SimpleDateFormat("yyyy-MM-dd").format(earliestDate)));
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q3DeptEarliestEmp");
job.setJobName("Q3DeptEarliestEmp");
// Set the Mapper and Reducer classes
job.setJarByClass(Q3DeptEarliestEmp.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and key/value classes
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: dept file path to cache; argument 2: employee data path; argument 3: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
DistributedCache.addCacheFile(new Path(otherArgs[0]).toUri(), job.getConfiguration());
FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q3DeptEarliestEmp(), args);
System.exit(res);
}
}
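A quick standalone sanity check of the date pattern used in the reducer above ("月" is not an ASCII letter, so SimpleDateFormat treats it as literal text, and with the default two-digit-year window "80" resolves to 1980):

import java.text.ParseException;
import java.text.SimpleDateFormat;

public class HireDateParseDemo {
    public static void main(String[] args) throws ParseException {
        SimpleDateFormat df = new SimpleDateFormat("dd-MM月-yy");
        // Prints 1980-12-17 for SMITH's hire date
        System.out.println(new SimpleDateFormat("yyyy-MM-dd").format(df.parse("17-12月-80")));
    }
}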
4) Compute the total salary of the employees in each city
package cn.xjw.mapreduce;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 4) Compute the total salary of the employees in each city
* @Date: 2019/3/1
*/
public class Q4SumCitySalary extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
// Cache for the data in the dept file
private Map<String, String> deptMap = new HashMap<String, String>();
private String[] kv;
// Runs once, before any calls to map()
@Override
protected void setup(Context context) throws IOException, InterruptedException {
BufferedReader in = null;
try {
// Retrieve the files cached for this job
Path[] paths = DistributedCache.getLocalCacheFiles(context.getConfiguration());
String deptIdName = null;
for (Path path : paths) {
if (path.toString().contains("dept")) {
in = new BufferedReader(new FileReader(path.toString()));
while (null != (deptIdName = in.readLine())) {
// Split each dept line and cache it in deptMap:
// key = department id, value = city name
deptMap.put(deptIdName.split(",")[0], deptIdName.split(",")[2]);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (in != null) {
in.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
kv = value.toString().split(",");
// Map-side join: filter unneeded records here; emit key = city name, value = employee salary
if (deptMap.containsKey(kv[7])) {
if (null != kv[5] && !"".equals(kv[5])) {
context.write(new Text(deptMap.get(kv[7])), new Text(kv[5].trim()));
}
}
}
}
public static class Reduce extends Reducer<Text, Text, Text, LongWritable> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Sum the salaries of the employees in the same city
long sumSalary = 0;
for (Text val : values) {
sumSalary += Long.parseLong(val.toString());
}
// Emit key = city name, value = the city's total salary
context.write(key, new LongWritable(sumSalary));
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q4SumCitySalary");
job.setJobName("Q4SumCitySalary");
// Set the Mapper and Reducer classes
job.setJarByClass(Q4SumCitySalary.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and the key/value classes; the mapper emits Text/Text
// while the reducer emits Text/LongWritable, so both must be declared explicitly
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
// Argument 1: dept file path to cache; argument 2: employee data path; argument 3: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
DistributedCache.addCacheFile(new Path(otherArgs[0]).toUri(), job.getConfiguration());
FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q4SumCitySalary(), args);
System.exit(res);
}
}
5) List the employees who earn more than their manager, with their salaries
package cn.xjw.mapreduce;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 5) List the employees who earn more than their manager, with their salaries
* @Date: 2019/3/1
*/
public class Q5EarnMoreThanManager extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
String[] kv = value.toString().split(",");
// Emit a "manager" tuple: key = this employee's id, value = "M," + this employee's salary
context.write(new Text(kv[0]), new Text("M," + kv[5]));
// Emit an "employee" tuple under the manager's id: key = manager id, value = "E," + name + "," + salary
if (null != kv[3] && !"".equals(kv[3])) {
context.write(new Text(kv[3]), new Text("E," + kv[1] + "," + kv[5]));
}
}
}
public static class Reduce extends Reducer<Text, Text, Text, Text> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Employee name, salary, and a map of this manager's direct reports
String empName;
long empSalary = 0;
HashMap<String, Long> empMap = new HashMap<String, Long>();
// The manager's salary
long mgrSalary = 0;
for (Text val : values) {
if (val.toString().startsWith("E")) {
// 当是员工标示时,获取该员工对应的姓名和工资并放入Map中
empName = val.toString().split(",")[1];
empSalary = Long.parseLong(val.toString().split(",")[2]);
empMap.put(empName, empSalary);
} else {
// 当时经理标志时,获取该经理工资
mgrSalary = Long.parseLong(val.toString().split(",")[1]);
}
}
// Compare each report's salary with the manager's; emit those who earn more
for (java.util.Map.Entry<String, Long> entry : empMap.entrySet()) {
if (entry.getValue() > mgrSalary) {
context.write(new Text(entry.getKey()), new Text("" + entry.getValue()));
}
}
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q5EarnMoreThanManager");
job.setJobName("Q5EarnMoreThanManager");
// Set the Mapper and Reducer classes
job.setJarByClass(Q5EarnMoreThanManager.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and key/value classes
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: employee data path; argument 2: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q5EarnMoreThanManager(), args);
System.exit(res);
}
}
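To make the reduce-side self-join above concrete, here is a minimal standalone sketch (plain Java, with sample values taken from the emp table) of what one reduce group looks like. Each employee record contributes an "M" tuple under its own id and an "E" tuple under its manager's id, so the shuffle brings a manager's salary together with all of that manager's reports:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SelfJoinDemo {
    public static void main(String[] args) {
        // The group for key 7566 (JONES): JONES's own record contributes "M,2975",
        // and FORD's record (manager id 7566) contributes "E,FORD,3000"
        List<String> valuesForKey7566 = Arrays.asList("M,2975", "E,FORD,3000");
        long mgrSalary = 0;
        Map<String, Long> empMap = new HashMap<String, Long>();
        for (String val : valuesForKey7566) {
            if (val.startsWith("E")) {
                empMap.put(val.split(",")[1], Long.parseLong(val.split(",")[2]));
            } else {
                mgrSalary = Long.parseLong(val.split(",")[1]);
            }
        }
        for (Map.Entry<String, Long> e : empMap.entrySet()) {
            if (e.getValue() > mgrSalary) {
                // Prints "FORD earns more than the manager: 3000"
                System.out.println(e.getKey() + " earns more than the manager: " + e.getValue());
            }
        }
    }
}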
6) List the employees who earn more than the company-wide average salary, with their salaries
package cn.xjw.mapreduce;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 6) List the employees who earn more than the company-wide average salary, with their salaries
* @Date: 2019/3/1
*/
public class Q6HigherThanAveSalary extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, IntWritable, Text> {
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
String[] kv = value.toString().split(",");
// Emit every salary under key 0 (used to build the company-wide total)
context.write(new IntWritable(0), new Text(kv[5]));
// Emit every (name, salary) pair under key 1 (compared against the average later)
context.write(new IntWritable(1), new Text(kv[1] + "," + kv[5]));
}
}
public static class Reduce extends Reducer<IntWritable, Text, Text, Text> {
// Company-wide salary total, head count, and average
private long allSalary = 0;
private int allEmpCount = 0;
private long aveSalary = 0;
// Per-employee salary
private long empSalary = 0;
@Override
public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
for (Text val : values) {
if (0 == key.get()) {
// Key 0: accumulate the company-wide salary total and head count
allSalary += Long.parseLong(val.toString());
allEmpCount++;
} else if (1 == key.get()) {
// Key 1 is processed after key 0 (keys arrive sorted and there is a single
// reducer), so the average can be computed once, on first use
if (aveSalary == 0) {
aveSalary = allSalary / allEmpCount;
context.write(new Text("Average Salary = "), new Text("" + aveSalary));
context.write(new Text("Following employees have salaries higher than Average:"), new Text(""));
}
// Compare each employee's salary with the average; emit those above it
empSalary = Long.parseLong(val.toString().split(",")[1]);
if (empSalary > aveSalary) {
context.write(new Text(val.toString().split(",")[0]), new Text("" + empSalary));
}
}
}
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q6HigherThanAveSalary");
job.setJobName("Q6HigherThanAveSalary");
// Set the Mapper and Reducer classes
job.setJarByClass(Q6HigherThanAveSalary.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// The number of reduce tasks must be 1 (equivalent to -D mapred.reduce.tasks=1).
// This is the crux of the job: a single reducer sees key 0 before key 1,
// so the average is complete before any comparison happens
job.setNumReduceTasks(1);
// Set the map output key/value classes
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(Text.class);
// Set the output format class and the output key/value classes (the reducer emits Text/Text)
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: employee data path; argument 2: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q6HigherThanAveSalary(), args);
System.exit(res);
}
}
7) List the employees whose names start with "J", with their department names
package cn.xjw.mapreduce;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 7) List the employees whose names start with "J", with their department names
* @Date: 2019/3/1
*/
public class Q7NameDeptOfStartJ extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
// Cache for the data in the dept file
private Map<String, String> deptMap = new HashMap<String, String>();
private String[] kv;
// Runs once, before any calls to map()
@Override
protected void setup(Context context) throws IOException, InterruptedException {
BufferedReader in = null;
try {
// Retrieve the files cached for this job
Path[] paths = DistributedCache.getLocalCacheFiles(context.getConfiguration());
String deptIdName = null;
for (Path path : paths) {
if (path.toString().contains("dept")) {
in = new BufferedReader(new FileReader(path.toString()));
while (null != (deptIdName = in.readLine())) {
// Split each dept line and cache it in deptMap:
// key = department id, value = department name
deptMap.put(deptIdName.split(",")[0], deptIdName.split(",")[1]);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (in != null) {
in.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
kv = value.toString().split(",");
// For employees whose name starts with "J", emit key = employee name, value = department name
if (kv[1].trim().startsWith("J")) {
context.write(new Text(kv[1].trim()), new Text(deptMap.get(kv[7].trim())));
}
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q7NameDeptOfStartJ");
job.setJobName("Q7NameDeptOfStartJ");
// Set the Mapper class (this job defines no custom reducer)
job.setJarByClass(Q7NameDeptOfStartJ.class);
job.setMapperClass(MapClass.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and key/value classes
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: dept file path to cache; argument 2: employee data path; argument 3: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
DistributedCache.addCacheFile(new Path(otherArgs[0]).toUri(), job.getConfiguration());
FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q7NameDeptOfStartJ(), args);
System.exit(res);
}
}
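One detail worth noting: Q7 defines no reducer, but because run() never calls setNumReduceTasks(0), Hadoop still runs a shuffle plus an identity reduce phase. If a pure map-only job is wanted, a one-line sketch of the addition to run():

// Make the job map-only: mapper output is written straight to the output path, skipping shuffle and sort
job.setNumReduceTasks(0);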
8) List the top three earners, with their salaries
package cn.xjw.mapreduce;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 8) List the top three earners, with their salaries
* @Date: 2019/3/1
*/
public class Q8SalaryTop3Salary extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, IntWritable, Text> {
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
String[] kv = value.toString().split(",");
// Emit everything under key 0, value = employee name + "," + salary, so a single reduce group sees all records
context.write(new IntWritable(0), new Text(kv[1].trim() + "," + kv[5].trim()));
}
}
public static class Reduce extends Reducer<IntWritable, Text, Text, Text> {
@Override
public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Names of the top three earners
String empName;
String firstEmpName = "";
String secondEmpName = "";
String thirdEmpName = "";
// Salaries of the top three earners
long empSalary = 0;
long firstEmpSalary = 0;
long secondEmpSalary = 0;
long thirdEmpSalary = 0;
// Single pass over all employees, keeping a running top three by salary
for (Text val : values) {
empName = val.toString().split(",")[0];
empSalary = Long.parseLong(val.toString().split(",")[1]);
if (empSalary > firstEmpSalary) {
thirdEmpName = secondEmpName;
thirdEmpSalary = secondEmpSalary;
secondEmpName = firstEmpName;
secondEmpSalary = firstEmpSalary;
firstEmpName = empName;
firstEmpSalary = empSalary;
} else if (empSalary > secondEmpSalary) {
thirdEmpName = secondEmpName;
thirdEmpSalary = secondEmpSalary;
secondEmpName = empName;
secondEmpSalary = empSalary;
} else if (empSalary > thirdEmpSalary) {
thirdEmpName = empName;
thirdEmpSalary = empSalary;
}
}
// Emit the top three
context.write(new Text("First employee name:" + firstEmpName), new Text("Salary:" + firstEmpSalary));
context.write(new Text("Second employee name:" + secondEmpName), new Text("Salary:" + secondEmpSalary));
context.write(new Text("Third employee name:" + thirdEmpName), new Text("Salary:" + thirdEmpSalary));
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q8SalaryTop3Salary");
job.setJobName("Q8SalaryTop3Salary");
// Set the Mapper and Reducer classes
job.setJarByClass(Q8SalaryTop3Salary.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(Text.class);
// Set the input format class
job.setInputFormatClass(TextInputFormat.class);
// Set the output format class and key/value classes
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Argument 1: employee data path; argument 2: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q8SalaryTop3Salary(), args);
System.exit(res);
}
}
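The hand-rolled top-three selection above works fine for N = 3, but for reference, here is a minimal standalone sketch of the same idea using a size-bounded min-heap, a common top-N idiom (class and variable names here are illustrative):

import java.util.Comparator;
import java.util.PriorityQueue;

public class Top3Demo {
    public static void main(String[] args) {
        String[] values = {"SMITH,800", "FORD,3000", "KING,5000", "JONES,2975", "BLAKE,2850"};
        // Min-heap on salary: the head is always the smallest of the current top three
        PriorityQueue<String> heap = new PriorityQueue<String>(3, new Comparator<String>() {
            public int compare(String a, String b) {
                return Long.compare(Long.parseLong(a.split(",")[1]), Long.parseLong(b.split(",")[1]));
            }
        });
        for (String val : values) {
            heap.offer(val);
            if (heap.size() > 3) {
                heap.poll(); // evict the current minimum, keeping only the top three
            }
        }
        // Prints JONES,2975 then FORD,3000 then KING,5000 (smallest of the three first)
        while (!heap.isEmpty()) {
            System.out.println(heap.poll());
        }
    }
}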
9) Sort all employees by total income (salary + commission) in descending order, listing name and total income
package cn.xjw.mapreduce;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 9) Sort all employees by total income (salary + commission) in descending order, listing name and total income
* @Date: 2019/3/1
*/
public class Q9EmpSalarySort extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, IntWritable, Text> {
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
String[] kv = value.toString().split(",");
// Emit key = total income (salary plus commission, which may be empty), value = employee name
int empAllSalary = "".equals(kv[6]) ? Integer.parseInt(kv[5]) : Integer.parseInt(kv[5]) + Integer.parseInt(kv[6]);
context.write(new IntWritable(empAllSalary), new Text(kv[1]));
}
}
/**
* Comparator that inverts IntWritable's natural order, yielding a descending sort
*/
public static class DecreaseComparator extends IntWritable.Comparator {
@Override
public int compare(WritableComparable a, WritableComparable b) {
return -super.compare(a, b);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return -super.compare(b1, s1, l1, b2, s2, l2);
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q9EmpSalarySort");
job.setJobName("Q9EmpSalarySort");
// Set the Mapper class (the implicit identity reducer passes records through)
job.setJarByClass(Q9EmpSalarySort.class);
job.setMapperClass(MapClass.class);
// Set the map output key/value classes and the descending sort comparator
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(Text.class);
job.setSortComparatorClass(DecreaseComparator.class);
// Argument 1: employee data path; argument 2: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q9EmpSalarySort(), args);
System.exit(res);
}
}
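A caveat on Q9: run() never fixes the number of reduce tasks, and the comparator only guarantees descending order within each reducer's partition. For a single, globally sorted output file, a one-line sketch of the addition to run() (after setSortComparatorClass):

// With one reduce task, the identity reducer receives every record,
// so the single output file is globally sorted by descending total income
job.setNumReduceTasks(1);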
10) If each employee can communicate only with their direct manager, their direct reports, and colleagues in the same department, find the number of intermediate nodes a message must pass through between any two employees. Also comment on whether this problem is a good fit for map-reduce.
package cn.xjw.mapreduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* @Author: w
* @Description: 10) If each employee can communicate only with their direct manager, their direct reports, and colleagues in the same department, find the number of intermediate nodes a message must pass through between any two employees. Also comment on whether this problem is a good fit for map-reduce.
* @Date: 2019/3/1
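* Note on the evaluation asked for above: the reducer collects every employee into memory
* and computes all pairwise distances inside cleanup(), so the graph traversal itself runs
* on a single node and MapReduce contributes little beyond reading the input. This suggests
* the problem is not a natural fit for map-reduce unless it is reformulated as an iterative,
* distributed BFS over the employee graph.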
*/
public class Q10MiddlePersonsCountForComm extends Configured implements Tool {
public static class MapClass extends Mapper<LongWritable, Text, IntWritable, Text> {
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// Split the employee record into fields
String[] kv = value.toString().split(",");
// Emit key = 0, value = employee id + "," + manager id (a single space stands in for
// the president's missing manager id, so split() still yields two fields downstream)
context.write(new IntWritable(0), new Text(kv[0] + "," + ("".equals(kv[3]) ? " " : kv[3])));
}
}
public static class Reduce extends Reducer<IntWritable, Text, NullWritable, Text> {
// List of all employees and the employee-to-manager map
List<String> employeeList = new ArrayList<String>();
Map<String, String> employeeToManagerMap = new HashMap<String, String>();
@Override
public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Collect every employee into the list and the employee-to-manager map
for (Text value : values) {
employeeList.add(value.toString().split(",")[0].trim());
employeeToManagerMap.put(value.toString().split(",")[0].trim(), value.toString().split(",")[1].trim());
}
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
int totalEmployee = employeeList.size();
int i, j;
int distance;
// Compute and emit the communication distance (intermediate node count) for every pair of employees
for (i = 0; i < (totalEmployee - 1); i++) {
for (j = (i + 1); j < totalEmployee; j++) {
distance = calculateDistance(i, j);
String value = employeeList.get(i) + " and " + employeeList.get(j) + " = " + distance;
context.write(NullWritable.get(), new Text(value));
}
}
}
/**
* The manager relation arranges all employees into a tree, so the number of intermediate
* nodes between two employees maps to the distance between them in that tree.
* Any two nodes meet at some common ancestor, which motivates the algorithm below.
*/
private int calculateDistance(int i, int j) {
String employeeA = employeeList.get(i);
String employeeB = employeeList.get(j);
int distance = 0;
// A is B's manager, or vice versa: they communicate directly
if (employeeToManagerMap.get(employeeA).equals(employeeB) || employeeToManagerMap.get(employeeB).equals(employeeA)) {
distance = 0;
}
// A and B share the same manager
else if (employeeToManagerMap.get(employeeA).equals(
employeeToManagerMap.get(employeeB))) {
distance = 0;
} else {
// Manager chains for A and B
List<String> employeeA_ManagerList = new ArrayList<String>();
List<String> employeeB_ManagerList = new ArrayList<String>();
// Build the manager chain starting from A
employeeA_ManagerList.add(employeeA);
String current = employeeA;
while (!employeeToManagerMap.get(current).isEmpty()) {
current = employeeToManagerMap.get(current);
employeeA_ManagerList.add(current);
}
// Build the manager chain starting from B
employeeB_ManagerList.add(employeeB);
current = employeeB;
while (!employeeToManagerMap.get(current).isEmpty()) {
current = employeeToManagerMap.get(current);
employeeB_ManagerList.add(current);
}
int ii = 0, jj = 0;
String currentA_manager, currentB_manager;
boolean found = false;
// Walk both chains to find the first common manager (the meeting point)
for (ii = 0; ii < employeeA_ManagerList.size(); ii++) {
currentA_manager = employeeA_ManagerList.get(ii);
for (jj = 0; jj < employeeB_ManagerList.size(); jj++) {
currentB_manager = employeeB_ManagerList.get(jj);
if (currentA_manager.equals(currentB_manager)) {
found = true;
break;
}
}
if (found) {
break;
}
}
// Number of intermediate nodes on the path between the two employees
distance = ii + jj - 1;
}
return distance;
}
}
@Override
public int run(String[] args) throws Exception {
// Instantiate the job object and set the job name
Job job = new Job(getConf(), "Q10MiddlePersonsCountForComm");
job.setJobName("Q10MiddlePersonsCountForComm");
// Set the Mapper and Reducer classes
job.setJarByClass(Q10MiddlePersonsCountForComm.class);
job.setMapperClass(MapClass.class);
job.setReducerClass(Reduce.class);
// Set the map output key/value classes
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(Text.class);
// Set the output format class and the reducer output key/value classes
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(Text.class);
// Argument 1: employee data path; argument 2: output path
String[] otherArgs = new GenericOptionsParser(job.getConfiguration(), args).getRemainingArgs();
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
job.waitForCompletion(true);
return job.isSuccessful() ? 0 : 1;
}
/**
* Main method, execution entry point
* @param args input arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Q10MiddlePersonsCountForComm(), args);
System.exit(res);
}
}