有订单数据如下
Order_0000001,Pdt_01,222.8
Order_0000001,Pdt_05,25.8
Order_0000002,Pdt_05,325.8
Order_0000002,Pdt_03,522.8
Order_0000002,Pdt_04,122.4
Order_0000003,Pdt_01,222.8
Order_0000003,Pdt_01,322.8
现在要求:求出订单号相同的记录中,成交金额最大的那条订单信息。
比如订单号Order_0000001的最大交易额为222.8,那么需要挑选该订单记录。
分析:
我们可以在map中将订单id跟订单金额封装成一个bean作为key,然后写出去,传到reduce。这里用前面介绍的方法:让bean实现WritableComparable并重写compareTo(用来排序)方法,使得不同订单号的订单按照订单号由小到大排序,相同订单号的订单按照成交额倒排序。但是存在一个问题:此时由于bean作为key,不一定能保证相同订单号的记录传到同一个reduce中;都不在同一个reduce中,那肯定也无法比较订单号相同的记录了。这个时候就需要自己指定自定义的Partitioner(用来分区),使得相同订单id的订单记录传到同一个reduce中。最后,由于各条记录的key互不相同,传到reduce中的时候,即使是相同订单id的bean也只会一条一条地传入reduce,那么还怎么去比较呢?此时就要使用自定义GroupingComparator(用来分组)了,指定只要是订单id相同的bean都看为一组。拿Order_0000002的三条记录来说(三个bean),由于会被看成一组,那么传到reduce方法中的时候,key只传第一个bean;由于我们前面相同订单id的bean是按照成交额由高到低排的,那么这个时候传进来的key就肯定是成交额最大的bean。
程序实现
自定义Bean
public class InfoBean implements WritableComparable {
private Text orderId;//订单id
private DoubleWritable payment;//成交金额
//序列化方法
public void write(DataOutput out) throws IOException {
out.writeUTF(orderId.toString());
out.writeDouble(payment.get());
}
//反序列化方法 顺序和序列化方法一致
public void readFields(DataInput in) throws IOException {
this.orderId = new Text(in.readUTF());
this.payment = new DoubleWritable(in.readDouble());
}
//排序方法,shuffle过程中的排序会用到该方法
//不同订单号的订单按照由小到大排序,相同订单号的订单按照成交额倒排序
public int compareTo(InfoBean infoBean) {
int comt = this.orderId.compareTo(infoBean.getOrderId());
//订单id相同
if(comt==0){
//倒排序
comt = -this.payment.compareTo(infoBean.getPayment());
}
return comt;
}
@Override
public String toString() {
return "orderId=" + orderId + ", payment=" + payment ;
}
public InfoBean() {}
public void setInfoBean(Text orderId, DoubleWritable payment) {
this.orderId = orderId;
this.payment = payment;
}
get、set方法
}
自定义Partitioner
/**
 * Routes every bean with the same order id to the same reduce task, so
 * records of one order can be grouped and compared in a single reducer.
 */
public class OrderPartitioner extends Partitioner<InfoBean, NullWritable> {

    /**
     * Returns the partition (reduce task) number for a key.
     * Mirrors HashPartitioner: mask off the sign bit of the order id's
     * hash, then take it modulo the number of reduce tasks.
     */
    @Override
    public int getPartition(InfoBean key, NullWritable value, int numReduceTasks) {
        int nonNegativeHash = key.getOrderId().hashCode() & Integer.MAX_VALUE;
        return nonNegativeHash % numReduceTasks;
    }
}
自定义GroupingComparator
/**
 * Groups reduce input by order id only: all beans sharing an order id are
 * handed to one reduce() call, whose key is the first bean of the group —
 * the max-payment one, thanks to the shuffle sort order.
 */
public class OrderGroupingComparator extends WritableComparator {

    /**
     * Registers the key class and asks the framework to instantiate it
     * (second argument {@code true}), so the serialized bytes arriving at
     * the comparator can be deserialized into beans before comparison.
     */
    protected OrderGroupingComparator() {
        super(InfoBean.class, true);
    }

    /** Two beans compare equal (same group) iff their order ids match. */
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        InfoBean left = (InfoBean) a;
        InfoBean right = (InfoBean) b;
        return left.getOrderId().compareTo(right.getOrderId());
    }
}
主程序
/**
 * Job driver: for each order id, emit the record with the maximum payment.
 * Relies on InfoBean's sort order (shuffle), OrderPartitioner (same order
 * id -> same reducer) and OrderGroupingComparator (same order id -> one
 * reduce group).
 */
public class MaxPaymentWithOrder {

    /** Parses one CSV line ({@code orderId,productId,payment}) into an InfoBean key. */
    static class MaxPaymentWithOrderMapper extends Mapper<LongWritable, Text, InfoBean, NullWritable> {
        // Reused across map() calls; safe because Hadoop serializes the key
        // at context.write() time.
        private final InfoBean bean = new InfoBean();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input format: Order_0000001,Pdt_01,222.8
            String[] infos = value.toString().split(",");
            bean.setInfoBean(new Text(infos[0]), new DoubleWritable(Double.parseDouble(infos[2])));
            context.write(bean, NullWritable.get());
        }
    }

    /**
     * Beans with the same order id arrive as one group (see
     * OrderGroupingComparator). The key passed in is the group's first
     * bean — the one with the highest payment — so writing the key alone
     * yields the max-payment record per order.
     */
    static class MaxPaymentWithOrderReducer extends Reducer<InfoBean, NullWritable, InfoBean, NullWritable> {
        // NOTE: the parameter must be Iterable<NullWritable>; a raw
        // Iterable does not override Reducer.reduce and fails to compile
        // under @Override.
        @Override
        protected void reduce(InfoBean key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Jar containing the job classes.
        job.setJarByClass(MaxPaymentWithOrder.class);
        job.setMapperClass(MaxPaymentWithOrderMapper.class);
        job.setReducerClass(MaxPaymentWithOrderReducer.class);
        job.setMapOutputKeyClass(InfoBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Final output types.
        job.setOutputKeyClass(InfoBean.class);
        job.setOutputValueClass(NullWritable.class);
        // Number of reduce tasks (must match the Partitioner's range).
        job.setNumReduceTasks(2);
        // Route same order id to the same reducer.
        job.setPartitionerClass(OrderPartitioner.class);
        // Group reduce input by order id only.
        job.setGroupingComparatorClass(OrderGroupingComparator.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean ex = job.waitForCompletion(true);
        System.exit(ex ? 0 : 1);
    }
}
测试:
将工程打包上传到hadoop集群
linux本地创建文件gc.data
hdfs创建输入文件夹/gc/input
将gc.data传到文件夹/gc/input下
运行程序查看结果
[root@mini1 ~]# vi gc.data
Order_0000001,Pdt_01,222.8
Order_0000001,Pdt_05,25.8
Order_0000002,Pdt_05,325.8
Order_0000002,Pdt_03,522.8
Order_0000002,Pdt_04,122.4
Order_0000003,Pdt_01,222.8
Order_0000003,Pdt_01,322.8
[root@mini1 ~]# hadoop fs -mkdir -p /gc/input
[root@mini1 ~]# hadoop fs -put gc.data /gc/input
[root@mini1 ~]# hadoop jar gc.jar com.scu.hadoop.t.groupingcomparator.MaxPaymentWithOrder /gc/input /gc/output
[root@mini1 ~]# hadoop fs -ls /gc/output
Found 3 items
-rw-r--r-- 2 root supergroup 0 2017-10-17 06:16 /gc/output/_SUCCESS
-rw-r--r-- 2 root supergroup 53 2017-10-17 06:16 /gc/output/part-r-00000
-rw-r--r-- 2 root supergroup 106 2017-10-17 06:16 /gc/output/part-r-00001
[root@mini1 ~]# hadoop fs -cat /gc/output/part-r-00000
orderId=Order_0000002, payment=522.8
[root@mini1 ~]# hadoop fs -cat /gc/output/part-r-00001
orderId=Order_0000001, payment=222.8
orderId=Order_0000003, payment=322.8