Hadoop MapReduce: Custom Bean Serialization

Implementing serialization takes seven steps:

  1. Implement the Writable interface.
  2. Deserialization instantiates the bean through its no-argument constructor (via reflection), so the class must provide one.
  3. Override the serialization method, write().
  4. Override the deserialization method, readFields().
  5. Keep serialization and deserialization consistent: fields must be read in exactly the same order they were written.
  6. Override toString() so the output is easy to inspect.
  7. If the custom bean is to be transmitted as a key, it must also implement the Comparable interface (in Hadoop, typically via WritableComparable), because the shuffle phase of the MapReduce framework requires that keys be sortable; see the sketch after the reference code.
  8. Reference code:
package hadoop.mapReduce.flowSum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable {
    private long upFlow;
    private long downFlow;
    private long sumFlow;
    public FlowBean() {
        // Required: the framework creates the bean via reflection during
        // deserialization, so a no-argument constructor must exist.
    }
    @Override
    public void write(DataOutput out) throws IOException {
        // Serialize: the order fields are written here defines the wire format
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }
    @Override
    public void readFields(DataInput in) throws IOException {
        // Deserialize: fields must be read in exactly the same order they were written
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }
    @Override
    public String toString() {
        // Tab-separated output so job results are easy to inspect
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
    public long getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }
    public long getDownFlow() {
        return downFlow;
    }
    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }
    public long getSumFlow() {
        return sumFlow;
    }
    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }
    // Convenience setter so a single bean instance can be reused across records
    public void set(long upFlow, long downFlow, long sumFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = sumFlow;
    }
    public FlowBean(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }
}
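
As step 7 notes, a bean transmitted as a key must be sortable. Below is a minimal sketch of a key-capable variant; the class name FlowBeanKey and the choice to sort descending by sumFlow are assumptions for illustration, not part of the original code.

package hadoop.mapReduce.flowSum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

// Hypothetical key variant: WritableComparable combines Writable with
// Comparable, so the shuffle phase can sort keys of this type.
public class FlowBeanKey implements WritableComparable<FlowBeanKey> {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    public FlowBeanKey() {
        // Required for reflection during deserialization
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Same order as write()
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    @Override
    public int compareTo(FlowBeanKey other) {
        // Descending by total flow (assumed ordering)
        return Long.compare(other.sumFlow, this.sumFlow);
    }
}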

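For context, here is a minimal sketch of how the bean might be used as a map output value in a flow-summing job. It assumes input lines of the form phone\tupFlow\tdownFlow; the class names FlowSumMapper and FlowSumReducer and the field layout are assumptions.

package hadoop.mapReduce.flowSum;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical mapper: emits <phone, FlowBean> pairs.
public class FlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private Text phone = new Text();
    private FlowBean bean = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Assumed input layout: phone \t upFlow \t downFlow
        String[] fields = value.toString().split("\t");
        phone.set(fields[0]);
        long up = Long.parseLong(fields[1]);
        long down = Long.parseLong(fields[2]);
        bean.set(up, down, up + down);
        context.write(phone, bean);
    }
}

// Hypothetical reducer: sums up/down flow per phone number.
class FlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    private FlowBean result = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long sumUp = 0;
        long sumDown = 0;
        for (FlowBean b : values) {
            sumUp += b.getUpFlow();
            sumDown += b.getDownFlow();
        }
        result.set(sumUp, sumDown, sumUp + sumDown);
        context.write(key, result);
    }
}

The driver would then register these with job.setMapperClass(FlowSumMapper.class) and job.setReducerClass(FlowSumReducer.class), and, because the map output value type is a custom class, declare it with job.setMapOutputValueClass(FlowBean.class).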