HBase Coprocessors

1. Definition

HBase coprocessors are a mechanism (framework) that lets part of the user's logic run where the data is stored, on the HBase server side. A coprocessor allows user code to execute inside the server, supporting operations such as the summation and sorting familiar from SQL.
There are two main types: Observer coprocessors and Endpoint coprocessors. An Observer is analogous to a trigger in a relational database, while an Endpoint is similar to a stored procedure and performs data computation.

2. Observer Coprocessors

Observers can be divided into RegionObserver, RegionServerObserver, MasterObserver, and WALObserver.
The four types are described below (a minimal RegionObserver sketch follows the figure):

| Name | Description |
| --- | --- |
| RegionObserver | Provides hooks for client data-manipulation events: Get, Put, Delete, Scan, and so on |
| RegionServerObserver | Handles events that occur on a RegionServer |
| MasterObserver | Provides hooks for DDL-type operations, such as creating, deleting, and altering tables |
| WALObserver | Provides hooks for WAL (write-ahead log) operations |

How Observers work on the RegionServer:

(Figure 1: Observer processing flow on the RegionServer)
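To make the trigger analogy concrete, here is a minimal RegionObserver sketch, not part of the original demo, written against the same 0.98-era API as the endpoint code in section 4; the class name AuditObserver and the log message are illustrative. It runs before every Put on the regions it is attached to, much like a BEFORE INSERT trigger:

package edu.endpoint;

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative observer: fires before every Put, like a BEFORE INSERT trigger.
public class AuditObserver extends BaseRegionObserver {
    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx,
                       Put put, WALEdit edit, Durability durability) throws IOException {
        // A real observer might validate or enrich the Put; here we only log the row key.
        System.out.println("prePut on row: " + Bytes.toString(put.getRow()));
    }
}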

3. Endpoint Coprocessors

How an Endpoint coprocessor works is shown in the figure below. Unlike an Observer, an Endpoint is invoked explicitly by the client: each region computes a partial result in parallel, and the client merges the per-region results.
(Figure 2: Endpoint coprocessor invocation flow)

4. Writing an Endpoint coprocessor: a demo

  1. Write the endpoint.proto file, defining the message formats.
option java_package = "edu.endpoint";  
option java_outer_classname = "Sum";  
option java_generic_services = true;  
option java_generate_equals_and_hash = true;  
option optimize_for = SPEED;  
message SumRequest {  
    required string family = 1;  
    required string column = 2;  
}  
message SumResponse {  
    required int64 sum = 1 [default = 0];  
}  

service SumService {  
    rpc getSum(SumRequest)  
        returns (SumResponse);  
}  
  2. Compile the .proto file with the protoc command; this generates Sum.java, whose nested SumRequest, SumResponse, and SumService classes are used below:
protoc endpoint.proto --java_out=./
  3. Write the server-side code, SumEndPoint.java (created in Eclipse in this demo).
package edu.endpoint;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;

import edu.endpoint.Sum.SumRequest;
import edu.endpoint.Sum.SumResponse;
import edu.endpoint.Sum.SumService;

public class SumEndPoint extends SumService implements Coprocessor, CoprocessorService {
    private RegionCoprocessorEnvironment env;

    @Override
    public void getSum(RpcController controller, SumRequest request, RpcCallback<SumResponse> done) {
        // Scan only the requested column; addColumn already restricts the family.
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes(request.getFamily()), Bytes.toBytes(request.getColumn()));

        SumResponse response = null;
        InternalScanner scanner = null;
        try {
            // The InternalScanner reads only the region this coprocessor instance serves,
            // so each region returns its own partial sum.
            scanner = env.getRegion().getScanner(scan);
            List<Cell> results = new ArrayList<Cell>();
            boolean hasMore;
            long sum = 0L;
            do {
                hasMore = scanner.next(results);
                for (Cell cell : results) {
                    // Cell values are stored as numeric strings in this demo.
                    sum += Long.parseLong(new String(CellUtil.cloneValue(cell)));
                }
                results.clear();
            } while (hasMore);
            response = SumResponse.newBuilder().setSum(sum).build();
        } catch (IOException e) {
            // Propagate the error back to the client through the controller.
            ResponseConverter.setControllerException(controller, e);
        } finally {
            if (scanner != null) {
                try {
                    scanner.close();
                } catch (IOException ignored) {
                }
            }
        }
        done.run(response);
    }

    @Override
    public Service getService() {
        return this;
    }

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
        if (env instanceof RegionCoprocessorEnvironment) {
            this.env = (RegionCoprocessorEnvironment) env;
        } else {
            throw new CoprocessorException("Must be loaded on a region");
        }
    }

    @Override
    public void stop(CoprocessorEnvironment env) throws IOException {
        // Nothing to clean up.
    }
}
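Before the client can call the endpoint, the compiled classes (SumEndPoint plus the generated Sum classes) must be packaged into a jar and attached to the table. The original walkthrough does not show this step; below is a minimal sketch that creates the demo table and attaches the endpoint through the HTableDescriptor API. The helper class name CreateSumTable and the HDFS jar path are assumptions for illustration.

package edu.endpoint;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

// Hypothetical setup class: creates 'sum_table' and attaches the endpoint jar.
public class CreateSumTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("sum_table"));
        desc.addFamily(new HColumnDescriptor("d"));
        // Attach the coprocessor: jar location on HDFS (illustrative path),
        // coprocessor class, priority, and no extra arguments.
        desc.addCoprocessor(SumEndPoint.class.getName(),
                new Path("hdfs:///user/hbase/endpoint.jar"),
                Coprocessor.PRIORITY_USER, null);

        admin.createTable(desc);
        admin.close();
    }
}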
  4. Write the client code, which asks every region for its partial sum of one column and adds the results. The class is named SumClient so it does not clash with the Test class in step 5.
package edu.endpoint;

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;

import com.google.protobuf.ServiceException;

import edu.endpoint.Sum.SumRequest;
import edu.endpoint.Sum.SumResponse;
import edu.endpoint.Sum.SumService;

public class SumClient {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "cluster1");
        HConnection conn = HConnectionManager.createConnection(conf);
        HTableInterface table = conn.getTable("sum_table");

        long h = 0L;

        final SumRequest request = SumRequest.newBuilder().setFamily("d").setColumn("b").build();
        try {
            // Invoke the endpoint on every region of the table (null start/end keys),
            // collecting one partial sum per region.
            Map<byte[], Long> result = table.coprocessorService(SumService.class, null, null,
                    new Batch.Call<SumService, Long>() {
                        @Override
                        public Long call(SumService aggregate) throws IOException {
                            BlockingRpcCallback<SumResponse> rpcCallback =
                                    new BlockingRpcCallback<SumResponse>();
                            aggregate.getSum(null, request, rpcCallback);
                            SumResponse response = rpcCallback.get();
                            return response.hasSum() ? response.getSum() : 0L;
                        }
                    });
            // Merge the per-region partial sums on the client side.
            for (Long sum : result.values()) {
                h += sum;
            }
            System.out.println("sum = " + h);
        } catch (ServiceException e) {
            e.printStackTrace();
        } catch (Throwable e) {
            e.printStackTrace();
        }
        table.close();
        conn.close();
    }
}
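Note the BlockingRpcCallback: the protobuf stub generated from endpoint.proto is asynchronous, and rpcCallback.get() blocks until the region server has responded, which turns each per-region call into a simple synchronous request.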
  5. Finally, create a test class that inserts sample data for the endpoint to sum.
package edu.endpoint;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;

public class Test {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HConnection conn = HConnectionManager.createConnection(conf);
        // Insert into the table the endpoint is attached to; the values must be
        // numeric strings because SumEndPoint parses them with Long.parseLong.
        HTableInterface table = conn.getTable("sum_table");
        Put put = new Put("r".getBytes());
        put.add("d".getBytes(), "b".getBytes(), "100".getBytes());
        put.add("d".getBytes(), "c".getBytes(), "200".getBytes());
        table.put(put);
        table.close();
        conn.close();
    }
}

With the sample row above, running Test and then SumClient should print sum = 100 (only column d:b is summed), and the experiment completes successfully.
