// Loads the AggregateImplementation endpoint coprocessor onto an HBase table and uses it to count the table's rows.

package endPointBatchDel;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.coprocessor.AggregateImplementation;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

/*************************************
 * Class Name: rowcountBycoprocessor
 * Description: registers the AggregateImplementation endpoint coprocessor
 *              on an HBase table and counts its rows server-side.
 * @author: Administrator
 * @create: 2019/4/30
 * @since 1.0.0
 *************************************/
public class rowcountBycoprocessor {

    /**
     * Registers the HBase {@link AggregateImplementation} coprocessor on the
     * given table if it is not already present.
     *
     * <p>The table is briefly disabled while its descriptor is modified and is
     * re-enabled even if the modification fails, so a failure no longer leaves
     * the table offline. Both the connection and the admin handle are closed
     * via try-with-resources (the original code leaked the {@code Connection}).
     *
     * @param conf      cluster configuration (must carry the ZooKeeper quorum)
     * @param tableName name of the target table
     */
    public static void addCoprocessor(Configuration conf, String tableName) {
        byte[] tableNameBytes = Bytes.toBytes(tableName);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             HBaseAdmin hbaseAdmin = (HBaseAdmin) conn.getAdmin()) {
            HTableDescriptor htd = hbaseAdmin.getTableDescriptor(tableNameBytes);
            if (!htd.hasCoprocessor(AggregateImplementation.class.getName())) {
                hbaseAdmin.disableTable(tableNameBytes);
                try {
                    htd.addCoprocessor(AggregateImplementation.class.getName());
                    hbaseAdmin.modifyTable(tableNameBytes, htd);
                } finally {
                    // Bring the table back online even when the descriptor
                    // update threw; otherwise the table stays disabled.
                    hbaseAdmin.enableTable(tableNameBytes);
                }
            }
        } catch (IOException e) {
            // MasterNotRunningException and ZooKeeperConnectionException both
            // extend IOException, so one catch replaces the original three.
            e.printStackTrace();
        }
    }

    /**
     * Counts the rows of {@code tableName} with the server-side aggregation
     * coprocessor, scanning a single column family so each row is visited once.
     *
     * @param conf      cluster configuration
     * @param tableName table to count
     * @param family    column family to scan (should be present on every row,
     *                  otherwise rows lacking it are not counted)
     */
    public static void exeCount(Configuration conf, String tableName, String family) {
        try {
            AggregationClient aggregationClient = new AggregationClient(conf);
            try {
                Scan scan = new Scan();
                // Restrict the scan to one family so rows are counted exactly once.
                scan.addFamily(Bytes.toBytes(family));
                long start = System.currentTimeMillis();
                long rowCount = aggregationClient.rowCount(
                        TableName.valueOf(tableName), new LongColumnInterpreter(), scan);
                System.out.println("Row count: " + rowCount + "; time cost: "
                        + (System.currentTimeMillis() - start) + "ms");
            } finally {
                // NOTE(review): AggregationClient.close() exists since HBase 1.3
                // (HBASE-14224) — confirm the client version in use.
                aggregationClient.close();
            }
        } catch (Throwable e) {
            // AggregationClient.rowCount declares Throwable, so it must be
            // caught here rather than a narrower type.
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        String zkAddress = "host1:2181,host2:2181,host3:2181";
        String tableName = "am_bk";
        // HBaseConfiguration.create() layers hbase-default.xml/hbase-site.xml
        // on top of the Hadoop defaults; a bare `new Configuration()` misses
        // every HBase setting and the client cannot find the cluster properly.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", zkAddress);
        // Raise the RPC timeout: a full-table count can run for a long time.
        conf.setLong("hbase.rpc.timeout", 600000);
        // Larger scanner cache cuts client/server round trips during the scan.
        conf.setLong("hbase.client.scanner.caching", 1000);
        addCoprocessor(conf, tableName);
        exeCount(conf, tableName, "photo");
    }
}

// (Blog footer residue) You may also be interested in: hbase