There are several ways to count the number of rows in an HBase table:
1. RowCounter, the MapReduce-based row counter that ships with HBase
$HBASE_HOME/bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter 'tableName'
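The same job can also be submitted from Java instead of the shell. The sketch below assumes the 0.94/1.x-era API, where RowCounter exposes a static createSubmittableJob(Configuration, String[]) factory; the counter group and name strings used to read back the result are likewise assumptions based on that source line and should be verified against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.RowCounter;
import org.apache.hadoop.mapreduce.Job;

public class RowCounterLauncher {
    public static long runRowCounter(String tableName) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Build the same MapReduce job that the shell command submits.
        Job job = RowCounter.createSubmittableJob(conf, new String[] { tableName });
        if (!job.waitForCompletion(true)) {
            throw new IllegalStateException("RowCounter job failed for table " + tableName);
        }
        // The mapper increments a ROWS counter once per row; the group/name strings
        // below follow the 1.x source (RowCounterMapper.Counters.ROWS) and are assumptions.
        return job.getCounters()
                .findCounter("org.apache.hadoop.hbase.mapreduce.RowCounter$RowCounterMapper$Counters", "ROWS")
                .getValue();
    }
}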
2. Counting rows with a Scan and a Filter
public static long rowCount(String tableName) {
    // configuration and logger are fields of the surrounding utility class
    long rowCount = 0;
    try {
        HTable table = new HTable(configuration, tableName);
        Scan scan = new Scan();
        // FirstKeyOnlyFilter returns only the first cell of each row,
        // so very little data is sent back to the client.
        scan.setFilter(new FirstKeyOnlyFilter());
        ResultScanner resultScanner = table.getScanner(scan);
        try {
            for (Result result : resultScanner) {
                rowCount++; // one Result per row
            }
        } finally {
            resultScanner.close();
            table.close();
        }
    } catch (IOException e) {
        logger.info(e.getMessage(), e);
    }
    return rowCount;
}
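The HTable constructor used above has been deprecated since HBase 1.0. The following is a minimal, self-contained sketch of the same scan-and-count written against the Connection/Table API (the class name ScanRowCounter is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class ScanRowCounter {
    public static long rowCount(Configuration conf, String tableName) throws IOException {
        long rowCount = 0;
        // try-with-resources closes the connection, table and scanner in all cases
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf(tableName))) {
            Scan scan = new Scan();
            scan.setFilter(new FirstKeyOnlyFilter()); // only the first cell of each row is returned
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result ignored : scanner) {
                    rowCount++; // one Result per row
                }
            }
        }
        return rowCount;
    }
}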
3. Counting rows with the AggregateImplementation coprocessor

Attach the AggregateImplementation coprocessor to the table, then let AggregationClient ask each region for its partial count and sum the results on the client side. The table has to be disabled while its descriptor is modified.

public static void addTableCoprocessor(String tableName, String coprocessorClassName) {
    try {
        // The descriptor can only be modified while the table is disabled.
        admin.disableTable(tableName);
        HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes(tableName));
        htd.addCoprocessor(coprocessorClassName);
        admin.modifyTable(Bytes.toBytes(tableName), htd);
        admin.enableTable(tableName);
    } catch (IOException e) {
        logger.info(e.getMessage(), e);
    }
}

public static long rowCount(String tableName, String family) {
    AggregationClient ac = new AggregationClient(configuration);
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes(family));
    long rowCount = 0;
    try {
        // rowCount() invokes the coprocessor on every region and sums the partial counts.
        rowCount = ac.rowCount(Bytes.toBytes(tableName), new LongColumnInterpreter(), scan);
    } catch (Throwable e) {
        logger.info(e.getMessage(), e);
    }
    return rowCount;
}
@Test
public void testTableRowCount() {
    String coprocessorClassName = "org.apache.hadoop.hbase.coprocessor.AggregateImplementation";
    HBaseUtils.addTableCoprocessor("user", coprocessorClassName);
    long rowCount = HBaseUtils.rowCount("user", "basic");
    System.out.println("rowCount: " + rowCount);
}
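Note: instead of attaching the coprocessor table by table, AggregateImplementation can also be loaded on every region server by adding org.apache.hadoop.hbase.coprocessor.AggregateImplementation to the hbase.coprocessor.region.classes property in hbase-site.xml, which avoids the disable/enable cycle shown above.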