Let's first look at how data gets written into HBase:
MapReduce jobs usually write to HBase through TableOutputFormat, creating Put objects in the reducer and sending them straight to the cluster. With large volumes of data this is inefficient: HBase blocks the writes and triggers frequent flush, split and compact operations, producing heavy I/O, and the load also threatens the stability of the HBase nodes (long GC pauses slow responses until nodes time out and drop out of the cluster, setting off a chain reaction). HBase also supports a bulk load path. Because HBase stores its data on HDFS in a fixed format (HFiles), we can generate those persistent HFiles directly on HDFS and then move them into the right place, loading a huge volume of data in one step. Done with a MapReduce job, this is efficient and convenient, does not consume region resources or add load while writing, greatly improves throughput for large writes, and relieves the write pressure on the HBase nodes.
Replacing the direct TableOutputFormat write with this generate-HFiles-then-BulkLoad approach has the following advantages:
(1) It removes the insert pressure on the HBase cluster.
(2) The job runs faster, shortening its execution time.
For now this approach only applies when the table has a single column family; newer HBase releases lift the single-column-family restriction.
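To see the shape of the whole thing before diving into the details, here is a condensed sketch of the two phases: a MapReduce job whose output format is HFileOutputFormat2, followed by a LoadIncrementalHFiles call. The input and staging paths are placeholders; the full, working version of each piece follows step by step below.
// Condensed preview of the two phases; the complete job is shown in detail later in this post.
// The input/output paths here are placeholders.
static void bulkLoadOutline(Configuration conf, Connection connection, Admin admin, TableName tn) throws Exception {
    Table table = connection.getTable(tn);
    RegionLocator regionLocator = connection.getRegionLocator(tn);

    // Phase 1: a MapReduce job that writes sorted HFiles instead of issuing Puts
    Job job = Job.getInstance(conf, "BulkLoad");
    job.setJarByClass(ClickLogBulkLoadJob.class);
    job.setMapperClass(ClickLogBulkLoadMap.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(HFileOutputFormat2.class);
    FileInputFormat.setInputPaths(job, new Path("/data/clicklog"));      // placeholder input
    Path hfileDir = new Path("/tmp/hfile-output");                       // placeholder staging dir
    FileOutputFormat.setOutputPath(job, hfileDir);
    // wires in the sorting reducer and TotalOrderPartitioner so the HFiles match region boundaries
    HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
    job.waitForCompletion(true);

    // Phase 2: hand the finished HFiles to the RegionServers (essentially an HDFS rename, not a re-write)
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    loader.doBulkLoad(hfileDir, admin, table, regionLocator);
}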
With the basic bulk load process understood, let's write the program.
Main function flow:
1. Load the cluster configuration.
2. Check whether the table needs to be created.
3. Run the bulkload job.
public static void main(String[] args) {
    Configuration conf = HadoopConfiguration.getConf();
    conf.set(XXT_CLICKLOG_COLUMN_FAMILY, familyName);
    conf.set("hbase.zookeeper.quorum", "cmaster1.hadoop.xxt.cn,cmaster0.hadoop.xxt.cn,cslave0.hadoop.xxt.cn");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");
    conf.set("hbase.fs.tmp.dir", "/tmp/hbase/bulkload/hbase-staging");
    Connection connection = null;
    Admin admin = null;
    boolean isSuccess = false;
    try {
        connection = ConnectionFactory.createConnection(conf);
        admin = connection.getAdmin();
        TableName tn = TableName.valueOf(tableName);
        createTableIfNotExist(admin, tn);
        isSuccess = runBulkLoadJob(connection, admin, tn, conf);
    } catch (IOException e) {
        log.error("Clicklog bulkload job execute failed!", e);
        throw new RuntimeException("Clicklog bulkload job execute failed!", e);
    } finally {
        try {
            if (admin != null) {
                admin.close();
            }
            if (connection != null) {
                connection.close();
            }
        } catch (IOException e) {
            log.error("Failed to close HBase admin/connection", e);
        }
    }
    // calling System.exit inside the try block would skip the finally cleanup, so exit here instead
    System.exit(isSuccess ? 0 : 1);
}
If the table does not exist, create it: define the table name and column family, and enable Snappy compression on the family.
Pre-split the table into regions according to the distribution of the rowkeys in the business data, so that the data lands as evenly as possible on each region (one way to derive such split keys programmatically is sketched after the method below).
public static void createTableIfNotExist(Admin admin, TableName tableName) throws IOException {
    if (!admin.tableExists(tableName)) {
        HTableDescriptor tableDesc = new HTableDescriptor(tableName);
        HColumnDescriptor columnDesc = new HColumnDescriptor(Bytes.toBytes(familyName));
        columnDesc.setCompressionType(Algorithm.SNAPPY);
        tableDesc.addFamily(columnDesc);
        byte[][] splitKey = new byte[][]{Bytes.toBytes("000001000000"), Bytes.toBytes("000010000000"),
                Bytes.toBytes("000050000000"), Bytes.toBytes("000100000000")};
        admin.createTable(tableDesc, splitKey);
        log.info("create table success");
    } else {
        log.info("Table already exists!");
    }
}
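The split keys above are hard-coded for this rowkey scheme. Since the prefixes are just zero-padded numeric IDs, the same boundaries could also be built programmatically; the helper below is only a sketch of that idea, and the boundary values and 12-character padding width are assumptions taken from the literals above rather than part of the original job.
/**
 * Sketch: build split keys from a list of numeric ID boundaries,
 * zero-padded to the fixed rowkey-prefix width used by this table.
 * The boundary values are illustrative only.
 */
private static byte[][] buildSplitKeys(long[] boundaries, int width) {
    byte[][] splitKeys = new byte[boundaries.length][];
    for (int i = 0; i < boundaries.length; i++) {
        // e.g. 1000000 with width 12 -> "000001000000"
        splitKeys[i] = Bytes.toBytes(String.format("%0" + width + "d", boundaries[i]));
    }
    return splitKeys;
}
// usage, reproducing the hard-coded boundaries above:
// byte[][] splitKey = buildSplitKeys(new long[]{1000000L, 10000000L, 50000000L, 100000000L}, 12);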
Finally, run the bulkload job:
public static boolean runBulkLoadJob(Connection connection, Admin admin, TableName tn, Configuration conf) {
    try {
        Job job = Job.getInstance(conf, "BulkLoad");
        /**
         * Ship the third-party dependency jars via the distributed cache.
         */
        FileSystem fs = FileSystem.get(conf);
        FileStatus[] files = fs.listStatus(new Path(DISTRIBUTE_LIB_PATH));
        for (FileStatus file : files) {
            log.info("file path:" + file.getPath().toString());
            job.addCacheFile(file.getPath().toUri());
        }
        job.setJarByClass(ClickLogBulkLoadJob.class);
        job.setMapperClass(ClickLogBulkLoadMap.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);
        job.setSpeculativeExecution(false);
        job.setReduceSpeculativeExecution(false);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(HFileOutputFormat2.class);
        FileInputFormat.setInputPaths(job, inputPath);
        finalOutputPath = outputPath + System.currentTimeMillis();
        FileOutputFormat.setOutputPath(job, new Path(finalOutputPath));
        Table table = connection.getTable(tn);
        //HTable htable = new HTable(conf, tableName);
        RegionLocator regionLocator = connection.getRegionLocator(tn);
        // wires in the sorting reducer, TotalOrderPartitioner and reduce-task count
        // so the generated HFiles line up with the table's region boundaries
        HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
        //HFileOutputFormat2.configureIncrementalLoad(job, htable);
        if (job.waitForCompletion(true)) {
            log.info("Clicklog bulkload job execute success, load data to hbase...");
            doBulkLoadData(job, admin, table, regionLocator);
            processFinalFile(conf, true);
            //doBulkLoadData(job, htable);
            return true;
        } else {
            processFinalFile(conf, false);
            log.error("Clicklog bulkload job execute failed!");
            return false;
        }
    } catch (Exception e) {
        log.error("Clicklog bulkload job execute failed!", e);
        throw new RuntimeException("Clicklog bulkload job execute failed!", e);
    }
}
The mapper (ClickLogBulkLoadMap, set on the job above) parses each '#'-delimited click-log line, builds the rowkey, and emits an ImmutableBytesWritable/Put pair:
public static class ClickLogBulkLoadMap extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        /**
         * Input record format:
         * xxtsessionId webId ip agentType
         * time url urlParam referUrl referUrlParam device os net browser remainTime
         */
        // per-record logging is expensive in a bulk job, so keep it at debug level
        log.debug("-----this data-----" + value.toString());
        String[] valueStrSplit = value.toString().split("#");
        String hkey = getRowKey(valueStrSplit[1], valueStrSplit[4]);
        String[] columnValue = new String[]{valueStrSplit[2], valueStrSplit[3], valueStrSplit[5], valueStrSplit[6],
                valueStrSplit[7], valueStrSplit[8], valueStrSplit[9], valueStrSplit[10], valueStrSplit[11],
                valueStrSplit[12], valueStrSplit[13]};
        final byte[] rowKey = Bytes.toBytes(hkey);
        final ImmutableBytesWritable hKey = new ImmutableBytesWritable(rowKey);
        Put put = new Put(rowKey);
        for (int i = 0; i < columnNames.length; i++) {
            log.debug("column name:" + columnNames[i] + ", column value:" + columnValue[i]);
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes(columnNames[i]), Bytes.toBytes(columnValue[i]));
        }
        context.write(hKey, put);
    }
}
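getRowKey is not shown in the original code, yet it decides both the sort order of the generated HFiles and how evenly the data spreads over the pre-split regions. Purely as an illustration, and not the author's actual implementation, it might pad webId to the same fixed width as the split keys and append the timestamp:
/**
 * Hypothetical sketch of getRowKey: a zero-padded webId prefix keeps the key
 * aligned with the fixed-width split keys defined at table creation, and the
 * timestamp suffix keeps one user's click events adjacent and time-ordered.
 */
private static String getRowKey(String webId, String time) {
    String paddedWebId = String.format("%012d", Long.parseLong(webId));  // assumed 12-digit prefix
    return paddedWebId + "_" + time;
}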
private static void doBulkLoadData(Job job, Admin admin, Table table, RegionLocator regionLocator) {
    try {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(job.getConfiguration());
        loader.doBulkLoad(new Path(finalOutputPath), admin, table, regionLocator);
        log.info("bulkload data success!");
    } catch (Exception e) {
        log.error("bulkload data failed!", e);
        throw new RuntimeException("bulkload data failed!", e);
    }
}
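processFinalFile is also not shown; judging from its call sites it handles the temporary HFile output directory after the job, differently for success and failure. Below is only a sketch of what such a helper could look like; its behaviour is an assumption, not the original implementation.
/**
 * Hypothetical sketch of processFinalFile: after a successful bulk load the
 * staging directory only holds empty column-family directories (the HFiles
 * have been moved into HBase), so it can be removed; on failure the partial
 * output is kept under a renamed path for troubleshooting.
 */
private static void processFinalFile(Configuration conf, boolean jobSucceeded) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path output = new Path(finalOutputPath);
    if (jobSucceeded) {
        fs.delete(output, true);
    } else {
        fs.rename(output, new Path(finalOutputPath + "_failed"));
    }
}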