hbase java客户端实现

hbase java客户端实现

  • Hbase 安装:
    • 伪分布安装:
      • 下载并解压
      • 配置修改
        • hbase-env.sh
        • hbase-site.xml
        • regionservers
      • 启动
  • Java 客户端
    • pom.xml
    • 代码实现

Hbase 安装:

伪分布安装:

下载并解压

下载页面: http://hbase.apache.org/downloads.html
解压命令 :

	tar -xzvf hbase-1.4.4.tar.gz -C targetDir

配置修改

hbase-env.sh

在 hbase-env.sh 加入 环境变量 JAVA_HOME, HBASE_HOME。
如果 没有单独的zookeeper,可设置

export HBASE_MANAGES_ZK=true

hbase-site.xml

<!-- hbase-site.xml — tags reconstructed; the original extraction stripped all XML markup -->
<configuration>
	<property>
		<name>hbase.rootdir</name>
		<value>file:///root/hbase/data</value>
		<description>The directory shared by region servers.</description>
	</property>
	<property>
		<name>zookeeper.session.timeout</name>
		<value>120000</value>
	</property>
	<property>
		<name>hbase.tmp.dir</name>
		<value>/tmp/hbase/tmp</value>
	</property>
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
</configuration>
regionservers

在regionservers 文件中加regionserver host list ;
这里我只加了 hbase-host
并在/etc/hosts 中加入 hbase-host 的IP 映射。

启动

启动命令:

cd $HBASE_HOME/bin
./start-hbase.sh	

hbase shell client

./hbase shell

关闭Hbase命令:

./stop-hbase.sh

Java 客户端

pom.xml

<!-- pom.xml dependencies — tags reconstructed; the original extraction stripped all XML markup -->
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>2.4.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>1.5.2</version>
</dependency>
注: 即使 hbase.rootdir 为 file:///root/hbase/data 这样的本地路径, 因为 hbase 并不直接完成数据读写操作, 所以仍需要 hadoop jar 包来支持 hbase 的读写操作。

在系统环境中加入 HBASE_CONF_DIR 指定 $HBASE_HOME/conf,
从而可以 将hbase-site.xml的配置 加载到config中

this.config = HBaseConfiguration.create();
 if (System.getProperty("HBASE_CONF_DIR") != null) {
            config.addResource(new Path(System.getProperty("HBASE_CONF_DIR"), "hbase-site.xml"));
        } else {
            config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
//            logger.info("HBASE_CONF_DIR : {}", System.getenv("HBASE_CONF_DIR"));
            File p = new File(System.getenv("HBASE_CONF_DIR")+"/"+ "hbase-site.xml");
//            logger.info("HBASE_CONF_DIR : file {}", p.exists() );
        }

代码实现

/**
 * Notes:
 * 1. Even if the HBase data is stored locally
 *    (with {hbase.rootdir} set to e.g. "file:///Users/cwiz/hbase/data"),
 *    a compatible Hadoop client jar is still required, because HBase
 *    delegates the actual file I/O to the Hadoop filesystem layer.
 */

public class HBaseClient implements Closeable {

    protected Connection connection;
    protected Configuration config;

    protected String tableName = "javaClient";
    protected String cf_create = "create";
    protected String cf_delete = "delete";
    {
        this.config = HBaseConfiguration.create();
//        config.set("hbase.zookeeper.quorum", "192.168.8.104");
//        config.set("hbase.zookeeper.property.clientPort", "2181");
        if (System.getProperty("HBASE_CONF_DIR") != null) {
            config.addResource(new Path(System.getProperty("HBASE_CONF_DIR"), "hbase-site.xml"));
        } else {
            config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
//            logger.info("HBASE_CONF_DIR : {}", System.getenv("HBASE_CONF_DIR"));
            File p = new File(System.getenv("HBASE_CONF_DIR")+"/"+ "hbase-site.xml");
//            logger.info("HBASE_CONF_DIR : file {}", p.exists() );
        }

        try {
            this.connection  = ConnectionFactory.createConnection(this.config);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public HBaseClient(){
    }

    public void createTable(String tableName,Configuration conf, String... columnFamilyNames) {
        System.out.println("start create table "+tableName);
        try {

            HBaseAdmin hBaseAdmin = new HBaseAdmin(conf);
            if (hBaseAdmin.tableExists(tableName)) {
                System.out.println(tableName + " already exists");
                //hBaseAdmin.disableTable(tableName);
                //hBaseAdmin.deleteTable(tableName);
                return;
            }
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            for(int i=0; i keyValues) throws IOException {
        Table table = this.getTable(getTableName(tableName));
        Put put = generatePut(rowkey, keyValues);
        table.put(put);
    }

    public Put generatePut(String rowkey, Map keyValues){
        Put put = new Put(Bytes.toBytes(rowkey));
        ObjectMapper mapper = new ObjectMapper();
        for(Map.Entry entry : keyValues.entrySet()){
            String[] cols = entry.getKey().split(":");
            try {
                String value  = mapper.writeValueAsString( entry.getValue());
                put.addColumn(Bytes.toBytes(cols[0]), Bytes.toBytes(cols[1]), Bytes.toBytes( value ));
            } catch (JsonProcessingException e) {
                e.printStackTrace();
            }
        }

        //System.out.println(put.toString());
        return put;
    }

    public Put generatePut(String rowkey, String columnFamily, Map keyValues){
        Put put = new Put(Bytes.toBytes(rowkey));
        for(Map.Entry entry : keyValues.entrySet()){
            put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()));
        }
        //System.out.println(put.toString());
        return put;
    }

    public boolean isTableExists(TableName tableName) throws IOException {
        boolean result = false;
        Admin admin = this.connection.getAdmin();
        return admin.tableExists(tableName);
    }

    public Result read(String tableName , String rowkey){
        Get get = new Get(Bytes.toBytes(rowkey));
        get.addFamily(Bytes.toBytes(cf_create));
        Table table = getTable(getTableName(tableName));
        Result result = null;
        try {
            result = table.get(get);
        } catch (IOException e) {
            e.printStackTrace();
        }
        ObjectMapper mapper = new ObjectMapper();
        ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
        try {
            System.out.println(writer.writeValueAsString(result));
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
        return result;
    }
    public void close() {
        try {
            this.connection.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        HBaseClient hBaseClient = new HBaseClient();
        hBaseClient.createTable(hBaseClient.tableName,
                hBaseClient.config ,
                hBaseClient.cf_create,
                hBaseClient.cf_delete);
        Map  keyValue = new HashMap();
        keyValue.put(hBaseClient.cf_create+":c1", "v1");
        keyValue.put(hBaseClient.cf_create+":c2", "v2");
        String rowkey = "row1";
        try {
            hBaseClient.write(hBaseClient.tableName , rowkey, keyValue);
        } catch (IOException e) {
            e.printStackTrace();
        }

        Result result= hBaseClient.read(hBaseClient.tableName, rowkey);
        System.out.println();
        System.out.println("result :" + result );

        System.out.println(Bytes.toString(result.getRow()));
        System.out.println("rawCells");
        Cell[] cells = result.rawCells();
        for(Cell cell : cells){
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneFamily(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneQualifier(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneValue(cell)) +" , "
                    + cell.getTimestamp());
        }
//        CellScanner scanner = CellUtil.createCellScanner(cells)
    }
}

你可能感兴趣的:(hbase)