使用 Maven 进行项目管理,在 pom.xml 中引入下列依赖:
1. org.apache.hbase : hbase-client : 1.4.11
2. org.apache.hbase : hbase-common : 1.4.11
3. jdk.tools : jdk.tools : 1.8(scope 为 system,systemPath 为 ${JAVA_HOME}/lib/tools.jar)
一:创建表t2, 列族f2,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import java.io.IOException;
/**
 * Creates table "t2" with a single column family "f2" in an HBase 1.4.11 cluster.
 *
 * <p>Resources (Connection, Admin) are managed with try-with-resources so they
 * are released even when table creation fails — the original leaked both on error.
 */
public class HBaseCreateTable {
    /**
     * Entry point: connects to the cluster and creates table t2/f2.
     *
     * @param args unused command-line arguments
     * @throws IOException if the ZooKeeper/HBase connection or the create RPC fails
     */
    public static void main(String[] args) throws IOException {
        // Build the client-side HBase configuration.
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        // NOTE(review): the author reported host names never resolved here, so raw IPs are used.
        configuration.set("hbase.zookeeper.quorum",
                "192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        // Both Connection and Admin are AutoCloseable; close them deterministically.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Admin admin = connection.getAdmin()) {
            System.out.println("=========================");
            TableName tableName = TableName.valueOf("t2");
            // Guard: createTable throws TableExistsException if t2 already exists.
            if (admin.tableExists(tableName)) {
                System.out.println("table t2 already exists");
                return;
            }
            // Table descriptor = table name + its column families.
            HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
            // Single column family "f2".
            hTableDescriptor.addFamily(new HColumnDescriptor("f2"));
            admin.createTable(hTableDescriptor);
            System.out.println("create table success");
        }
    }
}
二: 向表t2中添加七条数据,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
/**
 * Inserts seven person rows (row1..row7) into table "t2", column family "f2".
 *
 * <p>Fixes over the original: the javadoc claimed three rows while seven are
 * written; the Connection was never closed (resource leak); and the seven
 * near-identical Put blocks are collapsed into one helper.
 */
public class HBasePutData {

    /** Column family every cell in this example belongs to. */
    private static final byte[] FAMILY = Bytes.toBytes("f2");

    /**
     * Builds a Put for one person record with name/age/college/address
     * columns under family "f2".
     */
    private static Put newPersonPut(String rowKey, String name, String age,
                                    String college, String address) {
        Put put = new Put(Bytes.toBytes(rowKey));
        put.addColumn(FAMILY, Bytes.toBytes("name"), Bytes.toBytes(name));
        put.addColumn(FAMILY, Bytes.toBytes("age"), Bytes.toBytes(age));
        put.addColumn(FAMILY, Bytes.toBytes("college"), Bytes.toBytes(college));
        put.addColumn(FAMILY, Bytes.toBytes("address"), Bytes.toBytes(address));
        return put;
    }

    /**
     * Entry point: writes the seven sample rows and reports success.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or any put RPC fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster (raw IPs; see create-table example).
        configuration.set("hbase.zookeeper.quorum",
                "192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        // try-with-resources closes both the Table and the Connection;
        // the original closed only the table and leaked the connection.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            table.put(newPersonPut("row1", "小明", "30", "北京大学", "北京市海淀区"));
            table.put(newPersonPut("row2", "张三", "20", "贵州大学", "贵州贵阳"));
            table.put(newPersonPut("row3", "李四", "50", "北京外国语大学", "北京市昌平区"));
            table.put(newPersonPut("row4", "王五", "67", "西安电子科技大学", "北京市昌平区"));
            table.put(newPersonPut("row5", "赵六", "55", "四川音乐学院", "贵州贵阳市"));
            table.put(newPersonPut("row6", "周小明", "32", "北京外国语大学", "重庆"));
            table.put(newPersonPut("row7", "王二麻子", "80", "北京外国语大学联合学院", "天津市"));
            System.out.println("put data success!!");
        }
    }
}
三: 查询数据,查询表t2中行键为row1的一整条数据
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import java.io.IOException;
/**
 * Fetches the single row with row key "row1" from table "t2" and prints
 * every cell as family:qualifier = value.
 *
 * <p>Fixes over the original: the javadoc referred to table "t1" while the
 * code reads "t2", and the Table handle was never closed.
 */
public class HBaseGetData {
    /**
     * Entry point: point lookup of row1 and print of all its cells.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or the get RPC fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum",
                "192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        // try-with-resources releases both the Table and the Connection.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            // Point lookup by row key.
            Get get = new Get("row1".getBytes());
            Result result = table.get(get);
            // Walk every cell of the returned row.
            for (Cell cell : result.rawCells()) {
                // NOTE(review): new String(byte[]) uses the platform default charset;
                // Chinese values may garble on non-UTF-8 clients — confirm locale.
                String family = new String(CellUtil.cloneFamily(cell));       // column family
                String qualifier = new String(CellUtil.cloneQualifier(cell)); // column qualifier
                String value = new String(CellUtil.cloneValue(cell));         // cell value
                System.out.println("列:"+family+":"+qualifier+"值:"+value);
            }
        }
    }
}
四: 删除表t2中行键为row3的一整条数据,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
/**
 * Deletes the entire row with row key "row3" from table "t2".
 *
 * <p>Fixes over the original: the javadoc said "row1" while the code deletes
 * "row3"; resources are now closed via try-with-resources.
 */
public class HBaseDeleteData {
    /**
     * Entry point: deletes row3 and reports success.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or the delete RPC fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        // try-with-resources closes the Table and Connection even on failure.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            // A Delete built from only a row key removes every cell of that row.
            table.delete(new Delete(Bytes.toBytes("row3")));
            System.out.println("delete data success!!");
        }
    }
}
五: 各种过滤器的使用。首先是行键过滤器(RowFilter)示例,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
/**
 * Row-key filter example: a RowFilter with a BinaryComparator keeps only the
 * row whose key is exactly "row1"; swapping the compare operator widens the
 * match to row-key ranges.
 *
 * <p>Fix over the original: the Table handle was never closed; scanner and
 * table are now managed with try-with-resources.
 */
public class HBaseRowFilterData {
    /**
     * Entry point: scans t2 with RowFilter(EQUAL, "row1") and prints the match.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan scan = new Scan();
            // Keep rows whose key is byte-for-byte equal to "row1".
            Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL,
                    new BinaryComparator(Bytes.toBytes("row1")));
            scan.setFilter(filter);
            try (ResultScanner resultScanner = table.getScanner(scan)) {
                for (Result result : resultScanner) {
                    byte[] name = result.getValue("f2".getBytes(), "name".getBytes());
                    byte[] age = result.getValue("f2".getBytes(), "age".getBytes());
                    byte[] college = result.getValue("f2".getBytes(), "college".getBytes());
                    byte[] address = result.getValue("f2".getBytes(), "address".getBytes());
                    // NOTE(review): getValue returns null for a missing column; row1 is
                    // written with all four columns in this tutorial, so this is safe here.
                    System.out.println("name:"+new String(name)+"\tage:"+new String(age)+"\tcollege:"+new String(college)+"\taddress:"+new String(address));
                }
            }
        }
    }
}
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.List;
/**
 * Column (qualifier) filter example: QualifierFilter keeps every cell whose
 * qualifier equals "name", across all rows. HBase rows may each carry
 * different columns, which is why filtering by qualifier is meaningful.
 *
 * <p>Fixes over the original: raw {@code List} replaced with {@code List<Cell>};
 * the Table handle is now closed via try-with-resources.
 */
public class HBaseQualiferData {
    /**
     * Entry point: scans t2 keeping only "name" cells and prints each one.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan scan = new Scan();
            // Keep only cells whose column qualifier is exactly "name".
            Filter filter = new QualifierFilter(CompareFilter.CompareOp.EQUAL,
                    new BinaryComparator(Bytes.toBytes("name")));
            scan.setFilter(filter);
            try (ResultScanner resultScanner = table.getScanner(scan)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    for (Cell cell : cells) {
                        String row = Bytes.toString(result.getRow());                 // row key
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));   // column family
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell)); // qualifier
                        String value = Bytes.toString(CellUtil.cloneValue(cell));     // cell value
                        System.out.println("[row:"+row+"],[family:"+family+"],[qualifier:"+qualifier+"]"+ ",[value:"+value+"],[time:"+cell.getTimestamp()+"]");
                    }
                }
            }
        }
    }
}
接下来是单列值过滤器(SingleColumnValueFilter)示例,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.List;
/**
 * Single-column-value filter example: selects rows whose "name" column does
 * NOT contain the substring "小明" (NOT_EQUAL + SubstringComparator).
 *
 * <p>Fixes over the original: {@code setFilterIfMissing(true)} is set so rows
 * lacking a "name" column are excluded (without it they would pass, contradicting
 * the documented intent); raw {@code List} is typed; the Table is closed.
 */
public class HBaseSingleColumnFilterData {
    /**
     * Entry point: scans t2 for rows whose name does not contain "小明".
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan scan = new Scan();
            // NOT_EQUAL + SubstringComparator: keep rows whose name value
            // does not contain the substring "小明".
            SingleColumnValueFilter filter =
                    new SingleColumnValueFilter(Bytes.toBytes("f2"), Bytes.toBytes("name"),
                            CompareFilter.CompareOp.NOT_EQUAL, new SubstringComparator("小明"));
            // Exclude rows that have no "name" column at all.
            filter.setFilterIfMissing(true);
            scan.setFilter(filter);
            try (ResultScanner resultScanner = table.getScanner(scan)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    for (Cell cell : cells) {
                        String row = Bytes.toString(result.getRow());                 // row key
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));   // column family
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell)); // qualifier
                        String value = Bytes.toString(CellUtil.cloneValue(cell));     // cell value
                        System.out.println("[row:"+row+"],[family:"+family+"],[qualifier:"+qualifier+"]"+ ",[value:"+value+"],[time:"+cell.getTimestamp()+"]");
                    }
                }
            }
        }
    }
}
接下来是值过滤器(ValueFilter)示例,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.List;
/**
 * Value filter example: ValueFilter inspects every cell's value and keeps the
 * cells whose value contains the substring "小明", regardless of row or column.
 *
 * <p>Fixes over the original: raw {@code List} replaced with {@code List<Cell>};
 * the Table handle is now closed via try-with-resources.
 */
public class HBaseValueFilterData {
    /**
     * Entry point: scans t2 keeping cells whose value contains "小明".
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum", "192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan scan = new Scan();
            // EQUAL + SubstringComparator: keep cells whose value contains "小明".
            Filter filter = new ValueFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("小明"));
            scan.setFilter(filter);
            try (ResultScanner resultScanner = table.getScanner(scan)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    for (Cell cell : cells) {
                        String row = Bytes.toString(result.getRow());                 // row key
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));   // column family
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell)); // qualifier
                        String value = Bytes.toString(CellUtil.cloneValue(cell));     // cell value
                        System.out.println("[row:"+row+"],[family:"+family+"],[qualifier:"+qualifier+"]"+ ",[value:"+value+"],[time:"+cell.getTimestamp()+"]");
                    }
                }
            }
        }
    }
}
接下来是列族过滤器(FamilyFilter)示例,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.List;
/**
 * Column-family filter example: FamilyFilter keeps all cells belonging to
 * column family "f2".
 *
 * <p>Fixes over the original: raw {@code List} replaced with {@code List<Cell>};
 * the Table handle is now closed via try-with-resources.
 */
public class HBaseFamilyFilterData {
    /**
     * Entry point: scans t2 keeping cells whose family is exactly "f2".
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan sc = new Scan();
            // Keep cells whose column family is byte-for-byte equal to "f2".
            Filter filter = new FamilyFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("f2")));
            sc.setFilter(filter);
            try (ResultScanner resultScanner = table.getScanner(sc)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    for (Cell cell : cells) {
                        String row = Bytes.toString(result.getRow());                 // row key
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));   // column family
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell)); // qualifier
                        String value = Bytes.toString(CellUtil.cloneValue(cell));     // cell value
                        System.out.println("[row:"+row+"],[family:"+family+"],[qualifier:"+qualifier+"]"+ ",[value:"+value+"],[time:"+cell.getTimestamp()+"]");
                    }
                }
            }
        }
    }
}
最后是多条件组合过滤(FilterList)示例,代码如下:
package org.jy.data.yh.bigdata.drools.hadoop.hbase;
import org.apache.commons.configuration.ConfigurationFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.List;
/**
 * Combined-filter example: a FilterList of two SingleColumnValueFilters selects
 * rows whose age is between 18 and 50 inclusive. FilterList defaults to
 * MUST_PASS_ALL, i.e. a logical AND of its member filters.
 *
 * <p>Fixes over the original: the comment claimed "age &lt;= 30" while the code
 * compares against "50"; resources are closed; {@code setFilterIfMissing(true)}
 * excludes rows that have no age column at all.
 *
 * <p>NOTE(review): ages are stored as strings and compared lexicographically by
 * the default byte comparison; this is only correct here because every age in
 * the sample data has exactly two digits — confirm before reusing.
 */
public class HBaseMultiesConditionData {
    /**
     * Entry point: scans t2 for rows with 18 &lt;= age &lt;= 50 and prints each cell.
     *
     * @param args unused command-line arguments
     * @throws IOException if the connection or scan fails
     */
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // ZooKeeper quorum fronting the HBase cluster.
        configuration.set("hbase.zookeeper.quorum","192.168.227.128:2181,192.168.227.129:2181,192.168.227.130:2181");
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("t2"))) {
            Scan scan = new Scan();
            // Filter 1: age <= 50.
            SingleColumnValueFilter upperBound = new SingleColumnValueFilter(Bytes.toBytes("f2"), Bytes.toBytes("age"),
                    CompareFilter.CompareOp.LESS_OR_EQUAL, Bytes.toBytes("50"));
            // Filter 2: age >= 18.
            SingleColumnValueFilter lowerBound = new SingleColumnValueFilter(Bytes.toBytes("f2"), Bytes.toBytes("age"),
                    CompareFilter.CompareOp.GREATER_OR_EQUAL, Bytes.toBytes("18"));
            // Exclude rows that carry no "age" column at all.
            upperBound.setFilterIfMissing(true);
            lowerBound.setFilterIfMissing(true);
            // Default operator is MUST_PASS_ALL: both conditions must hold.
            FilterList filterList = new FilterList();
            filterList.addFilter(upperBound);
            filterList.addFilter(lowerBound);
            scan.setFilter(filterList);
            try (ResultScanner resultScanner = table.getScanner(scan)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    for (Cell cell : cells) {
                        String row = Bytes.toString(result.getRow());                 // row key
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));   // column family
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell)); // qualifier
                        String value = Bytes.toString(CellUtil.cloneValue(cell));     // cell value
                        System.out.println("[row:"+row+"],[family:"+family+"],[qualifier:"+qualifier+"]"+ ",[value:"+value+"],[time:"+cell.getTimestamp()+"]");
                    }
                }
            }
        }
    }
}
结尾: 以上各基本操作的效果均可在 HBase Shell 中查看验证。