Java code
package com.zcl.demo;
import java.io.IOException;
import java.util.Arrays;
import java.util.Date;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import com.google.protobuf.InvalidProtocolBufferException;
import com.htsc.octopus.protocol.StateEntry;
import com.zcl.utils.HBaseUtils;
/**
 * Consumes messages from the subscribed Kafka topic and writes them to HBase.
 * @author 45335
 */
public class KafkaConsumerExample {
public static void main(String[] args) {
HBaseUtils hd = new HBaseUtils();
Properties props = new Properties();
String[] s = new String[]{"base","diskInfo","networkInfo"};
try {
hd.createTable("zcl", s);
} catch (IOException e1) {
e1.printStackTrace();
}
//Kafka broker address to read from
props.put("bootstrap.servers","192.168.xxx.xx:9092");
//Consumer group name (here: "test")
props.put("group.id", "test");
//Enable automatic offset commits; the commit frequency is controlled by auto.commit.interval.ms
props.put("enable.auto.commit", "true");
//Offset commit interval (ms)
props.put("auto.commit.interval.ms", "1000");
//Start from the earliest offset available for this group.id. If unset, the default is "latest",
//i.e. the consumer only sees messages produced after it starts.
//props.put("auto.offset.reset", "earliest");
//Session times out after 30 seconds without heartbeats
props.put("session.timeout.ms", "30000");
//Key/value deserializers
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
//===================================================
@SuppressWarnings("resource")
KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
// Subscribe to the list of topics (here just "test")
consumer.subscribe(Arrays.asList("test"));
while (true) {
ConsumerRecords<String, byte[]> records = consumer.poll(100);
for (ConsumerRecord<String, byte[]> record : records) {
if ("ProcessInfo".equals(record.key())) {
System.out.println("Deserializing ProcessInfo ========================");
try {
StateEntry.ProcessInfo bus = StateEntry.ProcessInfo.parseFrom(record.value());
//Write to HBase
//Column-family array (three families)
// String[] s = new String[]{"base","diskInfo","networkInfo"};
try {
//Create the HBase table (name: zcl, families: s)
//hd.createTable("wang",s);
//Insert data (args: table name, rowKey = current timestamp, family, qualifier, value)
//hd.addRecord("zcl",bus);
byte[] tableName = "zcl".getBytes();
byte[] rowKey = Bytes.toBytes(new Date().getTime());
hd.addRecord( tableName,rowKey,"base".getBytes(), "pid".getBytes(), String.valueOf(bus.getPid()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "runningTime".getBytes(), String.valueOf(bus.getRunningTime()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "totalMemory".getBytes(), String.valueOf(bus.getTotalMemory()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "freeMemory".getBytes(), String.valueOf(bus.getFreeMemory()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "maxMemory".getBytes(), String.valueOf(bus.getMaxMemory()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "totalMemorySize".getBytes(), String.valueOf(bus.getTotalMemorySize()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "freePhysicalMemorySize".getBytes(), String.valueOf(bus.getFreePhysicalMemorySize()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "usedMemory".getBytes(), String.valueOf(bus.getUsedMemory()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "totalThread".getBytes(), String.valueOf(bus.getTotalThread()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "cpuRatio".getBytes(), String.valueOf(bus.getCpuRatio()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "cpuProcess".getBytes(), String.valueOf(bus.getCpuProcess()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "ioReadBytes".getBytes(), String.valueOf(bus.getIoReadBytes()).getBytes());
hd.addRecord( tableName,rowKey,"base".getBytes(), "ioWriteBytes".getBytes(), String.valueOf(bus.getIoWriteBytes()).getBytes());
hd.addRecord( tableName,rowKey,"diskInfo".getBytes(), "disksize".getBytes(), String.valueOf(bus.getUtilizationDisk(0).getDisksize()).getBytes());
hd.addRecord( tableName,rowKey,"diskInfo".getBytes(), "diskused".getBytes(), String.valueOf(bus.getUtilizationDisk(0).getDiskused()).getBytes());
hd.addRecord( tableName,rowKey,"diskInfo".getBytes(), "diskavail".getBytes(), String.valueOf(bus.getUtilizationDisk(0).getDiskavail()).getBytes());
hd.addRecord( tableName,rowKey,"diskInfo".getBytes(), "diskusepercent".getBytes(), String.valueOf(bus.getUtilizationDisk(0).getDiskusepercent()).getBytes());
hd.addRecord( tableName,rowKey,"diskInfo".getBytes(), "mountedon".getBytes(), String.valueOf(bus.getUtilizationDisk(0).getMountedon()).getBytes());
hd.addRecord( tableName,rowKey,"networkInfo".getBytes(), "networkport".getBytes(), String.valueOf(bus.getIonetwork(0).getNetworkport()).getBytes());
hd.addRecord( tableName,rowKey,"networkInfo".getBytes(), "receivebytes".getBytes(), String.valueOf(bus.getIonetwork(0).getReceivebytes()).getBytes());
hd.addRecord( tableName,rowKey,"networkInfo".getBytes(), "receivepackets".getBytes(), String.valueOf(bus.getIonetwork(0).getTransmitbytes()).getBytes());
hd.addRecord( tableName,rowKey,"networkInfo".getBytes(), "transmitbytes".getBytes(), String.valueOf(bus.getIonetwork(0).getReceivepackets()).getBytes());
hd.addRecord( tableName,rowKey,"networkInfo".getBytes(), "transmitpackets".getBytes(), String.valueOf(bus.getIonetwork(0).getTransmitpackets()).getBytes());
System.out.println("insert HBaseData over。。。。。。。");
} catch (Exception e) {
e.printStackTrace();
}
} catch (InvalidProtocolBufferException e) {
e.printStackTrace();
}
}
}
}
}
}
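The loop above runs forever with auto-commit enabled and never closes the consumer. Below is a minimal, hypothetical sketch (the class name GracefulConsumerSketch is not part of the original project) of the standard Kafka shutdown pattern: a JVM shutdown hook calls consumer.wakeup(), the blocked poll() then throws WakeupException, and the consumer is closed in a finally block. The HBase-writing logic is omitted for brevity.

package com.zcl.demo;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
public class GracefulConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Same connection settings as KafkaConsumerExample above
        props.put("bootstrap.servers", "192.168.xxx.xx:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "true");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("test"));
        final Thread mainThread = Thread.currentThread();
        // wakeup() is the only thread-safe KafkaConsumer method; it makes the blocked poll() throw WakeupException
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumer.wakeup();
            try { mainThread.join(); } catch (InterruptedException ignored) { }
        }));
        try {
            while (true) {
                ConsumerRecords<String, byte[]> records = consumer.poll(100);
                for (ConsumerRecord<String, byte[]> record : records) {
                    System.out.println(record.key() + " -> " + record.value().length + " bytes");
                }
            }
        } catch (WakeupException e) {
            // Expected on shutdown
        } finally {
            consumer.close();
        }
    }
}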
utils
package com.zcl.utils;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * HBase utility: create a table, insert a cell, drop a table.
 */
public class HBaseUtils {
// Shared connection to the HBase cluster
public static Connection connection;
// Admin object for metadata (DDL) operations
public static Admin admin;
public static Configuration conf;
static {
// Create the connection configuration
conf = HBaseConfiguration.create();
// ZooKeeper quorum address of the HBase cluster
conf.set("hbase.zookeeper.quorum", "192.168.xxx.xxx");
// ZooKeeper client port
conf.set("hbase.zookeeper.property.clientPort", "2181");
// Open the connection
try {
connection = ConnectionFactory.createConnection(conf);
} catch (IOException e) {
e.printStackTrace();
}
// Obtain the Admin object
try {
admin = connection.getAdmin();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Create a table with the given column families.
 */
public static void createTable(String tableNameString,String[] columnFamily) throws IOException{
System.out.println("---------------create table START-----------------");
// Table name
//String tableNameString = "t_book";
// Build a TableName object
TableName tableName = TableName.valueOf(tableNameString);
// If the table already exists, do nothing
if(admin.tableExists(tableName)){
System.out.println("Table already exists!");
}
// Otherwise create it
else{
// Table descriptor
HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
for (int i = 0; i < columnFamily.length; i++) {
hTableDescriptor.addFamily(new HColumnDescriptor(columnFamily[i]));
}
// Example of adding a single column family:
//HColumnDescriptor family = new HColumnDescriptor("base");
//hTableDescriptor.addFamily(family);
// Create the table
admin.createTable(hTableDescriptor);
}
System.out.println("---------------create table END-----------------");
}
/**
 * Insert a single cell (one row key / family / qualifier / value).
 * @param tableName
 * @param rowKey
 * @param family
 * @param qualifier
 * @param value
 */
public static void addRecord(byte[] tableName, byte[] rowKey, byte[] family, byte[] qualifier, byte[] value){
try {
// Reuse the shared connection created in the static block
Table table = connection.getTable(TableName.valueOf(tableName));
Put put = new Put(rowKey);
put.addColumn(family, qualifier, value);
table.put(put);
table.close();
System.out.println("insert record " + Bytes.toStringBinary(rowKey) + " into table " + Bytes.toString(tableName) + " ok.");
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Drop a table.
 * @param tableName
 */
public static void dropTable(String tableName) {
try {
HBaseAdmin admin = new HBaseAdmin(conf);
admin.disableTable(tableName);
admin.deleteTable(tableName);
System.out.println("delete table end");
} catch (MasterNotRunningException e) {
e.printStackTrace();
} catch (ZooKeeperConnectionException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
try {
String[] s = new String[]{"base","diskInfo","networkInfo"};
createTable("zcl", s);
} catch (IOException e) {
e.printStackTrace();
}
}
}
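For each Kafka message, KafkaConsumerExample calls addRecord about two dozen times, which means one Put and one RPC per column. A batched variant could collect all qualifiers of one family into a single Put. The sketch below is a hypothetical helper (addRow is not in the original class) that assumes it is added inside HBaseUtils so it can reuse the static connection.

// Hypothetical helper (not in the original HBaseUtils): write many qualifiers of one
// column family as a single Put, so each Kafka record costs one RPC instead of ~23.
public static void addRow(byte[] tableName, byte[] rowKey, byte[] family,
        java.util.Map<String, String> columns) {
    // try-with-resources closes the Table; the shared static connection stays open
    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
        Put put = new Put(rowKey);
        for (java.util.Map.Entry<String, String> entry : columns.entrySet()) {
            put.addColumn(family, Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()));
        }
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

The consumer could then fill a Map<String, String> with the "base" columns taken from bus and issue a single HBaseUtils.addRow("zcl".getBytes(), rowKey, "base".getBytes(), map) call per message.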
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.zcl</groupId>
  <artifactId>ConsumerDataToHBase</artifactId>
  <version>0.0.1-SNAPSHOT</version>

  <dependencies>
    <dependency>
      <groupId>org.apache.kafka</groupId><artifactId>kafka_2.12</artifactId><version>1.0.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId><artifactId>kafka-clients</artifactId><version>1.0.0</version>
    </dependency>
    <dependency>
      <groupId>org.scala-lang</groupId><artifactId>scala-library</artifactId><version>2.12.4</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId><artifactId>hbase-client</artifactId><version>1.2.6</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <!-- Package the consumer and all dependencies into a runnable jar -->
      <plugin>
        <artifactId>maven-assembly-plugin</artifactId>
        <version>2.2.1</version>
        <configuration>
          <appendAssemblyId>false</appendAssemblyId>
          <descriptorRefs>
            <descriptorRef>jar-with-dependencies</descriptorRef>
          </descriptorRefs>
          <archive>
            <manifest><mainClass>com.zcl.demo.KafkaConsumerExample</mainClass></manifest>
          </archive>
        </configuration>
        <executions>
          <execution>
            <id>make-assembly</id>
            <phase>package</phase>
            <goals><goal>single</goal></goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.3</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
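With this assembly configuration, mvn clean package should produce a self-contained jar (since appendAssemblyId is false, roughly target/ConsumerDataToHBase-0.0.1-SNAPSHOT.jar) whose manifest main class is com.zcl.demo.KafkaConsumerExample, so it can be started with java -jar on a machine that can reach both the Kafka broker and the HBase ZooKeeper quorum.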