1. KSQL syntax: https://docs.confluent.io/current/ksql/docs/developer-guide/syntax-reference.html
KSQL Kafka in practice: https://my.oschina.net/guol/blog/2236817
KSQL REST API:https://docs.confluent.io/current/ksql/docs/developer-guide/api.html
2. Basic syntax
ssh -p5022 [email protected]
(password: bdp_jt)
Kafka (Confluent) installation directory
/home/bdp_jt/confluent-5.1.2
Create a topic manually
./bin/kafka-topics --create --zookeeper 10.138.225.199:2181/kafka_confluent_512 --replication-factor 1 --partitions 1 --topic zqdtest
List Kafka topics
./bin/kafka-topics --zookeeper 10.138.225.199:2181/kafka_confluent_512 --list
Start a console producer
(1)./bin/kafka-console-producer --broker-list 10.138.225.199:9092 --topic zqdtest
(2)./bin/ksql-datagen bootstrap-server=10.138.225.199:9092 quickstart=orders format=json topic=orders maxInterval=2000 (auto-creates the topic if needed and keeps producing random records into it)
Start a console consumer
./bin/kafka-console-consumer --bootstrap-server 10.138.225.199:9092 --topic zqdtest
Connect to the KSQL CLI
./bin/ksql http://10.138.225.199:8088
Create a stream and a table
(1) Create a stream pageviews_original on topic pageviews, with value_format DELIMITED
ksql> CREATE STREAM pageviews_original (userid varchar, pageid varchar) WITH (kafka_topic='pageviews', value_format='DELIMITED');
(2) Create a table users_original on topic users, with value_format JSON
ksql> CREATE TABLE users_original (registertime BIGINT, gender VARCHAR, regionid VARCHAR, userid VARCHAR) WITH (kafka_topic='users', value_format='JSON', key = 'userid');
Query data
ksql> SELECT * FROM USERS_ORIGINAL LIMIT 3;
Create a persistent query
ksql> CREATE STREAM pageviews2 AS SELECT userid FROM pageviews_original;
List streams
ksql> SHOW STREAMS;
List running queries
ksql> SHOW QUERIES;
Consume the newly produced data
cd /opt/programs/confluent_5.0.0/bin
./kafka-console-consumer --bootstrap-server 10.205.151.145:9092 --from-beginning --topic PAGEVIEWS2
Terminate a query
ksql> TERMINATE query_id;
Show all columns of a stream or table
DESCRIBE MONITOR_ORIGINAL;
{"userid":2,"username":"cvcv"}
ssh [email protected]
ssh [email protected]
Describe a topic
./bin/kafka-topics --zookeeper 10.138.225.199:2181/kafka_confluent_512 --describe --topic zqdtest
Example: INSERT INTO with a JSON payload built by string concatenation (a doubled single quote escapes a quote inside a KSQL string literal)
insert into MONITOR_ORIGINAL select 1 as MONITORID, '{''FACTORYCODE'':'''+FACTORYCODE+''','+'''PROD_CODE'':'''+PROD_CODE+''','+'''BARCODE'':'''+BARCODE+'''}' as MONITORCONTENT from JIEPAI_ORIGINAL where cast(BARCODE_TIMEDIFF as int) > 300;
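For a row where FACTORYCODE = 'F01', PROD_CODE = 'P01' and BARCODE = 'B01' (values illustrative), the doubled quotes collapse so the expression evaluates to the string {'FACTORYCODE':'F01','PROD_CODE':'P01','BARCODE':'B01'}, which is written into MONITOR_ORIGINAL as MONITORCONTENT.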
3. Launching KSQL queries from Java
@Override
public void saveKsql(DatastreamEntity mon) {
    logger.debug("saveKsql");
    String streamname = ""; // concatenated table names of the selected data streams
    String column = "";     // concatenated column names across the selected data streams
    // Resolve every selected data stream by its stream ID
    for (DatastreamBymetaEntity streams : mon.getMetastreamidlist()) { // e.g. streams.getMeta_stream_id() == 2385
        VTableMetaDataJson datajson = tableMetaDataService.findDetailByTableID(streams.getIntMetaStreamID());
        streamname += datajson.getStrTableNameEN() + ",";
        for (VTableColumnJson str : datajson.getvTableColumns()) {
            column += str.getStrColumnNameEN() + ",";
        }
    }
    streamname = streamname.substring(0, streamname.length() - 1); // drop trailing comma
    column = column.substring(0, column.length() - 1);
    //String column = "ID,TX_NAME";
    //String streamname = "TEST_HDP0118";
    // Assemble the columns into the quoted-JSON expression KSQL expects
    String[] strs = column.split(",");
    String strjson = "'{";
    for (String str : strs) {
        strjson += "''" + str + "'':'''+ cast(" + str + " as string ) +''','+'";
    }
    strjson = strjson.substring(0, strjson.length() - 4); // drop the trailing ",'+'"
    strjson += "}'";
    System.out.println(strjson);
    for (DatastreamRuleconfigEntity rule : mon.getRulelist()) {
        String ksql = "insert into MONITOR_ORIGINAL2 "
                + " select " + rule.getIntID() + " as monitorid, " + strjson + " as monitorcontent "
                + " from " + streamname
                + " where " + rule.getStrDefinedCondition() + ";";
        System.out.println(ksql);
        // Run the statement and capture the resulting query_id
        String query_id = ksqlServerService.createInsert(ksql);
        rule.setStrQueryID(query_id);
        rule.setStrKsqlContent(ksql);
        rule.setIntStreamID(mon.getIntID());
        monitorRuleDao.saveAndFlush(rule);
    }
}
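To make the string assembly concrete: with the commented-out test values (column = "ID,TX_NAME", streamname = "TEST_HDP0118") and an illustrative rule with ID 1 and condition ID > 0, the loop emits a statement of the same shape as the MONITOR_ORIGINAL example in section 2:

insert into MONITOR_ORIGINAL2  select 1 as monitorid, '{''ID'':'''+ cast(ID as string ) +''','+'''TX_NAME'':'''+ cast(TX_NAME as string ) +'''}' as monitorcontent  from TEST_HDP0118  where ID > 0;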
@Override
public String createInsert(String ksql) {
    // Snapshot the IDs of the currently running queries so we can diff afterwards
    KSQLQueries ksqlQuery = this.getKSQLQuery();
    List<KSQLQuery> queries = ksqlQuery.getQueries();
    Set<String> queryIDs = new HashSet<>();
    for (int i = 0; i < queries.size(); i++) {
        queryIDs.add(queries.get(i).getId());
    }
    System.out.println(ksql);
    String resJson = this.executeKSQL(ksql);
    JSONObject rep = JSON.parseObject(resJson);
    String status = rep.getJSONObject("commandStatus").getString("status");
    if (SUCCESSS.equals(status)) {
        // On success, fetch the query list again and diff it against the snapshot
        // to find the new query_id. The system does not allow creating two queries
        // with identical statements, so the match is unique.
        KSQLQueries ksqlQuery2 = this.getKSQLQuery();
        List<KSQLQuery> queries2 = ksqlQuery2.getQueries();
        for (int i = 0; i < queries2.size(); i++) {
            KSQLQuery ksqlQuery1 = queries2.get(i);
            if (!queryIDs.contains(ksqlQuery1.getId())
                    && ksql.toUpperCase().equals(ksqlQuery1.getQueryString().toUpperCase())) {
                return ksqlQuery1.getId();
            }
        }
        return SUCCESSS;
    } else {
        logger.error(rep.getJSONObject("commandStatus").toString());
        return null;
    }
}
private String executeKSQL(String ksql) {
    HttpHeaders headers = new HttpHeaders();
    headers.set("Content-Type", "application/vnd.ksql.v1+json; charset=utf-8");
    // Request body: the statement to execute
    JSONObject postData = new JSONObject();
    postData.put("ksql", ksql);
    HttpEntity<String> requestEntity = new HttpEntity<>(postData.toJSONString(), headers);
    String resJson = restTemplate.postForEntity(ksqlServerAddress, requestEntity, String.class).getBody();
    // The server returns a one-element JSON array; strip the brackets and rename
    // the "@type" key (to "atype") so the JSON maps onto our POJOs
    return resJson.substring(1, resJson.length() - 1).replaceAll("@", "a");
}
private KSQLQueries getKSQLQuery() {
    String resJson = this.executeKSQL("show queries;");
    return JSON.parseObject(resJson, KSQLQueries.class);
}
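The code above relies on KSQLQueries / KSQLQuery POJOs that are not shown. A minimal sketch, one class per file, assuming the SHOW QUERIES response shape of KSQL 5.x (field names inferred from the REST response; "@type" becomes "atype" after the replaceAll in executeKSQL):

import java.util.List;

public class KSQLQueries {
    private String atype;            // "@type" in the raw response, renamed by replaceAll("@", "a")
    private String statementText;
    private List<KSQLQuery> queries;
    public List<KSQLQuery> getQueries() { return queries; }
    public void setQueries(List<KSQLQuery> queries) { this.queries = queries; }
    public String getAtype() { return atype; }
    public void setAtype(String atype) { this.atype = atype; }
    public String getStatementText() { return statementText; }
    public void setStatementText(String statementText) { this.statementText = statementText; }
}

public class KSQLQuery {
    private String queryString;      // full statement text of the running query
    private List<String> sinks;      // sink streams/tables the query writes to
    private String id;               // e.g. "CSAS_PAGEVIEWS2_0"
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }
    public String getQueryString() { return queryString; }
    public void setQueryString(String queryString) { this.queryString = queryString; }
    public List<String> getSinks() { return sinks; }
    public void setSinks(List<String> sinks) { this.sinks = sinks; }
}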
4. Terminating a query by query_id
/**
 * Stop the KSQL statement behind a single rule configuration.
 *
 * @param id rule configuration ID
 */
@Override
public void stopKsql(Integer id) {
    RestTemplate restTemplate = new RestTemplate();
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON_UTF8);
    JSONObject postData = new JSONObject();
    DatastreamRuleconfigEntity rule = monitorService.findDatastreamRuleByid(id);
    postData.put("ksql", "TERMINATE " + rule.getStrQueryID() + ";");
    // Attach the headers to the request; posting the bare JSONObject would silently drop them
    HttpEntity<String> requestEntity = new HttpEntity<>(postData.toJSONString(), headers);
    String resJson = restTemplate.postForEntity(ksqlServerAddress, requestEntity, String.class).getBody();
    String resJson2 = resJson.substring(1, resJson.length() - 1).replaceAll("@", "a");
    JSONObject rep = JSON.parseObject(resJson2);
    String status = rep.getJSONObject("commandStatus").getString("status");
    if (SUCCESSS.equals(status)) {
        monitorRuleDao.updateState1(id); // set the stream state back to "not started" (1)
    } else {
        logger.error("stopKsql failed: {}", rep.getJSONObject("commandStatus"));
    }
}
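Under the hood this is just a POST to the KSQL server's /ksql endpoint (the ksqlServerAddress field above); the request body has the same one-key shape as in executeKSQL, e.g. (query ID illustrative):

{"ksql": "TERMINATE CSAS_MONITOR_ORIGINAL2_0;"}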
5. Kafka topic creation and deletion
import com.sdjictec.bdmpextend.monitor.service.KafkaServerService;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.util.*;

@Service
public class KafkaServerServiceImpl implements KafkaServerService {
    protected static Logger logger = LoggerFactory.getLogger(KafkaServerServiceImpl.class);

    @Value("${kafka.bootstrap.servers}")
    private String bootstrapServers;
    @Value("${kafka.bootstrap.numPartitions}")
    private Integer numPartitions;
    @Value("${kafka.bootstrap.numReplicationFactor}")
    private Integer numReplicationFactor;

    @Override
    public Integer createKafkaTopic(String topicName, Integer replicationFactor, Integer partitions, String bootstrapServers) {
        logger.info("topicName:" + topicName);
        logger.info("replicationFactor:" + replicationFactor);
        logger.info("partitions:" + partitions);
        logger.info("bootstrapServers:" + bootstrapServers);
        try {
            AdminClient adminClient = getAdminClient(bootstrapServers);
            NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor.shortValue());
            Collection<NewTopic> newTopicList = new ArrayList<>();
            newTopicList.add(newTopic);
            // Block until the broker confirms creation so failures surface in the catch block
            adminClient.createTopics(newTopicList).all().get();
            adminClient.close();
            return 0;
        } catch (Exception e) {
            logger.error(e.getLocalizedMessage(), e);
            return 920007;
        }
    }

    @Override
    public Integer createKafkaTopic(String topicName, Integer replicationFactor, Integer partitions) {
        // Keep the argument order of the four-argument overload: (topic, replicationFactor, partitions, servers)
        return this.createKafkaTopic(topicName, replicationFactor, partitions, bootstrapServers);
    }

    @Override
    public Integer createKafkaTopic(String topicName) {
        // Note: replication factor comes before partitions in the four-argument overload
        return this.createKafkaTopic(topicName, numReplicationFactor, numPartitions, bootstrapServers);
    }

    @Override
    public Integer deleteKafkaTopic(String topicName, String bootstrapServers) {
        try {
            AdminClient adminClient = getAdminClient(bootstrapServers);
            Collection<String> topicList = new ArrayList<>();
            topicList.add(topicName);
            adminClient.deleteTopics(topicList).all().get();
            adminClient.close();
            return 0;
        } catch (Exception e) {
            logger.error(e.getLocalizedMessage(), e);
            return 920008;
        }
    }

    @Override
    public Integer deleteKafkaTopic(String topicName) {
        return this.deleteKafkaTopic(topicName, bootstrapServers);
    }

    private AdminClient getAdminClient(String bootstrapServers) {
        try {
            Properties properties = new Properties();
            properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            return AdminClient.create(properties);
        } catch (Exception e) {
            logger.error(e.getLocalizedMessage(), e);
            return null;
        }
    }
}
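The three @Value injections expect entries like the following in application.properties; the property keys come from the code, the values are illustrative:

kafka.bootstrap.servers=10.138.225.199:9092
kafka.bootstrap.numPartitions=1
kafka.bootstrap.numReplicationFactor=1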
6. Stream creation, update, and deletion
public String createStream(MetaStream metaStream, Integer userID) {
    // Build the CREATE STREAM/TABLE statement
    String sql = "CREATE " + metaStream.getStrTypeName() + " " + metaStream.getStrStreamName() + " (";
    for (MetaColumn metaColumn : metaStream.getMetaColumnList()) {
        sql += metaColumn.getStrColumnName() + " " + metaColumn.getStrColumnType() + ",";
    }
    sql = sql.substring(0, sql.length() - 1); // drop trailing comma
    sql += ") WITH (kafka_topic='" + metaStream.getStrTopicName() + "', value_format='JSON');";
    // Create the topic
    Integer flag = kafkaServerService.createKafkaTopic(metaStream.getStrTopicName());
    if (flag != 0) {
        return ResponseMsgEnum.getMsg(flag);
    }
    // Create the stream
    String info = ksqlServerService.createInsert(sql);
    if (null == info) {
        return "Metadata creation failed!";
    }
    // Persist the data stream
    metaStream.setUserCreate(userService.findUserByID(userID));
    metaStream.setTimCreateTime(new Timestamp(System.currentTimeMillis()));
    metaStream.setIntIsDel(0);
    metaStream = metaStreamDao.save(metaStream);
    for (MetaColumn metaColumn : metaStream.getMetaColumnList()) {
        metaColumn.setIntStreamID(metaStream.getIntID());
        metaColumn.setIntCreateUserID(userID);
        metaColumn.setTimCreateTime(new Timestamp(System.currentTimeMillis()));
        metaColumn.setIntIsDel(0);
        metaColumnDao.save(metaColumn);
    }
    // Grant permissions (the generic type was lost in the original notes; Integer group IDs assumed)
    Gson gson = new Gson();
    Type type = new TypeToken<List<Integer>>() {}.getType();
    List<Integer> groupList = gson.fromJson(metaStream.getStrGroupList(), type);
    streamGroupService.createUserGroupTaskList(metaStream.getIntID(), groupList, metaStream.getUserCreate().getIntID(), 0);
    return "0";
}

public String updateStream(MetaStream metaStream, Integer userID) {
    // Build the DROP statement for the existing stream
    String sql = "DROP " + metaStream.getStrTypeName() + " " + metaStream.getStrStreamName() + ";";
    // Drop the stream
    String info = ksqlServerService.createInsert(sql);
    if (null == info) {
        return "Metadata deletion failed; update aborted!";
    }
    // Rebuild the CREATE statement
    sql = "CREATE " + metaStream.getStrTypeName() + " " + metaStream.getStrStreamName() + " (";
    for (MetaColumn metaColumn : metaStream.getMetaColumnList()) {
        sql += metaColumn.getStrColumnName() + " " + metaColumn.getStrColumnType() + ",";
    }
    sql = sql.substring(0, sql.length() - 1);
    sql += ") WITH (kafka_topic='" + metaStream.getStrTopicName() + "', value_format='JSON');";
    // Re-create the stream
    info = ksqlServerService.createInsert(sql);
    if (null == info) {
        return "Metadata creation failed; update aborted!";
    }
    // Persist the data stream
    metaStream.setUserUpdate(userService.findUserByID(userID));
    metaStream.setTimUpdateTime(new Timestamp(System.currentTimeMillis()));
    metaStream = metaStreamDao.save(metaStream);
    metaColumnDao.deleteByStreamID(metaStream.getIntID());
    for (MetaColumn metaColumn : metaStream.getMetaColumnList()) {
        metaColumn.setIntStreamID(metaStream.getIntID());
        metaColumn.setIntCreateUserID(userID);
        metaColumn.setTimCreateTime(new Timestamp(System.currentTimeMillis()));
        metaColumn.setIntIsDel(0);
        metaColumnDao.save(metaColumn);
    }
    // Regenerate permissions (see the note in createStream about the lost generic type)
    Gson gson = new Gson();
    Type type = new TypeToken<List<Integer>>() {}.getType();
    List<Integer> groupList = gson.fromJson(metaStream.getStrGroupList(), type);
    streamGroupService.createUserGroupTaskList(metaStream.getIntID(), groupList, metaStream.getUserCreate().getIntID(), 0);
    return "0";
}

public String deleteStream(Integer streamID, Integer userID) {
    MetaStream metaStream = metaStreamDao.findOne(streamID);
    // Build the DROP statement
    String sql = "DROP " + metaStream.getStrTypeName() + " " + metaStream.getStrStreamName() + ";";
    // Drop the stream
    String info = ksqlServerService.createInsert(sql);
    if (null == info) {
        return "Metadata deletion failed!";
    }
    // Delete the topic
    Integer flag = kafkaServerService.deleteKafkaTopic(metaStream.getStrTopicName());
    if (flag != 0) {
        return ResponseMsgEnum.getMsg(flag);
    }
    // Soft-delete the stream record
    metaStream.setIntIsDel(1);
    metaStream.setUserUpdate(userService.findUserByID(userID));
    metaStream.setTimUpdateTime(new Timestamp(System.currentTimeMillis()));
    metaStreamDao.save(metaStream);
    // Soft-delete the column records
    List<MetaColumn> metaColumnList = metaColumnDao.findMetaColumnByStreamID(streamID);
    for (MetaColumn metaColumn : metaColumnList) {
        metaColumn.setIntUpdateUserID(userID);
        metaColumn.setTimUpdateTime(new Timestamp(System.currentTimeMillis()));
        metaColumn.setIntIsDel(1);
        metaColumnDao.save(metaColumn);
    }
    return "0";
}
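As a concrete example of what createStream sends to KSQL: for a STREAM named ORDER_STREAM with columns ORDERID INT and ITEM VARCHAR on topic order_topic (all names illustrative), the generated statement is:

CREATE STREAM ORDER_STREAM (ORDERID INT,ITEM VARCHAR) WITH (kafka_topic='order_topic', value_format='JSON');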