4.0.0
com.kgf.ibor
ibor-common
0.0.1-SNAPSHOT
org.springframework.boot
spring-boot-starter-parent
2.1.6.RELEASE
1.8
1.8
2.7.5
1.1.19
12.2.0.1.0
org.springframework.boot
spring-boot-starter-web
org.slf4j
slf4j-api
org.apache.commons
commons-dbcp2
com.google.code.gson
gson
org.apache.ignite
ignite-core
${ignite.version}
org.apache.ignite
ignite-zookeeper
${ignite.version}
org.slf4j
slf4j-log4j12
org.slf4j
slf4j-api
org.apache.ignite
ignite-spring
${ignite.version}
org.springframework
spring-core
org.springframework
spring-context
org.springframework
spring-beans
org.springframework
spring-aop
org.apache.ignite
ignite-indexing
${ignite.version}
org.apache.commons
commons-lang3
org.projectlombok
lombok
provided
com.oracle
ojdbc8
${ojdbc8.version}
org.springframework.kafka
spring-kafka
org.springframework
spring-core
org.springframework
spring-context
org.springframework
spring-beans
com.alibaba
druid
${druid.version}
ch.qos.logback
logback-core
注意:上面的springboot版本和Ignite包有一些冲突,需要剔除一些冲突的jar,上面已经剔除。
4.0.0
com.kgf.ibor
ibor-pulsar
0.0.1-SNAPSHOT
com.kgf.ibor
ibor-common
0.0.1-SNAPSHOT
org.springframework.boot
spring-boot-maven-plugin
maven-compiler-plugin
1.8
⑵application.yml,配置了kafka,Oracle,Ignite等相关信息,注意:ibor-pulsar和ibor-quantfin必须注册到同一个zookeeper中
server:
port: 9090 #指定端口号
#===========kafka配置================
spring:
kafka:
bootstrap-servers:
- localhost:9092 #指定kafka代理地址,可以是多个
producer: #以下是生产者配置
retries: 0 #如果这个值大于0,表示启用重试失败的发送次数
batch-size:
16384 #每当多个记录被发送到统一分区时,生产者会尝试将记录一起批量处理为更少的请求,这有助于提高客户端和服务器性能,默认16384
buffer-memory:
33554432 #生产者可用于缓冲等待发送到服务器的记录的内存总字节数,默认值为33554432
key-serializer: #下面用来指定key和消息体的编解码方式
org.apache.kafka.common.serialization.StringSerializer
value-serializer:
org.apache.kafka.common.serialization.StringSerializer
consumer: #下面是消费者配置
group-id: ibor-consumer-kgf #指定默认消费者的groupId,由于在kafka中,同一组中的consumer不会读取到同一个消息,依靠groupid设置组名
##earliest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
#latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据
#none:topic各分区都存在已提交的offset时,从offset后开始消费;只要有一个分区不存在已提交的offset,则抛出异常
auto-offset-reset: earliest
enable-auto-commit: false #如果为true,则消费者的偏移量将在后台定期提交,默认值为true
auto-commit-interval: 100 #如果'enable.auto.commit'为true,则消费者偏移自动提交给Kafka的频率(以毫秒为单位),默认值为5000。
key-deserializer: #密钥的反序列化器类,实现类实现了接口org.apache.kafka.common.serialization.Deserializer
org.apache.kafka.common.serialization.StringDeserializer
value-deserializer: #值的反序列化器类,实现类实现了接口org.apache.kafka.common.serialization.Deserializer
org.apache.kafka.common.serialization.StringDeserializer
#============Ignite配置信息===========
ignite:
clientmode: false
ibor:
domain: DATA_QUANT #配置Ignite节点属性
zookeeper:
address: localhost:2181 #配置zookeeper连接地址
#=============数据库信息配置==============
jdbc:
driverClassName: oracle.jdbc.driver.OracleDriver
url: jdbc:oracle:thin:@localhost:1521:orcl
username: system
password: 897570
⑶applicationContext.xml文件,配置了数据源对象,以及Ignite集群相关配置。
⑷logback-spring.xml,日志配置
%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n
${LOG_HOME}/pulsar.log.%d{yyyy-MM-dd}.log
30
%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n
10MB
⑸启动类IborPulsarApplication.java
⑹创建IgniteInitation.java类,在项目启动时,初始化Ignite对象,以及相关cache缓存实例。
⑺创建IgniteUtil.java用来初始化和获取Ignite实例对象
⑻创建IgniteCacheManager.java,用来初始化Ignite中各个Cache对象,并且初始化service层的cache对象。
package com.kgf.ibor.pulsar.utils;
import javax.annotation.Resource;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import com.kgf.ibor.common.pulsar.cachestore.TestCacheStoreFactory;
import com.kgf.ibor.common.pulsar.model.PulsarConstant;
import com.kgf.ibor.common.pulsar.model.TestKey;
import com.kgf.ibor.common.pulsar.model.TestValue;
import com.kgf.ibor.common.utils.DomainNodeFilter;
import com.kgf.ibor.pulsar.service.TestService;
@Component
public class IgniteCacheManager {

    /** Ignite node domain attribute; defaults to PULSAR when ibor.domain is not configured. */
    @Value("${ibor.domain:PULSAR}")
    private String domainName;

    @Resource
    private IgniteUtil igniteUtil;

    @Resource
    private TestService testService;

    /**
     * Startup entry point, invoked from IgniteInitation.
     * Creates the Ignite caches first, then lets the service layer
     * grab references to them.
     */
    public void init() {
        try {
            // Caches must exist before any service asks for a handle to them.
            initCaches();
            initServices();
        } catch (Exception e) {
            // Best-effort startup: swallow and report rather than failing the whole context.
            e.printStackTrace();
        }
    }

    /** Hands each service its cache reference (must run after initCaches). */
    private void initServices() {
        testService.init();
    }

    /** Creates every cache owned by this node. */
    private void initCaches() {
        initTestCache();
    }

    /**
     * Creates the TEST_INFO cache if it does not already exist in the cluster
     * and pre-loads historical rows from the database through the cache store.
     */
    private void initTestCache() {
        // 1: obtain the Ignite instance.
        Ignite ignite = igniteUtil.getInstance();
        IgniteCache<TestKey, TestValue> cache = ignite.cache(PulsarConstant.CACHE_TEST_INFO);
        if (cache != null) {
            // Cache already exists; nothing to do.
            return;
        }
        // 2: configure the cache (typed configuration instead of raw types).
        CacheConfiguration<TestKey, TestValue> cacheCfg = new CacheConfiguration<>();
        // 3: cache name.
        cacheCfg.setName(PulsarConstant.CACHE_TEST_INFO);
        // 4: PARTITIONED mode scales to large data sets and tolerates frequent updates.
        cacheCfg.setCacheMode(CacheMode.PARTITIONED);
        // 5: keep one backup copy so a single node failure loses no data.
        cacheCfg.setBackups(1);
        // 6: expose the cache in the shared PUBLIC SQL schema.
        cacheCfg.setSqlSchema("PUBLIC");
        // 7: register key/value types so SQL queries over the cache work.
        cacheCfg.setIndexedTypes(TestKey.class, TestValue.class);
        // 8: restrict the cache to nodes whose domain attribute matches ours.
        cacheCfg.setNodeFilter(new DomainNodeFilter(this.domainName));
        // 9/10: read-through + write-through keep the cache in sync with the database.
        cacheCfg.setReadThrough(true);
        cacheCfg.setWriteThrough(true);
        // 11: store factory providing the JDBC-backed TestCacheStore.
        cacheCfg.setCacheStoreFactory(new TestCacheStoreFactory());
        // 12: enable cache metrics collection.
        cacheCfg.setStatisticsEnabled(true);
        // 13: create (or fetch) the cache cluster-wide.
        cache = ignite.getOrCreateCache(cacheCfg);
        // 14: pre-load historical data via TestCacheStore.loadCache.
        cache.loadCache(null, PulsarConstant.LOAD_HISTORY_DATA);
    }
}
⑼创建TestKey,TestValue对象,这两个我们需要建立在ibor-common模块,因为ibor-quantfin节点获取数据时也需要。当然,我们
可以根据需求自定义key-value.
⑽定义各个cache的名称变量
⑾创建DomainNodeFilter.java,这个是对节点属性的一个拦截校验,必须建立在ibor-common,否则集群起不来。
⑿创建TestCacheStore,这个是用来和数据库进行交互的,也要建立在ibor-common。
package com.kgf.ibor.common.pulsar.cachestore;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import javax.cache.Cache.Entry;
import javax.cache.integration.CacheLoaderException;
import javax.cache.integration.CacheWriterException;
import javax.sql.DataSource;
import org.apache.commons.lang3.StringUtils;
import org.apache.ignite.cache.store.CacheStore;
import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.lang.IgniteBiInClosure;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.kgf.ibor.common.pulsar.model.PulsarConstant;
import com.kgf.ibor.common.pulsar.model.TestKey;
import com.kgf.ibor.common.pulsar.model.TestValue;
/**
* 该store用来和数据库进行交互
* @author KGF
*
*/
public class TestCacheStore extends CacheStoreAdapter implements CacheStore{
private transient DataSource dataSource;
private Logger log = LoggerFactory.getLogger(TestCacheStore.class);
private static final String TABLE_NAME = "IBOR_TEST_KGF";
private static final Integer BATCH_NUM = 1000;
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
/***
* 加载数据到缓存中
*/
@Override
public void loadCache(IgniteBiInClosure clo, Object... args) {
if(dataSource==null)return;
String lable = (String) args[0];
if(lable.equalsIgnoreCase(PulsarConstant.LOAD_HISTORY_DATA)) {
try (Connection conn = dataSource.getConnection()){
String sql = "SELECT TEST_ID,TEST_NAME,TEST_DATE FROM "+TABLE_NAME;
try(PreparedStatement st = conn.prepareStatement(sql)){
try(ResultSet rs = st.executeQuery()){
while (rs.next()) {
String testId = rs.getString(1);
String testName = rs.getString(2);
String testDate = rs.getString(3);
TestKey k = new TestKey(testId, testDate);
TestValue v = new TestValue(testId, testName, testDate);
clo.apply(k, v);
log.info("[TestCacheStore]: load TestValue:"+v);
}
log.info("[TestCacheStore]: load TestValue finished...");
}
}
} catch (Exception e) {
e.printStackTrace();
throw new CacheLoaderException("Failed to load TestValue values to store");
}
}else {
log.info("[TestCacheStore]: loadCache fail,lable is not right");
}
}
@Override
public TestValue load(TestKey key) throws CacheLoaderException {
return null;
}
/***
* 单条数据存储
*/
@Override
public void write(Entry extends TestKey, ? extends TestValue> entry) throws CacheWriterException {
if(dataSource==null)return;
Connection conn = null;
PreparedStatement st = null;
try {
conn = dataSource.getConnection();
String sql = getMergeSql();
st = conn.prepareStatement(sql);
TestValue value = entry.getValue();
String testId = value.getTestId();
if(StringUtils.isBlank(testId)) {
return;
}
st.setString(1,testId);
st.setString(2,value.getTestName());
st.setString(3,value.getTestDate());
st.addBatch();
st.executeUpdate();
log.info("[ibor-TestValue] persistence TestValue:"+value);
} catch (Exception e) {
try {
conn.rollback();
} catch (SQLException e1) {
e1.printStackTrace();
}
throw new CacheLoaderException("Failed to write values to database",e);
}finally {
try {
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
@Override
public void delete(Object key) throws CacheWriterException {
}
/***
* 批量写入数据库
*/
@Override
public void writeAll(Collection> entries) {
if(dataSource==null)return;
Connection conn = null;
PreparedStatement st = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
String sql = getMergeSql();
st = conn.prepareStatement(sql);
int testNum = 0;
for (Entry extends TestKey, ? extends TestValue> entry : entries) {
TestValue value = entry.getValue();
String testId = value.getTestId();
if(StringUtils.isBlank(testId)) {
continue;
}
st.setString(1,testId);
st.setString(2,value.getTestName());
st.setString(3,value.getTestDate());
st.addBatch();
if(++testNum>=BATCH_NUM) {
st.executeBatch();
conn.commit();
log.info("[ibor-TestValue] persistence TestValue num:"+testNum);
testNum = 0;
}
log.debug("add or update TestValue:"+value);
}
if(testNum>0) {
log.debug("[ibor-TestValue] persistence TestValue num:"+testNum);
st.executeBatch();
conn.commit();
}
} catch (Exception e) {
try {
conn.rollback();
} catch (SQLException e1) {
e1.printStackTrace();
}
throw new CacheLoaderException("Failed to write values to database",e);
}finally {
try {
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
/***
* 用来拼接SQL
* @return
*/
public String getMergeSql() {
String baseSql = "MERGE INTO "+ TABLE_NAME +" T"+
" USING ( "+
"SELECT "
+"? AS testId,"
+"? AS testName,"
+"? AS testDate"
+" FROM DUAL "
+") T1 ON (T.TEST_ID = T1.testId AND T.TEST_DATE = T1.testDate )"
+ " WHEN MATCHED THEN ";
String middleSql = "UPDATE SET T.TEST_NAME = T1.testName";
String endSql = " WHEN NOT MATCHED THEN "
+" INSERT (TEST_ID,TEST_NAME,TEST_DATE)"
+" VALUES (T1.testId,T1.testName,T1.testDate)";
return baseSql+middleSql+endSql;
}
}
⒀创建缓存存储工厂,TestCacheStoreFactory
⒁在ibor-pulsar中创建TestService.java接口类以及实现类TestServiceImpl
package com.kgf.ibor.pulsar.service.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import javax.annotation.Resource;
import javax.cache.Cache.Entry;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.SqlQuery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.kgf.ibor.common.pulsar.model.PulsarConstant;
import com.kgf.ibor.common.pulsar.model.TestKey;
import com.kgf.ibor.common.pulsar.model.TestValue;
import com.kgf.ibor.pulsar.service.TestService;
import com.kgf.ibor.pulsar.utils.IgniteUtil;
@Component
public class TestServiceImpl implements TestService {
private Logger log = LoggerFactory.getLogger(TestServiceImpl.class);
@Resource
private IgniteUtil igniteUtil;
private IgniteCache testInfoCache;
@Override
public void init() {
testInfoCache = igniteUtil.getInstance().cache(PulsarConstant.CACHE_TEST_INFO);
}
/***
* 根据条件查询
*/
@Override
public List queryTestValueById(String testId){
try {
List list = new ArrayList();
SqlQuery sqlQuery = new SqlQuery(TestValue.class,
"testId = ?");
try(QueryCursor> cursor = testInfoCache.query(sqlQuery.setArgs(testId))){
for (Entry entry : cursor) {
list.add(entry.getValue());
}
}
return list;
} catch (Exception e) {
log.error(e.getMessage());
return null;
}
}
/***
* 支持完全SQL查询
*/
@Override
public String queryTestName(String testId, Integer count) {
try {
String sql = "select testId from TestValue where TestId = ? order by testDate desc";
if(count != null) {
sql = sql+" limit ?";
}
SqlFieldsQuery query = new SqlFieldsQuery(sql);
try(QueryCursor> cursor =
testInfoCache.query(count==null?query.setArgs(testId):query.setArgs(testId,count))){
for (List> entry : cursor) {
return (String) entry.get(0);
}
}
return null;
} catch (Exception e) {
log.error(e.getMessage());
return null;
}
}
/**
* 向数据库插入数据
* @param lists
*/
@Override
public void insertOrUpdateTestValues(List lists) {
HashMap map = new HashMap();
for (TestValue testValue : lists) {
map.put(new TestKey(testValue.getTestId(), testValue.getTestDate()),
new TestValue(testValue.getTestId(), testValue.getTestName(), testValue.getTestDate()));
}
if(map.size()>0) {
if(map.size()>1) {
testInfoCache.putAll(map);
}else {
testInfoCache.put(map.keySet().iterator().next(), map.values().iterator().next());
}
}
}
}
⒂创建TestDataController测试类:
package com.kgf.ibor.pulsar.controller;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Resource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.kgf.ibor.common.pulsar.model.TestValue;
import com.kgf.ibor.pulsar.kafka.producer.KafkaProducer;
import com.kgf.ibor.pulsar.service.TestService;
@RequestMapping("testData")
@RestController
public class TestDataController {

    @Autowired
    private TestService testService;

    @Resource
    private KafkaProducer kafkaProducer;

    /** Publishes a random test message to topic1. */
    @RequestMapping("sendMsg")
    public void sendMsg() {
        kafkaProducer.send();
    }

    /** Publishes a random test message to the topic consumed by ibor-quantfin. */
    @RequestMapping("sendQuantfin")
    public void sendQuantfin() {
        kafkaProducer.sendQuantfin();
    }

    /** Returns all cached values with the given testId. */
    @RequestMapping("queryTestValueById")
    public List<TestValue> queryTestValueById(String testId) {
        return testService.queryTestValueById(testId);
    }

    /** Returns the newest matching testId, optionally limited to count rows. */
    @RequestMapping("queryTestName")
    public String queryTestName(String testId, Integer count) {
        return testService.queryTestName(testId, count);
    }

    /** Inserts ten sample rows through the cache (write-through to Oracle). */
    @RequestMapping("insertOrUpdateTestValues")
    public void insertOrUpdateTestValues() {
        List<TestValue> lists = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            // NOTE(review): "000"+i+3 is string concatenation, yielding ids like
            // "00003", "00013"; if "000"+(i+3) was intended, add parentheses.
            lists.add(new TestValue("000" + i + 3, "name" + i, "20190803"));
        }
        testService.insertOrUpdateTestValues(lists);
    }
}
4.0.0
com.kgf.ibor
ibor-quantfin
0.0.1-SNAPSHOT
com.kgf.ibor
ibor-common
0.0.1-SNAPSHOT
org.springframework.boot
spring-boot-maven-plugin
maven-compiler-plugin
1.8
⑵配置文件:
⑶启动类IborQuantfinApplication:
⑷项目启动初始化类CapitalIndicatorStarter.java
⑸IgniteUtil不变:
⑹IgniteCacheManager.java初始化一个service
⑺创建QuantfinTestService以及实现类QuantfinTestServiceImpl
package com.kgf.ibor.quantfin.service.impl;
import java.util.List;
import javax.annotation.Resource;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.kgf.ibor.common.pulsar.model.PulsarConstant;
import com.kgf.ibor.common.pulsar.model.TestKey;
import com.kgf.ibor.common.pulsar.model.TestValue;
import com.kgf.ibor.quantfin.service.QuantfinTestService;
import com.kgf.ibor.quantfin.utils.IgniteUtil;
@Component
public class QuantfinTestServiceImpl implements QuantfinTestService{
private Logger log = LoggerFactory.getLogger(QuantfinTestServiceImpl.class);
@Resource
private IgniteUtil igniteUtil;
private IgniteCache testInfoCache;
@Override
public void init() {
testInfoCache = igniteUtil.getInstance().cache(PulsarConstant.CACHE_TEST_INFO);
}
/***
* 支持完全SQL查询
*/
@Override
public String queryTestName(String testId, Integer count) {
try {
String sql = "select testId from TestValue where TestId = ? order by testDate desc";
if(count != null) {
sql = sql+" limit ?";
}
SqlFieldsQuery query = new SqlFieldsQuery(sql);
try(QueryCursor> cursor =
testInfoCache.query(count==null?query.setArgs(testId):query.setArgs(testId,count))){
for (List> entry : cursor) {
return (String) entry.get(0);
}
}
return null;
} catch (Exception e) {
log.error(e.getMessage());
return null;
}
}
}
package com.kgf.ibor.pulsar.kafka.producer;
import java.util.Date;
import java.util.UUID;
import javax.annotation.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.kgf.ibor.common.model.KafkaMessage;
/***
* 生产者
* @author KGF
*
*/
/***
 * Kafka message producer for the test topics.
 * @author KGF
 *
 */
@Component
public class KafkaProducer {

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    private Gson gson = new GsonBuilder().create();

    private Logger log = LoggerFactory.getLogger(KafkaProducer.class);

    /** Publishes a random test message to topic1 (consumed within ibor-pulsar). */
    public void send() {
        publish("topic1");
    }

    /** Publishes a random test message to topic-kgf (consumed by ibor-quantfin). */
    public void sendQuantfin() {
        publish("topic-kgf");
    }

    /**
     * Builds a fresh KafkaMessage (current millis as id, random UUID as body,
     * now as send time) and publishes its JSON form to the given topic.
     * Extracted because send() and sendQuantfin() were identical except for
     * the topic name.
     */
    private void publish(String topic) {
        KafkaMessage msg = new KafkaMessage();
        msg.setId(System.currentTimeMillis());
        msg.setMsg(UUID.randomUUID().toString());
        msg.setSendTime(new Date());
        kafkaTemplate.send(topic, gson.toJson(msg));
        log.info("[KafkaProducer send msg is]:" + msg);
    }
}