1. Configure the Kafka cluster
package com.seadun.dun.sbox.config;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.PropertySource;
@Configuration
@ComponentScan("com.kafka.isduncollectbasic.KafkaPro")
@PropertySource("classpath:kafka.properties")
public class KafkaPro {
@Value("${kafka.services}")
private String services;
@Value("${kafka.zookeepers}")
private String zookeepers;
@Value("${kafka.pollRecord}")
private String pollRecord;
public String getServices() {
return services;
}
public void setServices(String services) {
this.services = services;
}
public String getZookeepers() {
return zookeepers;
}
public void setZookeepers(String zookeepers) {
this.zookeepers = zookeepers;
}
public String getPollRecord() {
return pollRecord;
}
public void setPollRecord(String pollRecord) {
this.pollRecord = pollRecord;
}
	public Properties getConsumerProps(String groupId) {
		Properties props = new Properties();
		props.put("group.id", groupId);
		// ZooKeeper settings for the old (0.8.x) high-level consumer
		props.put("zookeeper.connect", zookeepers);
		props.put("zookeeper.session.timeout.ms", "400");
		props.put("zookeeper.sync.time.ms", "200");
		props.put("auto.commit.enable", "false"); // old-consumer key; "enable.auto.commit" belongs to the new consumer and is ignored here
		props.put("auto.commit.interval.ms", "1000");
		// new-consumer settings, ignored by the ZooKeeper-based consumer used in step 3
		props.put("max.poll.records", pollRecord);
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("auto.offset.reset", "smallest"); // old-consumer values: "smallest"/"largest"
		return props;
	}
	public Map<String, Object> getProducerProps() {
		Map<String, Object> props = new HashMap<>();
		props.put(ProducerConfig.ACKS_CONFIG, "0"); // fire-and-forget: no broker acknowledgement
		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, services);
		props.put(ProducerConfig.RETRIES_CONFIG, 1);
		props.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
		props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
		props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		return props;
	}
}
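The producer side of this config can be exercised with a few lines. A minimal sketch, assuming Spring injects KafkaPro and reusing the event-online topic from step 3; the method name sendExample and the payload are illustrative only (org.apache.kafka.clients.producer.KafkaProducer and org.apache.kafka.clients.producer.ProducerRecord must be imported):

	@Autowired
	private KafkaPro kafkaPro;

	public void sendExample() {
		// KafkaProducer accepts the Map<String, Object> built by getProducerProps()
		KafkaProducer<String, String> producer = new KafkaProducer<>(kafkaPro.getProducerProps());
		// key = host code, value = JSON payload, mirroring what step 3 consumes
		producer.send(new ProducerRecord<>("event-online", "host-001", "{\"hostCode\":\"host-001\"}"));
		producer.close(); // close() flushes buffered records before returning
	}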
2. kafka.properties
kafka.services=192.168.2.204:9092,192.168.2.204:9093,192.168.2.204:9094,192.168.2.204:9095,192.168.2.204:9096
kafka.zookeepers=192.168.2.204:2181
kafka.pollRecord=10
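These keys feed the @Value fields in KafkaPro: kafka.services becomes bootstrap.servers for the producer, kafka.zookeepers becomes zookeeper.connect for the old high-level consumer, and kafka.pollRecord is passed as max.poll.records. Since max.poll.records is a new-consumer setting, the ZooKeeper-based consumer in step 3 does not act on it. Note also that all five brokers here sit on one host (192.168.2.204, ports 9092-9096), which is fine for a test bed but offers no real redundancy.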
3. Multithreaded Kafka consumption
package com.seadun.dun.sbox.kafka;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.index.VersionType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.seadun.dun.sbox.config.KafkaPro;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
/**
 * Lock screen
 * @author 圆子酱
 * @time 2018-01-22
 */
@Service
public class SboxOnlineSaveService {
	private static final Logger logger = LoggerFactory.getLogger(SboxOnlineSaveService.class);
	@Autowired
	private ElasticsearchTemplate elasticsearchTemplate;
	@Autowired
	private KafkaPro kafkaPro;
	// revised section
	private ConsumerConnector consumer;
	// heartbeat topic
	private String topic = "event-online";
	private int numThreads = 5;
	private ExecutorService executorPool;
	private BlockingQueue<JSONObject> queue = new LinkedBlockingQueue<>();
	private static int status = 0;
	private SimpleDateFormat sdf1 = new SimpleDateFormat("yyyy-MM-dd");
	private KafkaProducer<String, String> producer;
	@Scheduled(initialDelay = 30000, fixedDelay = 10000) // 30 s initial delay, then 10 s between completions
	public void run() {
		ConsumerConfig cc = new ConsumerConfig(kafkaPro.getConsumerProps("event-online"));
		this.consumer = Consumer.createJavaConsumerConnector(cc);
		producer = new KafkaProducer<>(kafkaPro.getProducerProps());
		Map<String, Integer> topicCountMap = new HashMap<>();
		topicCountMap.put(this.topic, this.numThreads);
		StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
		StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
		Map<String, List<KafkaStream<String, String>>> consumerMap =
				consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
		List<KafkaStream<String, String>> streams = consumerMap.get(this.topic);
		this.executorPool = Executors.newFixedThreadPool(this.numThreads);
		// one worker thread per partition stream
		for (final KafkaStream<String, String> stream : streams) {
			executorPool.execute(new Runnable() {
				@Override
				public void run() {
					ConsumerIterator<String, String> iter = stream.iterator();
					while (iter.hasNext()) {
						MessageAndMetadata<String, String> value = iter.next();
						saveStandard(value.message());
					}
				}
			});
		}
		try {
			Thread.sleep(10000);
			saveToEs(); // never returns, so this @Scheduled method effectively runs once
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
		}
	}
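	// Assumption, not in the original code: with auto.commit.enable=false the
	// 0.8 high-level consumer never writes consumed offsets back to ZooKeeper,
	// so a restart replays the topic from "smallest". If replay is not wanted,
	// the old API offers an explicit commit that could be called after each
	// successful bulk flush:
	//
	//     consumer.commitOffsets();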
	protected void saveStandard(String value) {
		long startTime = System.currentTimeMillis();
		JSONObject json = JSON.parseObject(value);
		String hostName = json.containsKey("hostCode") ? json.get("hostCode").toString() : null;
		if (!StringUtils.isEmpty(hostName)) {
			Date date = new Date();
			try {
				// per-day document
				json.put("id", hostName);
				json.put("hostName", hostName);
				json.put("index", "isc-standard-secrecy-result");
				json.put("type", "equipments-status-standard-" + sdf1.format(date));
				queue.offer(json);
				// copy of the same document that always holds the latest status
				String json2 = json.toString();
				JSONObject json3 = JSON.parseObject(json2);
				json3.put("index", "isc-standard-secrecy-result");
				json3.put("type", "equipments-status-standard-lastest");
				queue.offer(json3);
			} catch (Exception e) {
				logger.error("failed to queue online message", e);
			}
		}
		long endTime = System.currentTimeMillis();
		logger.info("online message processed: start [{}], end [{}], elapsed [{}] ms", startTime, endTime, endTime - startTime);
	}
	/**
	 * Bulk-save loop that flushes the queue to ES
	 */
	public void saveToEs() {
		// endless flush loop (the original called itself recursively, which
		// would eventually overflow the stack)
		while (true) {
			int count;
			if (queue.size() >= 1000) {
				count = 1000;
				status = 1;
			} else {
				count = queue.size();
				status = 0;
			}
			if (count != 0) {
				long startTime = System.currentTimeMillis();
				BulkRequestBuilder bulk = elasticsearchTemplate.getClient().prepareBulk();
				for (int i = 0; i < count; i++) {
					JSONObject doc = queue.poll();
					// upsert: insert the document if absent, otherwise merge in the new fields
					IndexRequest indexRequest = new IndexRequest(doc.get("index").toString(), doc.get("type").toString(), doc.get("id").toString()).source(doc);
					bulk.add(elasticsearchTemplate.getClient()
							.prepareUpdate(doc.get("index").toString(), doc.get("type").toString(), doc.get("id").toString())
							.setDoc(doc)
							.setUpsert(indexRequest)
							.setRetryOnConflict(5));
				}
				// execute the bulk request
				BulkResponse bulkResponse = bulk.execute().actionGet();
				if (bulkResponse.hasFailures()) {
					for (BulkItemResponse item : bulkResponse.getItems()) {
						logger.error(item.getFailureMessage());
					}
				} else {
					long endTime = System.currentTimeMillis();
					logger.info("bulk succeeded: start [{}], end [{}], elapsed [{}] ms", startTime, endTime, endTime - startTime);
				}
			}
			if (status == 0 || count == 0) {
				// queue drained (or empty): wait before polling again
				try {
					Thread.sleep(5000);
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt();
					return;
				}
			}
		}
	}
}
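Nothing above ever releases the consumer, the worker pool, or the producer. A shutdown sketch, not part of the original code (the @PreDestroy hook and the 10-second grace period are assumptions; javax.annotation.PreDestroy and java.util.concurrent.TimeUnit would need to be imported):

	@PreDestroy
	public void shutdown() throws InterruptedException {
		if (consumer != null) {
			consumer.shutdown(); // ends the KafkaStream iterators, so the workers exit
		}
		if (executorPool != null) {
			executorPool.shutdown();
			if (!executorPool.awaitTermination(10, TimeUnit.SECONDS)) {
				executorPool.shutdownNow(); // assumption: 10 s grace period is enough
			}
		}
		if (producer != null) {
			producer.close(); // flushes any buffered records
		}
	}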