Reading the official documentation is well worth the effort:
http://kafka.apache.org/documentation.html
Pay particular attention to how partitions, offsets, consumer threads and group.id relate to each other: within one consumer group, each partition is consumed by at most one thread and its offset is tracked per group, so starting more consumer threads than the topic has partitions leaves the extra threads idle.
1.2 The Kafka usage scenario:
# Kafka settings for consuming the source messages
kafkaProp:
  # ZooKeeper for the test environment
  # zk: 10.11.24.21:2181,10.11.24.22:2181,10.11.24.23:2181
  zk: 1.11.11.22:2181,1.11.11.22:2181,1.11.11.22:2181
  groupId: account_cascade_client_11
  timeOut: 3000
  syncTime: 100
  interval: 1000
  topics:
    - req.log.converted
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

import java.util.List;

/**
 * Binds the kafkaProp.* keys from application.yml.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@Configuration
@ConfigurationProperties(prefix = "kafkaProp")
public class KafkaProp {
    private String zk;            // ZooKeeper connect string
    private String groupId;       // consumer group id
    private String timeOut;
    private String interval;
    private String syncTime;
    private List<String> topics;  // topics to consume; only the first one is used below
}
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.Map;

/**
 * Application entry point: starts the Kafka consumer threads once the Spring context is up.
 */
@Component
public class casCadeMain implements CommandLineRunner {

    @Autowired
    private KafkaProp kafkaProp;
    @Autowired
    private CasCadeConsumer acceptConsumer;
    @Autowired
    private IHandler iHandler;
    @Resource(name = "tacheHandlerContainer")
    private Map<String, AbStractTacheHandler> allHandlers;

    @Override
    public void run(String... args) throws Exception {
        // init();
        initConsumerMain();
    }

    private void initConsumerMain() {
        ConsumerMain example = new ConsumerMain(iHandler, kafkaProp, allHandlers);
        // Start three consumer threads; this should not exceed the partition count of the topic,
        // otherwise the extra threads never receive any messages.
        example.run(3);
    }
}
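ConsumerMain (shown next) exposes a shutdown() method that closes the ZooKeeper connector and the thread pool, but nothing in the listing ever calls it. A minimal sketch, assuming a JVM shutdown hook is acceptable here, of how initConsumerMain could be extended to invoke it on application exit:

    private void initConsumerMain() {
        final ConsumerMain example = new ConsumerMain(iHandler, kafkaProp, allHandlers);
        example.run(3);
        // Close the consumer connector and the worker pool when the JVM stops,
        // so the ZooKeeper session is released cleanly.
        Runtime.getRuntime().addShutdownHook(new Thread(example::shutdown));
    }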
The ConsumerMain class
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Wraps the old high-level (ZooKeeper based) consumer and fans the streams out to a thread pool.
 */
public class ConsumerMain {

    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;
    private IHandler iHandler;
    private Map<String, AbStractTacheHandler> allHandlers;

    public ConsumerMain(IHandler iHandler, String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
        this.iHandler = iHandler;
    }

    public ConsumerMain(IHandler iHandler, KafkaProp kafkaProp, Map<String, AbStractTacheHandler> allHandlers) {
        String zooKeeper = kafkaProp.getZk();
        String groupId = kafkaProp.getGroupId();
        String topic = kafkaProp.getTopics().get(0);   // only the first configured topic is consumed
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(zooKeeper, groupId));
        this.topic = topic;
        this.iHandler = iHandler;
        this.allHandlers = allHandlers;
    }

    public void shutdown() {
        if (consumer != null) {
            consumer.shutdown();
        }
        if (executor != null) {
            executor.shutdown();
        }
    }

    public void run(int a_numThreads) {
        // Ask Kafka for a_numThreads streams of the topic; one stream is handed to each worker thread.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, a_numThreads);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        executor = Executors.newFixedThreadPool(a_numThreads);
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new ConsumerThread(iHandler, stream, allHandlers));
        }
    }
}
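Both constructors call createConsumerConfig, but that helper is missing from the original listing. Below is a minimal sketch modeled on the standard high-level consumer example; the mapping of timeOut, syncTime and interval from the YAML onto zookeeper.session.timeout.ms, zookeeper.sync.time.ms and auto.commit.interval.ms is my assumption, and the values are hard-coded here because the method only receives the ZooKeeper address and group id (java.util.Properties also needs to be imported):

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        // Assumed mapping of the kafkaProp values; pass them into this method
        // instead of hard-coding them if they should really come from application.yml.
        props.put("zookeeper.session.timeout.ms", "3000");
        props.put("zookeeper.sync.time.ms", "100");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }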
The ConsumerThread worker class
package com.staryea.accountCascade.kafka;

import com.app.frame.util.logging.Log;
import com.app.frame.util.logging.LogFactory;
import com.staryea.accountCascade.inft.AbStractTacheHandler;
import com.staryea.accountCascade.inft.IHandler;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

import java.util.Map;

/**
 * One worker per Kafka stream: reads messages, picks a handler for each JSON payload and runs it.
 */
public class ConsumerThread implements Runnable {

    private final Log log = LogFactory.getLog(ConsumerThread.class);

    private KafkaStream<byte[], byte[]> m_stream;
    private Map<String, AbStractTacheHandler> allHandlers;
    private IHandler iHandler;

    public ConsumerThread(IHandler iHandler, KafkaStream<byte[], byte[]> a_stream, Map<String, AbStractTacheHandler> allHandlers) {
        this.allHandlers = allHandlers;
        this.m_stream = a_stream;
        this.iHandler = iHandler;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        // hasNext() blocks until the next message arrives, so this loop runs until shutdown.
        while (it.hasNext()) {
            String jsonItem = new String(it.next().message());
            AbStractTacheHandler tacheHandler = iHandler.choose(jsonItem);
            if (tacheHandler == null) {
                //log.warn("no handler resolved for json: " + jsonItem);
                continue;
            }
            try {
                tacheHandler.work(jsonItem);
            } catch (Exception e) {
                // Log and keep consuming; one bad message must not kill the thread.
                log.error(e.getMessage(), e);
            }
        }
    }
}
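The IHandler and AbStractTacheHandler types from com.staryea.accountCascade.inft are not included in the listing. Judging only from how ConsumerThread uses them (choose a handler for a JSON message, then let it process that message), they presumably look roughly like the sketch below; the exact signatures are assumptions:

    public interface IHandler {
        // Return the handler responsible for this JSON message, or null if none matches.
        AbStractTacheHandler choose(String json);
    }

    public abstract class AbStractTacheHandler {
        // Process a single JSON message; ConsumerThread catches any exception thrown here.
        public abstract void work(String json) throws Exception;
    }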