Kafka has several C/C++ clients:
- librdkafka
- kafka_cpp (a C++ wrapper around librdkafka that I wrote)
- libkafka
- libkafka-asio
- csi-kafka
- libasynckafkaclient
- kafka-cpp
librdkafka has by far the most users, with 2000+ stars on GitHub, and it is the one I use as well.
There was no official 0.11.6 release yet at the time, so I picked v0.11.5, and promptly fell into a pit.
v0.11.5 has a bug in rd_clock(): on Windows, once the machine has been up for more than 7 days, its return value overflows and both produce and consume stop working. My machine stays on year-round, so the test program I wrote failed to produce or consume from the very start. I first suspected a bad configuration, then an incompatibility with the Kafka server version; meanwhile the Go client sarama passed the same test within minutes, which nearly drove me to give up. In the end I single-stepped through the code and found that the overflowed system timestamp kept the produce requests and fetch requests from ever meeting the conditions to run. v0.11.6-rc2 fixes the problem, and rolling back to v0.11.4 avoids it as well.
The following issues all trace back to this bug:
- https://github.com/edenhill/librdkafka/issues/2025
- https://github.com/confluentinc/confluent-kafka-dotnet/issues/603#issuecomment-417487187
- https://github.com/edenhill/librdkafka/issues/2030
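To make the failure mode concrete, here is a minimal sketch of this class of bug. It is not the actual librdkafka code, just an illustration of how converting a high-resolution tick counter to microseconds by multiplying first can overflow int64_t once uptime is long enough:
#include <cstdint>
#include <cstdio>
// Naive conversion: ticks * 1000000 exceeds INT64_MAX once the counter is
// large enough; signed overflow is UB and in practice yields a bogus value.
static int64_t ticks_to_us_naive(int64_t ticks, int64_t freq) {
    return ticks * 1000000 / freq;
}
// Overflow-safe variant: split off whole seconds before scaling.
static int64_t ticks_to_us_safe(int64_t ticks, int64_t freq) {
    return (ticks / freq) * 1000000 + (ticks % freq) * 1000000 / freq;
}
int main() {
    const int64_t freq  = 10000000;            // assume a 10 MHz counter
    const int64_t ticks = freq * 86400LL * 12; // ~12 days of uptime in ticks
    printf("naive: %lld\n", (long long)ticks_to_us_naive(ticks, freq));
    printf("safe : %lld\n", (long long)ticks_to_us_safe(ticks, freq));
    return 0;
}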
Since there was still no official v0.11.6 release, I went with the latest pre-release, v0.11.6-rc4.
Download and build
- Download the v0.11.6-rc4 source.
- Open librdkafka.sln under win32 in the source root and build it with Visual Studio. By default both librdkafka and librdkafkacpp are built as DLLs; you can change the build options to produce static libraries instead.
- To build static libraries, remove "_USRDLL;LIBRDKAFKA_EXPORTS;" from the librdkafka project's preprocessor definitions and add "LIBRDKAFKA_STATICLIB"; likewise remove "_USRDLL;LIBRDKAFKACPP_EXPORTS;" from the librdkafkacpp project and add "LIBRDKAFKA_STATICLIB". Then define LIBRDKAFKA_STATICLIB in every project that uses librdkafka as well (see the snippet after this list).
- librdkafka depends on zlib and OpenSSL; remember to point the include and lib directories for both to the right places in the Visual Studio projects.
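For the last step, the macro can equally be defined in code before the header is pulled in; a minimal sketch (the project-settings route above is what I actually used):
// Define LIBRDKAFKA_STATICLIB before including the headers so they do not
// declare the librdkafka symbols as __declspec(dllimport).
#define LIBRDKAFKA_STATICLIB
#include "rdkafkacpp.h"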
Producer test
#include <iostream>
#include <string>
#include <list>
#include <cstdio>
#include <cstdint>
#include "rdkafkacpp.h"
static bool run = true;
static bool exit_eof = false;
void dump_config(RdKafka::Conf* conf) {
std::list<std::string> *dump = conf->dump();
printf("config dump(%d):\n", (int32_t)dump->size());
for (auto it = dump->begin(); it != dump->end(); ) {
std::string name = *it++;
std::string value = *it++;
printf("%s = %s\n", name.c_str(), value.c_str());
}
printf("---------------------------------------------\n");
}
class my_event_cb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event) override {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
run = false;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
};
class my_hash_partitioner_cb : public RdKafka::PartitionerCb {
public:
int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
int32_t partition_cnt, void *msg_opaque) override {
return djb_hash(key->c_str(), key->size()) % partition_cnt;
}
private:
static inline unsigned int djb_hash(const char *str, size_t len) {
unsigned int hash = 5381;
for (size_t i = 0; i < len; i++)
hash = ((hash << 5) + hash) + str[i];
return hash;
}
};
namespace producer_ts {
class my_delivery_report_cb : public RdKafka::DeliveryReportCb {
public:
void dr_cb(RdKafka::Message& message) override {
printf("message delivery %d bytes, error:%s, key: %s\n",
(int32_t)message.len(), message.errstr().c_str(), message.key() ? message.key()->c_str() : "");
}
};
void producer_test() {
printf("producer test\n");
int32_t partition = RdKafka::Topic::PARTITION_UA;
printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
std::string broker_list;
//std::cin >> broker_list;
broker_list = "127.0.0.1:9092";
printf("input partition:");
//std::cin >> partition;
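// NB: with an explicit partition, the partitioner_cb registered below is
// never invoked; pass RdKafka::Topic::PARTITION_UA to let it pick one.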
partition = 0;
// config
RdKafka::Conf* global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf* topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
my_hash_partitioner_cb hash_partitioner;
my_event_cb event_cb;
my_delivery_report_cb delivery_cb;
std::string err_string;
if (topic_conf->set("partitioner_cb", &hash_partitioner, err_string) != RdKafka::Conf::CONF_OK) {
printf("set partitioner_cb error: %s\n", err_string.c_str());
return;
}
global_conf->set("metadata.broker.list", broker_list, err_string);
global_conf->set("event_cb", &event_cb, err_string);
global_conf->set("dr_cb", &delivery_cb, err_string);
//global_conf->set("retry.backoff.ms", "10", err_string);
//global_conf->set("debug", "all", err_string);
//global_conf->set("debug", "topic,msg", err_string);
//global_conf->set("debug", "msg,queue", err_string);
dump_config(global_conf);
dump_config(topic_conf);
// create producer
RdKafka::Producer* producer = RdKafka::Producer::create(global_conf, err_string);
if (!producer) {
printf("failed to create producer, %s\n", err_string.c_str());
return;
}
printf("created producer %s\n", producer->name().c_str());
std::string topic_name;
while (true) {
printf("input topic to create:\n");
std::cin >> topic_name;
// create topic
RdKafka::Topic* topic =
RdKafka::Topic::create(producer, topic_name, topic_conf, err_string);
if (!topic) {
printf("try create topic[%s] failed, %s\n",
topic_name.c_str(), err_string.c_str());
return;
}
printf(">");
for (std::string line; run && std::getline(std::cin, line); ) {
if (line.empty()) {
producer->poll(0);
continue;
}
if (line == "quit") {
break;
}
std::string key = "kafka_test";
RdKafka::ErrorCode res = producer->produce(topic, partition,
RdKafka::Producer::RK_MSG_COPY,
(char*)line.c_str(), line.size(), key.c_str(), key.size(), NULL);
if (res != RdKafka::ERR_NO_ERROR) {
printf("produce failed, %s\n", RdKafka::err2str(res).c_str());
}
else {
printf("produced msg, bytes %d\n", (int32_t)line.size());
}
// do socket io
producer->poll(0);
printf("outq_len: %d\n", producer->outq_len());
//producer->flush(1000);
//while (run && producer->outq_len()) {
// printf("wait for write queue( size %d) write finish\n", producer->outq_len());
// producer->poll(1000);
//}
printf(">");
}
delete topic;
if (!run) {
break;
}
}
run = true;
while (run && producer->outq_len()) {
printf("wait for write queue( size %d) write finish\n", producer->outq_len());
producer->poll(1000);
}
delete producer;
}
}
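Instead of the manual outq_len()/poll() drain loop at the end of producer_test(), the commented-out flush() call hints at a simpler route; a sketch of the replacement:
// Drop-in replacement for the drain loop above: block up to 5 s until
// every queued message has been delivered (or report a timeout).
RdKafka::ErrorCode err = producer->flush(5000);
if (err != RdKafka::ERR_NO_ERROR)
    printf("flush failed: %s\n", RdKafka::err2str(err).c_str());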
Consumer test
#include <iostream>
#include <string>
#include <list>
#include <cstdio>
#include <cstdint>
#include "rdkafkacpp.h"
static bool run = true;
static bool exit_eof = false;
void dump_config(RdKafka::Conf* conf) {
std::list<std::string> *dump = conf->dump();
printf("config dump(%d):\n", (int32_t)dump->size());
for (auto it = dump->begin(); it != dump->end(); ) {
std::string name = *it++;
std::string value = *it++;
printf("%s = %s\n", name.c_str(), value.c_str());
}
printf("---------------------------------------------\n");
}
class my_event_cb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event) override {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
run = false;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
};
class my_hash_partitioner_cb : public RdKafka::PartitionerCb {
public:
int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
int32_t partition_cnt, void *msg_opaque) override {
return djb_hash(key->c_str(), key->size()) % partition_cnt;
}
private:
static inline unsigned int djb_hash(const char *str, size_t len) {
unsigned int hash = 5381;
for (size_t i = 0; i < len; i++)
hash = ((hash << 5) + hash) + str[i];
return hash;
}
};
namespace consumer_ts
{
void msg_consume(RdKafka::Message* message, void* opaque)
{
switch (message->err())
{
case RdKafka::ERR__TIMED_OUT:
break;
case RdKafka::ERR_NO_ERROR:
/* Real message */
std::cout << "Read msg at offset " << message->offset() << std::endl;
if (message->key())
{
std::cout << "Key: " << *message->key() << std::endl;
}
printf("%.*s\n", static_cast(message->len()), static_cast(message->payload()));
break;
case RdKafka::ERR__PARTITION_EOF:
/* Last message */
if (exit_eof)
{
run = false;
}
break;
case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = false;
break;
default:
/* Errors */
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = false;
}
}
class my_consumer_cb : public RdKafka::ConsumeCb {
public:
void consume_cb(RdKafka::Message &msg, void *opaque) override
{
msg_consume(&msg, opaque);
}
};
void consumer_test() {
printf("conumer test\n");
int32_t partition = RdKafka::Topic::PARTITION_UA;
printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
std::string broker_list;
//std::cin >> broker_list;
broker_list = "127.0.0.1:9092";
printf("inpute partition:");
//std::cin >> partition;
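// NB: the legacy simple consumer needs a concrete partition to start() on;
// RdKafka::Topic::PARTITION_UA is not valid here.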
partition = 0;
// config
RdKafka::Conf* global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf* topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
my_hash_partitioner_cb hash_partitioner;
my_event_cb event_cb;
my_consumer_cb consume_cb;
int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
std::string err_string;
if (topic_conf->set("partitioner_cb", &hash_partitioner, err_string) != RdKafka::Conf::CONF_OK){
printf("set partitioner_cb error: %s\n", err_string.c_str());
return;
}
global_conf->set("metadata.broker.list", broker_list, err_string);
global_conf->set("event_cb", &event_cb, err_string);
//global_conf->set("debug", "all", err_string);
//global_conf->set("debug", "topic,msg", err_string);
//global_conf->set("debug", "topic,msg,queue", err_string);
dump_config(global_conf);
dump_config(topic_conf);
// create consumer
RdKafka::Consumer* consumer = RdKafka::Consumer::create(global_conf, err_string);
if (!consumer) {
printf("failed to create consumer, %s\n", err_string.c_str());
return;
}
printf("created consumer %s\n", consumer->name().c_str());
// create topic
printf("input topic name:\n");
std::string topic_name;
std::cin >> topic_name;
RdKafka::Topic* topic = RdKafka::Topic::create(consumer, topic_name, topic_conf, err_string);
if (!topic) {
printf("try create topic[%s] failed, %s\n", topic_name.c_str(), err_string.c_str());
return;
}
// Start consumer for topic+partition at start offset
RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
if (resp != RdKafka::ERR_NO_ERROR) {
printf("Failed to start consumer: %s\n",
RdKafka::err2str(resp).c_str());
return;
}
int use_ccb = 0;
while (run) {
//consumer->consume_callback(topic, partition, 1000, &consume_cb, &use_ccb);
//consumer->poll(0);
RdKafka::Message *msg = consumer->consume(topic, partition, 2000);
msg_consume(msg, NULL);
delete msg;
}
// stop consumer
consumer->stop(topic, partition);
consumer->poll(1000);
delete topic;
delete consumer;
}
}
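consumer_test() always starts from the beginning of the partition. RdKafka::Topic defines other start offsets that can be swapped into the start_offset assignment above, for example:
// Alternatives to RdKafka::Topic::OFFSET_BEGINNING for start_offset:
int64_t start_offset = RdKafka::Topic::OFFSET_END;     // only messages produced from now on
//int64_t start_offset = RdKafka::Topic::OFFSET_STORED; // resume from previously stored offsets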
Metadata test
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdint>
#include "rdkafkacpp.h"
static bool run = true;
class my_event_cb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event) override {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
run = false;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
};
class my_hash_partitioner_cb : public RdKafka::PartitionerCb {
public:
int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
int32_t partition_cnt, void *msg_opaque) override {
return djb_hash(key->c_str(), key->size()) % partition_cnt;
}
private:
static inline unsigned int djb_hash(const char *str, size_t len) {
unsigned int hash = 5381;
for (size_t i = 0; i < len; i++)
hash = ((hash << 5) + hash) + str[i];
return hash;
}
};
namespace metadata_ts{
static void metadata_print (const std::string &topic,
const RdKafka::Metadata *metadata) {
if (!metadata) {
printf("try metadata_print for topic: %s failed.\n", topic.empty() ? "all topic" : topic.c_str());
return;
}
printf("Metadata for %s ( from broker %d:%s)\n",
topic.empty() ? "all topic" : topic.c_str(),
metadata->orig_broker_id(), metadata->orig_broker_name().c_str());
/* Iterate brokers */
printf("brokers(%d):\n", (int32_t)metadata->brokers()->size());
RdKafka::Metadata::BrokerMetadataIterator ib;
for (ib = metadata->brokers()->begin();
ib != metadata->brokers()->end();
++ib) {
printf("broker[%d] at %s:%d\n", (*ib)->id(), (*ib)->host().c_str(), (*ib)->port());
}
/* Iterate topics */
printf("topics(%d):\n", (int32_t)metadata->topics()->size());
RdKafka::Metadata::TopicMetadataIterator it;
for (it = metadata->topics()->begin();
it != metadata->topics()->end();
++it) {
printf(" topic\"%s\" with %d partitions:",
(*it)->topic().c_str(), (int32_t)(*it)->partitions()->size());
if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
printf(" %s", err2str((*it)->err()).c_str());
if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
printf(" (try again)");
}
printf("\n");
/* Iterate topic's partitions */
RdKafka::TopicMetadata::PartitionMetadataIterator ip;
for (ip = (*it)->partitions()->begin();
ip != (*it)->partitions()->end();
++ip) {
printf(" partition %d, leader %d, replicas:", (*ip)->id(), (*ip)->leader());
/* Iterate partition's replicas */
RdKafka::PartitionMetadata::ReplicasIterator ir;
for (ir = (*ip)->replicas()->begin();
ir != (*ip)->replicas()->end();
++ir) {
printf("%s%d", (ir == (*ip)->replicas()->begin() ? "" : ","), *ir);
}
/* Iterate partition's ISRs */
printf(", isrs: ");
RdKafka::PartitionMetadata::ISRSIterator iis;
for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis)
printf("%s%d", (iis == (*ip)->isrs()->begin() ? "" : ","), *iis);
if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
printf(", %s\n", RdKafka::err2str((*ip)->err()).c_str());
else
printf("\n");
}
}
}
void metadata_test() {
printf("metadata_test\n");
printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
std::string broker_list;
//std::cin >> broker_list;
broker_list = "127.0.0.1:9092";
// config
RdKafka::Conf* global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
std::string err_string;
my_hash_partitioner_cb hash_partitioner;
my_event_cb event_cb;
global_conf->set("metadata.broker.list", broker_list, err_string);
global_conf->set("event_cb", &event_cb, err_string);
// create producer
RdKafka::Producer* producer = RdKafka::Producer::create(global_conf, err_string);
if (!producer) {
printf("failed to create producer, %s\n", err_string.c_str());
return;
}
printf("created producer %s\n", producer->name().c_str());
while (run) {
std::string cmd;
std::cin >> cmd;
if (cmd == "ls") {
RdKafka::Metadata *metadata;
/* Fetch metadata */
RdKafka::ErrorCode err = producer->metadata(true, NULL,
&metadata, 5000);
if (err != RdKafka::ERR_NO_ERROR) {
std::cerr << "%% Failed to acquire metadata: "
<< RdKafka::err2str(err) << std::endl;
run = false;
break;
}
std::string topic_name;
metadata_print(topic_name, metadata);
delete metadata;
}
//run = 0;
}
}
}
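Finally, a hypothetical entry point that ties the three tests together. It assumes the snippets above share a single translation unit, with the duplicated helpers (my_event_cb, my_hash_partitioner_cb, dump_config, run) defined only once:
#include <cstring>
// Pick a test by command-line argument; default to the metadata test.
int main(int argc, char* argv[]) {
    if (argc > 1 && std::strcmp(argv[1], "producer") == 0)
        producer_ts::producer_test();
    else if (argc > 1 && std::strcmp(argv[1], "consumer") == 0)
        consumer_ts::consumer_test();
    else
        metadata_ts::metadata_test();
    return 0;
}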