Kafka C++ 生产者 / 消费者示例

使用的是 librdkafka 这个库,可以从其官方仓库下载并编译安装。

下面分别给出生产者和消费者的 C++ 客户端示例代码。

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <syslog.h>
#include <sys/time.h>
#include <unistd.h>

#include "rdkafka.h"
  
const int PRODUCER_INIT_FAILED = -1;  
const int PRODUCER_INIT_SUCCESS = 0;  
const int PUSH_DATA_FAILED = -1;  
const int PUSH_DATA_SUCCESS = 0;  
  
  
/*
 * Log callback registered with librdkafka: prints a timestamped log
 * line to stderr.  `rk` may be NULL for library-level messages, so it
 * must not be fed to %s directly (that is undefined behavior).
 */
static void logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
        (unsigned)tv.tv_sec, (unsigned)(tv.tv_usec / 1000),
        level, fac, rk ? rd_kafka_name(rk) : "(null)", buf);
}
  
  
class ProducerKafka  
{  
public:  
    ProducerKafka(){};  
    ~ProducerKafka(){}  
  
    int init_kafka(int partition, char *brokers, char *topic);  
    int push_data_to_kafka(const char* buf, const int buf_len);  
    void destroy();  
  
private:  
    int partition_;   
      
    //rd  
    rd_kafka_t* handler_;  
    rd_kafka_conf_t *conf_;  
      
    //topic  
    rd_kafka_topic_t *topic_;  
    rd_kafka_topic_conf_t *topic_conf_;  
};  
  
int ProducerKafka::init_kafka(int partition, char *brokers, char *topic)  
{  
    char tmp[16]={0};  
    char errstr[512]={0};     
  
    partition_ = partition;   
  
    /* Kafka configuration */  
    conf_ = rd_kafka_conf_new();  
      
    //set logger :register log function  
    rd_kafka_conf_set_log_cb(conf_, logger);      
      
    /* Quick termination */  
    snprintf(tmp, sizeof(tmp), "%i", SIGIO);  
    rd_kafka_conf_set(conf_, "internal.termination.signal", tmp, NULL, 0);  
  
    /*topic configuration*/  
    topic_conf_ = rd_kafka_topic_conf_new();  
  
    if (!(handler_  = rd_kafka_new(RD_KAFKA_PRODUCER, conf_, errstr, sizeof(errstr))))   
    {  
        fprintf(stderr, "*****Failed to create new producer: %s*******\n",errstr);  
        return PRODUCER_INIT_FAILED;  
    }  
  
    rd_kafka_set_log_level(handler_, LOG_DEBUG);  
  
    /* Add brokers */  
    if (rd_kafka_brokers_add(handler_, brokers) == 0)  
    {  
        fprintf(stderr, "****** No valid brokers specified********\n");  
        return PRODUCER_INIT_FAILED;         
    }     
      
  
    /* Create topic */  
    topic_ = rd_kafka_topic_new(handler_, topic, topic_conf_);  
      
    return PRODUCER_INIT_SUCCESS;  
}  
  
void ProducerKafka::destroy()  
{  
    /* Destroy topic */  
    rd_kafka_topic_destroy(topic_);  
  
    /* Destroy the handle */  
    rd_kafka_destroy(handler_);  
}  
  
/*
 * Enqueue buffer[0..buf_len) for asynchronous delivery (payload is
 * copied).  Returns PUSH_DATA_SUCCESS or PUSH_DATA_FAILED.
 */
int ProducerKafka::push_data_to_kafka(const char* buffer, const int buf_len)
{
    int ret;

    /* The original returned 0 (== PUSH_DATA_SUCCESS) for a NULL buffer,
     * silently reporting success without sending anything.  A negative
     * length would also wrap to a huge size_t below. */
    if (NULL == buffer || buf_len < 0)
        return PUSH_DATA_FAILED;

    ret = rd_kafka_produce(topic_, partition_, RD_KAFKA_MSG_F_COPY,
                            (void*)buffer, (size_t)buf_len, NULL, 0, NULL);

    if (ret == -1)
    {
        fprintf(stderr,"****Failed to produce to topic %s partition %i: %s*****\n",
            rd_kafka_topic_name(topic_), partition_,
            rd_kafka_err2str(rd_kafka_errno2err(errno)));

        rd_kafka_poll(handler_, 0);   /* serve delivery callbacks anyway */
        return PUSH_DATA_FAILED;
    }

    fprintf(stderr, "***Sent %d bytes to topic:%s partition:%i*****\n",
        buf_len, rd_kafka_topic_name(topic_), partition_);

    /* Non-blocking poll to serve delivery reports for queued messages. */
    rd_kafka_poll(handler_, 0);

    return PUSH_DATA_SUCCESS;
}
  
int main()  
{  
    char test_data[100];  
    strcpy(test_data, "helloworld");  
  
    ProducerKafka* producer = new ProducerKafka;  
    if (PRODUCER_INIT_SUCCESS == producer->init_kafka(0, "192.168.1.108:9092", "chenxun"))  
    {  
        printf("producer init success\n");  
    }  
    else  
    {  
        printf("producer init failed\n");  
        return 0;  
    }  
      
    while (fgets(test_data, sizeof(test_data), stdin)) {  
        size_t len = strlen(test_data);  
        if (test_data[len - 1] == '\n')  
            test_data[--len] = '\0';  
        if (strcmp(test_data, "end") == 0)  
            break;  
        if (PUSH_DATA_SUCCESS == producer->push_data_to_kafka(test_data, strlen(test_data)))  
            printf("push data success %s\n", test_data);  
        else  
            printf("push data failed %s\n", test_data);  
    }  
  
    producer->destroy();  
      
    return 0;     
}  

#ifndef KAFKACONSUMMER_H
#define KAFKACONSUMMER_H

#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <csignal>
#include <cstring>
#include <list>
#include <vector>
#include "librdkafka/rdkafkacpp.h"
#include <fstream>
#include <stdint.h>

using std::string;
using std::list;
using std::cout;
using std::endl;
using std::vector;
using std::fstream;

// Flags shared between the signal handler / event callbacks and the
// consume loop in pull_data_from_kafka().
static bool run = true;        // consume loop keeps running while true
static bool exit_eof = true;   // when true, stop after reaching partition EOF

// NOTE(review): the original comment here said "store consumed kafka
// messages into the msg_data array", but no msg_data exists in this
// file — likely stale; confirm against the full project.
// One record decoded from a consumed message.  Field semantics are not
// visible here; presumably binlog/replication metadata (see
// protobufferrdp.h) — TODO confirm against the producer side.
struct protodata
{
    uint64_t uuid;           // record/source identifier — confirm semantics
    uint64_t position;       // current position — confirm units/meaning
    uint64_t next_position;  // position of the following record
    string gtid;             // global transaction id string
};

static vector<protodata> fulltopic;



// Event callback: routes librdkafka events (errors, statistics, log
// lines, and anything else) to stderr.
class MyEventCb : public RdKafka::EventCb {
public:
  void event_cb (RdKafka::Event &event) {
    const RdKafka::Event::Type kind = event.type();

    if (kind == RdKafka::Event::EVENT_ERROR) {
      std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
        event.str() << std::endl;
      // Stop the consume loop once every broker is unreachable.
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
        run = false;
    } else if (kind == RdKafka::Event::EVENT_STATS) {
      std::cerr << "\"STATS\": " << event.str() << std::endl;
    } else if (kind == RdKafka::Event::EVENT_LOG) {
      fprintf(stderr, "LOG-%i-%s: %s\n",
              event.severity(), event.fac().c_str(), event.str().c_str());
    } else {
      // Any other event type: dump it generically.
      std::cerr << "EVENT " << kind <<
        " (" << RdKafka::err2str(event.err()) << "): " <<
        event.str() << std::endl;
    }
  }
};


void msg_consume(RdKafka::Message* message, void* opaque) ;




// Consume callback: forwards every delivered message to the shared
// msg_consume() handler (used only by the disabled consume_callback
// path below).
class MyConsumeCb : public RdKafka::ConsumeCb {
public:
  void consume_cb (RdKafka::Message &msg, void *opaque) {
    msg_consume(&msg, opaque);
  }
};

// SIGINT/SIGTERM handler: clear the flag so the consume loop exits
// cleanly on the next iteration.
static void sigterm (int sig) {
  run = false;
}

class ConsummerKafka
{
public:
    ConsummerKafka();;
    ~ConsummerKafka(){}

    int init_kafka(int partition, string brokers, string topic);
    int pull_data_from_kafka();
    void destroy();

private:
    RdKafka::Conf * global_conf;
    RdKafka::Conf * topic_conf;
    string brokers;
    string errstr;
    RdKafka::Consumer *consumer;
    string topic_name ;
    RdKafka::Topic *topic;
    int32_t partition;
    int64_t start_offset;
    RdKafka::Message *msg;
};

#endif // KAFKACONSUMMER_H


#include <iostream>
#include <csignal>
#include "protobufferrdp.h"

// Initialize every handle/scalar member so that destroy() on a
// never-initialized object does not act on garbage pointers
// (the original left all members uninitialized).
ConsummerKafka::ConsummerKafka()
    : global_conf(NULL),
      topic_conf(NULL),
      consumer(NULL),
      topic(NULL),
      partition(0),
      start_offset(0),
      msg(NULL)
{
}
int ConsummerKafka::init_kafka(int _partition, string broker, string _topic)
{
      global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

      brokers = broker;
      partition = _partition;
      topic_name = _topic;
      start_offset = RdKafka::Topic::OFFSET_BEGINNING;
      global_conf->set("metadata.broker.list", brokers, errstr);

      MyEventCb ex_event_cb;
      global_conf->set("event_cb", &ex_event_cb, errstr);


      /*
        * Create consumer using accumulated global configuration.
        */
        consumer = RdKafka::Consumer::create(global_conf, errstr);
       if (!consumer) {
         std::cerr << "Failed to create consumer: " << errstr << std::endl;
         exit(1);
       }
         /* Create topic */
      topic = RdKafka::Topic::create(consumer, topic_name, topic_conf, errstr);
      if (!topic) {
        std::cerr << "Failed to create topic: " << errstr << std::endl;
        exit(1);
      }
}

// Stop consuming, drain outstanding callbacks, and release the
// handles.  Safe to call even if init_kafka() never ran or failed.
void ConsummerKafka::destroy()
{
      if (consumer) {
          if (topic)
              consumer->stop(topic, partition);
          consumer->poll(1000);   // let queued callbacks finish
      }

      delete topic;
      topic = NULL;
      delete consumer;
      consumer = NULL;
}

// Start consuming from start_offset and pump messages through
// msg_consume() until the global `run` flag is cleared (signal,
// partition EOF, or fatal error).  Returns 0 on normal exit; exits the
// process if the consumer cannot be started (original behavior).
int ConsummerKafka::pull_data_from_kafka()
{
    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
    if (resp != RdKafka::ERR_NO_ERROR) {
        std::cerr << "Failed to start consumer: " <<
          RdKafka::err2str(resp) << std::endl;
        exit(1);
    }

    /*
     * Consume messages.  (The original carried a dead, commented-out
     * consume_callback() branch guarded by use_ccb == 0; removed.)
     */
    while (run) {
        // consume() always returns a Message object (possibly an error
        // placeholder) that the caller owns.
        RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
        msg_consume(msg, NULL);
        delete msg;
        consumer->poll(0);    // serve queued events/callbacks
    }

    // The original fell off the end of a non-void function (UB).
    return 0;
}


// Handle one object returned by consume(): either a real message or an
// error placeholder, dispatched on its error code.
void msg_consume(RdKafka::Message* message, void* opaque) {
  switch (message->err()) {
  case RdKafka::ERR__TIMED_OUT:
    /* No message arrived within the timeout — not an error. */
    break;

  case RdKafka::ERR_NO_ERROR:
    /* Real message */
    std::cout << "Read msg at offset " << message->offset() << std::endl;
    if (message->key()) {
      std::cout << "Key: " << *message->key() << std::endl;
    }
    /* The payload is raw bytes and is NOT NUL-terminated, so print
     * exactly len() bytes.  (The original line was also garbled:
     * "static_cast(message->payload())" is missing its target type,
     * and streaming a char* would over-read past the payload.) */
    std::cout.write(static_cast<const char *>(message->payload()),
                    (std::streamsize)message->len());
    std::cout << std::endl;
    break;

  case RdKafka::ERR__PARTITION_EOF:
    cout << "reach last message" << endl;
    /* Last message: optionally stop the loop. */
    if (exit_eof) {
      run = false;
    }
    break;

  case RdKafka::ERR__UNKNOWN_TOPIC:
  case RdKafka::ERR__UNKNOWN_PARTITION:
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = false;
    break;

  default:
    /* Any other error is fatal to the consume loop. */
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = false;
  }
}

// Entry point: install signal handlers, then consume topic
// "Hello-Kafka" partition 0 from localhost until interrupted or EOF.
int main(int argc, char **argv) {
  // Let SIGINT/SIGTERM break the consume loop cleanly via `run`.
  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);

  ConsummerKafka test;
  test.init_kafka(0, "localhost", "Hello-Kafka");
  test.pull_data_from_kafka();

  // The original never released the consumer/topic handles.
  test.destroy();

  return 0;
}




这里有一点要注意:在 Ubuntu 下编译时,需要链接 librdkafka 的静态库(.a)或动态库(.so)。如果运行时提示找不到 .so 库,记得把库路径加入系统动态链接库配置:

sudo vim /etc/ld.so.conf
插入一行(写入你自己的安装路径):
/usr/local/rdkafka/lib
然后以 root 权限执行:
ldconfig





你可能感兴趣的:(kafka)