Kafka Java API Usage Demo

First, add the required dependencies to the pom file. This demo uses Kafka 0.8.2.0 with Scala 2.10.4; make sure the versions you import match the version of your Kafka cluster.

pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>cn.just.shinelon</groupId>
  <artifactId>SparkSql_Proj</artifactId>
  <version>1.0-SNAPSHOT</version>
  <inceptionYear>2008</inceptionYear>

  <repositories>
    <repository>
      <id>scala-tools.org</id>
      <name>Scala-Tools Maven2 Repository</name>
      <url>http://scala-tools.org/repo-releases</url>
    </repository>
  </repositories>

  <pluginRepositories>
    <pluginRepository>
      <id>scala-tools.org</id>
      <name>Scala-Tools Maven2 Repository</name>
      <url>http://scala-tools.org/repo-releases</url>
    </pluginRepository>
  </pluginRepositories>

  <dependencies>
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-library</artifactId>
      <version>2.10.4</version>
    </dependency>

    <!-- Kafka dependency -->
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.10</artifactId>
      <version>0.8.2.0</version>
    </dependency>

  </dependencies>

  <build>
    <sourceDirectory>src/main/scala</sourceDirectory>
    <testSourceDirectory>src/test/scala</testSourceDirectory>
    <plugins>
      <plugin>
        <groupId>org.scala-tools</groupId>
        <artifactId>maven-scala-plugin</artifactId>
        <executions>
          <execution>
            <goals>
              <goal>compile</goal>
              <goal>testCompile</goal>
            </goals>
          </execution>
        </executions>
        <configuration>
          <scalaVersion>2.10.4</scalaVersion>
          <args>
            <arg>-target:jvm-1.5</arg>
          </args>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-eclipse-plugin</artifactId>
        <configuration>
          <downloadSources>true</downloadSources>
          <buildcommands>
            <buildcommand>ch.epfl.lamp.sdt.core.scalabuilder</buildcommand>
          </buildcommands>
          <additionalProjectnatures>
            <projectnature>ch.epfl.lamp.sdt.core.scalanature</projectnature>
          </additionalProjectnatures>
          <classpathContainers>
            <classpathContainer>org.eclipse.jdt.launching.JRE_CONTAINER</classpathContainer>
            <classpathContainer>ch.epfl.lamp.sdt.launching.SCALA_CONTAINER</classpathContainer>
          </classpathContainers>
        </configuration>
      </plugin>
    </plugins>
  </build>
  <reporting>
    <plugins>
      <plugin>
        <groupId>org.scala-tools</groupId>
        <artifactId>maven-scala-plugin</artifactId>
        <configuration>
          <scalaVersion>2.10.4</scalaVersion>
        </configuration>
      </plugin>
    </plugins>
  </reporting>
</project>

Next, define the configuration values we need as constants in a small helper class.
KafkaProperties.java:

package cn.just.spark.kafka.producer;

/**
 * Configuration constants
 */
public class KafkaProperties {
    public static final String ZK = "hadoop-senior.shinelon.com:2181";           // ZooKeeper address
    public static final String TOPIC = "topic01";                                // topic name
    public static final String BROKER_LIST = "hadoop-senior.shinelon.com:9092";  // broker list
    public static final String GROUP_ID = "test_group01";                        // consumer group id
}

KafkaProducer.java:

package cn.just.spark.kafka.producer;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

/**
 * Kafka Java API: Producer
 */
public class KafkaProducer extends Thread {

    public String topic;

    public Producer<String, String> producer;

    public KafkaProducer(String topic) {
        this.topic = topic;

        Properties properties = new Properties();

        properties.put("metadata.broker.list", KafkaProperties.BROKER_LIST);
        properties.put("serializer.class", "kafka.serializer.StringEncoder");
        // Acknowledgement handshake between the producer and the broker:
        // 0  = the producer does not wait for any acknowledgement from the broker
        // 1  = the producer continues once the broker has acknowledged the message
        // -1 = the producer waits until all in-sync replicas have acknowledged the
        //      message; this is the strictest setting and gives the best durability
        //      (no data loss)
        properties.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(properties);

        producer = new Producer<String, String>(config);
    }

    @Override
    public void run() {
        int messageId = 1;
        while (true) {
            String message = "kafkaProducer" + messageId;
            producer.send(new KeyedMessage<String, String>(topic, message));
            System.out.println(message);
            messageId++;

            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
}
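
If stronger durability is required, the same API supports acks=-1, and the three-argument KeyedMessage constructor lets you attach a key so that messages sharing a key are hashed to the same partition. Below is a minimal sketch under those assumptions; the class name StrictProducerDemo and the key/payload values are hypothetical:

package cn.just.spark.kafka.producer;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

public class StrictProducerDemo {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("metadata.broker.list", KafkaProperties.BROKER_LIST);
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("request.required.acks", "-1");   // wait for all in-sync replicas

        Producer<String, String> producer =
                new Producer<String, String>(new ProducerConfig(props));

        // KeyedMessage(topic, key, message): messages with the same key
        // are routed to the same partition. Key and payload are example values.
        producer.send(new KeyedMessage<String, String>(
                KafkaProperties.TOPIC, "user-42", "payload-1"));

        producer.close();
    }
}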

KafkaConsumer.java:

package cn.just.spark.kafka.consumer;

import cn.just.spark.kafka.producer.KafkaProperties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

/**
 * Kafka Java API: high-level Consumer
 */
public class KafkaConsumer extends Thread {

    public String topic;

    public KafkaConsumer(String topic) {
        this.topic = topic;
    }

    public ConsumerConnector getConnection() {
        Properties properties = new Properties();
        properties.put("group.id", KafkaProperties.GROUP_ID);
        properties.put("zookeeper.connect", KafkaProperties.ZK);
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));
    }

    @Override
    public void run() {
        ConsumerConnector consumer = getConnection();

        Map<String, Integer> topicMap = new HashMap<String, Integer>();
        topicMap.put(topic, 1);     // consume the data with a single KafkaStream

        // key:   the topic name
        // value: List<KafkaStream<byte[], byte[]>>, the streams for that topic
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStream =
                consumer.createMessageStreams(topicMap);

        // Take the stream we will read from; get(0) matches the single
        // KafkaStream requested above
        KafkaStream<byte[], byte[]> stream = messageStream.get(topic).get(0);

        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        while (it.hasNext()) {
            String message = new String(it.next().message());
            System.out.println("received message: " + message);
        }
    }
}
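
Since topicMap.put(topic, 1) asked for exactly one KafkaStream, the consumer above is single-threaded. The high-level consumer can also hand back several streams for one topic, each drained on its own thread. Below is a minimal sketch under the same KafkaProperties assumptions; the class name MultiStreamConsumer and the stream count of 3 are illustrative (the count should not exceed the topic's partition count):

package cn.just.spark.kafka.consumer;

import cn.just.spark.kafka.producer.KafkaProperties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MultiStreamConsumer {

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("group.id", KafkaProperties.GROUP_ID);
        properties.put("zookeeper.connect", KafkaProperties.ZK);
        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));

        int numStreams = 3;    // illustrative; keep <= the topic's partition count

        Map<String, Integer> topicMap = new HashMap<String, Integer>();
        topicMap.put(KafkaProperties.TOPIC, numStreams);

        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams =
                consumer.createMessageStreams(topicMap);

        // One worker thread per KafkaStream
        ExecutorService executor = Executors.newFixedThreadPool(numStreams);
        for (final KafkaStream<byte[], byte[]> stream :
                messageStreams.get(KafkaProperties.TOPIC)) {
            executor.submit(new Runnable() {
                public void run() {
                    ConsumerIterator<byte[], byte[]> it = stream.iterator();
                    while (it.hasNext()) {
                        System.out.println(Thread.currentThread().getName()
                                + " received: " + new String(it.next().message()));
                    }
                }
            });
        }
    }
}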

Once the producer and consumer are written, write a main class to test them:
KafkaProducerApp.java:

package cn.just.spark.kafka.producer;

import cn.just.spark.kafka.consumer.KafkaConsumer;

public class KafkaProducerApp {
    // IDE shortcut: psvm
    public static void main(String[] args) {
        new KafkaProducer(KafkaProperties.TOPIC).start();

        new KafkaConsumer(KafkaProperties.TOPIC).start();
    }
}

The test results are shown below. Both the command-line console consumer (for example, bin/kafka-console-consumer.sh --zookeeper hadoop-senior.shinelon.com:2181 --topic topic01 --from-beginning) and the KafkaConsumer written above receive the data produced by the producer.

[Figure 1: test output showing the producer's messages received by both consumers]
