Spring + Kafka client

Notes on using Kafka (setting up the Zookeeper environment is not covered here).

Maven dependencies

		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
			<version>2.2.0.RELEASE</version>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-clients</artifactId>
			<version>2.0.0</version>
		</dependency>

Spring configuration for Kafka

<!-- Define the kafkaTemplate bean; to send messages, just inject this bean and call its send methods -->
	<bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
		<constructor-arg ref="producerFactory" />
		<!-- set the default topic -->
		<property name="defaultTopic" value="your-topic-name" />
	</bean>
<!-- The producerFactory bean required to construct kafkaTemplate -->
	<bean id="producerFactory"
		  class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
		<constructor-arg>
			<ref bean="producerProperties" />
		</constructor-arg>
	</bean>

<!-- Basic producer configuration -->
	<bean id="producerProperties" class="java.util.HashMap">
		<constructor-arg>
			<map>
             <entry key="bootstrap.servers" value="${kafka.producer.bootstrap.servers}" />
               <!-- &lt;!&ndash;<entry key="group.id" value="${group.id}" />&ndash;&gt;-->
                <entry key="retries" value="${kafka.producer.retries}" />
                <entry key="batch.size" value="${kafka.producer.batch.size}" />
                <entry key="linger.ms" value="${kafka.producer.linger.ms}" />
                <entry key="buffer.memory" value="${kafka.producer.buffer.memory}" />
                <entry key="acks" value="${kafka.producer.acks}" />
                <entry key="key.serializer"
                       value="${kafka.producer.key.serializer}" />
                <entry key="value.serializer"
                       value="${kafka.producer.value.serializer}"/>
            </map>
		</constructor-arg>
	</bean>
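
For comparison, the same wiring can also be expressed with Spring Java config. The sketch below is only an equivalent of the XML beans above, with hard-coded values standing in for the ${kafka.producer.*} placeholders and "your-topic-name" as a placeholder topic.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

// Java-config sketch equivalent to the XML beans above (values are placeholders
// standing in for the ${kafka.producer.*} properties).
@Configuration
public class KafkaProducerConfig {

    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        props.put(ProducerConfig.RETRIES_CONFIG, 10);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // String serializers as in the XML; sendDefault(value) in the service below
        // sends a null key, so the Integer key type is never actually serialized
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<Integer, String>(props);
    }

    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        KafkaTemplate<Integer, String> template = new KafkaTemplate<Integer, String>(producerFactory());
        template.setDefaultTopic("your-topic-name"); // same role as the defaultTopic property in XML
        return template;
    }
}

Note that the ${kafka.producer.*} placeholders in the XML version resolve only if a property placeholder configurer (for example <context:property-placeholder>) loads the values shown in the next section.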

Configuration property values

kafka.producer.bootstrap.servers=ip:port
kafka.producer.retries=10
kafka.producer.batch.size=1638
kafka.producer.linger.ms=1
kafka.producer.buffer.memory=33554432
kafka.producer.acks=all
kafka.producer.key.serializer=org.apache.kafka.common.serialization.StringSerializer
kafka.producer.value.serializer=org.apache.kafka.common.serialization.StringSerializer
# Note: max.block.ms is defined here but is not referenced in the producerProperties map above;
# add a matching <entry> there if it should take effect.
kafka.producer.max.block.ms=6000

Java code

// Spring / JDK imports needed by this class; the application-specific types
// (SndService, Record, BaseOperation, OperationFactory, CustomLogger,
// CustomLoggerFactory) come from the project itself.
import java.util.Date;

import javax.annotation.Resource;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SuccessCallback;

@Service("kafKaSndService")
public class KafKaServiceImpl implements SndService {

    private static CustomLogger logger = CustomLoggerFactory.getCustomLogger(KafKaServiceImpl.class);

    @Resource
    private KafkaTemplate<Integer, String> kafkaTemplate;

    public void kafkaSendMsg(final Record record) {
        try {
            logger.info("kafka request payload: " + record.getRequesttext());
            // Send to the default topic configured on the template
            ListenableFuture<SendResult<Integer, String>> future =
                    kafkaTemplate.sendDefault(record.getRequesttext());

            // Callback invoked when the send succeeds
            SuccessCallback<SendResult<Integer, String>> successCallback =
                    new SuccessCallback<SendResult<Integer, String>>() {
                @Override
                public void onSuccess(SendResult<Integer, String> result) {
                    // Business logic on success
                    record.setStatus("1");
                    logger.info("kafka send succeeded: " + record.getRequesttext());
                    saveRecord(record);
                }
            };
            // Callback invoked when the send fails
            FailureCallback failureCallback = new FailureCallback() {
                @Override
                public void onFailure(Throwable ex) {
                    // Business logic on failure
                    record.setStatus("2");
                    logger.info("kafka send failed: " + record.getRequesttext());
                    saveRecord(record);
                }
            };
            future.addCallback(successCallback, failureCallback);
        } catch (Exception e) {
            record.setStatus("2");
            logger.info("kafka send failed: " + record.getRequesttext());
            saveRecord(record);
            e.printStackTrace();
        }
    }

    // Persist the send record (shared by the success, failure and exception paths)
    @SuppressWarnings("unchecked")
    private void saveRecord(Record record) {
        try {
            BaseOperation<Record> recordOperation =
                    (BaseOperation<Record>) OperationFactory.getInstance().getBaseOperation("RecordOperation");
            record.setSendtimes(new Short("1"));
            record.setInserttimeforhis(new Date());
            recordOperation.insert(record);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
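
If the callback-based flow above is more than a caller needs, KafkaTemplate can also be used for a blocking send. Below is a minimal sketch of such a variant; the method name, topic and timeout are illustrative and not part of the original service.

// Hypothetical blocking variant that could sit alongside kafkaSendMsg above;
// get(...) waits for the broker acknowledgement and throws if the send fails.
public void kafkaSendMsgSync(String topic, String message) throws Exception {
    SendResult<Integer, String> result =
            kafkaTemplate.send(topic, message).get(10, java.util.concurrent.TimeUnit.SECONDS);
    logger.info("sent to partition " + result.getRecordMetadata().partition()
            + " at offset " + result.getRecordMetadata().offset());
}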

Kafka command-line reference


    All of the following commands use paths relative to the installation directory (cd /kafka/kafka_2.11-1.1.1 first).

1. Start the services (in the background with nohup)
	(1) Start Zookeeper first:    nohup bin/zookeeper-server-start.sh config/zookeeper.properties &
	(2) Then start Kafka:         nohup bin/kafka-server-start.sh config/server.properties &

2. Stop the services
	(1) Stop Kafka first:    bin/kafka-server-stop.sh
	(2) Then stop Zookeeper (check whether other services are using this Zookeeper; if so, do not stop it):
    bin/zookeeper-server-stop.sh

	
3. Topic operations
	(1) Create a topic: bin/kafka-topics.sh --create --zookeeper ip:port --replication-factor 1 --partitions 1 --topic test
				Parameters: --zookeeper            IP and port of the Zookeeper service (the Zookeeper port is usually 2181)
				            --replication-factor   number of replicas for the data
				            --partitions           number of partitions for the topic
				            --topic                topic name
	(2) List topics: bin/kafka-topics.sh --list --zookeeper ip:port
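
The same two operations can be done in Java with the kafka-clients dependency already declared above. This is only a rough sketch; the broker address and topic name are placeholders, and note that AdminClient connects to the Kafka brokers (bootstrap servers) rather than to Zookeeper as the CLI does.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

// Rough Java equivalent of the kafka-topics.sh commands above
// (assumption: "ip:port" is the broker address and "test" is the placeholder topic).
public class TopicAdmin {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        try (AdminClient admin = AdminClient.create(props)) {
            // --create --replication-factor 1 --partitions 1 --topic test
            admin.createTopics(Collections.singleton(new NewTopic("test", 1, (short) 1))).all().get();
            // --list
            System.out.println(admin.listTopics().names().get());
        }
    }
}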

	


Script directory
/kafka/kafka_2.11-1.1.1/bin/

List topics
kafka-topics.sh --list --zookeeper ip:port

View the messages in a topic from the console
kafka-console-consumer.sh --bootstrap-server ip:port --topic ****** --from-beginning
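
The console consumer can also be reproduced in Java with the kafka-clients dependency. A minimal sketch, reading the topic from the beginning; the broker address, group id and topic name are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Rough Java equivalent of "kafka-console-consumer.sh --from-beginning"
// (assumption: "ip:port", the group id and the topic name are placeholders).
public class TopicViewer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "topic-viewer");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props)) {
            consumer.subscribe(Collections.singletonList("your-topic-name"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.offset() + ": " + record.value());
                }
            }
        }
    }
}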

