In real-world projects it is common for a single application to read from and write to several different Kafka clusters. Spring Cloud Stream supports this through its multiple-binder configuration. Here is a demo configuration:
spring:
  cloud:
    stream:
      # Make kafka the default binder (commented out here because each
      # binding below selects its binder explicitly)
      # default-binder: kafka
      # kafka:
      #   # Properties from KafkaBinderConfigurationProperties
      #   binder:
      #     brokers: name87:9094
      #     zkNodes: name85:2181,name86:2181,name87:2181/kafka0101
      #     # Custom headers must be declared here, otherwise they will
      #     # not appear in the outgoing message
      #     # headers: myType
      #     configuration:
      #       auto:
      #         offset:
      #           # Native Kafka properties can be set here, e.g. make a
      #           # new consumer group start from the latest offset
      #           reset: latest
      # Properties from BindingProperties
      bindings:
        # Channel name; must match the value used in the @StreamListener annotation
        input_1:
          binder: kafka1
          consumer:
            headerMode: raw
          producer:
            headerMode: raw
          # Kafka topic the channel is bound to
          destination: cloud-test10
          # Consumer group
          # group: cloud-test2-group1
          # content-type: application/json
        input_2:
          binder: kafka2
          consumer:
            headerMode: raw
          producer:
            headerMode: raw
          destination: cloud-test10
          # group: cloud-test2-group1
          # content-type: application/json
        output_1:
          binder: kafka1
          consumer:
            headerMode: raw
          producer:
            headerMode: raw
          destination: cloud-test10
          # content-type: application/json
        output_2:
          binder: kafka2
          consumer:
            headerMode: raw
          producer:
            headerMode: raw
          destination: cloud-test10
      binders:
        kafka1:
          type: kafka
          environment:
            spring:
              cloud:
                stream:
                  kafka:
                    binder:
                      brokers: name87:9094
                      zkNodes: name85:2181,name86:2181,name87:2181/kafka0101
        kafka2:
          type: kafka
          environment:
            spring:
              cloud:
                stream:
                  kafka:
                    binder:
                      brokers: name85:9094
                      zkNodes: name85:2181,name86:2181,name87:2181/kafkatest0101
Here we configure two binders, kafka1 and kafka2, each pointing at a different Kafka cluster. The bindings section defines two pairs of input/output channels: input_1 and output_1 are mapped to kafka1, while input_2 and output_2 are mapped to kafka2.
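If most channels live on a single cluster, you can also set the default-binder property (shown commented out at the top of the config above) so that only the exceptions need an explicit binder entry. A minimal sketch:

spring:
  cloud:
    stream:
      # Bindings without an explicit "binder" property fall back to kafka1
      default-binder: kafka1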
Next, define the input and output channels:
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.messaging.SubscribableChannel;

public interface MySink {
    // Input channel 1 (bound to kafka1)
    String INPUT_1 = "input_1";
    // Input channel 2 (bound to kafka2)
    String INPUT_2 = "input_2";

    @Input(MySink.INPUT_1)
    SubscribableChannel input1();

    @Input(MySink.INPUT_2)
    SubscribableChannel input2();
}
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.messaging.MessageChannel;

public interface MySource {
    // Output channel 1 (bound to kafka1)
    String OUTPUT_1 = "output_1";
    // Output channel 2 (bound to kafka2)
    String OUTPUT_2 = "output_2";

    @Output(MySource.OUTPUT_1)
    MessageChannel output1();

    @Output(MySource.OUTPUT_2)
    MessageChannel output2();
}
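The channel interfaces only take effect once they are activated with @EnableBinding. MySink is bound on the KafkaReceiver class below; MySource has to be bound somewhere as well, otherwise the MySource bean injected into the controller will not exist. A minimal sketch of the application class (the class name is an assumption; the original post does not show this class):

import com.example.kafkastreamdemo.source.MySource;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;

// Binds MySource so it can be @Autowired into MyController below;
// MySink is already bound on KafkaReceiver.
@SpringBootApplication
@EnableBinding(MySource.class)
public class KafkaStreamDemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaStreamDemoApplication.class, args);
    }
}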
import com.example.kafkastreamdemo.model.MyModel;
import com.example.kafkastreamdemo.source.MySource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class MyController {
    private final Logger logger = LoggerFactory.getLogger(MyController.class);

    @Autowired
    private MySource source;

    @RequestMapping("/send")
    public void sendMessage(@RequestParam("type") String type, @RequestParam("message") String message) {
        try {
            MyModel myModel = new MyModel(message);
            // source.output1().send(MessageBuilder.createMessage(myModel, new MessageHeaders(Collections.singletonMap("myType", type))));
            source.output1().send(MessageBuilder.withPayload(myModel).setHeader("myType", type).build());
        } catch (Exception e) {
            logger.info("Failed to send message: " + e);
            e.printStackTrace();
        }
    }

    @RequestMapping("/send2")
    public void sendMessage2(@RequestParam("message") String message) {
        try {
            source.output2().send(MessageBuilder.withPayload(message).build());
        } catch (Exception e) {
            logger.info("Failed to send message: " + e);
            e.printStackTrace();
        }
    }
}
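MyModel is imported above but not shown in this section; here is a minimal sketch consistent with how it is used (a single message field, matching the constructor call above and the getMessage() calls in the commented-out listener below):

package com.example.kafkastreamdemo.model;

// Minimal payload class. The no-arg constructor and setter are needed
// for JSON deserialization if content-type: application/json is enabled.
public class MyModel {
    private String message;

    public MyModel() {
    }

    public MyModel(String message) {
        this.message = message;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }
}

With the application running, GET /send?type=foo&message=hello publishes a MyModel to cloud-test10 on kafka1, and GET /send2?message=hello publishes a plain string to the topic of the same name on kafka2.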
import com.example.kafkastreamdemo.sink.MySink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;

@EnableBinding(MySink.class)
public class KafkaReceiver {
    private static Logger logger = LoggerFactory.getLogger(KafkaReceiver.class);

    // @StreamListener(value = MySink.INPUT_1)
    // @SendTo(MySource.OUTPUT_2) // the return value can be forwarded to another topic
    // public MyModel receive(MyModel payload) {
    //     logger.info("received:" + payload.getMessage());
    //     MyModel myModel = new MyModel("received:" + payload.getMessage());
    //     return myModel;
    // }
    //
    // // Only consume messages whose myType header equals foo; when a
    // // condition is used, the method must not have a return value
    // @StreamListener(value = MySink.INPUT_2, condition = "headers['myType']=='foo'")
    // public void receiveByHeaderCondition(MyModel payload) {
    //     logger.info("received header=foo message:" + payload.getMessage());
    // }

    @StreamListener(value = MySink.INPUT_1)
    public void receive(String payload) {
        logger.info("INPUT_1 received:" + payload);
    }

    @StreamListener(value = MySink.INPUT_2)
    public void receive2(String payload) {
        logger.info("INPUT_2 received:" + payload);
    }
}
The receive method consumes messages from kafka1, and receive2 consumes messages from kafka2.
[Figure: overall project directory structure]
Official reference: http://cloud.spring.io/spring-cloud-static/Dalston.SR3/#multiple-binders