实例及接入准备
Kafka实例创建,这里使用阿里云Kafka消息队列。为了方便本地测试,创建公网 + VPC实例,具体创建步骤可参考阿里云消息队列Kafka版的官方文档。
污水处理流量计
公网接入
三种消息发送方式
1、发后即忘(fire-and-forget)
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;
import java.util.Properties;
/**
 * Fire-and-forget producer demo: sends messages without inspecting the result.
 * Fastest of the three styles, but send failures are only surfaced via the
 * client's internal retries and the catch block below.
 */
public class KafkaProducerDemo {

    public static void main(String[] args) {
        // Configure the path of the SASL (JAAS) config file.
        // Like the truststore below, this file must NOT be packed into the jar.
        JavaKafkaConfigurer.configureSasl();
        // Load kafka.properties (bootstrap servers, truststore path, topic, ...).
        Properties kafkaProperties = JavaKafkaConfigurer.getKafkaProperties();

        Properties props = new Properties();
        // Bootstrap endpoint; obtain the endpoint for your topic from the console.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getProperty("bootstrap.servers"));
        // Path to the SSL root-certificate truststore (replace with your own path).
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, kafkaProperties.getProperty("ssl.truststore.location"));
        // Truststore password; fixed value required by the service — do not change.
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "KafkaOnsClient");
        // Public-network access currently requires the SASL_SSL protocol.
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        // SASL mechanism; keep as PLAIN.
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        // Serializers for message key and value.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // Maximum time send()/metadata fetches may block.
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 30 * 1000);
        // Internal retry count for transient send failures.
        props.put(ProducerConfig.RETRIES_CONFIG, 5);
        // Back-off between reconnect attempts.
        props.put(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, 3000);
        // Disable hostname verification (empty algorithm), as required by the endpoint.
        props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

        // KafkaProducer is thread-safe; one instance per process is usually enough.
        // For more throughput a handful of instances (<= 5) may be used.
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);

        // Topic must be created in the console beforehand.
        String topic = kafkaProperties.getProperty("topic");
        String value = "this is the message's value";

        long t1 = System.currentTimeMillis();
        try {
            for (int i = 0; i < 10000; i++) {
                // Fire-and-forget: ignore the returned Future.
                ProducerRecord<String, String> kafkaMessage = new ProducerRecord<>(topic, value + ": " + i);
                producer.send(kafkaMessage);
            }
            // Push everything still buffered to the broker.
            producer.flush();
            long t2 = System.currentTimeMillis();
            System.out.println("发送消息耗时:" + (t2 - t1));
        } catch (Exception e) {
            // Still failing after internal retries — the application must handle this.
            // Common errors: https://help.aliyun.com/document_detail/68168.html?spm=a2c4g.11186623.6.567.2OMgCB
            System.out.println("error occurred");
            e.printStackTrace();
        } finally {
            // Always release network resources, even when sending failed
            // (the original closed only on the success path and leaked on error).
            producer.close();
        }
        System.out.println("消息发送完成!");
    }
}
2、同步(sync)
// Synchronous send: collect the Futures and block on each one to confirm delivery.
// NOTE: the original snippet's generics were stripped by HTML extraction
// ("List> futures = new ArrayList>(128)") and did not compile; restored here.
long t1 = System.currentTimeMillis();
try {
    // Batching the futures speeds things up, but keep the batch size moderate.
    List<Future<RecordMetadata>> futures = new ArrayList<>(128);
    for (int i = 0; i < 10000; i++) {
        // send() returns a Future that completes with the record's metadata.
        ProducerRecord<String, String> kafkaMessage = new ProducerRecord<>(topic, value + ": " + i);
        Future<RecordMetadata> metadataFuture = producer.send(kafkaMessage);
        futures.add(metadataFuture);
    }
    producer.flush();
    for (Future<RecordMetadata> future : futures) {
        // Block until this record is acknowledged (or failed).
        try {
            RecordMetadata recordMetadata = future.get();
            System.out.println("Produce ok:" + recordMetadata.toString());
        } catch (Throwable t) {
            t.printStackTrace();
        }
    }
    producer.close(); // release network resources
    long t2 = System.currentTimeMillis();
    System.out.println("同步发送耗时:" + (t2 - t1));
} catch (Exception e) {
    // Still failing after internal retries — the application must handle this.
    // Common errors: https://help.aliyun.com/document_detail/68168.html?spm=a2c4g.11186623.6.567.2OMgCB
    System.out.println("error occurred");
    e.printStackTrace();
}
3、异步(async)
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;
import java.util.Properties;
public class KafkaProducerDemoAsync {
public static void main(String args[]) {
//设置sasl文件的路径
JavaKafkaConfigurer.configureSasl();
//加载kafka.properties
Properties kafkaProperties = JavaKafkaConfigurer.getKafkaProperties();
Properties props = new Properties();
//设置接入点,请通过控制台获取对应Topic的接入点
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getProperty("bootstrap.servers"));
//设置SSL根证书的路径,请记得将XXX修改为自己的路径
//与sasl路径类似,该文件也不能被打包到jar中
props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, kafkaProperties.getProperty("ssl.truststore.location"));
//根证书store的密码,保持不变
props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "KafkaOnsClient");
//接入协议,目前支持使用SASL_SSL协议接入
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
//SASL鉴权方式,保持不变
props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
//Kafka消息的序列化方式
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
//请求的最长等待时间
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 30 * 1000);
//设置客户端内部重试次数
props.put(ProducerConfig.RETRIES_CONFIG, 5);
//设置客户端内部重试间隔
props.put(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, 3000);
//hostname校验改成空
props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
//构造Producer对象,注意,该对象是线程安全的,一般来说,一个进程内一个Producer对象即可;
//如果想提高性能,可以多构造几个对象,但不要太多,最好不要超过5个
KafkaProducer producer = new KafkaProducer(props);
//构造一个Kafka消息
String topic = kafkaProperties.getProperty("topic"); //消息所属的Topic,请在控制台申请之后,填写在这里
String value = "this is the message's value"; //消息的内容
long t1=System.currentTimeMillis();
try {
for (int i =0; i < 10000; i++) {
//发送消息,并获得一个Future对象
ProducerRecord kafkaMessage = new ProducerRecord(topic, value + ": " + i);
producer.send(kafkaMessage,new MyProducerCallback());
}
// 将缓冲区的全部消息push到broker当中
producer.flush();
producer.close();
long t2=System.currentTimeMillis();
System.out.println("发送消息耗时:" + (t2-t1));
} catch (Exception e) {
//客户端内部重试之后,仍然发送失败,业务要应对此类错误
//参考常见报错: https://help.aliyun.com/document_detail/68168.html?spm=a2c4g.11186623.6.567.2OMgCB
System.out.println("error occurred");
e.printStackTrace();
}
System.out.println("消息发送完成!");
}
/**
* callback 类实现
*/
private static class MyProducerCallback implements Callback {
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e != null) {
e.printStackTrace();
return;
}
System.out.println(recordMetadata.topic());
System.out.println(recordMetadata.partition());
System.out.println(recordMetadata.offset());
System.out.println("Coming in MyProducerCallback");
}
}
}
时间对比
fire-and-forget:发送消息耗时:456
sync:同步发送耗时:613
async:发送消息耗时:713
污水处理流量计