postgresql
postgresql.conf
listen_addresses = '*'
shared_buffers = 128MB
dynamic_shared_memory_type = posix
wal_level = logical
max_wal_size = 1GB
min_wal_size = 80MB
max_wal_senders = 8
wal_keep_segments = 4
max_replication_slots = 4
pg_hba.conf
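A sketch of entries that allow the Debezium connector to connect and use logical replication (the CIDR range and auth method here are placeholders to adapt to your network):
host    all             all             172.18.4.0/24           md5
host    replication     all             172.18.4.0/24           md5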
Restart PostgreSQL
service postgresql restart
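To confirm the change took effect after the restart (assuming local psql access as the postgres superuser):
psql -U postgres -c "SHOW wal_level;"    # expected output: logical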
ZooKeeper and Kafka
- Download
https://mirrors.aliyun.com/apache/
Download the binary tarball apache-zookeeper-x.x.x-bin.tar.gz; apache-zookeeper-x.x.x.tar.gz is the source package.
- Then extract it to the target directory
tar -xzvf
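For example, assuming the layout used later in these notes under /usr/local/zgxsoft/etl (file names match the downloads above):
mkdir -p /usr/local/zgxsoft/etl
tar -xzvf apache-zookeeper-x.x.x-bin.tar.gz -C /usr/local/zgxsoft/etl
tar -xzvf kafka_2.12-x.x.x.tgz -C /usr/local/zgxsoft/etl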
Debezium
- Download the Debezium connector for your database
https://debezium.io/documentation/reference/1.0/install.html
- Unpack it and copy the jars into Kafka's libs directory, for example:
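A sketch with an assumed connector version (the archive name follows the Debezium download page; <kafka> stands for the Kafka installation directory):
tar -xzvf debezium-connector-postgres-1.1.0.Final-plugin.tar.gz
cp debezium-connector-postgres/*.jar <kafka>/libs/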
Configuration
The server's eth0 (internal) IP is 172.18.4.109 and its public IP is 8.129.215.124.
kafka
mkdir /usr/local/zgxsoft/etl/kafka/log
connect-distributed.properties
bootstrap.servers=172.18.4.109:9092
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter.schemas.enable=true
value.converter.schemas.enable=true
offset.storage.topic=connect-offsets
offset.storage.replication.factor=1
config.storage.topic=connect-configs
config.storage.replication.factor=1
status.storage.topic=connect-status
status.storage.replication.factor=1
offset.flush.interval.ms=10000
server.properties
broker.id=0
listeners=PLAINTEXT://172.18.4.109:9092
host.name=172.18.4.109
# clients outside the LAN connect through the public IP
advertised.listeners=PLAINTEXT://8.129.215.124:9092
num.network.threads=3
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/zgxsoft/etl/kafka/log
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=172.18.4.109:2181
zk
mkdir /usr/local/zgxsoft/etl/zk/log
mkdir /usr/local/zgxsoft/etl/zk/data
cp zookeeper-3.4.14/conf/zoo_sample.cfg zookeeper-3.4.14/conf/zoo.cfg
zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zgxsoft/etl/zk/data
dataLogDir=/usr/local/zgxsoft/etl/zk/log
clientPort=2181
kafka connector
Add the PostgreSQL connector by POSTing its configuration to the Kafka Connect REST API (once the Connect worker from the Startup section is running), for example:
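A minimal registration sketch. The database name, user, password, and table are assumptions to adapt; database.server.name=track_iqifu matches the topic consumed below, and plugin.name=pgoutput assumes PostgreSQL 10+ with wal_level=logical:
curl -X POST -H "Content-Type: application/json" http://172.18.4.109:8083/connectors -d '{
  "name": "track-iqifu-connector",
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "plugin.name": "pgoutput",
    "database.hostname": "172.18.4.109",
    "database.port": "5432",
    "database.user": "postgres",
    "database.password": "******",
    "database.dbname": "track_iqifu",
    "database.server.name": "track_iqifu",
    "table.whitelist": "public.track_iqifu"
  }
}'
With this server name, change events for public.track_iqifu land on the topic track_iqifu.public.track_iqifu, which is what the Spring Boot consumer below subscribes to.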
Startup
- Start ZooKeeper
sh zkServer.sh start
- Start Kafka
./kafka-server-start.sh -daemon config/server.properties
- Start the Kafka Connect worker (it needs the broker up, since it stores its configs, offsets, and status in Kafka topics)
sh connect-distributed.sh ../../config/connect-distributed.properties
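Illustrative checks that everything is up:
./kafka-topics.sh --list --zookeeper 172.18.4.109:2181
curl http://172.18.4.109:8083/connectors    # should list the registered PostgreSQL connector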
Spring Boot consumer
import java.time.Duration;

public class MQDict {
    public static final String MQ_ADDRESS_COLLECTION = "8.129.215.124:9092"; // Kafka cluster address
    public static final String CONSUMER_TOPIC = "track_iqifu.public.track_iqifu"; // topic the consumer subscribes to
    public static final String CONSUMER_GROUP_ID = "iqifu_group_001"; // consumer group id; can be configured separately
    public static final String CONSUMER_ENABLE_AUTO_COMMIT = "true"; // whether the consumer auto-commits offsets
    /*
     * Automatic offset commit: with enable.auto.commit set to true, the consumer periodically commits
     * the largest offset returned by poll(). The interval is controlled by auto.commit.interval.ms
     * (default 5s).
     */
    public static final String CONSUMER_AUTO_COMMIT_INTERVAL_MS = "1000";
    public static final String CONSUMER_SESSION_TIMEOUT_MS = "30000"; // session timeout (ms)
    public static final int CONSUMER_MAX_POLL_RECORDS = 10; // max records per poll
    public static final Duration CONSUMER_POLL_TIME_OUT = Duration.ofMillis(6000); // poll timeout
    /*
     * Controls what the consumer does when it reads a partition that has no committed offset, or whose
     * offset is invalid (e.g. the consumer was down so long that the record holding the offset was deleted).
     * The default is latest: start from the newest records (those produced after the consumer started).
     * The other option is earliest: start reading the partition from the beginning.
     */
    public static final String AUTO_OFFSET_RESET = "earliest";
    // client.id is a user-defined string sent with every request to help trace calls;
    // it should logically identify the application making the request.
    public static final String CLIENT_ID = "client_1";
}
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MQConsumer {
    private Logger logger = LoggerFactory.getLogger(getClass());
    private static KafkaConsumer<String, String> consumer;

    /**
     * Initialize the consumer.
     */
    static {
        Properties configs = initConfig();
        consumer = new KafkaConsumer<>(configs);
        /*
         * After creating the consumer, subscribe to the topic. subscribe() takes a list of topics;
         * it can also take a regular expression matching several topics (if a new topic matching the
         * pattern is created, a rebalance is triggered immediately and the consumer starts reading it).
         */
        consumer.subscribe(Arrays.asList(MQDict.CONSUMER_TOPIC));
    }

    /**
     * Build the consumer configuration.
     */
    private static Properties initConfig() {
        Properties props = new Properties();
        props.put("bootstrap.servers", MQDict.MQ_ADDRESS_COLLECTION);
        props.put("group.id", MQDict.CONSUMER_GROUP_ID);
        props.put("enable.auto.commit", MQDict.CONSUMER_ENABLE_AUTO_COMMIT);
        props.put("auto.commit.interval.ms", MQDict.CONSUMER_AUTO_COMMIT_INTERVAL_MS);
        props.put("session.timeout.ms", MQDict.CONSUMER_SESSION_TIMEOUT_MS);
        props.put("max.poll.records", MQDict.CONSUMER_MAX_POLL_RECORDS);
        props.put("auto.offset.reset", MQDict.AUTO_OFFSET_RESET);
        props.put("client.id", MQDict.CLIENT_ID);
        // key deserializer
        props.put("key.deserializer", StringDeserializer.class.getName());
        // value deserializer
        props.put("value.deserializer", StringDeserializer.class.getName());
        return props;
    }

    public void exeConsumer(IqifuBakDAO iqifuBakDAO) {
        try {
            while (true) {
                /*
                 * poll() returns a batch of records. Each record carries the topic it belongs to,
                 * the partition it came from, its offset within that partition, and the key/value pair.
                 */
                ConsumerRecords<String, String> records = consumer.poll(MQDict.CONSUMER_POLL_TIME_OUT);
                if (records != null) {
                    records.forEach((ConsumerRecord<String, String> record) -> {
                        // print the offset, key, and value
                        System.out.printf("offset = %d, key = %s, value = %s%n",
                                record.offset(), record.key(), record.value());
                        // insert into the database
                        iqifuBakDAO.insert(record);
                    });
                }
            }
        } finally {
            // Closing the consumer also closes its network connections and sockets,
            // and immediately triggers a rebalance.
            consumer.close();
        }
    }
}
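Since value.converter.schemas.enable=true in connect-distributed.properties, record.value() arrives as a JSON string that wraps the Debezium change event in a schema/payload envelope; the row image sits under payload.after (null for deletes and tombstones). A minimal sketch for extracting it with Jackson (this helper class and how IqifuBakDAO would use it are assumptions, not part of the original code):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class DebeziumValueParser {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Returns the "after" image of the change event, or null for deletes/tombstones. */
    public static JsonNode extractAfter(String value) throws Exception {
        if (value == null) {
            return null; // tombstone record
        }
        JsonNode after = MAPPER.readTree(value).path("payload").path("after");
        return (after.isMissingNode() || after.isNull()) ? null : after;
    }
}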
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.stereotype.Component;

/**
 * Spring Boot listener that starts consuming once the application context has been refreshed.
 */
@Component
public class ApplicationStartup implements ApplicationListener<ContextRefreshedEvent> {
    private Logger logger = LoggerFactory.getLogger(getClass());

    @SuppressWarnings("all")
    @Autowired
    IqifuBakDAO iqifuBakDAO;

    @Override
    public void onApplicationEvent(ContextRefreshedEvent contextRefreshedEvent) {
        logger.info("######################## DAO injected, starting consumer ########################");
        new MQConsumer().exeConsumer(iqifuBakDAO);
    }
}
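Note that exeConsumer() loops forever, so calling it directly inside onApplicationEvent blocks the thread delivering ContextRefreshedEvent. One option (a sketch, not part of the original code) is to hand the loop to its own thread:
new Thread(() -> new MQConsumer().exeConsumer(iqifuBakDAO), "kafka-consumer").start();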
References:
https://debezium.io/documentation/reference/1.1/connectors/postgresql.html
https://blog.csdn.net/liu_c_y/article/details/103537490
https://blog.csdn.net/u012551524/article/details/82798066/