# Install Docker CE from the official Docker repository (CentOS 7).
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# -y was missing here: without it, yum stops at a confirmation prompt
# and the step fails when run non-interactively.
yum install -y docker-ce
systemctl start docker
# Pull MySQL 5.7 and run it with log/data/conf directories mounted from
# the host, so the container can be recreated without losing state.
docker pull mysql:5.7
# Each flag is explained in the notes that follow this command.
docker run -p 3306:3306 --name mysql \
-v /mydata/mysql/log:/var/log/mysql \
-v /mydata/mysql/data:/var/lib/mysql \
-v /mydata/mysql/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root \
-d mysql:5.7
-p 3306:3306:将容器的3306端口映射到主机的3306端口
-v /mydata/mysql/conf:/etc/mysql:将配置文件夹挂载到主机
-v /mydata/mysql/log:/var/log/mysql:将日志文件夹挂载到主机
-v /mydata/mysql/data:/var/lib/mysql/:将数据文件夹挂载到主机
-e MYSQL_ROOT_PASSWORD=root:初始化root用户的密码
docker exec -it mysql /bin/bash
docker cp /mydata/mall.sql mysql:/
mysql -uroot -proot --default-character-set=utf8
create database mall character set utf8;
use mall;
source /mall.sql;
grant all privileges on *.* to 'reader' @'%' identified by '123456';
# Pull and start Redis 5 with AOF persistence enabled and the data
# directory mounted from the host.
docker pull redis:5
docker run -p 6379:6379 --name redis \
-v /mydata/redis/data:/data \
-d redis:5 redis-server --appendonly yes
# Open a redis-cli session inside the container to verify it works.
docker exec -it redis redis-cli
# Pull and start canal-server v1.1.4; 11111 is canal's client port,
# the other ports are the image's admin/metrics ports.
docker pull canal/canal-server:v1.1.4
sudo docker run -it --name canal -p 11111:11111 \
-p 8000:8000 -p 2222:2222 -p 1111:1111 -p 11112:11112 \
-p 11110:11110 -d canal/canal-server:v1.1.4
# Re-enter the mysql container to adjust its configuration for canal.
docker exec -it mysql /bin/bash
遇到的问题:在 /etc/mysql 文件夹下没有发现 my.cnf 文件
解决办法:手动创建 my.cnf 文件
# Create/edit /etc/my.cnf (the notes above say /etc/mysql has no my.cnf,
# so the file is created manually here).
vim /etc/my.cnf
如果出现 bash: vim: command not found 这个问题,则先执行 apt-get update,再执行 apt-get install vim 解决。
# Enable the MySQL binlog (canal works by tailing the binlog).
log-bin=/var/lib/mysql/mysql-bin
binlog-format=ROW
# server_id must be unique and must not collide with canal's slaveId.
server_id=1
# Database whose changes are written to the binlog.
binlog-do-db=mall
# Databases excluded from the binlog (recommended).
binlog-ignore-db=mysql
# Start MySQL without loading the grant tables.
# NOTE(review): this disables ALL authentication on the server — anyone
# can connect without a password. Remove this line once the canal
# account has been created and the password works again.
skip-grant-tables
-- Create the replication account that canal-server logs in with.
CREATE USER canal IDENTIFIED BY 'canal';
-- Canal acts as a MySQL replica: it needs SELECT plus the replication
-- privileges, and binlog replication is server-wide, so the grant must
-- be on *.* (the original ALL on mall.ums_admin does not include
-- REPLICATION SLAVE/CLIENT and is not sufficient for canal to connect
-- as a slave — see the canal QuickStart guide).
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
FLUSH PRIVILEGES;
# Enter the canal container and point the "example" instance at MySQL:
# set canal.instance.master.address and the dbUsername/dbPassword pair
# ('canal'/'canal') in instance.properties.
docker exec -it canal /bin/bash
vim canal-server/conf/example/instance.properties
注意:instance.properties 中的 canal.instance.master.address 不要填 127.0.0.1(在 canal 容器内它指向容器自身),否则会出现无法连接 MySQL 的错误;应填写 MySQL 所在主机的地址。
<!-- canal client, version aligned with the canal-server image used above
     (v1.1.4); the original 1.0.25 client speaks an older protocol than a
     1.1.x server. -->
<dependency>
<groupId>com.alibaba.otter</groupId>
<artifactId>canal.client</artifactId>
<version>1.1.4</version>
</dependency>
<!-- Jedis client used by RedisUtil below. -->
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.9.0</version>
</dependency>
package com.macro.mall.canal;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
/**
 * Thin wrapper around a shared {@link JedisPool}, used by the canal sync
 * client to mirror MySQL row changes into Redis.
 *
 * Fixes over the original version:
 * <ul>
 *   <li>every convenience method now returns its connection to the pool
 *       in a finally block — the old code borrowed a connection per call
 *       and never released it, exhausting the pool;</li>
 *   <li>{@code stringSet(key, value, time)} previously called
 *       {@code set(key, value, null, null, time)}, which throws a
 *       NullPointerException inside Jedis; it now uses {@code psetex}
 *       (expiry in milliseconds).</li>
 * </ul>
 */
public class RedisUtil {
    // Redis server address — the docker host from the setup notes above.
    private static String ip = "10.0.0.4";
    private static int port = 6379;
    private static int timeout = 10000;
    private static JedisPool pool = null;

    static {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setMaxTotal(1024);        // max pooled connections
        config.setMaxIdle(200);          // max idle connections kept alive
        config.setMaxWaitMillis(10000);  // max wait for a free connection (ms)
        config.setTestOnBorrow(true);    // validate a connection when borrowed
        pool = new JedisPool(config, ip, port, timeout);
    }

    /**
     * Borrow a connection from the pool; the caller is responsible for
     * releasing it via {@link #close(Jedis)}.
     *
     * @return a pooled connection, or null if the pool was not initialized
     */
    public static Jedis getJedis() {
        if (pool != null) {
            return pool.getResource();
        } else {
            return null;
        }
    }

    /** Return a borrowed connection to the pool (null-safe). */
    public static void close(final Jedis redis) {
        if (redis != null) {
            redis.close();
        }
    }

    /** @return true if {@code key} exists in Redis. */
    public static boolean existKey(String key) {
        Jedis jedis = getJedis();
        try {
            return jedis.exists(key);
        } finally {
            close(jedis);
        }
    }

    /** Delete {@code key}. */
    public static void delKey(String key) {
        Jedis jedis = getJedis();
        try {
            jedis.del(key);
        } finally {
            close(jedis);
        }
    }

    /** @return the string value of {@code key}, or null if absent. */
    public static String stringGet(String key) {
        Jedis jedis = getJedis();
        try {
            return jedis.get(key);
        } finally {
            close(jedis);
        }
    }

    /**
     * Set {@code key} to {@code value} without expiry.
     *
     * @return the Redis status reply, normally "OK"
     */
    public static String stringSet(String key, String value) {
        Jedis jedis = getJedis();
        try {
            return jedis.set(key, value);
        } finally {
            close(jedis);
        }
    }

    /**
     * Set {@code key} to {@code value} with an expiry.
     *
     * @param time time-to-live in milliseconds
     * @return the Redis status reply, normally "OK"
     */
    public static String stringSet(String key, String value, long time) {
        Jedis jedis = getJedis();
        try {
            return jedis.psetex(key, time, value);
        } finally {
            close(jedis);
        }
    }

    /** Set hash field {@code field} of {@code key} to {@code value}. */
    public static void hashSet(String key, String field, String value) {
        Jedis jedis = getJedis();
        try {
            jedis.hset(key, field, value);
        } finally {
            close(jedis);
        }
    }
}
package com.macro.mall.canal;
import java.net.InetSocketAddress;
import java.util.List;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.CanalEntry.Column;
import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
import com.alibaba.otter.canal.client.*;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Canal client that tails the MySQL binlog via canal-server and mirrors
 * row changes into Redis under keys of the form
 * "mall:ums:admin:&lt;value of column index 1&gt;".
 *
 * NOTE(review): the @Bean below is declared on a static void method —
 * Spring rejects void @Bean methods, and even as a plain bean this would
 * block application startup for up to totalEmtryCount seconds; consider
 * an ApplicationRunner that starts this loop on a dedicated thread.
 */
@Configuration
public class CanalSyncConfig {
private static String REDIS_DATABASE = "mall";
private static String REDIS_KEY_ADMIN = "ums:admin";
@Bean
public static void canalSync() {
// Connect to canal-server at 10.0.0.4:11111 (11111 is canal's default
// client port, set in conf/canal.properties); "example" is the instance
// name (canal.destinations=example, renamable there); "canal"/"canal"
// is the MySQL account created for canal.
CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress("10.0.0.4",
11111), "example", "canal", "canal");
int batchSize = 1000;
int emptyCount = 0;
try {
connector.connect();
// Subscribe to every schema.table (overrides the server-side filter).
connector.subscribe(".*\\..*");
connector.rollback();
// Exit after this many consecutive empty polls (~1200 s of inactivity).
int totalEmtryCount = 1200;
while (emptyCount < totalEmtryCount) {
Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without auto-ack
long batchId = message.getId();
int size = message.getEntries().size();
if (batchId == -1 || size == 0) {
emptyCount++;
System.out.println("empty count : " + emptyCount);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
} else {
emptyCount = 0;
printEntry(message.getEntries());
}
connector.ack(batchId); // acknowledge the batch as processed
// connector.rollback(batchId); // on processing failure, roll the batch back instead
}
System.out.println("empty too many times, exit");
} finally {
connector.disconnect();
}
}
// Dispatch each binlog entry: print it and apply the matching Redis
// operation (insert/update -> SET, delete -> DEL).
private static void printEntry(List<Entry> entrys) {
for (Entry entry : entrys) {
// Skip transaction begin/end markers — only row data is of interest.
if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
continue;
}
RowChange rowChage = null;
try {
rowChage = RowChange.parseFrom(entry.getStoreValue());
} catch (Exception e) {
throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
e);
}
EventType eventType = rowChage.getEventType();
System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
eventType));
for (RowData rowData : rowChage.getRowDatasList()) {
if (eventType == EventType.DELETE) {
printColumn(rowData.getBeforeColumnsList());
redisDelete(rowData.getBeforeColumnsList());
} else if (eventType == EventType.INSERT) {
printColumn(rowData.getAfterColumnsList());
redisInsert(rowData.getAfterColumnsList());
} else {
// UPDATE (and any other event type): show before/after, then sync.
System.out.println("-------> before");
printColumn(rowData.getBeforeColumnsList());
System.out.println("-------> after");
printColumn(rowData.getAfterColumnsList());
redisUpdate(rowData.getAfterColumnsList());
}
}
}
}
// Log each column's name, value and update flag for debugging.
private static void printColumn(List<Column> columns) {
for (Column column : columns) {
System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated());
}
}
// Serialize the row as JSON and SET it under the key built from column
// index 1.
// NOTE(review): columns.get(1) assumes the row's key column is at index
// 1 — TODO confirm against the ums_admin schema (the primary key is
// often index 0).
private static void redisInsert(List<Column> columns) {
JSONObject json = new JSONObject();
for (Column column : columns) {
json.put(column.getName(), column.getValue());
}
if (columns.size() > 0) {
RedisUtil.stringSet(REDIS_DATABASE + ":" + REDIS_KEY_ADMIN + ":"
+ columns.get(1).getValue(), json.toJSONString());
}
}
// Identical to redisInsert: Redis SET overwrites, so updates are handled
// the same way; kept as a separate method mirroring the event types.
private static void redisUpdate(List<Column> columns) {
JSONObject json = new JSONObject();
for (Column column : columns) {
json.put(column.getName(), column.getValue());
}
if (columns.size() > 0) {
RedisUtil.stringSet(REDIS_DATABASE + ":" + REDIS_KEY_ADMIN + ":"
+ columns.get(1).getValue(), json.toJSONString());
}
}
// Remove the key for a deleted row (same key-building assumption as
// redisInsert; the JSON built here is unused).
private static void redisDelete(List<Column> columns) {
JSONObject json = new JSONObject();
for (Column column : columns) {
json.put(column.getName(), column.getValue());
}
if (columns.size() > 0) {
RedisUtil.delKey(REDIS_DATABASE + ":" + REDIS_KEY_ADMIN + ":" + columns.get(1).getValue());
}
}
}