通过前面的几节,我们已经把web入口端的数据和db数据变更的数据都发到mq里去了。现在我们只需要从mq里取数据,做一些少量的改动写到仓储里就行了。本例使用的是mysql作为仓储,当然这种数据最好选择NoSQL来存储。在我们公司实际使用的是es作为仓储,每月产生的数据大概在2亿多条(并不是所有的表都监控),而且我们只保存最近半年的数据。
来吧,直接上主要的代码:
db实体对象:
package com.lang.oliver.analysis.domain;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.Column;
import javax.persistence.Id;
import javax.persistence.Table;
import java.util.Date;
/**
 * Audit record for a single MySQL data change captured from the binlog
 * (published by canal to Kafka and consumed by {@code LogConsumer}).
 * <p>
 * NOTE(review): the class mixes JPA ({@code javax.persistence}) mapping
 * annotations with the MyBatis-Plus {@code @TableId} — presumably both
 * frameworks read this entity; confirm against the repository layer.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@Table(name = "audit_log")
public class AuditLog {
@Id
@TableId(type = IdType.AUTO)
private Integer id;
/**
 * Name of the database the change occurred in.
 */
private String db;
/**
 * Name of the table the change occurred in.
 */
@Column(name = "db_table")
private String dbTable;
/**
 * Id of the operator who made the change; -1 when unknown.
 */
@Column(name = "operator_id")
private Integer operatorId;
/**
 * Operation type: insert / update / delete.
 */
@Column(name = "operation_type")
private String operationType;
/**
 * Row data before the change, serialized as JSON.
 */
@Column(name = "old_data")
private String oldData;
/**
 * Row data after the change, serialized as JSON.
 */
@Column(name = "new_data")
private String newData;
/**
 * Timestamp (millis) at which the change was executed.
 */
@Column(name = "execution_time")
private Long executionTime;
/**
 * The SQL statement that was executed.
 */
@Column(name = "db_sql")
private String dbSql;
/**
 * Distributed-trace id linking this change to its web entry point
 * (joins against {@code EntryLog.traceId}).
 */
@Column(name = "trace_id")
private String traceId;
/**
 * Primary key value(s) of the affected MySQL row(s); stored so the full
 * lifecycle of a record can be queried by id.
 */
@Column(name = "primary_ids")
private String primaryIds;
@Column(name = "create_time")
private Date createTime;
}
package com.lang.oliver.analysis.domain;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;
import lombok.NoArgsConstructor;
import javax.persistence.Column;
import javax.persistence.Id;
import javax.persistence.Table;
import java.util.Date;
/***
 * Web entry-point record: one row per incoming request, captured at the
 * application boundary and published to Kafka for auditing.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@Table(name = "entry_log")
public class EntryLog {
@Id
@TableId(type = IdType.AUTO)
private Integer id;
/**
 * Id of the logged-in user who issued the request.
 */
@Column(name = "customer_id")
private Integer customerId;
/**
 * Trace id generated per request; links this entry to the db-change
 * records ({@code AuditLog.traceId}).
 */
@Column(name = "trace_id")
private String traceId;
@Column(name = "project_name")
private String projectName;
@Column(name = "class_name")
private String className;
@Column(name = "method_name")
private String methodName;
@Column(name = "request_time")
private Date requestTime;
private String parameters;
/**
 * Response payload. Whether to record it depends on the situation — e.g.
 * a very large response (an export operation) would hurt performance.
 */
private String result;
@Column(name = "create_time")
private Date createTime;
}
消费者代码:
package com.lang.oliver.analysis.consumer;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.lang.oliver.analysis.consumer.event.AuditLogEvent;
import com.lang.oliver.analysis.consumer.event.EntryLogEvent;
import com.lang.oliver.analysis.domain.AuditLog;
import com.lang.oliver.analysis.domain.EntryLog;
import com.lang.oliver.analysis.dto.OperationContext;
import com.lang.oliver.analysis.repository.AuditLogRepository;
import com.lang.oliver.analysis.repository.EntryLogRepository;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;
import org.springframework.util.ObjectUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
@Slf4j
@Component
public class LogConsumer {
@Autowired
private EntryLogRepository entryLogRepository;
@Autowired
private AuditLogRepository auditLogRepository;
private ObjectMapper objectMapper = new ObjectMapper();
/**
 * Consumes web entry-point trace events and persists them as
 * {@link EntryLog} rows.
 *
 * @param content JSON payload representing an {@code EntryLogEvent}
 */
@KafkaListener(topics = "common_req_trace_log", containerFactory = "logContainerFactory")
public void entryLogTopicListener(@Payload String content) {
    log.info("收到入口消息:{}", content);
    EntryLogEvent logInfoRequest;
    try {
        logInfoRequest = objectMapper.readValue(content, EntryLogEvent.class);
    } catch (JsonProcessingException e) {
        // Fix: the original called e.printStackTrace() and fell through with a
        // null event, so BeanUtils.copyProperties(null, ...) threw an NPE on
        // every malformed message. Log with the cause and skip the message.
        log.error("入口消息反序列化失败, content={}", content, e);
        return;
    }
    EntryLog entryLogInfo = new EntryLog();
    BeanUtils.copyProperties(logInfoRequest, entryLogInfo);
    entryLogRepository.insert(entryLogInfo);
}
/**
 * Consumes MySQL change events published by canal and persists them as
 * {@link AuditLog} rows.
 * <p>
 * The topic name must match the topic configured in canal.
 *
 * @param content JSON payload representing an {@code AuditLogEvent}
 */
@KafkaListener(topics = "audit_test1", containerFactory = "logContainerFactory")
public void auditLogTopicListener(@Payload String content) {
    log.info("收到mysql变更消息:{}", content);
    AuditLogEvent auditLogEvent = JSONObject.parseObject(content, AuditLogEvent.class);
    AuditLog auditLog = new AuditLog();
    auditLog.setDb(auditLogEvent.getDatabase());
    auditLog.setDbTable(auditLogEvent.getTable());
    auditLog.setOperationType(auditLogEvent.getType());
    // Serialize with WriteMapNullValue so null columns are kept in the JSON.
    auditLog.setOldData(JSON.toJSONString(auditLogEvent.getOld(), SerializerFeature.WriteMapNullValue));
    auditLog.setNewData(JSON.toJSONString(auditLogEvent.getData(), SerializerFeature.WriteMapNullValue));
    auditLog.setExecutionTime(auditLogEvent.getEs());
    auditLog.setCreateTime(new Date());
    auditLog.setDbSql(auditLogEvent.getSql());
    // The trace id / operator id are embedded in the SQL comment by the
    // producing side and recovered here from the statement text.
    OperationContext operationContext = evalOperationContext(auditLogEvent.getSql());
    auditLog.setTraceId(operationContext.getTraceId());
    auditLog.setOperatorId(parseOperatorId(operationContext.getOperatorId()));
    // Storing the primary key(s) makes it easy (especially in a NoSQL store)
    // to query the full lifecycle of a row by its id.
    auditLog.setPrimaryIds(getUpdatedPrimaryKey(auditLogEvent.getData()));
    auditLogRepository.insert(auditLog);
}

/**
 * Parses the operator id extracted from the SQL context.
 * <p>
 * Fix: the original called {@code Integer.parseInt} unguarded, so a blank or
 * non-numeric operator id threw {@link NumberFormatException} and lost the
 * whole message. Falls back to -1 ("unknown operator") instead.
 *
 * @param operatorId raw operator id string; may be null, blank, or non-numeric
 * @return the parsed id, or -1 when absent or malformed
 */
private Integer parseOperatorId(String operatorId) {
    if (StringUtils.isBlank(operatorId)) {
        return -1;
    }
    try {
        return Integer.valueOf(operatorId);
    } catch (NumberFormatException e) {
        log.warn("非法的operatorId:{}", operatorId);
        return -1;
    }
}
private String getUpdatedPrimaryKey(List
其实消费者解析这块的逻辑很简单,只负责存储。如果你要做界面展示的话,还需要单独写一个查询服务,这两个实体的关联关系就是traceId。这个比较简单,我就不做了。
整个项目的代码地址我放在github上了:
https://github.com/waterlang/audit-system
https://github.com/waterlang/canal-data-sql
如果对你有些帮助,可以star一下,谢谢咯.