References: the official MyBatis-Plus documentation and the GitHub repository.
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
    <version>3.0.5</version>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>
spring:
  datasource:
    username: xxx
    password: xxx
    url: jdbc:mysql://120.79.18.7:32553/test_xf?characterEncoding=utf-8&useSSL=false
    # For MySQL Connector/J 8.x the driver class is com.mysql.cj.jdbc.Driver
    driver-class-name: com.mysql.jdbc.Driver
/**
 * 1. The class name matches the table name
 * 2. The fields match the table columns
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class Employee {
    private Integer id;
    private String lastName;
    private String email;
    private String gender;
    private Integer age;
}
// The corresponding Mapper extends the base interface BaseMapper
@Repository
public interface EmployeeMapper extends BaseMapper<Employee> {
    /**
     * All the basic CRUD methods are now available without writing any SQL
     */
}
@MapperScan("com.bnmzy.mybatisplus.mapper")
@SpringBootApplication
public class MybatisPlus01Application {
}

Test code (in the Spring Boot test class):

/**
 * 1. EmployeeMapper extends BaseMapper, so all the basic methods come from the parent interface
 * 2. You can also write your own extension methods (see the sketch after this block)
 */
@Autowired
private EmployeeMapper employeeMapper;

@Test
void testList(){
    // The parameter is a Wrapper (condition builder); we don't need it yet, so pass null
    // Query all employees
    List<Employee> employees = employeeMapper.selectList(null);
    employees.forEach(System.out::println);
}
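On point 2 above, a minimal sketch of what a self-written extension method could look like. The method name findByGender, the @Select SQL, and the employee table name are illustrative assumptions, not part of the original project:

```java
import org.apache.ibatis.annotations.Select;

@Repository
public interface EmployeeMapper extends BaseMapper<Employee> {

    // Hypothetical extension method living alongside the inherited CRUD methods
    @Select("select * from employee where gender = #{gender}")
    List<Employee> findByGender(String gender);
}
```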
All of the SQL is invisible right now; to see how it actually executes we need to turn on logging.
# Configure logging (print SQL to standard output)
mybatis-plus:
  configuration:
    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
If the entity object does not set an id (primary key), one is generated automatically (see the primary-key generation strategies below). If the database column is auto-increment this causes an error, in which case you need to annotate the primary-key field with @TableId, as described below.
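For the auto-increment case just mentioned, the primary-key field would be annotated like this (a minimal fragment of the entity, assuming the table's id column is AUTO_INCREMENT):

```java
public class Employee {
    // Let the database generate the key instead of the default id strategy
    @TableId(type = IdType.AUTO)
    private Integer id;
    // ... other fields as above
}
```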
Insert test:
Employee employee = new Employee();
employee.setLastName("Acai");
employee.setAge(22);
employee.setEmail("[email protected]");
employee.setGender("1");
// employee.setId(5);
// The id is generated automatically
int insert = employeeMapper.insert(employee);
System.out.println(insert);
// The generated id is written back into the object
System.out.println(employee);
Snowflake algorithm:
1 bit (fixed 0, unused) + 41 bits (timestamp) + 5 bits (datacenter id) + 5 bits (machine id) + 12 bits (sequence, i.e. up to 4096 IDs per millisecond per machine)
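A minimal sketch of how those bit fields compose into one 64-bit value. This is illustration only: the class name, field values, and EPOCH constant are made up, and it omits the clock-rollback handling and the sequence-overflow wait that a real generator needs:

```java
public class SnowflakeSketch {
    // Hypothetical custom epoch; real generators pick their own start time
    private static final long EPOCH = 1577836800000L; // 2020-01-01

    private final long datacenterId = 1; // 5 bits
    private final long machineId = 1;    // 5 bits
    private long sequence = 0L;          // 12 bits
    private long lastTimestamp = -1L;

    public synchronized long nextId() {
        long timestamp = System.currentTimeMillis();
        if (timestamp == lastTimestamp) {
            // Same millisecond: bump the 12-bit sequence (wraps at 4096;
            // a real implementation would wait for the next millisecond on overflow)
            sequence = (sequence + 1) & 0xFFF;
        } else {
            sequence = 0L;
        }
        lastTimestamp = timestamp;
        return ((timestamp - EPOCH) << 22) // 41-bit timestamp
                | (datacenterId << 17)     // 5-bit datacenter id
                | (machineId << 12)        // 5-bit machine id
                | sequence;                // 12-bit sequence
    }
}
```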
// Snowflake ids are 64-bit, so the field should be Long rather than Integer
@TableId(type = IdType.ID_WORKER)
private Long id;
public @interface TableId {
    String value() default "";
    IdType type() default IdType.NONE;
}
3. IdType
```java
public enum IdType {
    // Database auto-increment ID
    AUTO(0),
    // No primary-key type set
    NONE(1),
    /**
     * User-provided ID
     * This type can be filled by registering your own auto-fill plugin
     */
    INPUT(2),
    /* The following 3 types are auto-filled only when the inserted object's ID is null. */
    // Globally unique ID (idWorker / snowflake)
    ID_WORKER(3),
    // Globally unique ID (UUID)
    UUID(4),
    // Globally unique string ID (string form of idWorker)
    ID_WORKER_STR(5);

    private int key;

    IdType(int key) {
        this.key = key;
    }
}
```
Update test:
Employee employee = new Employee();
employee.setId(5);
// Dynamic SQL is stitched together automatically from the non-null fields
employee.setLastName("mr");
// The argument is of type T, the generic type declared on the mapper
int result = employeeMapper.updateById(employee);
System.out.println(result);
// Auto-fill on insert (when the property is null)
@TableField(fill = FieldFill.INSERT)
private LocalDateTime createTime;

// Auto-fill on insert and update (when the property is null)
@TableField(fill = FieldFill.INSERT_UPDATE)
private LocalDateTime updateTime;
@Slf4j
@Component
public class MyMetaObjectHandler implements MetaObjectHandler {
    @Override
    public void insertFill(MetaObject metaObject) {
        log.info("start insert fill...");
        /**
         * Deprecated since 3.3.0:
         * default MetaObjectHandler setFieldValByName
         *     (String fieldName, Object fieldVal, MetaObject metaObject)
         * 1. field name  2. field value  3. which MetaObject to fill
         */
        this.setFieldValByName("createTime", LocalDateTime.now(), metaObject);
        this.setFieldValByName("updateTime", LocalDateTime.now(), metaObject);
        /**
         * The newer-style methods:
         */
        this.strictInsertFill(metaObject, "createTime", LocalDateTime.class, LocalDateTime.now());
        // this.strictInsertFill(metaObject, "updateTime", LocalDateTime.class, LocalDateTime.now());
        this.fillStrategy(metaObject, "updateTime", LocalDateTime.now());
    }

    @Override
    public void updateFill(MetaObject metaObject) {
        // Use the update variant here, not strictInsertFill
        this.strictUpdateFill(metaObject, "updateTime", LocalDateTime.class, LocalDateTime.now());
    }
}
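To see the fill handler in action, a quick check could look like the sketch below. The test name is made up, and it assumes the createTime/updateTime fields and columns exist on Employee and its table:

```java
@Test
void testAutoFill() {
    Employee employee = new Employee();
    employee.setLastName("FillMe");
    // createTime and updateTime are left null on purpose;
    // insertFill() should populate both before the INSERT runs
    employeeMapper.insert(employee);
    System.out.println(employeeMapper.selectById(employee.getId()));
}
```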
How optimistic locking is implemented:
- When a record is read, fetch its current version
- When updating, send that version along
- The update executes as: set version = newVersion where version = oldVersion
- If the version no longer matches, the update fails

// Marks this field as the optimistic-lock version
@Version
private Integer version;
@MapperScan("com.bnmzy.mybatisplus.mapper")
@EnableTransactionManagement
@Configuration
public class MybatisPlusConfig {
    // Register the optimistic-lock plugin
    @Bean
    public OptimisticLockerInterceptor optimisticLockerInterceptor() {
        return new OptimisticLockerInterceptor();
    }
}
// Test optimistic locking (single-threaded success case)
@Test
void testOptimisticLocker(){
    Employee employee = employeeMapper.selectById(6);
    employee.setLastName("Acai");
    employee.setEmail("[email protected]");
    employeeMapper.updateById(employee);
}
// Simulate contention: read the same record twice
Employee employee = employeeMapper.selectById(8);
employee.setLastName("Em1");
employee.setEmail("[email protected]");

Employee employee2 = employeeMapper.selectById(8);
employee2.setLastName("Em2");
employee2.setEmail("[email protected]");

// The second reader commits first, bumping the version
employeeMapper.updateById(employee2);
// This update now carries a stale version, so the optimistic lock makes it fail;
// you could retry in a spin-lock style loop (see the sketch below)
employeeMapper.updateById(employee);
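A minimal sketch of the spin-style retry mentioned above; the retry count and loop structure are made up for illustration:

```java
// Retry the read-modify-update cycle a few times until the version check passes
int maxRetries = 3;
for (int i = 0; i < maxRetries; i++) {
    Employee latest = employeeMapper.selectById(8);
    latest.setLastName("Em1");
    latest.setEmail("[email protected]");
    // updateById returns the number of affected rows: 1 on success, 0 when the version was stale
    if (employeeMapper.updateById(latest) == 1) {
        break;
    }
}
```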
Query a single record by id:
Employee employee = employeeMapper.selectById(1);
System.out.println(employee);
Batch query by ids:
List<Employee> employees = employeeMapper.selectBatchIds(Arrays.asList(1, 2, 3));
employees.forEach(System.out::println);
Conditional query, option 1: by map
HashMap<String, Object> map = new HashMap<>();
// Custom conditions; keys are column names
map.put("last_name", "Acai");
map.put("email", "[email protected]");
List<Employee> employees = employeeMapper.selectByMap(map);
employees.forEach(System.out::println);
Pagination. Alternatives: raw LIMIT, PageHelper… MyBatis-Plus ships its own pagination plugin:
@Bean // from the official docs
public PaginationInterceptor paginationInterceptor() {
    PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
    // Behaviour when the requested page is past the last page:
    // true jumps back to the first page, false keeps the request as-is (default false)
    // paginationInterceptor.setOverflow(false);
    // Maximum rows per page, default 500, -1 for unlimited
    // paginationInterceptor.setLimit(500);
    // Enable the count-SQL join optimization (only applies to some left joins)
    paginationInterceptor.setCountSqlParser(new JsqlParserCountOptimize(true));
    return paginationInterceptor;
}
// Page 1, page size 3
Page<Employee> employeePage = new Page<>(1, 3);
employeeMapper.selectPage(employeePage, null);
// The records of the current page
employeePage.getRecords().forEach(System.out::println);
// Total number of records
System.out.println(employeePage.getTotal());
// Current page number
System.out.println(employeePage.getCurrent());
// Page size (use getPages() for the total number of pages)
System.out.println(employeePage.getSize());
// Delete by id
@Test
void testDeleteById(){
    employeeMapper.deleteById(9);
}

// Batch delete by ids
@Test
void testDeleteBatchId(){
    employeeMapper.deleteBatchIds(Arrays.asList(10, 11));
}

// Delete by map conditions
@Test
void testDeleteMap(){
    HashMap<String, Object> map = new HashMap<>();
    map.put("last_name", "FF");
    employeeMapper.deleteByMap(map);
}
Physical delete: the row is actually removed from the database.
Logical delete: the row stays in the database but is marked invalid by a column, e.g. deleted = 0 ==> deleted = 1.
Example scenario: an administrator can still see deleted records, preventing data loss, similar to a recycle bin. A minimal setup is sketched below.
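A minimal sketch of the usual setup. The deleted column name and values are assumptions; also note that on MyBatis-Plus versions before roughly 3.1.1 you additionally need to register a LogicSqlInjector bean, while on later versions the annotation plus the global config is enough:

```java
// Entity field that flags logical deletion (0 = live, 1 = deleted)
@TableLogic
private Integer deleted;

// Matching application.yml (shown as comments to keep this sketch in one place):
// mybatis-plus:
//   global-config:
//     db-config:
//       logic-delete-value: 1
//       logic-not-delete-value: 0
```

With this in place, deleteById issues an UPDATE that sets deleted = 1 instead of a real DELETE, and the built-in SELECT methods automatically append deleted = 0 to their WHERE clause.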
To be analyzed in more depth when I get stronger!