Setting up distributed transactions with Nacos 2.1.0 + Seata 1.5.2

1. Nacos installation and deployment is omitted here; there are plenty of guides for it online.

2. Seata server setup

       2.1 Unzip the distribution, open \conf\application.yml, and replace its contents with the following configuration:

server:
  port: 7091

spring:
  application:
    name: seata-server

logging:
  config: classpath:logback-spring.xml
  file:
    path: ./logs/seata
#  extend:
#    logstash-appender:
#      destination: 127.0.0.1:4560
#    kafka-appender:
#      bootstrap-servers: 127.0.0.1:9092
#      topic: logback_to_logstash

console:
  user:
    username: seata
    password: seata

seata:
  config:
    # support: nacos 、 consul 、 apollo 、 zk  、 etcd3
    type: nacos
    nacos:
      # Nacos server address
      server-addr: 127.0.0.1:8848
      group: SEATA_GROUP
      namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
      username:
      password:
      ##if use MSE Nacos with auth, mutex with username/password attribute
      #access-key: ""
      #secret-key: ""
      # Data ID of the configuration to load from Nacos
      data-id: seata-server.properties
  registry:
    # support: nacos 、 eureka 、 redis 、 zk  、 consul 、 etcd3 、 sofa
    type: nacos
    nacos:
      application: seata-server
      # Nacos server address
      server-addr: 127.0.0.1:8848
      group: SEATA_GROUP
      namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
      cluster: default
      username:
      password:
      ##if use MSE Nacos with auth, mutex with username/password attribute
      #access-key: ""
      #secret-key: ""
  security:
    secretKey: SeataSecretKey0c382ef121d778043159209298fd40bf3850a017
    tokenValidityInMilliseconds: 1800000
    ignore:
      urls: /,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-fe/public/**,/api/v1/auth/login

This configuration sets Nacos as both the config center and the registry. Pay particular attention to the group and the namespace; they are used again in the Nacos configuration later on.

     group: SEATA_GROUP #group
     namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a #namespace ID

2.2 Create the database and tables; the script is in \script\server\db:

 

create database  seata;
use seata;

-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
CREATE TABLE IF NOT EXISTS `global_table`
(
    `xid`                       VARCHAR(128) NOT NULL,
    `transaction_id`            BIGINT,
    `status`                    TINYINT      NOT NULL,
    `application_id`            VARCHAR(32),
    `transaction_service_group` VARCHAR(32),
    `transaction_name`          VARCHAR(128),
    `timeout`                   INT,
    `begin_time`                BIGINT,
    `application_data`          VARCHAR(2000),
    `gmt_create`                DATETIME,
    `gmt_modified`              DATETIME,
    PRIMARY KEY (`xid`),
    KEY `idx_status_gmt_modified` (`status` , `gmt_modified`),
    KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
    `branch_id`         BIGINT       NOT NULL,
    `xid`               VARCHAR(128) NOT NULL,
    `transaction_id`    BIGINT,
    `resource_group_id` VARCHAR(32),
    `resource_id`       VARCHAR(256),
    `branch_type`       VARCHAR(8),
    `status`            TINYINT,
    `client_id`         VARCHAR(64),
    `application_data`  VARCHAR(2000),
    `gmt_create`        DATETIME(6),
    `gmt_modified`      DATETIME(6),
    PRIMARY KEY (`branch_id`),
    KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(
    `row_key`        VARCHAR(128) NOT NULL,
    `xid`            VARCHAR(128),
    `transaction_id` BIGINT,
    `branch_id`      BIGINT       NOT NULL,
    `resource_id`    VARCHAR(256),
    `table_name`     VARCHAR(32),
    `pk`             VARCHAR(36),
    `status`         TINYINT      NOT NULL DEFAULT '0' COMMENT '0:locked ,1:rollbacking',
    `gmt_create`     DATETIME,
    `gmt_modified`   DATETIME,
    PRIMARY KEY (`row_key`),
    KEY `idx_status` (`status`),
    KEY `idx_branch_id` (`branch_id`),
    KEY `idx_xid_and_branch_id` (`xid` , `branch_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

CREATE TABLE IF NOT EXISTS `distributed_lock`
(
    `lock_key`       CHAR(20) NOT NULL,
    `lock_value`     VARCHAR(20) NOT NULL,
    `expire`         BIGINT,
    primary key (`lock_key`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

INSERT INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('AsyncCommitting', ' ', 0);
INSERT INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('RetryCommitting', ' ', 0);
INSERT INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('RetryRollbacking', ' ', 0);
INSERT INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('TxTimeoutCheck', ' ', 0);

show tables;

3. Configuration in Nacos

 3.1 Create a new namespace, confirm it, and record the namespace ID; the one auto-generated for me is:

               46d8963e-646d-4c28-b9aa-abd8ea1dae4a


  3.2 Add the seata-server.properties configuration (the Data ID must match the data-id configured in application.yml above)

                Open the configuration list, select the namespace created for Seata, and add a new configuration.


 The configuration content is as follows:

store.mode=db

# the redis properties below come from the default template and are only read when store.mode=redis
store.redis.host=127.0.0.1
store.redis.port=6379
store.redis.maxConn=10
store.redis.minConn=1
store.redis.database=0
store.redis.queryLimit=100

#store.lock.mode=db
#store.session.mode=db
#store.publicKey=

store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://192.168.1.86:3306/seata?useUnicode=true&rewriteBatchedStatements=true
store.db.user=root
store.db.password=qwe123
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.distributedLockTable=distributed_lock
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000

#Transaction rule configuration, only for the server
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
server.distributedLockExpireTime=10000
server.xaerNotaRetryTimeout=60000
server.session.branchAsyncQueueSize=5000
server.session.enableBranchAsyncRemove=false

#Transaction rule configuration, only for the client
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=true
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.rm.sagaJsonParser=fastjson
client.rm.tccActionInterceptorOrder=-2147482648
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
client.tm.interceptorOrder=-2147482648
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k

#For TCC transaction mode
tcc.fence.logTableName=tcc_fence_log
tcc.fence.cleanPeriod=1h

#Log rule configuration, for client and server
log.exceptionRate=100

#Metrics configuration, only for the server
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898

transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableTmClientBatchSendRequest=false
transport.enableRmClientBatchSendRequest=true
transport.enableTcServerBatchSendResponse=false
transport.rpcRmRequestTimeout=30000
transport.rpcTmRequestTimeout=30000
transport.rpcTcRequestTimeout=30000
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
transport.serialization=seata
transport.compressor=none

Adjust the database connection settings (store.db.url, store.db.user, store.db.password) to match your environment.
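If you want to double-check the store database before registering this configuration, a minimal JDBC sketch such as the one below can confirm that the seata database is reachable and that the four tables from step 2.2 exist. The URL and credentials are the example values from the properties above; adjust them to your own environment.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SeataDbCheck {
    public static void main(String[] args) throws Exception {
        // same JDBC URL, user and password as store.db.url / store.db.user / store.db.password
        String url = "jdbc:mysql://192.168.1.86:3306/seata?useUnicode=true";
        try (Connection conn = DriverManager.getConnection(url, "root", "qwe123");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW TABLES")) {
            while (rs.next()) {
                // expect global_table, branch_table, lock_table and distributed_lock
                System.out.println(rs.getString(1));
            }
        }
    }
}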

3.3 Add a seata-client-demo.yml configuration file (YAML) for later use; this is the file the client application loads.

# Seata client configuration
seata:
  enabled: true
  application-id: seata-client-demo
  # Seata transaction group; must match a key under seata.service.vgroup-mapping in the client's application.yml (section 5.2)
  tx-service-group: default_tx_group
  config:
    type: nacos
    nacos:
      # Nacos server address
      server-addr: 127.0.0.1:8848
      group: SEATA_GROUP
      namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
      data-id: seata-server.properties
  registry:
    type: nacos
    nacos:
      application: seata-server
      # Nacos server address
      server-addr: 127.0.0.1:8848
      group: SEATA_GROUP
      namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
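
Once the client imports this file (via spring.config.import in section 5.2, which assumes Nacos config support such as spring-cloud-starter-alibaba-nacos-config is available to the client), a quick way to confirm the import worked is to log one of its properties at startup. A minimal sketch, with an assumed class name:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class SeataConfigCheck {

    // prints the transaction group resolved from the imported seata-client-demo.yml;
    // NOT_SET means the Nacos import did not take effect
    @Bean
    public CommandLineRunner printSeataGroup(
            @Value("${seata.tx-service-group:NOT_SET}") String txServiceGroup) {
        return args -> System.out.println("seata.tx-service-group = " + txServiceGroup);
    }
}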


 

4. Startup order: start Nacos first, then start the Seata server.
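
After both are up, the Seata server should appear as a service instance in Nacos under the SEATA_GROUP group of the namespace created in step 3.1. A minimal sketch that checks this with the Nacos Java client (nacos-client comes in transitively with spring-cloud-starter-alibaba-nacos-discovery):

import java.util.Properties;

import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.naming.NamingFactory;
import com.alibaba.nacos.api.naming.NamingService;

public class SeataRegistrationCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848");
        props.put(PropertyKeyConst.NAMESPACE, "46d8963e-646d-4c28-b9aa-abd8ea1dae4a");
        NamingService naming = NamingFactory.createNamingService(props);
        // one instance is expected per running Seata server node
        System.out.println(naming.getAllInstances("seata-server", "SEATA_GROUP"));
    }
}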

5. Seata client integration

        5.1 pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>parent_alibaba</artifactId>
        <groupId>com.atguigu.springcloud</groupId>
        <version>0.0.1-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>spring-cloud-alibaba-seata-order-9001</artifactId>

    <dependencies>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-actuator</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-loadbalancer</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-openfeign</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <scope>runtime</scope>
            <optional>true</optional>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>

        <dependency>
            <groupId>com.alibaba.cloud</groupId>
            <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>${mysql.version}</version>
            <scope>runtime</scope>
        </dependency>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid-spring-boot-starter</artifactId>
        </dependency>

        <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
            <type>pom</type>
        </dependency>

        <dependency>
            <groupId>com.atguigu.springcloud</groupId>
            <artifactId>comment</artifactId>
            <version>0.0.1-SNAPSHOT</version>
            <scope>compile</scope>
        </dependency>

        <dependency>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring-boot-starter</artifactId>
            <version>1.5.2</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba.cloud</groupId>
            <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>io.seata</groupId>
                    <artifactId>seata-spring-boot-starter</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <configuration>
                    <excludes>
                        <exclude>
                            <groupId>org.projectlombok</groupId>
                            <artifactId>lombok</artifactId>
                        </exclude>
                    </excludes>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>

5.2 application.yml

server:
  port: 9001

# Spring
spring:
  application:
    # application name
    name: seata-order

  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost:3306/mydb?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: qwe123
    druid:
      test-while-idle: false

  # Nacos configuration
  cloud:
    nacos:
      # Nacos server address
      server-addr: 127.0.0.1:8848
      discovery:
        # discovery group; must match the Seata server's group
        group: SEATA_GROUP
        namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
      config:
        # config group; must match the Seata server's group
        group: SEATA_GROUP
        namespace: 46d8963e-646d-4c28-b9aa-abd8ea1dae4a
  config:
    import:
      - optional:nacos:seata-client-demo.yml



seata:
  service:
    vgroup-mapping:
      # the key must match seata.tx-service-group on the client; the value is the Seata cluster name in the registry
      default_tx_group: default
    grouplist:
      # only read when the registry type is 'file'; kept here as the default TC address
      default: 127.0.0.1:8091


mybatis:
  mapper-locations: classpath:mybatis/mapper/*.xml  # location of the SQL mapper XML files
  type-aliases-package: com.atguigu.springcloud.Beans  # package for entity type aliases
  configuration:  # equivalent to settings in a global MyBatis configuration file
    map-underscore-to-camel-case: true  # map underscored columns to camelCase properties
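
For completeness, a minimal startup class that matches the starters in the pom; the class name and the mapper package are assumptions, not taken from the original project:

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.openfeign.EnableFeignClients;

@SpringBootApplication
@EnableDiscoveryClient   // register the service with Nacos
@EnableFeignClients      // enable the Feign clients used for the remote storage/account calls
@MapperScan("com.atguigu.springcloud.dao")  // assumed MyBatis mapper package
public class SeataOrder9001Application {
    public static void main(String[] args) {
        SpringApplication.run(SeataOrder9001Application.class, args);
    }
}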

5.3 Create the databases and tables

        Create the order, storage, and account databases along with their business tables.

        Then create an undo_log table in each of the three databases.

          The script is in \script\client\at\db of the Seata source package:

CREATE TABLE `undo_log` (
                            `id` bigint(20) NOT NULL AUTO_INCREMENT,
                            `branch_id` bigint(20) NOT NULL,
                            `xid` varchar(100) NOT NULL,
                            `context` varchar(128) NOT NULL,
                            `rollback_info` longblob NOT NULL,
                            `log_status` int(11) NOT NULL,
                            `log_created` datetime NOT NULL,
                            `log_modified` datetime NOT NULL,
                            `ext` varchar(100) DEFAULT NULL,
                            PRIMARY KEY (`id`),
                            UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

5.4 Write the business implementation

@GlobalTransactional
public ComResult createOrder(TblOrder order) {
    System.out.println("order-->" + order);
    // create the order
    orderService.create(order);
    // deduct stock
    TblStorage storage = new TblStorage(null, order.getCommodityCode(), order.getCount());
    System.out.println("storage-->" + storage);
    storageService.updateStorage(storage);
    // deduct the account balance (payment)
    TblAccount account = new TblAccount(null, order.getUserId(), order.getMoney());
    System.out.println("account-->" + account);
    accountService.updateAccount(account);
    return new ComResult(200, "success", null);
}

Usage is simple: annotate the method that needs a distributed transaction with @GlobalTransactional and it just works.
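
The storageService and accountService used in createOrder are calls to the remote storage and account services; with spring-cloud-starter-openfeign and spring-cloud-starter-alibaba-seata in the pom they would typically be Feign clients along the lines below, and the Seata XID is propagated on these calls automatically so the remote updates join the same global transaction. The service name and endpoint path are illustrative assumptions; AccountService would follow the same pattern.

import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;

// TblStorage and ComResult come from the shared 'comment' module declared in the pom
@FeignClient(name = "seata-storage")   // assumed service name of the storage microservice
public interface StorageService {

    @PostMapping("/storage/update")    // assumed endpoint that deducts stock
    ComResult updateStorage(@RequestBody TblStorage storage);
}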

The initial setup has plenty of pitfalls, though, and even once it works the configuration can still leave you somewhat confused.
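
One way to convince yourself the wiring is correct is to force a failure after the last branch call and confirm that the order, stock, and account changes are all rolled back (the undo_log rows are written and then cleaned up after the rollback). A sketch, assumed to sit in the same service class as createOrder above; the method name is hypothetical:

@GlobalTransactional
public ComResult createOrderThenFail(TblOrder order) {
    orderService.create(order);
    storageService.updateStorage(new TblStorage(null, order.getCommodityCode(), order.getCount()));
    accountService.updateAccount(new TblAccount(null, order.getUserId(), order.getMoney()));
    // simulated failure: the TM reports the global transaction as failed and the TC
    // drives each branch to roll back using its undo_log record
    throw new RuntimeException("simulated failure to trigger global rollback");
}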
