spring cloud集成seata分布式事务(eureka注册中心)

在微服务架构应用中,有一些业务场景要求数据的强一致性,这就促进了分布式事务的产生。ps:大部分应用场景是不建议使用分布式事务的,分布式事务效率会比较低。

相关理论知识可参考CAP原则,BASE理论

seata相关资料请参考:SEATA

seata官方示例请参考:SEATA示例

1.seata-docker集群服务的搭建

a.因为集群中的每个服务实例都是共用db,所以第一步先建立TC服务的数据表:

-- the table to store GlobalSession data
-- Global transaction session table for the Seata TC (transaction
-- coordinator) when store.mode = "db": one row per global transaction.
DROP TABLE IF EXISTS `global_table`;
CREATE TABLE `global_table` (
  `xid`                       VARCHAR(128)  NOT NULL,
  `transaction_id`            BIGINT,
  `status`                    TINYINT       NOT NULL,
  `application_id`            VARCHAR(32),
  `transaction_service_group` VARCHAR(32),
  `transaction_name`          VARCHAR(128),
  `timeout`                   INT,
  `begin_time`                BIGINT,
  `application_data`          VARCHAR(2000),
  `gmt_create`                DATETIME,
  `gmt_modified`              DATETIME,
  PRIMARY KEY (`xid`),
  KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
  KEY `idx_transaction_id` (`transaction_id`)
);

-- the table to store BranchSession data
-- Branch transaction session table for the Seata TC when store.mode = "db":
-- one row per branch (per participating resource) of a global transaction.
DROP TABLE IF EXISTS `branch_table`;
CREATE TABLE `branch_table` (
  `branch_id`         BIGINT        NOT NULL,
  `xid`               VARCHAR(128)  NOT NULL,
  `transaction_id`    BIGINT,
  `resource_group_id` VARCHAR(32),
  `resource_id`       VARCHAR(256),
  `lock_key`          VARCHAR(128),
  `branch_type`       VARCHAR(8),
  `status`            TINYINT,
  `client_id`         VARCHAR(64),
  `application_data`  VARCHAR(2000),
  `gmt_create`        DATETIME,
  `gmt_modified`      DATETIME,
  PRIMARY KEY (`branch_id`),
  KEY `idx_xid` (`xid`)
);

-- the table to store lock data
-- Row-lock table for the Seata TC when store.mode = "db".
DROP TABLE IF EXISTS `lock_table`;
CREATE TABLE `lock_table` (
  `row_key`        VARCHAR(128) NOT NULL,
  `xid`            VARCHAR(96),
  -- FIX: the original declared these two columns as `long`; MySQL treats
  -- LONG as a synonym for MEDIUMTEXT, so the numeric ids were silently
  -- created as text columns. Use BIGINT, consistent with the id columns
  -- in global_table and branch_table above.
  `transaction_id` BIGINT,
  `branch_id`      BIGINT,
  `resource_id`    VARCHAR(256),
  `table_name`     VARCHAR(32),
  `pk`             VARCHAR(36),
  `gmt_create`     DATETIME,
  `gmt_modified`   DATETIME,
  PRIMARY KEY (`row_key`)
);

b.docker-compose.yml的编写

# Two-node Seata TC cluster. The nodes form a cluster because they share the
# same db store (configured in file.conf) and register under one application.
version: "2.0"
services:
  seata-server1:
    image: seataio/seata-server
    ports:
      - "8091:8091"
    environment:
      # Host machine IP this node advertises to the registry
      - SEATA_IP=192.168.2.40
      # Listen port; must match the published host port above
      - SEATA_PORT=8091
      # db store is required for clustering (shared session state)
      - STORE_MODE=db
    volumes:
      - "./resources/file.conf:/seata-server/resources/file.conf"
      - "./resources/registry.conf:/seata-server/resources/registry.conf"
      - "./logs:/root/logs/seata"
  seata-server2:
    image: seataio/seata-server
    ports:
      - "8092:8092"
    environment:
      # Host machine IP this node advertises to the registry
      - SEATA_IP=192.168.2.40
      # Second node uses a different port on the same host
      - SEATA_PORT=8092
      - STORE_MODE=db
    volumes:
      - "./resources/file.conf:/seata-server/resources/file.conf"
      - "./resources/registry.conf:/seata-server/resources/registry.conf"
      - "./logs:/root/logs/seata"

c.TC服务配置

registry.conf

# registry.conf (server side): where the TC registers itself.
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  # Registry type is eureka; only the eureka section below is actually read.
  type = "eureka"

  nacos {
    serverAddr = "localhost"
    namespace = ""
    cluster = "default"
  }
  eureka {
    # The host's hosts file maps www.eagle.com to the registry machines'
    # IPs, so the registry address stays stable and easy to maintain.
    serviceUrl = "http://www.eagle.com:8761/eureka,http://www.eagle.com:8762/eureka"
    # Name the TC registers under; clients reference it in vgroup_mapping.
    application = "SEATA"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

# Source of the TC server's own configuration; "file" means read file.conf.
config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
    serverAddr = "localhost"
    namespace = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    # file.conf holds the store (session persistence) configuration
    name = "file.conf"
  }
}

file.conf

# file.conf (server side): transaction-group to TC-cluster mapping.
service {
  #transaction service group mapping
  # Must match the application name the TC registered under in eureka
  vgroup_mapping.my_test_tx_group = "SEATA"
  #only support when registry.type=file, please don't set multiple addresses
  # Not needed here: this key is only read when registry.type = "file"
  SEATA.grouplist = "www.eagle.com:8091"
  #disable seata
  disableGlobalTransaction = false
}

## transaction log store, only used in seata-server
store {
  ## store mode: file、db
  # db mode is required for a TC cluster: all nodes share session state
  # through the tables created in step 1 (global/branch/lock tables)
  mode = "db"

  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
  }

  ## database store property
  # Connection settings for the database holding the step-1 tables
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/h2/oceanbase etc.
    db-type = "mysql"
    # NOTE(review): com.mysql.jdbc.Driver is the Connector/J 5.x class name;
    # Connector/J 8+ uses com.mysql.cj.jdbc.Driver — confirm which driver
    # the seata-server image bundles before changing.
    driver-class-name = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://www.eagle.com:3306/seata"
    user = "root"
    password = "root"
  }
}

以上配置完成后,启动seata server的docker实例,即可创建TC服务集群。

2.搭建client微服务测试分布式事务

假设有一个扣库存并创建订单的业务,以下便需要创建一个库存相关的微服务模块和订单相关的微服务模块

a.通过spring boot创建并初始化项目;seata依赖请使用:

// Distributed transaction support: spring-cloud-alibaba-seata transitively
// pulls in seata-all 0.9.0; exclude it and declare seata-all 1.0.0
// explicitly so the client library version matches the TC server.
    compile('com.alibaba.cloud:spring-cloud-alibaba-seata:2.1.1.RELEASE') {
        // FIX: Gradle's exclude(Map) only understands the keys 'group' and
        // 'module' (not 'name'/'version'), so the original exclusion did not
        // take effect and both seata-all versions ended up on the classpath.
        exclude group: 'io.seata', module: 'seata-all'
    }
    compile('io.seata:seata-all:1.0.0')

b.将STOCK,ORDER两个微服务分别注册到seata-docker配置的注册中心。(STOCK,ORDER,Seata server需要注册到同一个注册中心)

c.创建回滚日志表(每一个微服务连接的数据库都需要创建)

-- Seata AT-mode rollback log. Every database a branch transaction touches
-- needs this table: the RM records the information required to roll the
-- branch back (rollback_info) and removes the row on global commit.
CREATE TABLE `undo_log` (
  `id`            BIGINT(20)   NOT NULL AUTO_INCREMENT,
  `branch_id`     BIGINT(20)   NOT NULL,
  `xid`           VARCHAR(100) NOT NULL,
  `context`       VARCHAR(128) NOT NULL,
  `rollback_info` LONGBLOB     NOT NULL,
  `log_status`    INT(11)      NOT NULL,
  `log_created`   DATETIME     NOT NULL,
  `log_modified`  DATETIME     NOT NULL,
  `ext`           VARCHAR(100) DEFAULT NULL,
  PRIMARY KEY (`id`),
  -- one undo record per (global transaction, branch) pair
  UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

d.在各微服务resources文件夹中添加seata相关配置文件

registry.conf

# registry.conf (client side): how each microservice locates the TC.
# Must point at the same eureka registry the TC servers registered with.
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "eureka"

  nacos {
    serverAddr = "localhost"
    namespace = ""
    cluster = "default"
  }
  eureka {
    # Same registry addresses as the TC server's registry.conf
    serviceUrl = "http://www.eagle.com:8761/eureka,http://www.eagle.com:8762/eureka"
    application = "SEATA"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

# Source of the client's seata configuration; "file" means read file.conf.
config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
    serverAddr = "localhost"
    namespace = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

file.conf

注意:这个文件和server端配置不同

# Netty transport settings for the RM/TM ↔ TC connection.
transport {
   # tcp udt unix-domain-socket
   type = "TCP"
   #NIO NATIVE
   server = "NIO"
   #enable heartbeat
   heartbeat = true
   # the client batch send request enable
   enable-client-batch-send-request = true
   #thread factory for netty
   thread-factory {
     boss-thread-prefix = "NettyBoss"
     worker-thread-prefix = "NettyServerNIOWorker"
     server-executor-thread-prefix = "NettyServerBizHandler"
     share-boss-worker = false
     client-selector-thread-prefix = "NettyClientSelector"
     client-selector-thread-size = 1
     client-worker-thread-prefix = "NettyClientWorkerThread"
     # netty boss thread size,will not be used for UDT
     boss-thread-size = 1
     #auto default pin or 8
     worker-thread-size = 8
   }
   shutdown {
     # when destroy server, wait seconds
     wait = 3
   }
   serialization = "seata"
   compressor = "none"
 }
 # Maps this client's transaction service group to the TC cluster.
 service {
   #transaction service group mapping
   # "SEATA" is the application name the TC registered under in eureka.
   # The key is "<spring.application.name>-seata-service-group": for the
   # ORDER service it is ORDER-seata-service-group; for STOCK it would be
   # STOCK-seata-service-group.
   vgroup_mapping.ORDER-seata-service-group="SEATA"
   #only support when registry.type=file, please don't set multiple addresses
   # Not needed in this cluster setup: only read when registry.type = "file"
   default.grouplist = "www.eagle.com:8091"
   #degrade, current not support
   enableDegrade = false
   #disable seata
   disableGlobalTransaction = false
 }

 # RM/TM client tuning; values match the defaults shipped with seata 1.0.
 client {
   rm {
     async.commit.buffer.limit = 10000
     lock {
       # NOTE(review): "internal" is the key name shipped in seata's default
       # config (likely an upstream typo for "interval") — keep as-is, the
       # client reads this exact key in this version.
       retry.internal = 10
       retry.times = 30
       retry.policy.branch-rollback-on-conflict = true
     }
     report.retry.count = 5
     table.meta.check.enable = false
     report.success.enable = true
   }
   tm {
     commit.retry.count = 5
     rollback.retry.count = 5
   }
   undo {
     data.validation = true
     log.serialization = "jackson"
     # Name of the rollback-log table created in step 2.c
     log.table = "undo_log"
   }
   log {
     exceptionRate = 100
   }
   support {
     # auto proxy the DataSource bean
     spring.datasource.autoproxy = false
   }
 }

e.以上配置完成后,编码方面只需要添加@GlobalTransactional即可,类似以下这种:

import io.seata.sample.feign.OrderFeignClient;
import io.seata.sample.feign.StorageFeignClient;
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;


@Service
public class BusinessService {

    // Constructor injection instead of @Autowired field injection:
    // dependencies are explicit and final, and the bean can be built
    // without a Spring container in tests.
    private final StorageFeignClient storageFeignClient;
    private final OrderFeignClient orderFeignClient;

    @Autowired
    public BusinessService(StorageFeignClient storageFeignClient,
                           OrderFeignClient orderFeignClient) {
        this.storageFeignClient = storageFeignClient;
        this.orderFeignClient = orderFeignClient;
    }

    /**
     * Deducts stock, then creates the order, inside one Seata global
     * transaction: @GlobalTransactional opens the global transaction and,
     * if either remote call fails, rolls back both branches.
     *
     * @param userId        id of the buying user
     * @param commodityCode code of the commodity being purchased
     * @param orderCount    quantity to deduct from stock and order
     */
    @GlobalTransactional
    public void purchase(String userId, String commodityCode, int orderCount) {
        storageFeignClient.deduct(commodityCode, orderCount);

        orderFeignClient.create(userId, commodityCode, orderCount);
    }
}

相关微服务示例请查看SEATA示例

你可能感兴趣的:(spring cloud集成seata分布式事务(eureka注册中心))