尚硅谷 SpringCloud 2020, Day 18 (p142-148)

Today is 2020-12-22.
I. Integrating Seata with Spring Cloud
There are a huge number of pitfalls here; this one was hard.
1. The pom. Note that your Seata server version must match the version of the seata-all dependency:

<dependencies>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-openfeign</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
        <exclusions>
            <exclusion>
                <groupId>io.seata</groupId>
                <artifactId>seata-spring-boot-starter</artifactId>
            </exclusion>
        </exclusions>
        <version>2.2.1.RELEASE</version>
    </dependency>
    <dependency>
        <groupId>io.seata</groupId>
        <artifactId>seata-spring-boot-starter</artifactId>
        <version>1.2.0</version>
        <exclusions>
            <exclusion>
                <groupId>io.seata</groupId>
                <artifactId>seata-all</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>io.seata</groupId>
        <artifactId>seata-all</artifactId>
        <version>1.2.0</version>
    </dependency>
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-alibaba-nacos-discovery</artifactId>
        <version>2.1.0.RELEASE</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
    </dependency>
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid-spring-boot-starter</artifactId>
        <version>1.1.10</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-jdbc</artifactId>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>

2. Under the resources directory, create registry.conf and file.conf. First, registry.conf:

registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"

  nacos {
    serverAddr = "xxx:8080"
    application = "seata-server"
    namespace = "public"
    cluster = "default"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
    serverAddr = "localhost"
    namespace = "public"
    cluster = "default"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

file.conf (note that MySQL 8 is used here):

transport {
  # tcp udt unix-domain-socket
  type = "TCP"
  #NIO NATIVE
  server = "NIO"
  #enable heartbeat
  heartbeat = true
  #thread factory for netty
  thread-factory {
    boss-thread-prefix = "NettyBoss"
    worker-thread-prefix = "NettyServerNIOWorker"
    server-executor-thread-prefix = "NettyServerBizHandler"
    share-boss-worker = false
    client-selector-thread-prefix = "NettyClientSelector"
    client-selector-thread-size = 1
    client-worker-thread-prefix = "NettyClientWorkerThread"
    # netty boss thread size, will not be used for UDT
    boss-thread-size = 1
    #auto default pin or 8
    worker-thread-size = 8
  }
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}
service {
  #vgroup->rgroup
  vgroup_mapping.my_test_tx_group = "default"
  #only support single node
  default.grouplist = "101.200.172.53:8091"
  #degrade current not support
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}

client {
  async.commit.buffer.limit = 10000
  lock {
    retry.internal = 10
    retry.times = 30
  }
  report.retry.count = 5
}

## transaction log store
store {
  ## store mode: file、db
  mode = "db"

  ## file store
  file {
    dir = "sessionStore"

    # branch session size, if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size, if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size, if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  }

  ## database store
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/h2/oceanbase etc.
    db-type = "mysql"
    driverClassName = "com.mysql.cj.jdbc.Driver"
    url = "jdbc:mysql://xxx:3306/seata?serverTimezone=UTC"
    user = "root"
    password = "123456"
    min-conn = 1
    max-conn = 3
    global.table = "global_table"
    branch.table = "branch_table"
    lock-table = "lock_table"
    query-limit = 100
  }
}
lock {
  ## the lock store mode: local、remote
  mode = "remote"

  local {
    ## store locks in user's database
  }

  remote {
    ## store locks in the seata's server
  }
}
recovery {
  committing-retry-delay = 30
  asyn-committing-retry-delay = 30
  rollbacking-retry-delay = 30
  timeout-retry-delay = 30
}

transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}

3. application.yml:

server:
  port: 2001
spring:
  application:
    name: seata-order-service
  cloud:
    alibaba:
      seata:
        tx-service-group: my_test_tx_group
    nacos:
      discovery:
        server-addr: xxx:8080

  datasource:
    url: jdbc:mysql://xxx:3306/seata_order?serverTimezone=UTC
    username: root
    password: 123456
    driver-class-name: com.mysql.cj.jdbc.Driver
    #initialization mode
    #initialization-mode: always
    type: com.alibaba.druid.pool.DruidDataSource
    # Druid-specific settings
    druid:
      initial-size: 5 # initial size of the connection pool
      min-idle: 10 # minimum number of idle connections
      max-active: 20 # maximum number of connections
      # filters for the monitoring/statistics interceptors; without "stat" the monitoring page cannot collect SQL stats; "wall" is the SQL firewall
      filters: stat,wall,slf4j
      web-stat-filter:
        enabled: true
        url-pattern: /*
        exclusions: "*.js,*.gif,*.jpg,*.png,*.css,*.ico,/druid/*" # do not collect stats for these requests
      stat-view-servlet: # login username/password for the monitoring page
        url-pattern: /druid/*
        reset-enable: false
        allow: 127.0.0.1
        login-username: admin
        login-password: 123456
        enabled: true
feign:
  client:
    config:
      default:
        connectTimeout: 10000
        readTimeout: 10000
  hystrix:
    enabled: true
hystrix:
  command:
    default:
      execution:
        isolation:
          thread:
            timeoutInMilliseconds: 20000
logging:
  level:
    io:
      seata: info
seata:
  enabled: true
  application-id: ${spring.application.name}
  tx-service-group: my_test_tx_group
  enable-auto-data-source-proxy: true
  service:
    vgroup-mapping:
      my_test_tx_group: default  # this key must match the value of tx-service-group, otherwise you get the exception: no available service 'null' found, please make sure registry config correct
    grouplist:
      default: xxx:8091
    enable-degrade: false
    disable-global-transaction: false



mybatis:
  mapper-locations: classpath:mapper/*.xml
  configuration:
    map-underscore-to-camel-case: true

For the other services, just change the database connection, application name, and port.
4. Create DataSourceProxyConfig, the Seata data-source proxy configuration class:

@Configuration
public class DataSourceProxyConfig {

    @Value("${mybatis.mapperLocations}")
    private String mapperLocations;

    // bind the Druid pool to the spring.datasource.* properties
    @Bean
    @ConfigurationProperties(prefix = "spring.datasource")
    public DataSource dataSource() {
        return new DruidDataSource();
    }

    // wrap the data source in Seata's DataSourceProxy so SQL can be intercepted for AT mode
    @Bean
    public DataSourceProxy dataSourceProxy(DataSource dataSource) {
        return new DataSourceProxy(dataSource);
    }

    // build the MyBatis SqlSessionFactory on top of the proxied data source
    @Bean
    public SqlSessionFactory sqlSessionFactoryBean(DataSourceProxy dataSourceProxy) throws Exception {
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(dataSourceProxy);
        sqlSessionFactoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources(mapperLocations));
        sqlSessionFactoryBean.setTransactionFactory(new SpringManagedTransactionFactory());
        return sqlSessionFactoryBean.getObject();
    }
}
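
One thing to watch: dataSource() and dataSourceProxy() are both DataSource beans, so any other component that autowires a plain DataSource now sees two candidates. A common variant (an assumption on my part, not part of the course code) marks the proxy as primary:

// Hedged variant, not in the original course code: make the Seata proxy
// the primary DataSource, so anything else autowiring a DataSource
// (for example the transaction manager) receives the proxied one.
@Bean
@Primary
public DataSourceProxy dataSourceProxy(DataSource dataSource) {
    return new DataSourceProxy(dataSource);
}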

I won't write out all of the concrete business code here; a minimal sketch follows after the main class below.
5. Create the dao interfaces, create a domain directory, and create the entity classes inside it.
6. Create a mapper directory under resources and create the xxx.xml files bound to the corresponding dao interfaces.
7. Create the service interface and its implementation; inject the dao object and call the dao methods in the implementation.
8. Create the controller, inject the service implementation, and call its methods.
9. The main application class:

@SpringBootApplication
@EnableDiscoveryClient
@EnableFeignClients
@MapperScan(basePackages = "com.cfy.springcloud.dao")
public class OrderSeataMain {

    public static void main(String[] args) {
        SpringApplication.run(OrderSeataMain.class, args);
    }
}
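
To give steps 5-8 some shape, here is a minimal sketch of the Feign client, service, and controller. All names here (StorageFeignService, OrderServiceImpl, CommonResult, the /order/create endpoint) are hypothetical stand-ins, not the course's exact code:

// Hypothetical sketch of steps 5-8; class names and endpoints are assumptions.
@FeignClient(value = "seata-storage-service")
interface StorageFeignService {
    // deduct stock in the storage service's database
    @PostMapping("/storage/decrease")
    CommonResult decrease(@RequestParam("productId") Long productId,
                          @RequestParam("count") Integer count);
}

@Service
class OrderServiceImpl implements OrderService {
    @Resource
    private OrderDao orderDao;                        // local dao (steps 5/6)
    @Resource
    private StorageFeignService storageFeignService;  // remote branch (step 7)

    @Override
    public void create(Order order) {
        orderDao.create(order);                       // insert the order with status 0
        storageFeignService.decrease(order.getProductId(), order.getCount());
        orderDao.updateStatus(order.getUserId(), 0);  // mark the order as paid
    }
}

@RestController
class OrderController {
    @Resource
    private OrderService orderService;

    @GetMapping("/order/create")
    public CommonResult create(Order order) {         // step 8
        orderService.create(order);
        return new CommonResult(200, "order created");
    }
}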

II. Testing @GlobalTransactional
1. Besides 2001, I also created 2002 and 2003. With the environment set up, 2001 calls 2002 and 2003 to form a distributed transaction spanning multiple databases.
2. For example, add a sleep to a method in 2002 so that it times out. Calling 2001's service now returns an error page, and in 2001's database (the order table) an "unpaid order" appears, i.e. one with status 0. Checking the 2002 and 2003 databases, the stock was still deducted and in the end the money was deducted too. Because the 2002 payment service timed out, the 2001 order service could not update the order status and threw an exception instead: a distributed transaction problem. Real systems involve even more databases, so controlling distributed transactions is essential.
3. Now add @GlobalTransactional(name = "default", rollbackFor = Exception.class) to the business method in 2001, the one that calls the 2002 and 2003 services, meaning the global transaction rolls back on any exception. Calling the business method again, the error page still appears, but the logs show that both 2001 and 2003 rolled their transactions back: before the 2002 payment service ran, the branch transactions in 2001 (create order) and 2003 (deduct stock) had already completed; then the 2002 payment service timed out and threw, @GlobalTransactional detected it, and the whole global transaction had to roll back, so those two branch transactions were rolled back as well. That is exactly the control of a distributed transaction across multiple data sources that we wanted. (A sketch of the annotated method follows below.)
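
Concretely, the annotated method in 2001 looks roughly like this. Only the annotation itself is from the course; the Feign client names and the method body are assumptions:

// Sketch of the TM-side method in 2001. Any exception rolls back the whole
// global transaction, including branches already committed in 2002/2003.
@GlobalTransactional(name = "default", rollbackFor = Exception.class)
public void create(Order order) {
    orderDao.create(order);                                               // branch: order db (2001)
    storageFeignService.decrease(order.getProductId(), order.getCount()); // branch: storage db (2003)
    accountFeignService.decrease(order.getUserId(), order.getMoney());    // branch: payment db (2002), times out here
    orderDao.updateStatus(order.getUserId(), 0);                          // never reached on timeout
}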
III. Supplementary Seata concepts
1. TC: this is our Seata server.
2. TM: whichever service has a method annotated with @GlobalTransactional is the TM. It is usually the first service in a call chain: it starts the chain, opens a global transaction, and passes the global transaction's XID along the chain to the branch transactions behind it.
3. RM: the connection between each service on the call chain and its database.
4. Rough flow:
(1) The TM registers a global transaction record with the TC, opening a global transaction.
(2) Each RM reports its resource preparation status to the TC.
(3) The TM asks the TC to commit/roll back the distributed transaction, ending it; phase one is over.
(4) The TC aggregates the transaction information and decides whether to commit or roll back.
(5) The TC notifies all RMs to commit/roll back their branch transactions; phase two is over.
5. Seata's four modes: AT, TCC, SAGA, and XA; the default is AT.
6. In detail:
1. Phase one of the global transaction: the business data and the rollback log are committed in the same local transaction, then the local lock and connection resources are released:

1. Intercept the business SQL and parse its semantics to locate the business data to be updated. Before the update, save that data as the "before image": generate a query from the parsed SQL, locate the business rows, and save their pre-update state.
2. Execute the business SQL.
3. After the business data is updated, save it as the "after image": using the before image's result, locate the business rows by primary key and save their post-update state.
4. Insert the rollback log: combine the before/after images and the metadata of the business SQL into one rollback log record and insert it into the UNDO_LOG table.
5. Before committing, register the branch with the TC and request the global lock on, say, the record with primary key 1 in the product table.
6. Commit the local transaction: the business data update and the UNDO LOG generated in the steps above are committed together.
7. Report the result of the local commit to the TC.

2. Phase two of the global transaction, commit: phase one already committed the business SQL to the database, so all that remains is to delete the before image, after image, and row lock saved in phase one, completing the cleanup:

1. On receiving the branch-commit request from the TC, put it into an asynchronous task queue and immediately return success to the TC.
2. The branch-commit requests in the asynchronous task queue then delete the corresponding UNDO LOG records asynchronously and in batches.

3. Phase two of the global transaction, rollback: if phase two is a rollback, the business data must be restored using the before image saved in phase one:

1. A branch receives the branch-rollback request from the TC and opens a local transaction.
2. It finds the corresponding UNDO LOG record via the XID and the branch's Branch ID, then compares the after image in the UNDO LOG with the current data. If they differ, the data was modified by something outside the current global transaction, and the handling depends on the configured policy.
3. Restoring means executing the reverse SQL: if the business statement was an insert, execute a delete; if the business statement was an update, update the business data back to the values saved in the before image. Before executing the reverse SQL, the after image is used to validate the data version, somewhat like optimistic locking.

4. Put simply: before and after executing the SQL, backups and the row lock are saved to the log table. If the final commit succeeds, the backups are deleted; if it fails, the "before" backup is read from the table to restore the data, after first validating the data version against the "after" backup: if they match, restore; if not, a separate policy decides. After a successful restore, the before/after backups and the row lock are deleted from the log table as well. A conceptual sketch of this flow follows.
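
To make the AT flow concrete, here is a conceptual plain-JDBC sketch of what the data-source proxy does around a single business UPDATE. This illustrates the mechanism only; it is not Seata's actual implementation, and the product table, the undo_log columns, and the serialize() helper are all hypothetical:

import java.sql.*;

// Conceptual sketch of AT mode phase one around one business UPDATE.
// Not Seata's real code; table, columns, and serialize() are hypothetical.
class AtModePhaseOneSketch {

    void executeWithUndoLog(Connection conn, String xid, long branchId) throws SQLException {
        conn.setAutoCommit(false);

        // 1. before image: read the rows the business SQL is about to change
        ResultSet before = query(conn, "SELECT id, name, stock FROM product WHERE id = 1");

        // 2. execute the business SQL itself
        conn.createStatement().executeUpdate("UPDATE product SET stock = stock - 1 WHERE id = 1");

        // 3. after image: re-read the same rows by primary key
        ResultSet after = query(conn, "SELECT id, name, stock FROM product WHERE id = 1");

        // 4. insert the rollback log into undo_log in the SAME local transaction
        PreparedStatement undo = conn.prepareStatement(
                "INSERT INTO undo_log (xid, branch_id, rollback_info) VALUES (?, ?, ?)");
        undo.setString(1, xid);
        undo.setLong(2, branchId);
        undo.setBytes(3, serialize(before, after)); // images + SQL metadata
        undo.executeUpdate();

        // 5. register the branch with the TC and acquire the global lock on
        //    product id = 1 (omitted: a network call to the Seata server)

        // 6. business change and undo log commit together; the result is then
        //    reported to the TC. Phase-two commit merely deletes the undo_log
        //    row; phase-two rollback replays the before image after validating
        //    the after image against the current data.
        conn.commit();
    }

    private ResultSet query(Connection conn, String sql) throws SQLException {
        return conn.createStatement().executeQuery(sql);
    }

    // hypothetical helper: encode the before/after images plus SQL metadata
    private byte[] serialize(ResultSet before, ResultSet after) {
        return new byte[0]; // placeholder
    }
}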
IV. Addendum
When a project that uses Seata also integrates Feign, application.yml must be configured as follows:

feign:
  client:
    config:
      default:
        connectTimeout: 10000
        readTimeout: 10000
  hystrix:
    enabled: true
hystrix:
  command:
    default:
      execution:
        isolation:
          thread:
            timeoutInMilliseconds: 20000

This pit cost me several hours. Earlier I found that without the annotation controlling the global transaction, load balancing worked fine, yet with the annotation the requests timed out. It turned out that Seata's transaction control slows requests down, and if you don't configure things as above, Ribbon's default timeouts are too short and errors are thrown. Note that once Feign's two timeouts are configured, they override Ribbon's two timeouts, which is why you must also configure Hystrix's global timeout. As covered earlier, the circuit breaker's timeout must be greater than Ribbon's timeout including retries (the arithmetic is sketched below). With all this in place you can see that once Seata controls the transaction, one full request takes several seconds to complete. All I can say is that Seata is genuinely solid.
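
As a worked example of that timeout relationship, here is the commonly cited Spring Cloud Netflix formula in code. The retry counts are Ribbon's defaults and are an assumption here; substitute your own settings:

// Worked example of the timeout relationship described above. Retry counts
// are Ribbon's defaults (an assumption; adjust to your configuration).
public class TimeoutCheck {
    public static void main(String[] args) {
        int connectTimeout = 10_000;      // feign ... connectTimeout (overrides Ribbon's)
        int readTimeout = 10_000;         // feign ... readTimeout (overrides Ribbon's)
        int maxAutoRetries = 0;           // ribbon.MaxAutoRetries default
        int maxAutoRetriesNextServer = 1; // ribbon.MaxAutoRetriesNextServer default

        // total time the client may spend on one logical request, retries included
        int retryBudget = (connectTimeout + readTimeout)
                * (maxAutoRetries + 1) * (maxAutoRetriesNextServer + 1);

        // rule from the text: the circuit breaker must outlive the retry budget,
        // otherwise Hystrix trips while Ribbon is still retrying
        System.out.println("retry budget = " + retryBudget + " ms");
        System.out.println("hystrix timeoutInMilliseconds should be > " + retryBudget);
    }
}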

Reposted from blog.csdn.net/qq_44727091/article/details/111560832