因业务服务提升,单个库已经扛不住日活10w,每天订单380w+的数据, 故引入mysql读写分离;
首先项目架构 springboot+mybatis ;
由于引用的druid 的连接池 ,所以第一步要禁用springboot默认加载的tomcat连接池
@EnableAutoConfiguration(exclude={DataSourceAutoConfiguration.class})
@ServletComponentScan
@EnableAspectJAutoProxy
@SpringBootApplication(exclude = { org.springframework.boot.autoconfigure.thymeleaf.ThymeleafAutoConfiguration.class })
public class ApplicationRun extends SpringBootServletInitializer
接下来就配置引用的数据源 .properties 文件引入主库连接以及从库连接以及最小连接数,最大连接数等等。
################数据写库配置##############
mysql.datasource.write.url=jdbc:mysql://127.0.0.1/test?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true
mysql.datasource.write.username=root
mysql.datasource.write.password=root123
mysql.datasource.write.driverClassName=com.mysql.jdbc.Driver
mysql.datasource.write.minIdle=5
mysql.datasource.write.maxActive=100
mysql.datasource.write.initialSize=10
mysql.datasource.write.maxWait=60000
mysql.datasource.write.timeBetweenEvictionRunsMillis=60000
mysql.datasource.write.minEvictableIdleTimeMillis=300000
mysql.datasource.write.validationQuery=select 'x'
mysql.datasource.write.testWhileIdle=true
mysql.datasource.write.testOnBorrow=false
mysql.datasource.write.testOnReturn=false
mysql.datasource.write.poolPreparedStatements=true
mysql.datasource.write.maxPoolPreparedStatementPerConnectionSize=50
mysql.datasource.write.removeAbandoned=true
mysql.datasource.write.filters=stat
################数据读库配置##############
mysql.datasource.read.url=jdbc:mysql://127.0.0.1/test2?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true
mysql.datasource.read.username=root
mysql.datasource.read.password=root123
mysql.datasource.read.driverClassName=com.mysql.jdbc.Driver
mysql.datasource.read.minIdle=5
mysql.datasource.read.maxActive=100
mysql.datasource.read.initialSize=10
mysql.datasource.read.maxWait=60000
mysql.datasource.read.timeBetweenEvictionRunsMillis=60000
mysql.datasource.read.minEvictableIdleTimeMillis=300000
mysql.datasource.read.validationQuery=select 'x'
mysql.datasource.read.testWhileIdle=true
mysql.datasource.read.testOnBorrow=false
mysql.datasource.read.testOnReturn=false
mysql.datasource.read.poolPreparedStatements=true
mysql.datasource.read.maxPoolPreparedStatementPerConnectionSize=50
mysql.datasource.read.removeAbandoned=true
mysql.datasource.read.filters=stat
配置加载重写数据源
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.jdbc.DataSourceBuilder;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import javax.sql.DataSource;
/**
 * Registers the write (master) and read (slave) {@link DataSource} beans
 * used for MySQL read/write splitting. Pool settings are bound from the
 * {@code mysql.datasource.write.*} / {@code mysql.datasource.read.*}
 * property prefixes; the concrete pool implementation class (e.g. Druid)
 * comes from the {@code mysql.datasource.type} property.
 */
@Configuration
@EnableConfigurationProperties(DataSourceProperties.class)
public class DataSourceConfiguration {

    static final Log log = LogFactory.getLog(DataSourceConfiguration.class);

    // Concrete DataSource implementation, e.g. com.alibaba.druid.pool.DruidDataSource.
    @Value("${mysql.datasource.type}")
    private Class<? extends DataSource> dataSourceType;

    /**
     * Write (master) data source, configured from mysql.datasource.write.*.
     * Marked {@link Primary} so it wins whenever a single DataSource is
     * injected by type.
     *
     * @return the master connection pool
     */
    @Bean(name = "writeDataSource")
    @Primary
    @ConfigurationProperties(prefix = "mysql.datasource.write")
    public DataSource writeDataSource() {
        log.info("-------------------- writeDataSource init ---------------------");
        return DataSourceBuilder.create().type(dataSourceType).build();
    }

    /**
     * Read (slave) data source, configured from mysql.datasource.read.*.
     *
     * @return the replica connection pool
     */
    @Bean(name = "readDataSource")
    @ConfigurationProperties(prefix = "mysql.datasource.read")
    public DataSource readDataSourceOne() {
        log.info("-------------------- read01 DataSourceOne init ---------------------");
        return DataSourceBuilder.create().type(dataSourceType).build();
    }
}
设置 本地线程,数据源上下文;引用threadlocal
package com.mp.chengxin.config.datasourceconfig;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/*
* 本地线程,数据源上下文
*/
public class DataSourceContextHolder {

    static final Log log = LogFactory.getLog(DataSourceContextHolder.class);

    // Per-thread routing key ("read" / "write"); consumed by the routing DataSource
    // when it decides which physical pool serves the current statement.
    private static final ThreadLocal<String> ROUTE_KEY = new ThreadLocal<String>();

    public static ThreadLocal<String> getLocal() {
        return ROUTE_KEY;
    }

    /** Route subsequent statements on this thread to the read (slave) database. */
    public static void setRead() {
        ROUTE_KEY.set(DataSourceType.read.getType());
        log.info("数据库切换到读库...");
    }

    /** Route subsequent statements on this thread to the write (master) database. */
    public static void setWrite() {
        ROUTE_KEY.set(DataSourceType.write.getType());
        log.info("数据库切换到写库...");
    }

    /** @return the routing key set for the current thread, or {@code null} if none was set */
    public static String getReadOrWrite() {
        return ROUTE_KEY.get();
    }

    /** Remove the routing key; call when a request completes to avoid stale state on pooled threads. */
    public static void clear() {
        ROUTE_KEY.remove();
    }
}
设置主从库枚举类
/**
 * Routing keys for the master/slave data sources. The {@code type} value is
 * the lookup key registered in the routing DataSource's target map, so it
 * must match the keys used there exactly.
 */
public enum DataSourceType {

    read("read", "从库"),
    write("write", "主库");

    // NOTE(review): enum constants are shared singletons, so these fields stay
    // mutable only because the public setters below must remain for backward
    // compatibility. New code should treat the values as constants.
    private String type;
    private String name;

    DataSourceType(String type, String name) {
        this.type = type;
        this.name = name;
    }

    /** @return the routing lookup key ("read" or "write") */
    public String getType() {
        return type;
    }

    /**
     * @deprecated mutating shared enum state affects every thread in the JVM; avoid.
     */
    @Deprecated
    public void setType(String type) {
        this.type = type;
    }

    /** @return the human-readable name of this data source */
    public String getName() {
        return name;
    }

    /**
     * @deprecated mutating shared enum state affects every thread in the JVM; avoid.
     */
    @Deprecated
    public void setName(String name) {
        this.name = name;
    }
}
设置动态路由: 切面拦截后会注入数据源
/**
 * MyBatis wiring for read/write splitting: builds a routing DataSource that
 * dispatches to the master or slave pool based on the key stored in
 * {@code DataSourceContextHolder}, and plugs the proxy into both the
 * SqlSessionFactory and the transaction manager.
 */
@Configuration
@AutoConfigureAfter(DataSourceConfiguration.class)
@MapperScan(basePackages = "com.*.*.mapper")
public class MyBatisConfiguration {

    @Autowired
    @Qualifier("writeDataSource")
    private DataSource writeDataSource;

    @Autowired
    @Qualifier("readDataSource")
    private DataSource readDataSource;

    // FIX: referenced by sqlSessionFactoryBean() below but was never declared,
    // so the original class could not compile. Requires a pagination
    // Interceptor bean to exist in the context.
    @Autowired
    private Interceptor paginationInterceptor;

    /**
     * SqlSessionFactory backed by the routing proxy, so every MyBatis
     * statement picks master or slave at execution time.
     *
     * @return the configured factory
     * @throws Exception if mapper resources cannot be resolved or the factory fails to build
     */
    @Bean(name = "sqlSessionFactory")
    public SqlSessionFactory sqlSessionFactoryBean() throws Exception {
        SqlSessionFactoryBean factory = new SqlSessionFactoryBean();
        factory.setDataSource(roundRobinDataSouceProxy());
        factory.setTypeAliasesPackage("com.*.*.model");
        // Location of the mapper XML files on the classpath.
        Resource[] resources =
                new PathMatchingResourcePatternResolver().getResources("classpath:com/*/*Mapper.xml");
        factory.setMapperLocations(resources);
        factory.setPlugins(new Interceptor[] { paginationInterceptor });
        // Let checked exceptions propagate: the method already declares
        // throws Exception, so printStackTrace + rewrap added nothing.
        return factory.getObject();
    }

    /**
     * Routing DataSource holding both pools. The map keys MUST match the
     * values returned by determineCurrentLookupKey(), otherwise the switch
     * cannot find the target pool.
     *
     * @return the routing proxy wrapping master and slave
     */
    @Bean(name = "roundRobinDataSouceProxy")
    public AbstractRoutingDataSource roundRobinDataSouceProxy() {
        Map<Object, Object> targetDataSources = new HashMap<Object, Object>();
        targetDataSources.put(DataSourceType.write.getType(), writeDataSource);
        targetDataSources.put(DataSourceType.read.getType(), readDataSource);

        AbstractRoutingDataSource proxy = new AbstractRoutingDataSource() {
            /**
             * Returns the lookup key of the DataSource to use for the current
             * call. The proxy resolves it against targetDataSources and falls
             * back to the configured default for unknown keys.
             */
            @Override
            protected Object determineCurrentLookupKey() {
                String typeKey = DataSourceContextHolder.getReadOrWrite();
                if (typeKey == null) {
                    throw new NullPointerException("数据库路由时,决定使用哪个数据库源类型不能为空...");
                }
                if (typeKey.equals(DataSourceType.write.getType())) {
                    System.err.println("使用数据库write.............");
                    return DataSourceType.write.getType();
                }
                // Any other key routes to the read (slave) pool.
                return DataSourceType.read.getType();
            }
        };
        proxy.setDefaultTargetDataSource(writeDataSource); // fallback pool
        proxy.setTargetDataSources(targetDataSources);
        return proxy;
    }

    @Bean
    public SqlSessionTemplate sqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }

    /**
     * Transaction manager bound to the routing proxy. Calling the @Bean
     * method directly returns the container-managed singleton (CGLIB-proxied
     * under @Configuration), which avoids the static SpringUtil lookup that
     * fails when the context is not fully initialized yet.
     */
    @Bean
    public PlatformTransactionManager annotationDrivenTransactionManager() {
        return new DataSourceTransactionManager(roundRobinDataSouceProxy());
    }
}
下面最关键的一步 切面拦截, service层拦截,这样可以保证写事务的一致性
/**
 * Decides the data source at the service layer.
 *
 * Must run before the transaction AOP advice, so it implements Ordered with
 * a small value (the application enables @EnableTransactionManagement(order = 10),
 * and smaller order values execute first).
 *
 * @author Jfei
 */
@Aspect
@EnableAspectJAutoProxy(exposeProxy = true, proxyTargetClass = true)
@Component
public class DataSourceAopInService implements PriorityOrdered {

    static final Log log = LogFactory.getLog(DataSourceAopInService.class);

    /**
     * FIX: the advice below referenced "apiAspect()" but no pointcut was ever
     * declared, which fails at startup with "Cannot resolve reference to
     * pointcut". Adjust the expression to this project's actual service
     * package layout before deploying.
     */
    @Pointcut("execution(* com..service..*.*(..))")
    public void apiAspect() {
    }

    /**
     * Routes the call to the read (slave) DB before the service method runs.
     *
     * NOTE(review): this sets "read" unconditionally for every matched method;
     * writes rely on a later switch to the write key elsewhere — confirm that
     * write paths do call DataSourceContextHolder.setWrite(). Also consider
     * clearing the ThreadLocal when the request completes so pooled threads
     * do not carry stale routing state.
     */
    @Before("apiAspect()")
    public void setReadDataSourceType() {
        DataSourceContextHolder.setRead();
    }

    @Override
    public int getOrder() {
        // Smaller value runs earlier; must precede the transaction advice (order = 10).
        return 1;
    }
}