Building a Distributed Database with Apache ShardingSphere
Sharding-Proxy sharding configuration: for the underlying principles, see the official documentation. Steps to proxy MySQL with Sharding-Proxy and implement database sharding:

1. Download the distribution (wget).
2. Extract it: tar xvf apache-shardingsphere-4.1.0-sharding-proxy-bin.tar.gz
3. When using MySQL, the JDBC driver has to be imported manually:
   (1) download it (wget);
   (2) extract it: tar xvf mysql-connector-java-5.1.47
   (3) copy the driver jar into the proxy's lib/ directory:
   cd mysql-connector-java-5.1.47
   cp mysql-connector-java-5.1.47.jar /_mytools/sharding-proxy/v4.1.0/lib/
4. Basic proxy configuration in server.yaml (cd apache-shardingsphere-4.1.0-sharding-proxy-bin/conf/):

```yaml
# user configuration
authentication:
  users:                              # user list
    root:                             # user named root
      password: root                  # connection password
    sharding:                         # user named sharding
      password: sharding              # password
      authorizedSchemas: sharding_db  # schemas the sharding user may manage; all schemas when omitted

# basic Sharding-Proxy parameters
props:
  max.connections.size.per.query: 1   # max connections a single query may open, default 1
  acceptor.size: 12                   # worker threads accepting client requests, default CPU cores * 2
  executor.size: 6                    # worker thread count, default: number of CPU cores
  proxy.frontend.flush.threshold: 128 # for a single large query, flush to the network every N packets, default 128
  # LOCAL: Proxy will run with LOCAL transaction.
  # XA: Proxy will run with XA transaction.
  # BASE: Proxy will run with B.A.S.E transaction.
  proxy.transaction.type: LOCAL       # default LOCAL; allowed values are LOCAL, XA, BASE. XA uses Atomikos as the
                                      # transaction manager; BASE requires copying a jar that implements the
                                      # ShardingTransactionManager interface into the lib directory
  proxy.opentracing.enabled: false    # whether to enable tracing, off by default
  proxy.hint.enabled: false           # whether to enable hint-based forced routing, default false
  query.with.cipher.column: true      # whether to query with the cipher column, default false
  sql.show: false                     # whether to print SQL, default false
  allow.range.query.with.inline.sharding: false  # allow range queries, default false; with horizontal sharding a
                                                 # range query is broadcast to every table, which is expensive
```

5. Data source and sharding configuration in config-sharding.yaml:
   (1) the logical schema name (schemaName);
   (2) the data source configuration;
   (3) the sharding rules.

```yaml
schemaName: sharding_db   # logical schema name

dataSources:
  # data source alias; keep it unique and, ideally, consistent with the sharding column
  ds_37:
    # data source URL; the extra parameters unify time zone handling and avoid character-encoding problems;
    # the physical database must already exist when the proxy starts
    url: jdbc:mysql://192.168.2.170:3306/ds37?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=Asia/Shanghai
    username: root
    password: 123456
    connectionTimeoutMilliseconds: 30000   # connection timeout
    idleTimeoutMilliseconds: 60000         # idle connection reclaim timeout, in milliseconds
    maxLifetimeMilliseconds: 1800000       # maximum connection lifetime, in milliseconds
    maxPoolSize: 50                        # maximum number of connections
  ds_45:
    url: jdbc:mysql://192.168.2.170:3306/ds45?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=Asia/Shanghai
    username: root
    password: 123456
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    maxPoolSize: 50

shardingRule:
  tables:                 # sharding rules; multiple tables can be configured
    seal_data:            # logical table name in the logical schema
      actualDataNodes: ds_${['37','45']}.seal_data   # data nodes: logical data source + logical table name, inline expressions are supported
```
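Once the proxy is started, an application connects to it exactly as it would to a normal MySQL server. The sketch below is a minimal smoke test against the configuration above, assuming the proxy listens on its default port 3307; the host address and the query itself are illustrative only:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Minimal smoke test against Sharding-Proxy: connect with the "sharding" user from server.yaml and
// query the logical table seal_data. The proxy routes the query to ds_37/ds_45 and merges the result.
public class ProxySmokeTest {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:mysql://192.168.2.170:3307/sharding_db?useUnicode=true&characterEncoding=UTF-8";
        try (Connection conn = DriverManager.getConnection(url, "sharding", "sharding");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM seal_data")) {
            if (rs.next()) {
                System.out.println("rows across all shards: " + rs.getLong(1));
            }
        }
    }
}
```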
Spring Boot: ShardingSphere with multiple data sources, including data sources that are not sharded
1. Why configure multiple data sources alongside ShardingSphere?
(1) sharding-jdbc takes over all data sources by default. If there are several non-sharded databases, at most one of them can be set as the default database, and the remaining non-sharded databases cannot be accessed.
(2) sharding-jdbc places some restrictions on database access; see the official documentation: https:///document/legacy/4.x/document/cn/manual/sharding-jdbc/unsupported-items/
Author: 刘宏缔, e-mail: 371125307@

2. About the demo project
(1) Project address: https:///liuhongdi/shardingmulti
(2) What it demonstrates: ShardingSphere integrates two sharded databases plus one default (non-sharded) database, and a fourth database is integrated as a separate, non-ShardingSphere data source.
(3) Project structure: shown in a figure in the original post.
(4) Database structure: shown in a figure in the original post.

3. Configuration files

3.1 pom.xml

```xml
<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-web</artifactId>
  <exclusions>
    <exclusion>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-logging</artifactId>
    </exclusion>
  </exclusions>
</dependency>
<!--sharding jdbc begin-->
<dependency>
  <groupId>org.apache.shardingsphere</groupId>
  <artifactId>sharding-jdbc-core</artifactId>
  <version>4.1.1</version>
</dependency>
<!--sharding jdbc end-->
<!--mybatis begin-->
<dependency>
  <groupId>org.mybatis.spring.boot</groupId>
  <artifactId>mybatis-spring-boot-starter</artifactId>
  <version>2.1.3</version>
</dependency>
<!--mybatis end-->
<!--druid begin-->
<dependency>
  <groupId>com.alibaba</groupId>
  <artifactId>druid-spring-boot-starter</artifactId>
  <version>1.1.23</version>
</dependency>
<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-log4j2</artifactId>
</dependency>
<dependency>
  <groupId>com.lmax</groupId>
  <artifactId>disruptor</artifactId>
  <version>3.4.2</version>
</dependency>
<!--druid end-->
<!--mysql begin-->
<dependency>
  <groupId>mysql</groupId>
  <artifactId>mysql-connector-java</artifactId>
  <scope>runtime</scope>
</dependency>
<!--mysql end-->
<!--pagehelper begin-->
<dependency>
  <groupId>com.github.pagehelper</groupId>
  <artifactId>pagehelper-spring-boot-starter</artifactId>
  <version>1.3.0</version>
</dependency>
<!--pagehelper end-->
<!--thymeleaf begin-->
<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-thymeleaf</artifactId>
</dependency>
<!--thymeleaf end-->
```

3.2 application.properties

```properties
#error
server.error.include-stacktrace=always
#error
logging.level.org.springframework.web=trace
#name = dynamic

# basic configuration of the goodsdb data source
spring.datasource.druid.goodsdb.username = root
spring.datasource.druid.goodsdb.password = lhddemo
spring.datasource.druid.goodsdb.driver-class-name = com.mysql.cj.jdbc.Driver
spring.datasource.druid.goodsdb.url = jdbc:mysql://127.0.0.1:3306/store?serverTimezone=UTC
spring.datasource.druid.goodsdb.type = com.alibaba.druid.pool.DruidDataSource
spring.datasource.druid.goodsdb.initialSize = 5
spring.datasource.druid.goodsdb.minIdle = 5
spring.datasource.druid.goodsdb.maxActive = 20
spring.datasource.druid.goodsdb.maxWait = 60000
spring.datasource.druid.goodsdb.timeBetweenEvictionRunsMillis = 60000
spring.datasource.druid.goodsdb.minEvictableIdleTimeMillis = 300000
spring.datasource.druid.goodsdb.validationQuery = SELECT 1 FROM DUAL
spring.datasource.druid.goodsdb.testWhileIdle = true
spring.datasource.druid.goodsdb.testOnBorrow = false
spring.datasource.druid.goodsdb.testOnReturn = false
spring.datasource.druid.goodsdb.poolPreparedStatements = true
# monitoring/statistics filters; without them the monitoring page cannot collect SQL statistics, 'wall' is the SQL firewall
spring.datasource.druid.maxPoolPreparedStatementPerConnectionSize = 20
spring.datasource.druid.useGlobalDataSourceStat = true
spring.datasource.druid.connectionProperties = druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500
#druid sql firewall monitor
spring.datasource.druid.filter.wall.enabled=true
#druid sql monitor
spring.datasource.druid.filter.stat.enabled=true
spring.datasource.druid.filter.stat.log-slow-sql=true
spring.datasource.druid.filter.stat.slow-sql-millis=10000
spring.datasource.druid.filter.stat.merge-sql=true
#druid uri monitor
spring.datasource.druid.web-stat-filter.enabled=true
spring.datasource.druid.web-stat-filter.url-pattern=/*
spring.datasource.druid.web-stat-filter.exclusions=*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*
#druid session monitor
spring.datasource.druid.web-stat-filter.session-stat-enable=true
spring.datasource.druid.web-stat-filter.profile-enable=true
#druid spring monitor
spring.datasource.druid.aop-patterns=com.druid.*
#monitor, druid login user config
spring.datasource.druid.stat-view-servlet.enabled=true
spring.datasource.druid.stat-view-servlet.login-username=root
spring.datasource.druid.stat-view-servlet.login-password=root
# IP whitelist (all access is allowed when missing or empty)
spring.datasource.druid.stat-view-servlet.allow = 127.0.0.1,192.168.163.1
# IP blacklist (deny takes precedence over allow when both match)
spring.datasource.druid.stat-view-servlet.deny = 192.168.10.1
#mybatis
mybatis.mapper-locations=classpath:/mapper/sharding/*Mapper.xml
#mybatis.type-aliases-package=com.example.demo.mapper
mybatis.configuration.log-impl=org.apache.ibatis.logging.stdout.StdOutImpl
#log
logging.config = classpath:log4j2.xml
# first sharded data source managed by shardingsphere
spring.datasource.druid.saleorder01.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.druid.saleorder01.driverClassName=com.mysql.cj.jdbc.Driver
spring.datasource.druid.saleorder01.url=jdbc:mysql://127.0.0.1:3306/saleorder01?characterEncoding=utf-8
spring.datasource.druid.saleorder01.username=root
spring.datasource.druid.saleorder01.password=lhddemo
# second sharded data source managed by shardingsphere
spring.datasource.druid.saleorder02.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.druid.saleorder02.driverClassName=com.mysql.cj.jdbc.Driver
spring.datasource.druid.saleorder02.url=jdbc:mysql://127.0.0.1:3306/saleorder02?characterEncoding=utf-8
spring.datasource.druid.saleorder02.username=root
spring.datasource.druid.saleorder02.password=lhddemo
# third data source, not sharded, accessed as the default database
spring.datasource.druid.orderdb.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.druid.orderdb.driverClassName=com.mysql.cj.jdbc.Driver
spring.datasource.druid.orderdb.url=jdbc:mysql://127.0.0.1:3306/orderdb?characterEncoding=utf-8
spring.datasource.druid.orderdb.username=root
spring.datasource.druid.orderdb.password=lhddemo
```

3.3 DDL for the tables
We create two databases, saleorder01 and saleorder02, and two tables in each: saleorder01 holds t_order_1 and t_order_2, saleorder02 holds t_order_3 and t_order_4. The table DDL (t_order_4 shown, the others differ only in the name):

```sql
CREATE TABLE `t_order_4` (
  `orderId` bigint(11) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
  `goodsName` varchar(250) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL DEFAULT '' COMMENT 'name',
  PRIMARY KEY (`orderId`)
) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='order4';
```

The goodsdb table, with one row of sample data:

```sql
CREATE TABLE `goods` (
  `goodsId` bigint(11) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
  `goodsName` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL DEFAULT '' COMMENT 'name',
  `subject` varchar(200) NOT NULL DEFAULT '' COMMENT 'title',
  `price` decimal(15,2) NOT NULL DEFAULT '0.00' COMMENT 'price',
  `stock` int(11) NOT NULL DEFAULT '0' COMMENT 'stock',
  PRIMARY KEY (`goodsId`)
) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='goods table';

INSERT INTO `goods` (`goodsId`, `goodsName`, `subject`, `price`, `stock`) VALUES
(3, '100分电动牙刷', '好用到让你爱上刷牙', '59.00', 96);
```

The orderdb table, with one row of sample data:

```sql
CREATE TABLE `orderinfo` (
  `orderId` bigint(11) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
  `orderSn` varchar(100) NOT NULL DEFAULT '' COMMENT 'order sn',
  `orderTime` timestamp NOT NULL DEFAULT '1971-01-01 00:00:01' COMMENT 'order time',
  `orderStatus` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'status: 0 unpaid, 1 paid, 2 shipped, 3 returned, 4 expired',
  `userId` int(12) NOT NULL DEFAULT '0' COMMENT 'user id',
  `price` decimal(10,0) NOT NULL DEFAULT '0' COMMENT 'price',
  `addressId` int(12) NOT NULL DEFAULT '0' COMMENT 'address',
  PRIMARY KEY (`orderId`),
  UNIQUE KEY `orderSn` (`orderSn`)
) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='order table';

INSERT INTO `orderinfo` (`orderId`, `orderSn`, `orderTime`, `orderStatus`, `userId`, `price`, `addressId`) VALUES
(77, '20200814171411660', '2020-08-14 09:14:12', 0, 8, '100', 0);
```

4. Java code

4.1 DatabasePreciseShardingAlgorithm.java, the database (sharding-by-database) algorithm:

```java
@Component
public class DatabasePreciseShardingAlgorithm implements PreciseShardingAlgorithm<Long> {
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        Long curValue = shardingValue.getValue();
        String curBase = "";
        if (curValue > 0 && curValue <= 200) {
            curBase = "saleorder01";
        } else {
            curBase = "saleorder02";
        }
        return curBase;
    }
}
```

4.2 OrderTablePreciseShardingAlgorithm.java, the table (sharding-by-table) algorithm:

```java
@Component
public class OrderTablePreciseShardingAlgorithm implements PreciseShardingAlgorithm<Long> {
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        Long curValue = shardingValue.getValue();
        String curTable = "";
        if (curValue > 0 && curValue <= 100) {
            curTable = "t_order_1";
        } else if (curValue > 100 && curValue <= 200) {
            curTable = "t_order_2";
        } else if (curValue > 200 && curValue <= 300) {
            curTable = "t_order_3";
        } else {
            curTable = "t_order_4";
        }
        return curTable;
    }
}
```

4.3 GoodsdbSourceConfig.java

```java
@Configuration
@MapperScan(basePackages = "com.shardingmulti.demo.mapper.goodsdb", sqlSessionTemplateRef = "goodsdbSqlSessionTemplate")
public class GoodsdbSourceConfig {

    @Bean
    @Primary
    @ConfigurationProperties("spring.datasource.druid.goodsdb")
    public DataSource goodsdbDataSource() {
        return DruidDataSourceBuilder.create().build();
    }

    @Bean
    @Primary
    public SqlSessionFactory goodsdbSqlSessionFactory(@Qualifier("goodsdbDataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(dataSource);
        bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/goodsdb/*.xml"));
        return bean.getObject();
    }

    @Bean
    @Primary
    public DataSourceTransactionManager goodsdbTransactionManager(@Qualifier("goodsdbDataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean
    @Primary
    public SqlSessionTemplate goodsdbSqlSessionTemplate(@Qualifier("goodsdbSqlSessionFactory") SqlSessionFactory sqlSessionFactory) throws Exception {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}
```

This configures the goodsdb data source. Because MyBatis is used, the mapper package and the XML location are given explicitly. Since this data source is not managed by sharding-jdbc, keep the mapper interfaces and XML files of the two data sources separate: mapper/goodsdb and mapper/sharding respectively.

4.4 ShardingDataSourceConfig.java

```java
@Configuration
@MapperScan(basePackages = "com.shardingmulti.demo.mapper.sharding", sqlSessionFactoryRef = "shardingSqlSessionFactory")
public class ShardingDataSourceConfig {

    // table sharding algorithm
    @Resource
    private OrderTablePreciseShardingAlgorithm orderTablePreciseShardingAlgorithm;
    // database sharding algorithm
    @Resource
    private DatabasePreciseShardingAlgorithm databasePreciseShardingAlgorithm;

    // first order database
    @Bean(name = "saleorder01")
    @ConfigurationProperties(prefix = "spring.datasource.druid.saleorder01")
    public DataSource saleorder01() {
        return DruidDataSourceBuilder.create().build();
    }

    // second order database
    @Bean(name = "saleorder02")
    @ConfigurationProperties(prefix = "spring.datasource.druid.saleorder02")
    public DataSource saleorder02() {
        return DruidDataSourceBuilder.create().build();
    }

    // third database, order statistics, used as the default data source
    @Bean(name = "orderdb")
    @ConfigurationProperties(prefix = "spring.datasource.druid.orderdb")
    public DataSource orderdb() {
        return DruidDataSourceBuilder.create().build();
    }

    // build the sharding data source; every sharded database must be passed in
    @Bean("dataSource")
    public DataSource dataSource(@Qualifier("saleorder01") DataSource saleorder01,
                                 @Qualifier("saleorder02") DataSource saleorder02,
                                 @Qualifier("orderdb") DataSource orderdb) throws SQLException {
        // register the real data sources
        Map<String, DataSource> dataSourceMap = new HashMap<String, DataSource>();
        dataSourceMap.put("orderdb", orderdb);
        dataSourceMap.put("saleorder01", saleorder01);
        dataSourceMap.put("saleorder02", saleorder02);
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        shardingRuleConfig.setDefaultDataSourceName("orderdb");
        // if more tables need sharding, add their rules here one by one
        shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
        Properties p = new Properties();
        p.setProperty("sql.show", Boolean.TRUE.toString());
        // create the data source object
        DataSource dataSource = ShardingDataSourceFactory.createDataSource(dataSourceMap, shardingRuleConfig, p);
        return dataSource;
    }

    // create the SqlSessionFactory
    @Bean(name = "shardingSqlSessionFactory")
    public SqlSessionFactory shardingSqlSessionFactory(@Qualifier("dataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(dataSource);
        bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/sharding/*.xml"));
        return bean.getObject();
    }

    // create the transaction manager
    @Bean("shardingTransactionManger")
    public DataSourceTransactionManager shardingTransactionManger(@Qualifier("dataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    // create the SqlSessionTemplate
    @Bean(name = "shardingSqlSessionTemplate")
    public SqlSessionTemplate shardingSqlSessionTemplate(@Qualifier("shardingSqlSessionFactory") SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }

    // sharding rules for the order table
    private TableRuleConfiguration getOrderTableRuleConfiguration() {
        TableRuleConfiguration result = new TableRuleConfiguration("t_order",
                "saleorder01.t_order_$->{1..2},saleorder02.t_order_$->{3..4}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("orderId", databasePreciseShardingAlgorithm));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("orderId", orderTablePreciseShardingAlgorithm));
        return result;
    }

    // pagination
    @Bean(name = "pageHelper")
    public PageHelper getPageHelper() {
        PageHelper pageHelper = new PageHelper();
        Properties properties = new Properties();
        properties.setProperty("reasonable", "true");
        properties.setProperty("supportMethodsArguments", "true");
        properties.setProperty("returnPageInfo", "true");
        properties.setProperty("params", "count=countSql");
        pageHelper.setProperties(properties);
        return pageHelper;
    }
}
```

The sharding-jdbc data source is created with ShardingDataSourceFactory. It manages three databases: the two sharded ones, saleorder01 and saleorder02, and the non-sharded orderdb. Note that among the data sources managed by sharding-jdbc there can be only one non-sharded database, and it must be set as the default data source; otherwise it cannot be accessed normally.

4.5 HomeController.java

```java
@Controller
@RequestMapping("/home")
public class HomeController {

    @Resource
    private GoodsMapper goodsMapper;
    @Resource
    private OrderMapper orderMapper;
    @Resource
    private OrderShardingMapper orderShardingMapper;

    // goods detail; parameter: goods id
    @GetMapping("/goodsinfo")
    @ResponseBody
    @DS("goodsdb")
    public Goods goodsInfo(@RequestParam(value = "goodsid", required = true, defaultValue = "0") Long goodsId) {
        Goods goods = goodsMapper.selectOneGoods(goodsId);
        return goods;
    }

    // order statistics database; parameter: order id
    @GetMapping("/orderinfo")
    @ResponseBody
    public Order orderInfo(@RequestParam(value = "orderid", required = true, defaultValue = "0") Long orderId) {
        Order order = orderMapper.selectOneOrder(orderId);
        return order;
    }

    // order list from the two sharded databases
    @GetMapping("/orderlist")
    public String list(Model model, @RequestParam(value = "currentPage", required = false, defaultValue = "1") Integer currentPage) {
        PageHelper.startPage(currentPage, 5);
        List<OrderSharding> orderList = orderShardingMapper.selectAllOrder();
        model.addAttribute("orderlist", orderList);
        PageInfo<OrderSharding> pageInfo = new PageInfo<>(orderList);
        model.addAttribute("pageInfo", pageInfo);
        System.out.println("------------------------size:" + orderList.size());
        return "order/list";
    }
}
```

This exposes access to each of the data sources.

4.6 OrderShardingMapper.xml

```xml
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-////DTD Mapper 3.0//EN" "/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.shardingmulti.demo.mapper.sharding.OrderShardingMapper">
  <select id="selectAllOrder" resultType="com.shardingmulti.demo.pojo.OrderSharding">
    select * from t_order order by orderId desc
  </select>
</mapper>
```

4.7 OrderMapper.xml

```xml
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-////DTD Mapper 3.0//EN" "/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.shardingmulti.demo.mapper.sharding.OrderMapper">
  <select id="selectOneOrder" parameterType="long" resultType="com.shardingmulti.demo.pojo.Order">
    select * from orderinfo where orderId=#{orderId}
  </select>
</mapper>
```

4.8 GoodsMapper.xml

```xml
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-////DTD Mapper 3.0//EN" "/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.shardingmulti.demo.mapper.goodsdb.GoodsMapper">
  <select id="selectOneGoods" parameterType="long" resultType="com.shardingmulti.demo.pojo.Goods">
    select * from goods where goodsId=#{goodsId}
  </select>
</mapper>
```

5. Testing
(1) Access goodsdb: http://127.0.0.1:8080/home/goodsinfo?goodsid=3 (returns the goods record; screenshot in the original post).
(2) Access orderdb: http://127.0.0.1:8080/home/orderinfo?orderid=77 (returns the order record).
(3) Access the sharded databases: http://127.0.0.1:8080/home/orderlist/ (returns the paged order list).
(4) Check the connections on the Druid monitoring page at http://127.0.0.1:8080/druid: all four data sources are visible.

6. Spring Boot version
The startup banner shows :: Spring Boot :: (v2.3.3.RELEASE).
Implementation steps for integrating multiple data sources with ShardingSphere JDBC
1. Add the ShardingSphere dependencies: add the relevant ShardingSphere dependencies to the project's pom.xml. The exact dependency configuration can be found in the official ShardingSphere documentation.
2. Configure the data sources: configure the data source details in the project's configuration file. Multiple data sources can be configured, each with its driver class, connection URL, username, password, and so on.
3. Configure the sharding rules: if the data needs to be sharded, configure the corresponding sharding rules. Sharding rules define how data is split and routed; choose a suitable sharding algorithm and sharding key for your requirements.
4. Map data sources to sharding rules: associate each data source with its sharding rules, either through ShardingSphere's configuration files or programmatically.
5. Write the DAO layer: implement the database operations in the DAO layer, using either the ShardingSphere API or the plain JDBC API. When writing this code, specify which data source each operation should use.
6. Configure transaction management: if transactions are required, configure a transaction manager, either the one provided by the Spring framework or a third-party one.
7. Start the application: start the application and test it by executing the database operations to verify that the multi-data-source integration works.
To summarize, integrating multiple data sources with ShardingSphere involves: adding the dependencies, configuring the data sources, configuring the sharding rules, mapping data sources to sharding rules, writing the DAO layer, configuring transaction management, and starting the application. With these steps, multiple data sources can be integrated and data can be sharded and routed.
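Putting steps 2 to 4 together programmatically, a minimal sketch using the 4.x Java API (the same API that appears elsewhere in this document) might look like the following. The data-source names ds0/ds1, the t_order table and the inline expressions are illustrative assumptions, not a prescribed layout:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.InlineShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;

// Sketch of steps 2-4: register physical data sources, define a sharding rule, build the logical DataSource.
public class ShardingDataSourceSketch {

    public static DataSource build() throws Exception {
        // Step 2: the physical data sources.
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put("ds0", druid("jdbc:mysql://127.0.0.1:3306/ds0"));
        dataSourceMap.put("ds1", druid("jdbc:mysql://127.0.0.1:3306/ds1"));

        // Step 3: sharding rule, routing t_order rows by order_id across 2 databases x 2 tables.
        TableRuleConfiguration orderRule = new TableRuleConfiguration("t_order", "ds$->{0..1}.t_order_$->{0..1}");
        orderRule.setDatabaseShardingStrategyConfig(
                new InlineShardingStrategyConfiguration("order_id", "ds$->{order_id % 2}"));
        orderRule.setTableShardingStrategyConfig(
                new InlineShardingStrategyConfiguration("order_id", "t_order_$->{(order_id.intdiv(2)) % 2}"));

        // Step 4: bind data sources and rules into one logical DataSource the DAO layer can use.
        ShardingRuleConfiguration ruleConfig = new ShardingRuleConfiguration();
        ruleConfig.getTableRuleConfigs().add(orderRule);
        Properties props = new Properties();
        props.setProperty("sql.show", "true");
        return ShardingDataSourceFactory.createDataSource(dataSourceMap, ruleConfig, props);
    }

    private static DataSource druid(String url) {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl(url + "?useUnicode=true&characterEncoding=utf-8");
        ds.setUsername("root");
        ds.setPassword("root");
        return ds;
    }
}
```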
Nine distributed primary-key ID generation schemes for sharded databases and tables, a fairly complete list (from the WeChat account 程序员内点事). In a previous article we covered the usage scenarios of sharding-jdbc's four sharding strategies, which is enough for basic sharding work; this article looks at how to generate globally unique primary-key IDs for the sharded tables once the databases and tables have been split.
Adopting any technology carries risk, and splitting databases and tables is no exception. Unless the data volume keeps growing to the point where the existing high-availability architecture can no longer cope, sharding is not recommended: once the data is split you will find yourself on a road full of pitfalls, and the distributed primary-key ID is the first one you hit.
Generating globally unique primary keys across data nodes is a tricky problem. A logical table t_order is split into several real tables t_order_n that are spread over shard databases db_0, db_1, ... Because the auto-increment keys of the real tables cannot see each other, duplicate keys appear, so the database's own auto-increment primary key can no longer guarantee a globally unique key after sharding.
```
db_0
 |-- t_order_0
 |-- t_order_1
 |-- t_order_2
db_1
 |-- t_order_0
 |-- t_order_1
 |-- t_order_2
```
Although we could avoid duplicate IDs by strictly constraining the initial value and step size of every shard table's auto-increment key, this drives up the operational cost and scales very poorly: as soon as the number of shard tables has to grow, a large amount of existing data must be moved, so this approach is not really advisable.

Step = number of shard tables:
```
db_0
 |-- t_order_0  ID: 0, 6, 12, 18...
 |-- t_order_1  ID: 1, 7, 13, 19...
 |-- t_order_2  ID: 2, 8, 14, 20...
db_1
 |-- t_order_0  ID: 3, 9, 15, 21...
 |-- t_order_1  ID: 4, 10, 16, 22...
 |-- t_order_2  ID: 5, 11, 17, 23...
```
There are already plenty of third-party solutions that solve this cleanly: UUIDs, the SNOWFLAKE algorithm, segment (number-range) allocation, other algorithms that generate non-repeating keys, or a dedicated key-generation service such as Meituan's Leaf or Didi's TinyId.
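As a concrete illustration of the SNOWFLAKE idea mentioned above, here is a minimal single-node sketch: a 41-bit timestamp, a 10-bit worker id and a 12-bit per-millisecond sequence packed into one long. The epoch and bit widths are common but assumed values, clock rollback is deliberately not handled, and in practice ShardingSphere's built-in SNOWFLAKE key generator or a service such as Leaf/TinyId would be preferred:

```java
// Minimal sketch of a snowflake-style ID: (timestamp - epoch) << 22 | workerId << 12 | sequence.
// Clock rollback is not handled here; this is for illustration only.
public class SnowflakeIdSketch {
    private static final long EPOCH = 1577836800000L; // 2020-01-01 UTC, an assumed custom epoch
    private final long workerId;                      // 0..1023, must be unique per node
    private long lastTimestamp = -1L;
    private long sequence = 0L;

    public SnowflakeIdSketch(long workerId) {
        if (workerId < 0 || workerId > 1023) throw new IllegalArgumentException("workerId out of range");
        this.workerId = workerId;
    }

    public synchronized long nextId() {
        long now = System.currentTimeMillis();
        if (now == lastTimestamp) {
            sequence = (sequence + 1) & 0xFFF;        // up to 4096 ids per millisecond per worker
            if (sequence == 0) {                      // sequence exhausted, spin until the next millisecond
                while ((now = System.currentTimeMillis()) <= lastTimestamp) { /* busy wait */ }
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = now;
        return ((now - EPOCH) << 22) | (workerId << 12) | sequence;
    }
}
```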
Implementation steps for integrating multiple data sources with ShardingSphere JDBC

Contents: integrating sharding-jdbc (1. adding the dependency; 2. configuring the table-sharding rules; problems); integrating multiple data sources (1. adding the dependency; 2. multi-data-source configuration; 3. adding the extra data-source configuration; 4. usage); summary.

A recent project has several tables in the tens of millions of rows; the stack is Spring Boot + Mybatis-plus + MySQL.
With single tables, queries were extremely slow, so after some research we decided to use a sharding middleware: ShardingSphere.
ShardingSphere became an Apache Software Foundation top-level project in April this year. It currently supports data sharding, read/write splitting, multiple data replicas, data encryption and shadow-database load testing, is compatible with several databases, and thanks to its pluggable architecture can, in the ideal case, stay invisible to business code.
ShardingSphere has two mature products: sharding-jdbc and sharding-proxy. sharding-jdbc can be thought of as an enhanced JDBC driver; sharding-proxy is a transparent database proxy that behaves like a virtual database server.
Integrating sharding-jdbc: on its own this is quite simple; to make it easier to follow, an order table is used as the example.
1. Add the dependency

```xml
<properties>
  <sharding-sphere.version>4.1.0</sharding-sphere.version>
</properties>
<!-- sharding: https:///artifact/org.apache.shardingsphere/sharding-jdbc-spring-boot-starter -->
<dependency>
  <groupId>org.apache.shardingsphere</groupId>
  <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
  <version>${sharding-sphere.version}</version>
</dependency>
```

2. Configure the table-sharding rules

```yaml
spring:
  shardingsphere:
    datasource:
      names: sharding-order-system
      sharding-order-system:
        type: com.alibaba.druid.pool.DruidDataSource
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3306/order_system?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT%2B8&useTimezone=true
        username: root
        password: root
    props:
      # print the routed SQL in the log
      sql.show: true
    sharding:
      tables:
        # order table, split into 20 tables
        order:
          # real tables order_0 .. order_19
          actualDataNodes: sharding-order-system.order_$->{0..19}
          # database sharding strategy
          databaseStrategy:
            none:
          # table sharding strategy
          tableStrategy:
            inline:
              shardingColumn: order_key
              # inline sharding expression, must be valid Groovy; '& Integer.MAX_VALUE' keeps the hash non-negative
              algorithmExpression: order_$->{(order_key.hashCode() & Integer.MAX_VALUE) % 20}
```

Problems: although the above completes the sharding of the order table, sharding-jdbc does not support some SQL syntax, and the official documentation is rather vague about it (see the figure in the original post). Statements such as INSERT INTO ... SELECT are not supported, and the same restriction applies even to statements that do not touch the sharded tables.
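To see which physical table a given order_key lands in under the inline expression above, the routing arithmetic can be reproduced in plain Java. This is a hypothetical helper for illustration, not part of ShardingSphere:

```java
// Reproduces the arithmetic of the inline expression
// order_$->{(order_key.hashCode() & Integer.MAX_VALUE) % 20} for a given key.
public final class OrderTableRouter {
    private static final int TABLE_COUNT = 20;

    public static String route(String orderKey) {
        int index = (orderKey.hashCode() & Integer.MAX_VALUE) % TABLE_COUNT; // non-negative hash, then modulo
        return "order_" + index;
    }

    public static void main(String[] args) {
        System.out.println(route("ORD-2020-0001")); // prints e.g. order_7, depending on the hash
    }
}
```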
How ShardingSphere works
ShardingSphere is an open-source distributed database middleware solution.
Built on distributed database techniques, it aims to simplify the development and maintenance of distributed database architectures and lets users choose the sharding strategy that fits their business.
A distributed database is made up of three components: sharding, routing, and management.
Sharding splits a large data set into smaller pieces, called shards, to relieve the load on a single server; routing forwards a business request to the correct shard; management covers registration, task management, troubleshooting and other administration of the distributed database.
For these three components ShardingSphere provides three sub-projects, Sharding-JDBC, Sharding-Proxy and Sharding-Sidecar, each responsible for its own role:
1. Sharding-JDBC is mainly used to configure JDBC connections and implement the sharding strategy;
2. Sharding-Proxy is a database proxy that routes client requests to the correct backend server and implements read/write splitting;
3. Sharding-Sidecar is a container-level component responsible for managing and maintaining shards, including shard registration, splitting, data synchronization and other management tasks.
In addition, ShardingSphere provides a unified entry point for these three sub-projects, the ShardingSphere Console, for managing and monitoring the distributed database.
How to use MySQL for data sharding and distributed deployment
Introduction: as Internet applications keep evolving and user numbers keep growing, a traditional single-MySQL architecture can no longer satisfy the demand for highly concurrent reads and writes.
To solve this problem, sharding and distributed deployment have become common solutions.
This article describes how to shard data and deploy MySQL in a distributed fashion, to help readers understand and apply these techniques.

1. What is data sharding
Data sharding splits a large data set into smaller pieces that are stored on different database nodes.
Each piece is called a shard, and different shards can be stored on different database servers.
Through sharding, the data scales out horizontally and the system's concurrent read/write capacity improves.

1.1 Sharding strategies
Before sharding the data, a suitable sharding strategy must be chosen. Common strategies include:
1. Sharding by primary key range: split the primary key by some rule, for example by key range, so that each shard stores keys within a certain range.
2. Sharding by hash value: hash the primary key and shard by the hash value. The hash algorithm should distribute data evenly across shards, so that hot data does not concentrate in a single shard (a small routing sketch follows this list).
3. Sharding by business attribute: classify data by a business attribute and store data with the same attribute in the same shard. This strategy suits scenarios where that attribute is queried frequently.
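As a small illustration of the hash-based strategy, the sketch below derives a shard index from a sharding key; the shard count and the key format are assumptions made for the example:

```java
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Hash-based shard routing: the shard for a row is derived from a hash of its sharding key,
// which spreads keys fairly evenly across shards when the key space is large enough.
public final class HashShardRouter {
    private final int shardCount;

    public HashShardRouter(int shardCount) {
        this.shardCount = shardCount;
    }

    /** Returns the shard index (0..shardCount-1) for a given sharding key, e.g. a user id. */
    public int shardFor(String shardingKey) {
        CRC32 crc = new CRC32();
        crc.update(shardingKey.getBytes(StandardCharsets.UTF_8));
        return (int) (crc.getValue() % shardCount);
    }

    public static void main(String[] args) {
        HashShardRouter router = new HashShardRouter(4);
        System.out.println("user_42 -> shard_" + router.shardFor("user_42"));
    }
}
```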
1.2 Shard keys and non-shard keys
When sharding, the data is split into shards according to some column. That column is called the shard key; all other columns are non-shard keys.
A shard key should have the following properties:
1. Uniqueness: each shard-key value must be unique, so that data is not stored redundantly across shards.
2. Even distribution: shard-key values should be distributed evenly, so that data skew does not overload a single shard.

2. Distributed deployment
Distributed deployment places the shards on different database nodes; reads and writes are completed through cooperation between the shards and the nodes.
Common architectures for distributed deployment are master-slave replication, master-master replication, and multi-master.

2.1 Master-slave replication
Master-slave replication is one of the most common distributed deployment patterns. One node (the master) handles write operations, while one or more other nodes (the replicas) handle reads.
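A minimal sketch of how an application might route statements under this architecture, assuming one master DataSource and a list of replica DataSources; middleware such as ShardingSphere's read/write splitting normally does this routing for you:

```java
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import javax.sql.DataSource;

// Writes go to the master so replication remains the single source of truth;
// reads are spread across the replicas with a crude random load balance.
public final class ReadWriteRouter {
    private final DataSource master;
    private final List<DataSource> replicas;

    public ReadWriteRouter(DataSource master, List<DataSource> replicas) {
        this.master = master;
        this.replicas = replicas;
    }

    /** INSERT/UPDATE/DELETE statements must hit the master. */
    public DataSource forWrite() {
        return master;
    }

    /** SELECT statements can be served by any replica. */
    public DataSource forRead() {
        return replicas.get(ThreadLocalRandom.current().nextInt(replicas.size()));
    }
}
```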
Implementing database sharding with ShardingSphere through annotations
ShardingSphere is a very popular distributed database middleware that provides flexible and powerful database and table sharding.
Its database-sharding capability can be driven by annotations, which lets developers specify the sharding logic easily at the code level.
The annotation-based approach is built on Java's annotation mechanism.
Therefore, before sharding with ShardingSphere, the relevant dependencies must be configured.
First, add the ShardingSphere dependency in Maven:

```xml
<dependency>
  <groupId>org.apache.shardingsphere</groupId>
  <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
  <version>5.0.0-alpha</version>
</dependency>
```

Next, configure the ShardingSphere data sources and the sharding rule in Spring Boot by adding the following to the configuration file:

```yaml
# database connection configuration
# sharding rule configuration
```

In this configuration we define two data sources, ds0 and ds1, which correspond to two physical databases. We then define the sharding rule: an inline sharding algorithm distributes rows to the two data sources according to whether user_id is odd or even. As shown, when user_id is even the row is stored in the ds0 data source; when user_id is odd, it goes to ds1.
The entity and repository classes:

```java
public class UserInfo {
    private Integer id;
    private Integer userId;
    // ...
}
```

```java
public class UserRepository {
    private EntityManager entityManager;

    public int save(UserInfo userInfo) {
        entityManager.persist(userInfo);
        entityManager.flush();
        return userInfo.getId();
    }
}
```

With the configuration and code above, database sharding is in place.
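For clarity, the odd/even routing described above boils down to the following arithmetic; this is an illustrative helper mirroring an inline expression such as ds$->{user_id % 2}, not ShardingSphere API:

```java
// Even user_id -> ds0, odd user_id -> ds1; floorMod keeps the result non-negative for any input.
public final class UserDbRouter {
    public static String dataSourceFor(long userId) {
        return "ds" + Math.floorMod(userId, 2);
    }

    public static void main(String[] args) {
        System.out.println(dataSourceFor(42)); // ds0
        System.out.println(dataSourceFor(43)); // ds1
    }
}
```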
ShardingSphere: multi-data-source database and table sharding in a Spring Boot integration

First, add the dependency:

```xml
<dependency>
  <groupId>org.apache.shardingsphere</groupId>
  <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
  <version>4.0.0-RC1</version>
</dependency>
```

Then the configuration class:

```java
package mon.configuration;

import com.alibaba.druid.pool.DruidDataSource;
import ng.StringUtil;
import mon.collect.Lists;
import lombok.Data;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.InlineShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * @author zhangsy
 * @date 2021/8/2 9:20
 * @description data source configuration for database and table sharding
 */
@Data
public class ShardingDataSourceConfig {

    private final static Logger log = LoggerFactory.getLogger(ShardingDataSourceConfig.class);

    public String driverClassName;
    public String url;
    public String username;
    public String password;
    public int initialSize;
    public int minIdle;
    public int maxActive;
    public long maxWait;
    public long timeBetweenEvictionRunsMillis;
    public long minEvictableIdleTimeMillis;
    public String validationQuery;
    public Boolean testOnBorrow;
    public Boolean testOnReturn;
    public Boolean testWhileIdle;
    public int validationQueryTimeout;
    public Boolean keepAlive;
    public Boolean removeAbandoned;
    public int removeAbandonedTimeout;
    public Boolean logAbandoned;
    public Boolean poolPreparedStatements;
    public int maxPoolPreparedStatementPerConnectionSize;
    public String filters;
    public String connectionProperties;

    private String dbNames;
    private Map<String, DataSource> dataSourceMap = new HashMap<>();

    public DataSource buildDataSource() {
        initDataSourceMap();
        String dbName = "detail";
        // concrete sharding strategy: which rule the data is split by
        ShardingRuleConfiguration conf = new ShardingRuleConfiguration();
        // table rule
        TableRuleConfiguration tableRule = new TableRuleConfiguration("m_user_group_detail", "detail.m_user_group_detail_$->{1..12}");
        /*
        // table sharding strategy (standard algorithm variant)
        ShardingStrategyConfiguration tableShardingStrategyConfig = new StandardShardingStrategyConfiguration("user_group_filter_time", new MyTablePreciseShardingAlgorithm());
        tableRule.setTableShardingStrategyConfig(tableShardingStrategyConfig);
        */
        InlineShardingStrategyConfiguration inlineShardingStrategyConfiguration = new InlineShardingStrategyConfiguration(
                "user_group_filter_time", "m_user_group_detail_$->{user_group_filter_time}");
        tableRule.setTableShardingStrategyConfig(inlineShardingStrategyConfiguration);
        conf.setTableRuleConfigs(Lists.newArrayList(tableRule));
        Properties props = new Properties();
        props.put("sql.show", false);
        DataSource dataSource = null;
        try {
            dataSource = ShardingDataSourceFactory.createDataSource(dataSourceMap, conf, props);
        } catch (SQLException e) {
            log.error("data source initialization failed", e);
        }
        return dataSource;
    }

    // @Bean
    public void initDataSourceMap() {
        dbNames = "detail";
        for (String name : dbNames.split(",")) {
            DruidDataSource druidDataSource = new DruidDataSource();
            druidDataSource.setDriverClassName(driverClassName);
            druidDataSource.setUrl(url);
            druidDataSource.setUsername(username);
            druidDataSource.setPassword(password);
            if (initialSize > 0) { druidDataSource.setInitialSize(initialSize); }
            if (minIdle > 0) { druidDataSource.setMinIdle(minIdle); }
            if (maxActive > 0) { druidDataSource.setMaxActive(maxActive); }
            if (maxWait > 0) { druidDataSource.setMaxWait(maxWait); }
            if (timeBetweenEvictionRunsMillis > 0) { druidDataSource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis); }
            if (minEvictableIdleTimeMillis > 0) { druidDataSource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis); }
            if (validationQueryTimeout > 0) { druidDataSource.setValidationQueryTimeout(validationQueryTimeout); }
            if (removeAbandonedTimeout > 0) { druidDataSource.setRemoveAbandonedTimeout(removeAbandonedTimeout); }
            if (maxPoolPreparedStatementPerConnectionSize > 0) { druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize); }
            if (StringUtil.isNotBlank(validationQuery)) { druidDataSource.setValidationQuery(validationQuery); }
            if (StringUtil.isNotBlank(connectionProperties)) { druidDataSource.setConnectionProperties(connectionProperties); }
            if (testOnReturn != null) { druidDataSource.setTestOnReturn(testOnReturn); }
            if (testWhileIdle != null) { druidDataSource.setTestWhileIdle(testWhileIdle); }
            if (keepAlive != null) { druidDataSource.setKeepAlive(keepAlive); }
            if (removeAbandoned != null) { druidDataSource.setRemoveAbandoned(removeAbandoned); }
            if (logAbandoned != null) { druidDataSource.setLogAbandoned(logAbandoned); }
            if (poolPreparedStatements != null) { druidDataSource.setPoolPreparedStatements(poolPreparedStatements); }
            try {
                if (StringUtil.isNotBlank(filters)) {
                    druidDataSource.setFilters(filters);
                }
                dataSourceMap.put(name, druidDataSource);
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
```

The configuration in application.properties:

```properties
spring.shardingsphere.datasource.names = detail
spring.shardingsphere.datasource.detail.type = com.alibaba.druid.pool.DruidDataSource
spring.shardingsphere.datasource.detail.driverClassName = com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.detail.url = jdbc:mysql://localhost:3306/****?useSSL=false&useUnicode=true&characterEncoding=utf8&serverTimezone=Asia/Shanghai
spring.shardingsphere.datasource.detail.username = root
spring.shardingsphere.datasource.detail.password = root
spring.shardingsphere.datasource.detail.filters = stat,slf4j,wall
spring.shardingsphere.datasource.detail.initialSize = 5
spring.shardingsphere.datasource.detail.keepAlive = true
spring.shardingsphere.datasource.detail.maxActive = 10
spring.shardingsphere.datasource.detail.maxWait = 10000
spring.shardingsphere.datasource.detail.minEvictableIdleTimeMillis = 300000
spring.shardingsphere.datasource.detail.minIdle = 5
spring.shardingsphere.datasource.detail.removeAbandoned = true
spring.shardingsphere.datasource.detail.removeAbandonedTimeout = 80
spring.shardingsphere.datasource.detail.testOnBorrow = true
spring.shardingsphere.datasource.detail.testOnReturn = false
spring.shardingsphere.datasource.detail.testWhileIdle = true
spring.shardingsphere.datasource.detail.timeBetweenEvictionRunsMillis = 60000
spring.shardingsphere.datasource.detail.validationQuery = SELECT 1
spring.shardingsphere.datasource.detail.validationQueryTimeout = 1
spring.shardingsphere.sharding.tables.m_user_group_detail.logic-table = detail.m_user_group_detail
spring.shardingsphere.sharding.tables.m_user_group_detail.actual-data-nodes = detail.m_user_group_detail_0$->{1..9},detail.m_user_group_detail_$->{10..12}
spring.shardingsphere.sharding.tables.m_user_group_detail.table-strategy.inline.sharding-column = user_group_filter_time
spring.shardingsphere.sharding.tables.m_user_group_detail.table-strategy.inline.algorithm-expression = m_user_group_detail_$->{user_group_filter_time}
spring.shardingsphere.sharding.default-data-source-name = detail
spring.shardingsphere.sharding.binding-tables = m_user_group_detail
spring.shardingsphere.props.sql.show = true
```

The per-data-source config class:

```java
package com.epay.dtc.ms.cust.provider.configuration.mysql.detail;

import mon.configuration.ShardingDataSourceConfig;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import ponent;
import javax.sql.DataSource;

/**
 * @author zh
 * @date 2021/07/06 09:48
 * @description
 */
@Component
@MapperScan(basePackages = "mon.mapper.mysql.detail", sqlSessionFactoryRef = "detailSqlSessionFactory")
@ConfigurationProperties(prefix = "spring.shardingsphere.datasource.detail")
public class DetailDataSourceConfig extends ShardingDataSourceConfig {

    private String dbNames;

    @Bean("detailDataSource")
    public DataSource getDetailDataSource() {
        return buildDataSource();
    }

    @Bean("detailSqlSessionFactory")
    public SqlSessionFactory detailSqlSessionFactory(@Qualifier("detailDataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(dataSource);
        bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath*:mapper/mysql/detail/*.xml"));
        return bean.getObject();
    }

    @Bean("detailSqlSessionTemplate")
    public SqlSessionTemplate detailSqlSessionTemplate(@Qualifier("detailSqlSessionFactory") SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}
```
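A hypothetical usage sketch of the configuration above: a mapper interface in the scanned package queries the logical table and is routed by the sharding column. The interface name, the SQL and the meaning of the column values are assumptions for illustration:

```java
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;

// Queries issued through this mapper go through the sharded "detail" data source, so the statement
// is rewritten against the matching m_user_group_detail_N table according to user_group_filter_time.
@Mapper
public interface UserGroupDetailMapper {

    @Select("SELECT COUNT(*) FROM m_user_group_detail WHERE user_group_filter_time = #{month}")
    long countByMonth(@Param("month") int month);
}
```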