随着项目运行的时间越来越久,公司的主要的订单表也越来越大,总放在一张表里迟早要爆掉,所以需要将以往的数据水平分库分表!
首先加入 maven 依赖。这里只列出 shardingjdbc 和用到的工具包的依赖,其他的比如 spring mvc、mybatis、postgres 驱动等因项目而异。特别说明:我用的是 spring mvc 4.1.9 RELEASE!
<dependency> <groupId>org.apache.shardingsphere</groupId> <artifactId>sharding-jdbc-spring-namespace</artifactId> <version>4.0.0-RC2</version> </dependency> <dependency> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> <version>1.18.12</version> <optional>true</optional> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> <version>3.1</version> </dependency>
加入 shardingjdbc spring 的相关配置 xml 如下。这里我创建了 2 个数据源 ds2025、ds2026,分别对应 2 个真实库,用来存放 2025 年和 2026 年的数据。
<?xml version="1.0" encoding="UTF-8"?> <beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:aop="http://www.springframework.org/schema/aop" xmlns:tx="http://www.springframework.org/schema/tx" xmlns:context="http://www.springframework.org/schema/context" xmlns:p="http://www.springframework.org/schema/p" xmlns:cache="http://www.springframework.org/schema/cache" xmlns:sharding="http://shardingsphere.apache.org/schema/shardingsphere/sharding" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-4.1.xsd http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx-4.1.xsd http://shardingsphere.apache.org/schema/shardingsphere/sharding http://shardingsphere.apache.org/schema/shardingsphere/sharding/sharding.xsd http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-4.1.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-4.1.xsd http://www.springframework.org/schema/cache http://www.springframework.org/schema/cache/spring-cache-4.1.xsd"> <!-- 数据源 --> <bean id="ds2025" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close"> <!-- 数据库基本信息配置 --> <property name="url" value="${druid.jdbcUrl1}" /> <property name="username" value="${druid.username}" /> <property name="password" value="${druid.password}" /> <property name = "driverClassName" value = "${druid.driverClassName}" /> <!-- 初始化连接数量 --> <property name="initialSize" value="${druid.initialSize}" /> <!-- 最小空闲连接数 --> <property name="minIdle" value="${druid.minIdle}" /> <!-- 最大并发连接数 --> <property name="maxActive" value="${druid.maxActive}" /> <!-- 配置获取连接等待超时的时间 --> <property name="maxWait" value="${druid.maxWait}" /> <!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 --> <property name="timeBetweenEvictionRunsMillis" 
value="${druid.timeBetweenEvictionRunsMillis}" /> <!-- 配置一个连接在池中最小生存的时间,单位是毫秒 --> <property name="minEvictableIdleTimeMillis" value="${druid.minEvictableIdleTimeMillis}" /> <property name="validationQuery" value="${druid.validationQuery}" /> <property name="testWhileIdle" value="${druid.testWhileIdle}" /> <property name="testOnBorrow" value="${druid.testOnBorrow}" /> <property name="testOnReturn" value="${druid.testOnReturn}" /> <!-- 打开PSCache,并且指定每个连接上PSCache的大小 如果用Oracle,则把poolPreparedStatements配置为true,mysql可以配置为false。 --> <property name="poolPreparedStatements" value="${druid.poolPreparedStatements}" /> <property name="maxPoolPreparedStatementPerConnectionSize" value="${druid.maxPoolPreparedStatementPerConnectionSize}" /> <!-- 配置监控统计拦截的filters --> <property name="filters" value="${druid.filters}" /> </bean> <!-- 数据源 --> <bean id="ds2026" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close"> <!-- 数据库基本信息配置 --> <property name="url" value="${druid.jdbcUrl2}" /> <property name="username" value="${druid.username}" /> <property name="password" value="${druid.password}" /> <property name = "driverClassName" value = "${druid.driverClassName}" /> <!-- 初始化连接数量 --> <property name="initialSize" value="${druid.initialSize}" /> <!-- 最小空闲连接数 --> <property name="minIdle" value="${druid.minIdle}" /> <!-- 最大并发连接数 --> <property name="maxActive" value="${druid.maxActive}" /> <!-- 配置获取连接等待超时的时间 --> <property name="maxWait" value="${druid.maxWait}" /> <!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 --> <property name="timeBetweenEvictionRunsMillis" value="${druid.timeBetweenEvictionRunsMillis}" /> <!-- 配置一个连接在池中最小生存的时间,单位是毫秒 --> <property name="minEvictableIdleTimeMillis" value="${druid.minEvictableIdleTimeMillis}" /> <property name="validationQuery" value="${druid.validationQuery}" /> <property name="testWhileIdle" value="${druid.testWhileIdle}" /> <property name="testOnBorrow" value="${druid.testOnBorrow}" /> <property name="testOnReturn" 
value="${druid.testOnReturn}" /> <!-- 打开PSCache,并且指定每个连接上PSCache的大小 如果用Oracle,则把poolPreparedStatements配置为true,mysql可以配置为false。 --> <property name="poolPreparedStatements" value="${druid.poolPreparedStatements}" /> <property name="maxPoolPreparedStatementPerConnectionSize" value="${druid.maxPoolPreparedStatementPerConnectionSize}" /> <!-- 配置监控统计拦截的filters --> <property name="filters" value="${druid.filters}" /> </bean> <bean id="rangeDatabaseShardingAlgorithm" class="com.lianxin.shardingAlgorithm.RangeDatabaseShardingAlgorithm" /> <bean id="rangeTableShardingAlgorithm" class="com.lianxin.shardingAlgorithm.RangeTableShardingAlgorithm" /> <bean id="preciseDatabaseShardingAlgorithm" class="com.lianxin.shardingAlgorithm.PreciseDatabaseShardingAlgorithm" /> <bean id="preciseTableShardingAlgorithm" class="com.lianxin.shardingAlgorithm.PreciseTableShardingAlgorithm" /> <sharding:standard-strategy id="databaseShardingStrategy" sharding-column="create_time" precise-algorithm-ref="preciseDatabaseShardingAlgorithm" range-algorithm-ref="rangeDatabaseShardingAlgorithm" /> <sharding:standard-strategy id="tableShardingStrategy" sharding-column="create_time" precise-algorithm-ref="preciseTableShardingAlgorithm" range-algorithm-ref="rangeTableShardingAlgorithm" /> <sharding:data-source id="shardingDataSource"> <sharding:props> <prop key="sql.show">true</prop> </sharding:props> <sharding:sharding-rule data-source-names="ds2025,ds2026"> <sharding:table-rules> <sharding:table-rule logic-table="stu" actual-data-nodes="ds$->{2025..2026}.stu_$->{1..4}" database-strategy-ref="databaseShardingStrategy" table-strategy-ref="tableShardingStrategy" /> </sharding:table-rules> </sharding:sharding-rule> </sharding:data-source> <!-- myBatis文件 --> <bean id="sqlSessionFactory2" class="org.mybatis.spring.SqlSessionFactoryBean"> <property name="dataSource" ref="shardingDataSource" /> <property name="configLocation" value="classpath:mybatis-config.xml"></property> <!-- 自动扫描entity目录, 
省掉Configuration.xml里的手工配置 --> <property name="mapperLocations" value="classpath*:mapping/sd/*.xml" /> </bean> <bean class="org.mybatis.spring.mapper.MapperScannerConfigurer"> <property name="basePackage" value="com.lianxin.dao.sd" /> <property name="sqlSessionFactoryBeanName" value="sqlSessionFactory2" /> </bean> </beans>
这里要说明下 table-rule 配置节:logic-table 指的是逻辑表,这张表只是在 mybatis xml 里面写 sql 语句时用的,库里是不存在的,最终 shardingjdbc 会映射到具体的物理表,也就是 actual-data-nodes 里面配置的表。数据库截图如下
建表如下
-- Quarterly shard tables for the logic table "stu" (stu_1 .. stu_4, one per quarter).
-- create_time is deliberately int8 (epoch-millis) rather than timestamp: it is the
-- sharding column consumed by the table/database sharding algorithms.

drop table if EXISTS stu_1;
create table stu_1 (
    id          varchar(64) not null,  -- primary key (UUID)
    name        varchar(64),
    cash        numeric(5,2),
    create_time int8                   -- sharding column, epoch-millis timestamp
);
alter table stu_1 add constraint PK_stu_id_001c primary key (id);

drop table if EXISTS stu_2;
create table stu_2 (
    id          varchar(64) not null,  -- primary key (UUID)
    name        varchar(64),
    cash        numeric(5,2),
    create_time int8                   -- sharding column, epoch-millis timestamp
);
alter table stu_2 add constraint PK_stu_id_002c primary key (id);

drop table if EXISTS stu_3;
create table stu_3 (
    id          varchar(64) not null,  -- primary key (UUID)
    name        varchar(64),
    cash        numeric(5,2),
    create_time int8                   -- sharding column, epoch-millis timestamp
);
alter table stu_3 add constraint PK_stu_id_003c primary key (id);

drop table if EXISTS stu_4;
create table stu_4 (
    id          varchar(64) not null,  -- primary key (UUID)
    name        varchar(64),
    cash        numeric(5,2),
    create_time int8                   -- sharding column, epoch-millis timestamp
);
alter table stu_4 add constraint PK_stu_id_004c primary key (id);
这里要注意的是我们用来分片的字段 create_time最好用int8存一个时间戳,不能用我们常用的timestamp类型不然会导致最终查询报类型转换错误,或者不走分片查询逻辑,这个没有具体研究为什么,只是在做的过程中发现的!
关键的自定义分片规则如下代码
package com.lianxin.shardingAlgorithm; import org.apache.commons.lang3.time.DateFormatUtils; import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm; import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue; import java.util.Collection; public class PreciseTableShardingAlgorithm implements PreciseShardingAlgorithm<Long> { /** * 精确分片算法 * * @param availableTargetNames 所有配置的表列表,这里代表所匹配到库的所有表 * @param shardingValue 分片值,也就是dau_id的值 * @return 所匹配表的结果 */ @Override public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) { if (shardingValue == null) throw new UnsupportedOperationException("preciseShardingValue is null"); //表后缀 int month = Integer.valueOf(DateFormatUtils.format(shardingValue.getValue(),"MM")); int qauter = 0; switch (month){ case 1: case 2: case 3: qauter = 1;break; case 4: case 5: case 6: qauter = 2;break; case 7: case 8: case 9: qauter = 3;break; case 10: case 11: case 12: qauter = 4;break; } final String tableSuffix = shardingValue.getLogicTableName() + "_" + qauter; for (String availableTargetName : availableTargetNames) { if (availableTargetName.endsWith(tableSuffix)) { return availableTargetName; } } throw new UnsupportedOperationException(); } }
package com.lianxin.shardingAlgorithm; import com.lianxin.common.DateUtil; import org.apache.commons.lang3.time.DateFormatUtils; import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm; import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue; import java.util.Collection; import java.util.Date; public class PreciseDatabaseShardingAlgorithm implements PreciseShardingAlgorithm<Long> { /** * 精确分片算法 * * @param availableTargetNames 所有配置的库列表 * @param shardingValue 分片值,也就是save_time_com的值 * @return 所匹配库的结果 */ @Override public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) { if (shardingValue == null) throw new UnsupportedOperationException("preciseShardingValue is null"); // 库后缀 String yearStr = DateFormatUtils.format(shardingValue.getValue(),"yyyy"); for (String availableTargetName : availableTargetNames) { if (availableTargetName.endsWith(yearStr)) { return availableTargetName; } } throw new UnsupportedOperationException(); } }
package com.lianxin.shardingAlgorithm; import com.google.common.collect.Range; import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm; import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; public class RangeDatabaseShardingAlgorithm implements RangeShardingAlgorithm<Long> { /** * 范围分片算法 * * @param availableTargetNames 所有配置的库列表 * @param rangeShardingValue 分片值,也就是save_time_com的值,范围分片算法必须提供开始时间和结束时间 * @return 所匹配库的结果 */ @Override public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<Long> rangeShardingValue) { final ArrayList<String> result = new ArrayList<>(); Range<Long> range = rangeShardingValue.getValueRange(); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); // 将时间戳转换为日期 String fmtBeginDate = sdf.format(new Date(range.lowerEndpoint())); String beginYear = fmtBeginDate.substring(0,4); String fmtEndDate = sdf.format(new Date(range.upperEndpoint())); String endYear = fmtEndDate.substring(0,4); if(beginYear.equalsIgnoreCase(endYear)){ // 同一年,说明只需要一个库 for (String availableTargetName : availableTargetNames) { if (availableTargetName.endsWith(beginYear)){ result.add(availableTargetName); break; } } return result; }else{ throw new UnsupportedOperationException("不支持跨年查询!"); } } }
package com.lianxin.shardingAlgorithm; import com.google.common.collect.Range; import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm; import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; public class RangeTableShardingAlgorithm implements RangeShardingAlgorithm<Long> { @Override public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<Long> rangeShardingValue) { final ArrayList<String> result = new ArrayList<>(); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); final Range<Long> range = rangeShardingValue.getValueRange(); String fmtBeginDate = sdf.format(new Date(range.lowerEndpoint())); String beginYear = fmtBeginDate.substring(0,4); int beginMonth = Integer.valueOf(fmtBeginDate.replace(beginYear + "-","").substring(0,2)); int beginQauter = 0; switch (beginMonth){ case 1: case 2: case 3: beginQauter = 1;break; case 4: case 5: case 6: beginQauter = 2;break; case 7: case 8: case 9: beginQauter = 3;break; case 10: case 11: case 12: beginQauter = 4;break; } String fmtEndDate = sdf.format(new Date(range.upperEndpoint())); String endYear = fmtEndDate.substring(0,4); int endMonth = Integer.valueOf(fmtEndDate.replace(endYear + "-","").substring(0,2)); int endQauter = 0; switch (endMonth){ case 1: case 2: case 3: endQauter = 1;break; case 4: case 5: case 6: endQauter = 2;break; case 7: case 8: case 9: endQauter = 3;break; case 10: case 11: case 12: endQauter = 4;break; } if(beginYear.equalsIgnoreCase(endYear)){ if(beginQauter == endQauter){ // 同一个季度只需一张表,否则需要查询多张 for (String availableTargetName : availableTargetNames) { if (availableTargetName.endsWith(rangeShardingValue.getLogicTableName() + "_" + beginQauter)){ result.add(availableTargetName); break; } } return result; } else { for(;beginQauter <= endQauter;beginQauter++){ for (String availableTargetName 
: availableTargetNames) { if (availableTargetName.endsWith(rangeShardingValue.getLogicTableName() + "_" + beginQauter)){ result.add(availableTargetName); } } } return result; } }else{ throw new UnsupportedOperationException("不支持跨年查询!"); } } }
spring mvc mapper xml model 如下
/**
 * Seeds 1000 demo students. Rows start at 2025-01-01 00:00:00 and advance by
 * 12 hours each, so the data spills into 2026 and covers every quarterly
 * table in both sharded databases.
 */
@RequestMapping(value = "/createFooData", method = RequestMethod.GET)
public @ResponseBody Object createFooData() throws ParseException {
    final String datePattern = "yyyy-MM-dd HH:mm:ss";
    Date current = DateUtils.parseDate("2025-01-01 00:00:00", datePattern);
    for (int created = 0; created < 1000; created++) {
        Student student = new Student();
        student.setId(UUID.randomUUID().toString());
        student.setName(DateFormatUtils.format(current, "yyyyMMddHH"));
        student.setCash(BigDecimal.ZERO);
        // create_time is the sharding column; stored as epoch millis.
        student.setCreateTime(current.getTime());
        stuSdMapper.insert(student);
        // Advance half a day so the rows spread across shards.
        current = DateUtils.addHours(current, 12);
    }
    return "batch ok!";
}

/**
 * Range query endpoint; start/end timestamps and paging come in via StuQuery,
 * and the sharding algorithms route the query to the matching year/quarter.
 */
@RequestMapping(value = "/getRangeStus", method = RequestMethod.POST)
public @ResponseBody Object getRangeStus(StuQuery query) {
    return stuSdMapper.selectStudents(query);
}
package com.lianxin.dao.sd;

import com.lianxin.model.StuQuery;
import com.lianxin.pojo.Student;
import org.apache.ibatis.annotations.Param;

import java.util.List;

/**
 * MyBatis mapper for the logic table "stu"; ShardingSphere routes each
 * statement to the actual quarterly tables (stu_1..stu_4) across the yearly
 * databases based on the create_time sharding column.
 */
public interface StudentMapper {

    /**
     * Range query by create_time window (query.startDate/endDate, epoch millis)
     * with optional name filter, ordering and offset/limit paging.
     */
    List<Student> selectStudents(StuQuery query);

    /** Inserts one student; returns the affected row count. */
    int insert(Student student);

    /** Multi-row insert of the given students; returns the affected row count. */
    int insertBatch(@Param("students") List<Student> students);
}
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="com.lianxin.dao.sd.StudentMapper" >

  <!-- Range query over the logic table "stu". create_time is the sharding column:
       "between ? and ?" is what feeds the range sharding algorithms.
       #{startDate}/#{endDate} (prepared-statement parameters) replace the original
       ${startDate}/${endDate} string substitution, which was open to SQL injection
       and bypassed parameter binding. -->
  <select id="selectStudents" resultType="com.lianxin.pojo.Student" parameterType="com.lianxin.model.StuQuery" >
    select a.id, a.name, a.cash, a.create_time
    from stu a
    where 1 = 1
    and a.create_time between #{startDate} and #{endDate}
    <if test="name != null and name != ''">
      and a.name = #{name}
    </if>
    order by
    <choose>
      <when test="orderFeilds != null and orderFeilds.size() != 0">
        <!-- NOTE(review): ${item} is raw string substitution because column names
             cannot be bound as parameters. orderFeilds MUST be validated against a
             whitelist of column names before reaching this mapper. -->
        <foreach item="item" index="index" collection="orderFeilds" open="" separator="," close="">
          a.${item}
        </foreach>
      </when>
      <otherwise>
        a.create_time desc
      </otherwise>
    </choose>
    offset #{skip} limit #{take}
  </select>

  <!-- Single-row insert; the precise sharding algorithms pick the target
       database/table from #{createTime}. -->
  <insert id="insert" parameterType="com.lianxin.pojo.Student" >
    insert into stu (id, name, cash, create_time)
    values (#{id,jdbcType=VARCHAR}, #{name,jdbcType=VARCHAR}, #{cash}, #{createTime})
  </insert>

  <!-- Multi-row insert over the "students" list parameter. -->
  <insert id="insertBatch" >
    insert into stu (id, name, cash, create_time)
    values
    <foreach item="item" index="index" collection="students" open=" " separator="," close=" ">
      (#{item.id,jdbcType=VARCHAR}, #{item.name,jdbcType=VARCHAR}, #{item.cash}, #{item.createTime})
    </foreach>
  </insert>

</mapper>
/**
 * Query parameters for the sharded "stu" table.
 * startDate/endDate are epoch-millis bounds on the create_time sharding column;
 * both should fall within the same year and quarter, since the range sharding
 * algorithms reject cross-year ranges.
 */
public class StuQuery extends QueryBase {

    // Lower bound (inclusive) on create_time, epoch millis.
    private Long startDate;
    // Upper bound (inclusive) on create_time, epoch millis.
    private Long endDate;
    // Optional exact-match filter on the name column.
    private String name;

    public Long getStartDate() {
        return startDate;
    }

    public void setStartDate(Long startDate) {
        this.startDate = startDate;
    }

    public Long getEndDate() {
        return endDate;
    }

    public void setEndDate(Long endDate) {
        this.endDate = endDate;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }
}
package com.lianxin.model; import org.springframework.web.context.request.RequestAttributes; import org.springframework.web.context.request.RequestContextHolder; import org.springframework.web.context.request.ServletRequestAttributes; import javax.servlet.http.HttpServletRequest; import java.util.List; public class QueryBase { //分页相关字段 private Integer page; private Integer pageSize; private Integer take; private Integer skip = 0; //排序基础字段 private List<String> orderFeilds; //公司数据隔离基础字段 private String companyId; //关键字查询基础字段 private String searchKey; //权限过滤基础字段 private List<String> zoneIds; private String appVersion; HttpServletRequest request; public QueryBase(){ RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes(); if (requestAttributes != null) { request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest(); if (request != null && request.getParameter("page") != null) { page = Integer.valueOf(request.getParameter("page")); }else page = 1; if (request != null && request.getParameter("pageSize") != null) { pageSize = Integer.valueOf(request.getParameter("pageSize")); } else pageSize = 50; } else { page = 1; pageSize = 50; } take = pageSize; skip = (page - 1) * pageSize; } public void buildPagerParam(){ take = pageSize; skip = (page - 1) * pageSize; } public Integer getPage() { return page; } public void setPage(Integer page) { this.page = page; } public Integer getPageSize() { return pageSize; } public void setPageSize(Integer pageSize) { this.pageSize = pageSize; } public Integer getTake() { return take; } public void setTake(Integer take) { this.take = take; } public Integer getSkip() { return skip; } public void setSkip(Integer skip) { this.skip = skip; } public List<String> getOrderFeilds() { return orderFeilds; } public void setOrderFeilds(List<String> orderFeilds) { this.orderFeilds = orderFeilds; } public String getCompanyId() { return companyId; } public void setCompanyId(String companyId) { 
this.companyId = companyId; } public String getSearchKey() { return searchKey; } public void setSearchKey(String searchKey) { this.searchKey = searchKey; } public List<String> getZoneIds() { return zoneIds; } public void setZoneIds(List<String> zoneIds) { this.zoneIds = zoneIds; } public String getAppVersion() { return appVersion; } public void setAppVersion(String appVersion) { this.appVersion = appVersion; } }


所以我想实际应避免跨库跨表查询,文章中我想按照每年每季度拆分,所以用户界面就得做查询限制按照每年每季度查询数据!
文章参考
https://blog.csdn.net/weixin_47077809/article/details/113694604
欢迎转载文章,但请标明出处!
浙公网安备 33010602011771号