最近工作中使用到批量插入,试了试网上的几种方法,下面三种方法都是插入2万条数据
特别注意:MySQL默认接受的SQL大小是1048576(1M),若数据量超过1M会报异常错误。此时可以分开处理,每次只提交一定数量的数据到数据库;也可以修改MySQL安装目录下my.ini文件中[mysqld]段的"max_allowed_packet = 1M",增加max_allowed_packet的容量。
使用到oracle批量插入的方法insert all
<!--
  Batch insert via Oracle multitable INSERT ALL.
  One INTO ... VALUES clause is generated per list element; the mandatory
  trailing subquery is satisfied with SELECT 1 FROM DUAL.
  useGeneratedKeys="false": no JDBC-generated keys are needed.
  NOTE(review): the original post lost the opening <foreach> tag to the blog
  engine; restored here with item="applicationConsumables" to match the
  #{applicationConsumables.*} placeholders.
-->
<insert id="insertBatch3" useGeneratedKeys="false" parameterType="list">
    INSERT ALL
    <foreach collection="list" item="applicationConsumables" index="index">
        INTO APPLICATION_CONSUMABLES_TB(
            BUSINESS_ID,
            GENERATE_DATE,
            CREATE_DATE,
            UPDATE_DATE,
            ARCHIVE_DATE,
            PATIENT_ID,
            AREA_ID,
            ORG_ID,
            ORG_NAME,
            WS98_00_909_35,
            WS98_00_909_36,
            ZJ99_JH_SBS,
            ZJ99_TY_CG_CS,
            ZJ99_TY_DB_CS,
            ZJ99_CG_JH_ZXS,
            ZJ99_CG_ZS,
            ZJ99_GYS_PS_WCZS,
            ZJ99_XPS_ZS
        )
        VALUES
        (
            #{applicationConsumables.businessId,jdbcType=VARCHAR},
            <!-- Dates are round-tripped through to_char/to_date to drop the time part. -->
            decode(#{applicationConsumables.generateDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.generateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            <!-- Fixed: original called to_date() directly on a DATE bind, relying on
                 implicit NLS string conversion; now consistent with the other columns. -->
            decode(#{applicationConsumables.createDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.createDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.updateDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.updateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.archiveDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.archiveDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            #{applicationConsumables.patientId,jdbcType=VARCHAR},
            #{applicationConsumables.areaId,jdbcType=VARCHAR},
            #{applicationConsumables.orgId,jdbcType=VARCHAR},
            #{applicationConsumables.orgName,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090935,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090936,jdbcType=VARCHAR},
            #{applicationConsumables.zj99JhSbs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyCgCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyDbCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgJhZxs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgZs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99GysPsWczs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99XpsZs,jdbcType=NUMERIC}
        )
    </foreach>
    SELECT 1 FROM DUAL
</insert>
把JSON数据转成对象列表,每次提交插入500条:
@Override
public boolean insertBatch3(Map param) throws Exception {
    // Convert the incoming JSON payload into a list of POJOs.
    List list = JsonArrayToPojoList.jsonToList(param, ApplicationConsumablesPojo.class);
    int nums = 500; // rows per INSERT ALL statement (original comment wrongly said 200)
    // Number of chunks; double avoids float-precision issues on large lists.
    int times = (int) Math.ceil((double) list.size() / nums);
    try {
        ApplicationConsumablesPojo pojo = (ApplicationConsumablesPojo) list.get(0);
        // Remove previously loaded rows for the same date/org before re-inserting.
        applicationConsumablesMapper.deleteBatch(pojo.getGenerateDate(), pojo.getOrgId());
        for (int i = 0; i < times; i++) {
            int from = i * nums;
            // Math.min handles the final partial chunk in one expression.
            int to = Math.min(from + nums, list.size());
            applicationConsumablesMapper.insertBatch3(list.subList(from, to));
        }
    } catch (Exception e) {
        // Was silently swallowed; at least surface the failure before reporting it.
        e.printStackTrace();
        return false;
    }
    return true;
}
useGeneratedKeys="false",表示不需要JDBC自动生成主键。
<!--
  Batch insert via an anonymous PL/SQL block: BEGIN insert; insert; ... ;END;
  One full INSERT statement per list element, joined with ";" by the foreach
  separator. useGeneratedKeys="false": no JDBC-generated keys are needed.
  NOTE(review): the original post lost the opening <foreach> tag to the blog
  engine; restored here to match the #{applicationConsumables.*} placeholders.
-->
<insert id="insertBatch" useGeneratedKeys="false" parameterType="list">
    BEGIN
    <foreach collection="list" item="applicationConsumables" index="index" separator=";">
        INSERT INTO APPLICATION_CONSUMABLES_TB(
            BUSINESS_ID,
            GENERATE_DATE,
            CREATE_DATE,
            UPDATE_DATE,
            ARCHIVE_DATE,
            PATIENT_ID,
            AREA_ID,
            ORG_ID,
            ORG_NAME,
            WS98_00_909_35,
            WS98_00_909_36,
            ZJ99_JH_SBS,
            ZJ99_TY_CG_CS,
            ZJ99_TY_DB_CS,
            ZJ99_CG_JH_ZXS,
            ZJ99_CG_ZS,
            ZJ99_GYS_PS_WCZS,
            ZJ99_XPS_ZS
        )
        VALUES
        (
            #{applicationConsumables.businessId,jdbcType=VARCHAR},
            <!-- Dates are round-tripped through to_char/to_date to drop the time part. -->
            decode(#{applicationConsumables.generateDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.generateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            <!-- Fixed: original called to_date() directly on a DATE bind, relying on
                 implicit NLS string conversion; now consistent with the other columns. -->
            decode(#{applicationConsumables.createDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.createDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.updateDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.updateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.archiveDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.archiveDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            #{applicationConsumables.patientId,jdbcType=VARCHAR},
            #{applicationConsumables.areaId,jdbcType=VARCHAR},
            #{applicationConsumables.orgId,jdbcType=VARCHAR},
            #{applicationConsumables.orgName,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090935,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090936,jdbcType=VARCHAR},
            #{applicationConsumables.zj99JhSbs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyCgCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyDbCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgJhZxs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgZs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99GysPsWczs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99XpsZs,jdbcType=NUMERIC}
        )
    </foreach>
    ;END ;
</insert>
@Override
public boolean insertBatch(Map param) throws Exception {
    // Convert the incoming JSON payload into a list of POJOs.
    List list = JsonArrayToPojoList.jsonToList(param, ApplicationConsumablesPojo.class);
    int result = 1;
    SqlSession batchSqlSession = null;
    try {
        ApplicationConsumablesPojo pojo = (ApplicationConsumablesPojo) list.get(0);
        // Remove previously loaded rows for the same date/org before re-inserting.
        applicationConsumablesMapper.deleteBatch(pojo.getGenerateDate(), pojo.getOrgId());
        // BATCH-mode session with manual commit.
        // NOTE(review): the mapper calls below are not routed through this session;
        // verify the mapper actually shares it, otherwise these commits are no-ops.
        batchSqlSession = this.sqlSessionTemplate
                .getSqlSessionFactory()
                .openSession(ExecutorType.BATCH, false);
        int batchCount = 500; // rows committed per batch
        long startDate = System.currentTimeMillis();
        for (int index = 0; index < list.size(); index += batchCount) {
            // Half-open range [index, end). Math.min covers the final partial batch
            // and fixes the original off-by-one: `index + (batchCount - 1)` made every
            // batch after the first contain 499 rows instead of 500.
            int end = Math.min(index + batchCount, list.size());
            result = result + applicationConsumablesMapper.insertBatch(list.subList(index, end));
            batchSqlSession.commit();
            // Clear the session cache between batches to avoid unbounded growth.
            batchSqlSession.clearCache();
            log.info("index:" + index + " batchLastIndex:" + end);
        }
        long costTime = System.currentTimeMillis() - startDate;
        log.info("----------------------------" + costTime + "--------------------");
        batchSqlSession.commit();
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    } finally {
        // Guard against NPE when openSession itself threw before assignment.
        if (batchSqlSession != null) {
            batchSqlSession.close();
        }
    }
    return true;
}
<!--
  Batch insert via INSERT ... SELECT: one "SELECT ... FROM DUAL" per list
  element, joined with UNION ALL by the foreach separator.
  useGeneratedKeys="false": no JDBC-generated keys are needed.
  NOTE(review): the original post lost the opening <foreach> tag (and its
  UNION ALL separator) to the blog engine; restored here to match the
  #{applicationConsumables.*} placeholders.
-->
<insert id="insertBatch2" useGeneratedKeys="false" parameterType="list">
    INSERT INTO APPLICATION_CONSUMABLES_TB(
        BUSINESS_ID,
        GENERATE_DATE,
        CREATE_DATE,
        UPDATE_DATE,
        ARCHIVE_DATE,
        PATIENT_ID,
        AREA_ID,
        ORG_ID,
        ORG_NAME,
        WS98_00_909_35,
        WS98_00_909_36,
        ZJ99_JH_SBS,
        ZJ99_TY_CG_CS,
        ZJ99_TY_DB_CS,
        ZJ99_CG_JH_ZXS,
        ZJ99_CG_ZS,
        ZJ99_GYS_PS_WCZS,
        ZJ99_XPS_ZS
    )
    (
    <foreach collection="list" item="applicationConsumables" index="index" separator="UNION ALL">
        select
            #{applicationConsumables.businessId,jdbcType=VARCHAR},
            <!-- Dates are round-tripped through to_char/to_date to drop the time part. -->
            decode(#{applicationConsumables.generateDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.generateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            <!-- Fixed: original called to_date() directly on a DATE bind, relying on
                 implicit NLS string conversion; now consistent with the other columns. -->
            decode(#{applicationConsumables.createDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.createDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.updateDate,jdbcType=DATE},null,sysdate,to_date(to_char(#{applicationConsumables.updateDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            decode(#{applicationConsumables.archiveDate,jdbcType=DATE},null,null,to_date(to_char(#{applicationConsumables.archiveDate,jdbcType=DATE},'yyyy-MM-dd'),'yyyy-MM-dd')),
            #{applicationConsumables.patientId,jdbcType=VARCHAR},
            #{applicationConsumables.areaId,jdbcType=VARCHAR},
            #{applicationConsumables.orgId,jdbcType=VARCHAR},
            #{applicationConsumables.orgName,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090935,jdbcType=VARCHAR},
            #{applicationConsumables.ws980090936,jdbcType=VARCHAR},
            #{applicationConsumables.zj99JhSbs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyCgCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99TyDbCs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgJhZxs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99CgZs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99GysPsWczs,jdbcType=NUMERIC},
            #{applicationConsumables.zj99XpsZs,jdbcType=NUMERIC}
        from dual
    </foreach>
    )
</insert>
@Override
public boolean insertBatch2(Map param) throws Exception {
    // Convert the incoming JSON payload into a list of POJOs.
    List list = JsonArrayToPojoList.jsonToList(param, ApplicationConsumablesPojo.class);
    int nums = 500; // rows per INSERT ... SELECT statement (original comment wrongly said 200)
    // Number of chunks; double avoids float-precision issues on large lists.
    int times = (int) Math.ceil((double) list.size() / nums);
    try {
        ApplicationConsumablesPojo pojo = (ApplicationConsumablesPojo) list.get(0);
        // Remove previously loaded rows for the same date/org before re-inserting.
        applicationConsumablesMapper.deleteBatch(pojo.getGenerateDate(), pojo.getOrgId());
        for (int i = 0; i < times; i++) {
            int from = i * nums;
            // Math.min handles the final partial chunk in one expression.
            int to = Math.min(from + nums, list.size());
            applicationConsumablesMapper.insertBatch2(list.subList(from, to));
        }
    } catch (Exception e) {
        // Was silently swallowed; at least surface the failure before reporting it.
        e.printStackTrace();
        return false;
    }
    return true;
}
批量插入条数200条时
方式 | 数据量 | 耗时(秒) |
---|---|---|
利用存储过程实现批量插入 | 2万 | 22.233 |
利用foreach标签 | 2万 | 13.023 |
使用oracle的insert all | 2万 | 17.489 |
批量插入条数500条时
方式 | 数据量 | 耗时(秒) |
---|---|---|
利用存储过程实现批量插入 | 2万 | 16.906 |
利用foreach标签 | 2万 | 13.058 |
使用oracle的insert all | 2万 | 16.139 |
批量插入条数1000条时
方式 | 数据量 | 耗时(秒) |
---|---|---|
利用存储过程实现批量插入 | 2万 | 60.583 |
利用foreach标签 | 2万 | 13.124 |
使用oracle的insert all | 2万 | 16.478 |
每次写入的数量,以及网络环境对花费的时间也是有很大的影响的。