What is chunked upload?
In any project that involves file uploads, a large upload that fails partway through (for example because the network drops) normally has to be restarted from the beginning, which badly hurts efficiency and the user experience. Instead, we can split the file into chunks and upload them one at a time; if the connection is interrupted, we only need to resume from the chunk after the last one that was uploaded successfully, which keeps the experience smooth for the user.
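To make the arithmetic concrete, here is a minimal sketch (not part of the project code; the file size and the resume index are made-up values) of how a fixed chunk size splits a file and where an interrupted upload picks up again:

public class ChunkMath {
    public static void main(String[] args) {
        long fileSize = 95L * 1024 * 1024;     // a hypothetical 95 MB file
        long chunkSize = 10L * 1024 * 1024;    // 10 MB per chunk, the same size the WebUploader config below uses
        long chunkCount = (fileSize + chunkSize - 1) / chunkSize; // ceiling division -> 10 chunks, indexes 0..9
        int lastUploadedIndex = 3;             // suppose the server reports chunk 3 as the last one it received
        long resumeOffset = (lastUploadedIndex + 1) * chunkSize;  // resume with chunk 4, i.e. at byte 41943040
        System.out.println(chunkCount + " chunks, resume at byte " + resumeOffset);
    }
}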
Use cases
The typical scenario is uploading very large files over an unreliable network, where an interrupted transfer must be resumable instead of being restarted from scratch.
What is WebUploader?
WebUploader is a modern file upload component developed by Baidu's WebFE (FEX) team. It is built primarily on HTML5 with Flash as a fallback: in modern browsers it takes full advantage of HTML5, while mainstream IE browsers are still supported through the Flash runtime, covering IE6+, iOS 6+ and Android 4+. Both runtimes share the same API, so either can be used.
It uploads large files in concurrent chunks, which greatly improves upload efficiency.
Frontend HTML code
<html>
<head>
<meta charset="utf-8">
<title>BigFile-WebUploader</title>
<link rel="stylesheet" href="https://cdn.bootcss.com/bootstrap/3.3.7/css/bootstrap.min.css">
<link rel="stylesheet" href="css/webuploader.css">
<script type="text/javascript" src="http://cdn.bootcss.com/jquery/1.12.4/jquery.min.js"></script>
<script type="text/javascript" src="https://cdn.bootcss.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script type="text/javascript" src="js/webuploader.js"></script>
</head>
<body>
<div id="uploader" class="wu-example">
<div id="thelist" class="uploader-list"></div>
<div class="btns">
<div id="picker">Select a large file</div>
<button id="ctlBtn" class="btn btn-default">Start upload</button>
<button id="stopBtn" class="btn btn-default">Pause</button>
<button id="restart" class="btn btn-default">Resume</button>
</div>
</div>
</body>
<script>
var $btn = $('#ctlBtn');
var $thelist = $('#thelist');
var startDate;
// HOOK: these hooks must be registered before the uploader is instantiated
WebUploader.Uploader.register({
// runs before a file starts to be sent
'before-send-file': 'beforeSendFile',
// runs after the file has been split into chunks (with chunking disabled the whole file is one chunk), before each chunk is sent
'before-send': 'beforeSend',
// runs after all chunks of a file have been uploaded and the server returned no error
"after-send-file": "afterSendFile"
}, {
beforeSendFile: function (file) {
startDate = new Date();
console.log("Upload started at " + startDate)
console.log("beforeSendFile");
// A Deferred object is frequently needed in hook callbacks to handle asynchronous work that must be awaited.
var deferred = WebUploader.Deferred();
// 1. Compute the file's MD5 fingerprint (here over the first 3 MB) as its unique key, used for resumable upload
uploader.md5File(file, 0, 3 * 1024 * 1024).progress(function (percentage) {
// MD5 computation progress
console.log('MD5 progress:', percentage);
getProgressBar(file, percentage, "MD5", "MD5");
}).then(function (val) { // done
console.log('File MD5 Result:', val);
file.md5 = val;
file.uid = WebUploader.Base.guid();
// ask the server whether this file was uploaded before and whether chunks already exist (resumable upload)
$.ajax({
type: "POST",
url: "bigfile/check",
async: false,
data: {
fileMd5: val
},
success: function (data) {
var resultCode = data.resultCode;
// instant upload: the file is already complete on the server
if(resultCode == -1){
// the file has been uploaded before: skip the upload and mark it as successful right away
uploader.skipFile(file);
file.pass = true;
}else{
// file never uploaded before: index is 0
// upload was interrupted before: the server returns the index it has reached
file.indexcode = resultCode;
}
}, error: function () {
}
});
// the file info is ready, move on to the next step
deferred.resolve();
});
return deferred.promise();
},
beforeSend: function (block) {
// index of the last chunk the server already has
var indexchunk = block.file.indexcode;
var deferred = WebUploader.Deferred();
if (indexchunk > 0) {
if (block.chunk > indexchunk) {
// this chunk is not on the server yet: send it
deferred.resolve();
} else {
// this chunk is already on the server: skip it
deferred.reject();
}
} else {
// nothing uploaded yet: send the chunk
deferred.resolve();
}
// return the Deferred's Promise object
return deferred.promise();
}
, afterSendFile: function (file) {
// all chunks uploaded successfully: ask the backend to merge them
$.ajax({
type: "POST",
url: "bigfile/merge",
data: {
fileName: file.name,
fileMd5: file.md5
},
success: function (data) {
}, error: function () {
}
});
}
});
// instantiate the uploader
var uploader = WebUploader.create({
pick: {
id: '#picker',
label: 'Click to select a file'
},
duplicate: true,// de-duplication option: the hash key is generated from file name, file size and last-modified time
swf: 'js/Uploader.swf',
chunked: true,
chunkSize: 10 * 1024 * 1024, // 10 MB per chunk
threads: 3,
server: 'bigfile/upload',
auto: true,
// disable global drag-and-drop so that an image dropped onto the page is not opened by the browser
disableGlobalDnd: true,
fileNumLimit: 1024,
fileSizeLimit: 50 * 1024 * 1024 * 1024,// 50 GB: total size limit of the queue, larger selections are rejected
fileSingleSizeLimit: 10 * 1024 * 1024 * 1024 // 10 GB: single-file size limit, larger files are rejected
});
// when a file is added to the queue
uploader.on('fileQueued', function (file) {
$thelist.append('<div id="' + file.id + '" class="item">' +
'<h4 class="info">' + file.name + '</h4>' +
'<p class="state">Waiting to upload...</p>' +
'</div>');
$("#stopBtn").click(function () {
uploader.stop(true);
});
$("#restart").click(function () {
uploader.upload(file);
});
});
// fired before each chunk of a file is sent, mainly used to attach extra parameters; with chunking enabled this fires many times for a large file
uploader.onUploadBeforeSend = function (obj, data) {
//console.log("onUploadBeforeSend");
var file = obj.file;
data.md5 = file.md5 || '';
data.uid = file.uid;
};
// upload in progress
uploader.on('uploadProgress', function (file, percentage) {
getProgressBar(file, percentage, "FILE", "Upload progress");
});
// upload finished for a file
uploader.on('uploadSuccess', function (file) {
var endDate = new Date();
console.log("File upload took " + (endDate - startDate) / 1000 + "s")
var text = 'Uploaded';
if (file.pass) {
text = "Instant upload: the file had already been uploaded."
}
$('#' + file.id).find('p.state').text(text);
});
uploader.on('uploadError', function (file) {
$('#' + file.id).find('p.state').text('Upload failed');
});
uploader.on('uploadComplete', function (file) {
// hide the progress bars
fadeOutProgress(file, 'MD5');
fadeOutProgress(file, 'FILE');
});
// start the upload when the button is clicked
$btn.on('click', function () {
uploader.upload();
});
/**
 * Helper that creates/updates a progress bar
 * @param file file object
 * @param percentage progress value (0-1)
 * @param id_Prefix id prefix
 * @param titleName title shown inside the bar
 */
function getProgressBar(file, percentage, id_Prefix, titleName) {
var $li = $('#' + file.id), $percent = $li.find('#' + id_Prefix + '-progress-bar');
// avoid creating the same bar twice
if (!$percent.length) {
$percent = $('<div id="' + id_Prefix + '-progress" class="progress progress-striped active">' +
'<div id="' + id_Prefix + '-progress-bar" class="progress-bar" role="progressbar" style="width: 0%">' +
'</div>' +
'</div>'
).appendTo($li).find('#' + id_Prefix + '-progress-bar');
}
var progressPercentage = parseInt(percentage * 100) + '%';
$percent.css('width', progressPercentage);
$percent.html(titleName + ':' + progressPercentage);
}
/**
 * Hide a progress bar
 * @param file file object
 * @param id_Prefix id prefix
 */
function fadeOutProgress(file, id_Prefix) {
$('#' + file.id).find('#' + id_Prefix + '-progress').fadeOut();
}
</script>
</html>
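The beforeSendFile hook above only inspects data.resultCode in the response of bigfile/check. Assuming the JsonResult class shown below is serialized with Spring's default Jackson settings, the response for a file that already exists on the server would look roughly like this (illustrative values):

{"resultCode": -1, "resultMsg": null, "resultData": null}

A resultCode of 0 means the file has never been uploaded, and a positive value is the index of the last chunk the server already has, which is exactly what the beforeSend hook compares against block.chunk.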
Backend code
import org.springframework.web.multipart.MultipartFile;
public class MultipartFileParam {
// user id
private String uid;
// task id
private String id;
// total number of chunks
private int chunks;
// index of the current chunk
private int chunk;
// size of the current chunk
private long size = 0L;
// file name
private String name;
// the chunk itself
private MultipartFile file;
// MD5 of the whole file
private String md5;
public String getUid() {
return uid;
}
public void setUid(String uid) {
this.uid = uid;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public int getChunks() {
return chunks;
}
public void setChunks(int chunks) {
this.chunks = chunks;
}
public int getChunk() {
return chunk;
}
public void setChunk(int chunk) {
this.chunk = chunk;
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public MultipartFile getFile() {
return file;
}
public void setFile(MultipartFile file) {
this.file = file;
}
public String getMd5() {
return md5;
}
public void setMd5(String md5) {
this.md5 = md5;
}
@Override
public String toString() {
return "MultipartFileParam{" +
"uid='" + uid + '\'' +
", id='" + id + '\'' +
", chunks=" + chunks +
", chunk=" + chunk +
", size=" + size +
", name='" + name + '\'' +
", file=" + file +
", md5='" + md5 + '\'' +
'}';
}
}
public class JsonResult<T> {
private int resultCode;
private String resultMsg;
private Object resultData;
public JsonResult() {
}
public JsonResult(int resultCode, String resultMsg, Object resultData) {
this.resultCode = resultCode;
this.resultMsg = resultMsg;
this.resultData = resultData;
}
public int getResultCode() {
return this.resultCode;
}
public void setResultCode(int resultCode) {
this.resultCode = resultCode;
}
public String getResultMsg() {
return this.resultMsg;
}
public void setResultMsg(String resultMsg) {
this.resultMsg = resultMsg;
}
public Object getResultData() {
return this.resultData;
}
public void setResultData(Object resultData) {
this.resultData = resultData;
}
}
Controller
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.*;
/**
 * @Title: Large file upload
 * @ClassName: com.lovecyy.file.up.example3.controller.BreakPointController.java
 * @Description: resumable upload, instant upload, chunked upload
 *
 * @Copyright 2020-2021 - Powered By R&D Center
 */
@Controller
@RequestMapping(value = "/bigfile")
public class BigFileController {
private Logger logger = LoggerFactory.getLogger(BigFileController.class);
@Autowired
private StringRedisTemplate stringRedisTemplate;
@Value("${breakpoint.upload.dir}")
private String fileStorePath;
/**
 * @Title: Check whether the file has been uploaded before and whether chunks already exist (resumable upload)
 * @MethodName: checkBigFile
 * @param fileMd5
 * @Return com.lovecyy.file.up.example3.vo.JsonResult
 * @Exception
 * @Description:
 * file already exists: index is -1
 * file never uploaded: index is 0
 * upload was interrupted: returns the index of the last chunk received
 */
@RequestMapping(value = "/check", method = RequestMethod.POST)
@ResponseBody
public JsonResult checkBigFile(String fileMd5) {
JsonResult jr = new JsonResult();
// instant upload: if the merge directory for this MD5 already exists, the whole file is on the server
File mergeMd5Dir = new File(fileStorePath + "/" + "merge" + "/" + fileMd5);
if(mergeMd5Dir.exists()){
jr.setResultCode(-1);// file already exists: index is -1
return jr;
}
// list the chunk files already stored for this MD5
File dir = new File(fileStorePath + "/" + fileMd5);
File[] childs = dir.listFiles();
if(childs == null || childs.length == 0){
jr.setResultCode(0);// file never uploaded: index is 0
}else{
jr.setResultCode(childs.length-1);// upload was interrupted: return the index of the last chunk received
}
return jr;
}
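With the code above, the upload directory ends up laid out roughly as follows (the MD5 value and the file name are placeholders; the root is the breakpoint.upload.dir path from the configuration file at the end of the article):

D:/workplace/uploads/
    e10adc3949ba59abbe56e057f20f883e/      <- one directory per file MD5, holding the chunks 0, 1, 2, ...
        0
        1
        2
    merge/
        e10adc3949ba59abbe56e057f20f883e/
            bigfile.zip                    <- written by /bigfile/merge; the chunk directory is deleted afterwards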
/**
 * Upload one chunk of a file
 *
 * @param param chunk metadata and content
 * @param request
 */
@RequestMapping(value = "/upload", method = RequestMethod.POST)
@ResponseBody
public void filewebUpload(MultipartFileParam param, HttpServletRequest request) {
boolean isMultipart = ServletFileUpload.isMultipartContent(request);
// file name
String fileName = param.getName();
// index of the current chunk
int chunkIndex = param.getChunk();
if (isMultipart) {
File file = new File(fileStorePath + "/" + param.getMd5());
if (!file.exists()) {
file.mkdir();
}
File chunkFile = new File(
fileStorePath + "/" + param.getMd5() + "/" + chunkIndex);
try{
FileUtils.copyInputStreamToFile(param.getFile().getInputStream(), chunkFile);
}catch (Exception e){
e.printStackTrace();
}
}
logger.info("File {}: chunk {} uploaded successfully", fileName, chunkIndex);
return;
}
/**
 * After all chunks have been uploaded, merge them into the final file
 * @param request
 * @return
 */
@RequestMapping(value = "/merge", method = RequestMethod.POST)
@ResponseBody
public JsonResult filewebMerge(HttpServletRequest request) {
FileChannel outChannel = null;
try {
String fileName = request.getParameter("fileName");
String fileMd5 = request.getParameter("fileMd5");
// list the chunk files stored for this MD5
File dir = new File(fileStorePath + "/" + fileMd5);
File[] childs = dir.listFiles();
if(Objects.isNull(childs)|| childs.length==0){
return null;
}
// convert to a list so the chunks can be sorted by index
List<File> fileList = new ArrayList<File>(Arrays.asList(childs));
Collections.sort(fileList, new Comparator<File>() {
@Override
public int compare(File o1, File o2) {
if (Integer.parseInt(o1.getName()) < Integer.parseInt(o2.getName())) {
return -1;
}
return 1;
}
});
// the merged output file
File outputFile = new File(fileStorePath + "/" + "merge"+ "/" + fileMd5 + "/" + fileName);
// create the output file (and its directory) if it does not exist yet
if(!outputFile.exists()){
File mergeMd5Dir = new File(fileStorePath + "/" + "merge"+ "/" + fileMd5);
if(!mergeMd5Dir.exists()){
mergeMd5Dir.mkdirs();
}
logger.info("creating merged file");
outputFile.createNewFile();
}
}
outChannel = new FileOutputStream(outputFile).getChannel();
FileChannel inChannel = null;
try {
for (File file : fileList) {
inChannel = new FileInputStream(file).getChannel();
inChannel.transferTo(0, inChannel.size(), outChannel);
inChannel.close();
// delete the chunk once it has been appended
file.delete();
}
}catch (Exception e){
e.printStackTrace();
// the merge failed: delete the partially written output file
outputFile.delete();
dir.delete();// delete the chunk directory
}finally {
if(inChannel!=null){
inChannel.close();
}
}
dir.delete(); // delete the directory that held the chunks
// FIXME: database operation - record where the merged file is stored
} catch (IOException e) {
e.printStackTrace();
}finally {
try {
if(outChannel!=null){
outChannel.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
return null;
}
}
Configuration file
# Tomcat
server:
  # port
  port: 8080
# development environment
breakpoint:
  upload:
    # directory where uploaded chunks and merged files are stored
    dir: D:/workplace/uploads/
    # 1024*1024=1048576, 5 MB = 5242880 bytes
    chunkSize: 5242880
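One more thing to watch: each chunk arrives as a roughly 10 MB multipart request, which is larger than Spring Boot's default multipart limit of 1 MB per file. A sketch of the extra settings, assuming Spring Boot 2.x property names (spring.servlet.multipart.*; Spring Boot 1.x uses spring.http.multipart.* instead):

spring:
  servlet:
    multipart:
      # raise the limits above the 10 MB chunk size used by the frontend
      max-file-size: 20MB
      max-request-size: 20MB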