There are plenty of introductions to Node's stream API online; I'd recommend one hosted on GitHub that explains what a stream actually is. Read that first, then come back to this practical use case.
Here I've put together an example that uses streams to implement chunked upload, instant upload, and resumable upload.
On the client side I use WebUploader to build a front end that uploads files in chunks. Once the server has received all of the chunks, it merges them back into a single complete file, and that merge is done with streams.
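Since that merge is really just piping a readable stream into a writable stream, here is a minimal sketch of the primitive involved (the file names are made up):
var fs = require("fs")
// Copy a file by streaming it: read it piece by piece and pipe the pieces
// into a writable stream, instead of loading the whole file into memory.
fs.createReadStream("input.bin")
    .pipe(fs.createWriteStream("output.bin"))
    .on("finish", function () {
        console.log("copy finished")
    })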
The flow is as follows: the client adds a file to the form, computes its MD5, and sends it to the server along with the file's info. The server looks that MD5 up in the database and returns the result to the client, which then decides whether the file needs to be uploaded.
If the value is found, the file is simply marked as already uploaded; that's the simple "instant upload" case.
If it isn't found, the upload goes ahead.
Before each chunk is sent, the client asks the server whether that chunk already exists in the session's stack collection, and skips it if so; this is what makes resuming an interrupted upload possible.
import React from "react"
import ReactDOM from "react-dom"
import WebUploader from "tb-webuploader/dist/webuploader.js"
import $ from "jquery"   // used for the ajax calls below
class App extends React.Component {
    constructor(props) {
        super(props)
    }
    uploader() {
    }
    componentDidMount() {
        // Register a 'before-send' hook: before each chunk is sent, ask the server
        // whether it already has that chunk; rejecting the deferred skips the chunk.
        WebUploader.Uploader.register({
            'before-send': 'beforeSend'
        }, {
            beforeSend: block => {
                var deferred = WebUploader.Base.Deferred()
                $.ajax({
                    type: "GET",
                    url: "/files/chunk",
                    data: {
                        chunk: block.chunk,
                        chunks: block.chunks,
                        fileName: block.file.name
                    },
                    success: res => {
                        console.log(res)
                        if (res.err) {
                            // the server already has this chunk, skip it
                            deferred.reject()
                        }
                        else {
                            deferred.resolve()
                        }
                    }
                })
                return deferred.promise();
            }
        })
        this.uploader = WebUploader.create({
            swf: '/js/Uploader.swf',
            server: '/files',
            pick: '#picker',
            resize: false,
            chunked: true,
            threads: 1
        })
        .on("fileQueued", file => {
            // Compute the file's MD5 first, then ask the server whether it already exists.
            this.uploader.md5File(file)
                .progress(function (percentage) {
                    console.log('Percentage:', percentage);
                })
                .then(md5 => {
                    $.ajax({
                        type: "GET",
                        url: "/files/md5/" + md5,
                        success: res => {
                            // already on the server -> mark as done ("instant upload"), otherwise start uploading
                            console.log(res)
                            if (res.err) this.uploader.skipFile(file)
                            else this.uploader.upload()
                        }
                    })
                });
        })
        .on("uploadProgress", (file, percent) => {
            console.log(percent)
        })
        .on('uploadSuccess', function (file) {
            console.log("uploaded")
        })
    }
    render() {
        return <div id="uploader" className="wu-example">
            <div id="thelist" className="uploader-list"></div>
            <div className="btns">
                <div id="picker">Select files</div>
            </div>
        </div>
    }
}
ReactDOM.render(<App />, document.getElementById('content'))
That's the code for my front-end upload component; a rough idea of it is enough, because the focus of this article is the server side and how streams are used there.
If you want to understand the code above in detail, just read it against WebUploader's official API docs. The one thing worth paying attention to is how the hooks from the official API are used.
Next comes the app.js file:
var http = require('http');
var express = require('express');
var routes = require('./routes');
var users = require('./routes/users');
var path = require('path');
//var favicon = require('serve-favicon');
var logger = require('morgan');
var methodOverride = require('method-override');
var session = require('express-session');
var bodyParser = require('body-parser');
var multer = require('multer');
var errorHandler = require('errorhandler');
var mongoose = require("mongoose")
var app = express();
var db = mongoose.connection
mongoose.connect("mongodb://127.0.0.1/test")
db.once("open", function () {
    console.log("db connected")
})
// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'jade');
// uncomment after placing your favicon in /public
//app.use(favicon(path.join(__dirname, 'public', 'favicon.ico')));
app.use(logger('dev'));
app.use(methodOverride());
app.use(session({
    resave: true,
    saveUninitialized: true,
    secret: 'uwotm8'
}));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: true }));
app.use(multer()); // multer 0.x style: parses multipart uploads and populates req.files
app.use(express.static(path.join(__dirname, 'public')));
app.use('/', routes);
app.use('/users', users);
// catch 404 and forward to error handler
app.use(function (req, res, next) {
    var err = new Error('Not Found');
    err.status = 404;
    next(err);
});
// error handlers
// development error handler
// will print stacktrace
if (app.get('env') === 'development') {
    app.use(function (err, req, res, next) {
        res.status(err.status || 500);
        res.render('error', {
            message: err.message,
            error: err
        });
    });
}
// production error handler
// no stacktraces leaked to user
app.use(function (err, req, res, next) {
    res.status(err.status || 500);
    res.render('error', {
        message: err.message,
        error: {}
    });
});
module.exports = app;
Pretty standard stuff, nothing much to explain, apart from the MongoDB connection.
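One thing app.js does not show is the collections module that the router below requires. A minimal sketch of what it might look like, assuming it simply exposes a Mongoose model for uploaded files keyed by their MD5 (the fields other than md5 are my guess, not taken from the repo):
var mongoose = require("mongoose")
// Every successfully uploaded file gets a document here; the
// "/files/md5/:md5" route queries this collection to decide
// whether a file can be skipped ("instant upload").
var fileSchema = new mongoose.Schema({
    md5: String,
    name: String,
    path: String
})
module.exports = {
    files: mongoose.model("File", fileSchema)
}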
Next comes the more important part:
router.js
var express = require('express')
var _ = require("lodash")
var collections = require("../collections")
var fs = require("fs")
var Promise = require("bluebird")
var router = express.Router()
var fn = require("../upload")
router.get('/', function (req, res, next) {
    res.render('index', {title: 'Express'});
});
// Asked before every chunk is sent: reply { err: "skip" } if the chunk
// is already in the session's stack, so the client can skip it.
router.get("/files/chunk", function (req, res) {
    if (req.session.md5) {
        var f = fn.filter(req, res)
        f.length ? res.json({err: "skip"}) : res.json({err: null})
    }
    else res.send("error")
})
// Asked once per file: look the MD5 up in MongoDB to decide between
// "instant upload" (already stored) and a real upload.
router.get("/files/md5/:md5", function (req, res) {
    var q = req.params
    collections
        .files
        .findOne(q)
        .exec()
        .then(function (cb) {
            if (cb) res.json({err: "exit", cb: cb})
            else {
                req.session.md5 = q.md5
                res.json({err: null})
            }
        })
})
// Receives one chunk per request; multer has already written it to disk.
router.post("/files", function (req, res) {
    var body = req.body
    var file = req.files.file
    if (file && body) fn.resume(file, body, req, res)
    else res.send("error")
})
module.exports = router;
The upload service boils down to three requests; let's go through each of them in turn.
“/files/md5/:md5”
Using Mongoose, it queries MongoDB for a document matching {md5: "the file's MD5"}. If one exists, the server responds with {err: "exit", cb: cb}; otherwise it responds with {err: null} and sets session.md5 = q.md5.
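For illustration, the two possible outcomes of that lookup look roughly like this (the hash is made up):
// GET /files/md5/d41d8cd98f00b204e9800998ecf8427e
// already in MongoDB -> { err: "exit", cb: <the stored file document> }
// not found          -> { err: null }   // and session.md5 is set for the upload that follows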
“/files/chunk”
Once the MD5 check has passed, uploading begins. For a valid request, the query parameters are handed to the fn.filter method:
filter: function (req, res) {
    if (req.session.fileName == req.query.fileName) {
        // Look for an entry in the stack with the same chunk index
        var c = _.filter(req.session.stack, _.matches({chunk: req.query.chunk}))
        return c
    }
    else {
        req.session.stack = []
        req.session.fileName = req.query.fileName
        return this.filter(req, res)
    }
},
The if branch is what enables resumable uploads; the else branch can be thought of as initialization.
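To see what that means in practice, here is a small sketch of fn.filter during a resumed upload; the file name and chunk numbers are made up, and req is a plain mock rather than a real Express request (the require path is the one router.js uses):
var fn = require("../upload")
// Session state left over from an interrupted upload of the same file:
// chunks 0 and 1 already reached the server before the connection dropped.
var req = {
    session: {
        fileName: "demo.zip",
        stack: [{chunk: "0", chunks: "3"}, {chunk: "1", chunks: "3"}]
    },
    query: {fileName: "demo.zip", chunk: "1", chunks: "3"}
}
fn.filter(req, {})      // -> [{ chunk: "1", chunks: "3" }], so the route answers { err: "skip" }
req.query.chunk = "2"
fn.filter(req, {})      // -> [], chunk 2 still has to be uploaded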
Last is POST "/files".
For a valid request, the fn.resume method is called:
resume: function (file, body, req, res) {
    // Combine multer's file info with the form fields sent alongside it
    var chunk = _.merge(file, body)
    chunk.md5 = req.session.md5
    var stack = req.session.stack
    stack.push(chunk)
    res.json({err: null})
    // All chunks received -> merge them into the final file
    if (stack.length == chunk.chunks) {
        this.merge(stack)
    }
},
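To make the bookkeeping concrete, this is roughly what one entry pushed onto req.session.stack looks like after _.merge(file, body); every value is made up and the field names are only indicative:
// One element of req.session.stack:
var entry = {
    path: "/tmp/53d2a7b1",                      // temp file multer wrote for this chunk
    extension: "zip",                           // used later to name the merged file
    chunk: "2",                                 // index of this chunk
    chunks: "5",                                // total number of chunks
    md5: "d41d8cd98f00b204e9800998ecf8427e"     // copied from the session
}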
Here lodash's merge combines the request body's fields with the uploaded file's info before the result is pushed onto the stack. Once every chunk of the file has been uploaded, fn.merge is called:
merge: function (stack) {
    var path = process.cwd() + "/public/files/"
    // The merged file is named after its MD5
    var ws = fs.createWriteStream(path + stack[0].md5 + "." + stack[0].extension)
    function todo() {
        return new Promise(function (resolve, reject) {
            // Take the next chunk off the stack and stream it into the target file,
            // keeping the writable stream open between chunks
            var chunk = stack.shift()
            var rs = fs.createReadStream(chunk.path)
            rs.pipe(ws, {end: false})
            rs.on("end", function () {
                resolve()
            })
        }).then(function () {
            if (stack.length) return todo()
            else ws.end()   // all chunks written: close the target file
        })
    }
    todo().then(function () {
        console.log("done")
    })
}
Here I use a recursive Promise to merge the files. You can think of the chunk files as a stack: I take the entries in the stack collection one by one, open the corresponding chunk as a readable stream, and pipe it into the writable stream; when one chunk has been read, the next one is started, until the stack collection is empty.
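If the recursive Promise is hard to follow, the same strictly sequential piping can be written with async/await; this is just a sketch to make the control flow explicit (it assumes a Node version with async/await and is not code from the repo):
var fs = require("fs")
async function mergeChunks(stack, target) {
    var ws = fs.createWriteStream(target)
    while (stack.length) {
        var chunk = stack.shift()
        // Wait until the current chunk has been fully read before starting the
        // next one, and keep the writable stream open between chunks.
        await new Promise(function (resolve, reject) {
            fs.createReadStream(chunk.path)
                .on("error", reject)
                .on("end", resolve)
                .pipe(ws, {end: false})
        })
    }
    ws.end()   // close the merged file once every chunk has been written
}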
The complete fn module (the ../upload required in router.js):
var Promise = require("bluebird")
var _ = require("lodash")
var fs = require("fs")
module.exports = {
    filter: function (req, res) {
        if (req.session.fileName == req.query.fileName) {
            var c = _.filter(req.session.stack, _.matches({chunk: req.query.chunk}))
            return c
        }
        else {
            req.session.stack = []
            req.session.fileName = req.query.fileName
            return this.filter(req, res)
        }
    },
    resume: function (file, body, req, res) {
        var chunk = _.merge(file, body)
        chunk.md5 = req.session.md5
        var stack = req.session.stack
        stack.push(chunk)
        res.json({err: null})
        if (stack.length == chunk.chunks) {
            this.merge(stack)
        }
    },
    merge: function (stack) {
        var path = process.cwd() + "/public/files/"
        var ws = fs.createWriteStream(path + stack[0].md5 + "." + stack[0].extension)
        function todo() {
            return new Promise(function (resolve, reject) {
                var chunk = stack.shift()
                var rs = fs.createReadStream(chunk.path)
                rs.pipe(ws, {end: false})
                rs.on("end", function () {
                    resolve()
                })
            }).then(function () {
                if (stack.length) return todo()
                else ws.end()
            })
        }
        todo().then(function () {
            console.log("done")
        })
    }
}
The full source is at https://github.com/zezhipeng/chunks-uploader