背景:为了实现大文件上传的功能
1. 新建数据表 SQL
file_chunk
-- 文件块存储: one row per uploaded chunk, written by FileStorageServiceImpl.saveFile().
-- Fixes vs. the original dump: the snapshot artifact AUTO_INCREMENT = 1865947819987177473
-- (a snowflake-sized counter) is removed so new environments start from 1, the stray
-- `SET FOREIGN_KEY_CHECKS = 1` (which had no matching `= 0`) is dropped, and a secondary
-- index is added for the pre-upload check query.
CREATE TABLE IF NOT EXISTS `file_chunk` (
  `id` bigint UNSIGNED NOT NULL AUTO_INCREMENT,
  `file_name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件名',
  `chunk_number` int NULL DEFAULT NULL COMMENT '当前分片,从1开始',
  `chunk_size` bigint NULL DEFAULT NULL COMMENT '分片大小',
  `current_chunk_size` bigint NULL DEFAULT NULL COMMENT '当前分片大小',
  `total_size` bigint NULL DEFAULT NULL COMMENT '文件总大小',
  `total_chunk` int NULL DEFAULT NULL COMMENT '总分片数',
  `identifier` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件校验码,md5',
  `relative_path` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '相对路径',
  `create_by` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '创建者',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `update_by` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '更新人',
  `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  PRIMARY KEY (`id`) USING BTREE,
  -- checkUpload() filters by identifier and orders by chunk_number; without this
  -- index every pre-upload check is a full table scan.
  KEY `file_chunk_identifier_chunk_idx` (`identifier`, `chunk_number`)
) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_520_ci COMMENT = '文件块存储' ROW_FORMAT = Dynamic;
file_storage
-- 文件存储表: one row per COMPLETED upload (inserted once all chunks are present).
-- Fixes vs. the original dump: snapshot AUTO_INCREMENT removed, stray
-- `SET FOREIGN_KEY_CHECKS = 1` dropped, and an index added on `identifier`
-- because downloads look files up by MD5 (downLoadByIndentifier → getOne by identifier).
CREATE TABLE IF NOT EXISTS `file_storage` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键',
  `real_name` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件真实姓名',
  `file_name` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件名',
  `suffix` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件后缀',
  `file_path` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件路径',
  `file_type` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '文件类型',
  `size` bigint NULL DEFAULT NULL COMMENT '文件大小',
  `identifier` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '检验码 md5',
  `create_by` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '创建者',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `update_by` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci NULL DEFAULT NULL COMMENT '更新人',
  `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  PRIMARY KEY (`id`) USING BTREE,
  -- Not UNIQUE: re-uploading the same content could legitimately insert a second
  -- row today; dedup belongs in the service layer, not a constraint, until verified.
  KEY `file_storage_identifier_idx` (`identifier`)
) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_520_ci COMMENT = '文件存储表' ROW_FORMAT = Dynamic;
2. 创建实体类:
FileChunk
lombok.Data;import java.io.Serializable;
import java.time.LocalDateTime;/**** @Description:* @date 2024年12月05日 11:20* @Version: 1.0*/
@Data
public class FileChunk implements Serializable {/**主键**/private Long id;/**文件名**/private String fileName;/**当前分片,从1开始**/private Integer chunkNumber;/**分片大小**/private Long chunkSize;/**当前分片大小**/private Long currentChunkSize;/**文件总大小**/private Long totalSize;/**总分片数**/private Integer totalChunk;/**文件标识 md5校验码**/private String identifier;/**相对路径**/private String relativePath;/**创建者**/private String createBy;/**创建时间**/private LocalDateTime createTime;/**更新人**/private String updateBy;/**更新时间**/private LocalDateTime updateTime;}
FileStorage:
package com.jx.springbootbigfileupload.entity;

import lombok.Data;

import java.io.Serializable;
import java.time.LocalDateTime;

/**
 * Entity mapped to table {@code file_storage}: one row per COMPLETED upload.
 * <p>
 * FIX: the original snippet had no package declaration (it started directly at
 * the imports), so it could not compile in the project layout other classes
 * import it from ({@code com.jx.springbootbigfileupload.entity}).
 */
@Data
public class FileStorage implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Primary key. */
    private Long id;
    /** Original (client-side) file name. */
    private String realName;
    /** Stored file name. */
    private String fileName;
    /** File extension. */
    private String suffix;
    /** Path of the stored file. */
    private String filePath;
    /** MIME/content type. */
    private String fileType;
    /** File size in bytes. */
    private Long size;
    /** MD5 checksum. */
    private String identifier;
    /** Creator. */
    private String createBy;
    /** Creation time. */
    private LocalDateTime createTime;
    /** Last updater. */
    private String updateBy;
    /** Last update time. */
    private LocalDateTime updateTime;
}
3. Service 接口与实现
package com.jx.springbootbigfileupload.service;

import com.baomidou.mybatisplus.extension.service.IService;
import com.jx.springbootbigfileupload.dto.CheckResultVo;
import com.jx.springbootbigfileupload.dto.FileChunkDto;
import com.jx.springbootbigfileupload.entity.FileChunk;

/**
 * Service over the {@code file_chunk} table: bookkeeping of individual
 * uploaded chunks for resume-from-breakpoint support.
 */
public interface FileChunkService extends IService<FileChunk> {

    /**
     * Pre-upload check: look up which chunks (if any) of this file are already
     * on the server, keyed by the file's MD5 identifier.
     *
     * @param dto upload metadata; the implementation queries by its identifier
     * @return a {@link CheckResultVo} — {@code uploaded} set for a definitive
     *         answer, otherwise a list of already-uploaded chunk numbers
     */
    CheckResultVo checkUpload(FileChunkDto dto);
}
package com.jx.springbootbigfileupload.service;

import com.baomidou.mybatisplus.extension.service.IService;
import com.jx.springbootbigfileupload.dto.FileChunkDto;
import com.jx.springbootbigfileupload.entity.FileStorage;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

/**
 * Service over the {@code file_storage} table: receives chunk uploads and
 * serves completed files back to clients.
 */
public interface FileStorageService extends IService<FileStorage> {

    /**
     * Store one uploaded chunk (or a whole single-chunk file) on disk.
     *
     * @param dto chunk payload and metadata
     * @return {@code true} when the chunk was written successfully
     */
    Boolean uploadFile(FileChunkDto dto);

    /**
     * Stream a previously completed file back to the client, located by MD5.
     * <p>
     * NOTE(review): the name keeps the original misspelling ("Indentifier")
     * because renaming would break existing callers; fix in a coordinated change.
     *
     * @param request    current HTTP request
     * @param response   response the file bytes are written to
     * @param identifier MD5 checksum recorded at upload time
     */
    void downLoadByIndentifier(HttpServletRequest request, HttpServletResponse response, String identifier);
}
package com.jx.springbootbigfileupload.service.impl;import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.jx.springbootbigfileupload.dto.CheckResultVo;
import com.jx.springbootbigfileupload.dto.FileChunkDto;
import com.jx.springbootbigfileupload.entity.FileChunk;
import com.jx.springbootbigfileupload.mapper.FileChunkMapper;
import com.jx.springbootbigfileupload.service.FileChunkService;
import org.springframework.stereotype.Service;import java.util.ArrayList;
import java.util.List;/*** @author* @Description:* @date 2024年12月05日 11:52* @Version: 1.0*/
@Service
public class FileChunkServiceImpl extends ServiceImpl<FileChunkMapper, FileChunk> implements FileChunkService {@Overridepublic CheckResultVo checkUpload(FileChunkDto dto) {CheckResultVo checkResultVo = new CheckResultVo();List<FileChunk> list = this.list(new LambdaQueryWrapper<FileChunk>().eq(FileChunk::getIdentifier, dto.getIdentifier()).orderByDesc(FileChunk::getChunkNumber));if (list.size() ==0){checkResultVo.setUploaded(false);return checkResultVo;}FileChunk fileChunk = list.get(0);if (fileChunk.getTotalChunk() == 1){checkResultVo.setUploaded(true);return checkResultVo;}List<Integer> uploadedFiles=new ArrayList<>();for (FileChunk chunk : list) {uploadedFiles.add(chunk.getChunkNumber());}checkResultVo.setUploadFiles(uploadedFiles);return checkResultVo;}}
package com.jx.springbootbigfileupload.service.impl;import cn.hutool.core.bean.BeanUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.jx.springbootbigfileupload.dto.FileChunkDto;
import com.jx.springbootbigfileupload.entity.FileChunk;
import com.jx.springbootbigfileupload.entity.FileStorage;
import com.jx.springbootbigfileupload.mapper.FileStorageMapper;
import com.jx.springbootbigfileupload.service.FileChunkService;
import com.jx.springbootbigfileupload.service.FileStorageService;
import com.jx.springbootbigfileupload.util.BulkFileUtil;
import com.jx.springbootbigfileupload.util.FileUtil;
import com.jx.springbootbigfileupload.util.RedisCache;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.List;
import java.util.stream.IntStream;import static cn.hutool.core.bean.BeanUtil.copyProperties;
import static cn.hutool.core.bean.BeanUtil.findEditor;/*** @author* @Description:* @date 2024年12月05日 11:49* @Version: 1.0*/
@Slf4j
@Service
public class FileStorageServiceImpl extends ServiceImpl<FileStorageMapper, FileStorage> implements FileStorageService {@Resourceprivate RedisCache redisCache;@Value("${file.chunk-size}")private Long defaultChunkSize;@Value("${file.path}")private String baseFileSavePath;@Resourceprivate FileChunkService fileChunkService;@Overridepublic Boolean uploadFile(FileChunkDto dto) {if (dto.getFile()==null){throw new RuntimeException("文件不能为空");}String fileRouteName=baseFileSavePath+ File.separator+dto.getFilename();Boolean uploadFlag;if (dto.getTotalChunks()==1){//单片上传uploadFlag=this.uploadSingFile(fileRouteName,dto);}else{//分片上传uploadFlag=this.uploadSharding(fileRouteName,dto);}if (uploadFlag){this.saveFile(dto,fileRouteName);}return uploadFlag;}@SneakyThrows@Overridepublic void downLoadByIndentifier(HttpServletRequest request, HttpServletResponse response, String identifier) {FileStorage one = this.getOne(new LambdaQueryWrapper<FileStorage>().eq(FileStorage::getIdentifier, identifier));if (BeanUtil.isNotEmpty(one)){File file = new File(baseFileSavePath + File.separator + one.getFilePath());BulkFileUtil.downlodFile(request,response,file);}else {throw new RuntimeException("文件不存在");}}/*** 保存文件** @date 2024/12/5 16:15* @param dto* @param fileRouteName*/private void saveFile(FileChunkDto dto, String fileRouteName) {FileChunk fileChunk= BeanUtil.copyProperties(dto, FileChunk.class);fileChunk.setFileName(dto.getFilename());fileChunk.setTotalChunk(dto.getTotalChunks());fileChunkService.save(fileChunk);//重新给缓存赋值redisCache.setCacheListByOne(dto.getIdentifier(),dto.getChunkNumber());//如果所有的分片快都上传完成,那么在redis存储List<Integer> cacheList = redisCache.getCacheList(dto.getIdentifier());Integer totalChunks=dto.getTotalChunks();int[] chunks= IntStream.rangeClosed(1,totalChunks).toArray();if (IntStream.rangeClosed(1,totalChunks).allMatch(cacheList::contains)){//所有的分片上传完成,组合分片并保存到数据库中String name=dto.getFilename();MultipartFile file = dto.getFile();FileStorage fileStorage = new 
FileStorage();fileStorage.setRealName(file.getOriginalFilename());fileStorage.setFileName(fileRouteName);fileStorage.setSize(dto.getTotalSize());fileStorage.setIdentifier(dto.getIdentifier());fileStorage.setFilePath(dto.getRelativePath());fileStorage.setFileType(file.getContentType());fileStorage.setSuffix(FileUtil.getFileSuffix(name));this.save(fileStorage);}}/*** 分片上传方法* 这里使用RandomAccessFile 方法,也可以使用MapperByteBuffer方法上传* 可以省去文件合并的过程** @date 2024/12/5 16:03* @param fileRouteName 文件名* @param dto 文件dto* @return java.lang.Boolean*/private Boolean uploadSharding(String fileRouteName, FileChunkDto dto) {//try 自动资源管理try(RandomAccessFile randomAccessFile=new RandomAccessFile(fileRouteName,"rw")){//分片大小必须和前端匹配,否则会出现文件损坏long chunkSize= dto.getChunkSize()==0L?defaultChunkSize:dto.getChunkSize();//偏移量,就是从第一个位置往文件写入,每一片的大小*已经存的快数long offset=chunkSize*(dto.getChunkNumber()-1);randomAccessFile.seek(offset);//写入randomAccessFile.write(dto.getFile().getBytes());}catch(IOException e){log.info("文件上传失败:",e);return Boolean.FALSE;}return Boolean.TRUE;}/*** 单片上传** @date 2024/12/5 15:57* @param fileRouteName* @param dto* @return java.lang.Boolean*/private Boolean uploadSingFile(String fileRouteName, FileChunkDto dto) {try {File localPath = new File(fileRouteName);dto.getFile().transferTo(localPath);return Boolean.TRUE;} catch (IOException e) {throw new RuntimeException(e);}}
}
4. Mapper 接口
package com.jx.springbootbigfileupload.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.jx.springbootbigfileupload.entity.FileChunk;
import org.apache.ibatis.annotations.Mapper;

/**
 * MyBatis-Plus mapper for the file_chunk table; all CRUD is inherited from
 * {@link BaseMapper}, no custom SQL required.
 * <p>
 * FIX: the original snippet had no package declaration, but the service layer
 * imports it from {@code com.jx.springbootbigfileupload.mapper}.
 */
@Mapper
public interface FileChunkMapper extends BaseMapper<FileChunk> {
}
package com.jx.springbootbigfileupload.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.jx.springbootbigfileupload.entity.FileStorage;
import org.apache.ibatis.annotations.Mapper;

/**
 * MyBatis-Plus mapper for the file_storage table; all CRUD is inherited from
 * {@link BaseMapper}, no custom SQL required.
 */
@Mapper
public interface FileStorageMapper extends BaseMapper<FileStorage> {
}
5. DTO / VO 定义
package com.jx.springbootbigfileupload.dto;

import lombok.Data;

import java.util.List;

/**
 * Result of the pre-upload check (GET /fileStorage/upload).
 * <p>
 * FIX: the original snippet had no package declaration; the controller and
 * services import it from {@code com.jx.springbootbigfileupload.dto}.
 */
@Data
public class CheckResultVo {

    /**
     * true  = file is fully uploaded, skip the upload entirely;
     * false = nothing stored, upload everything;
     * null  = partial — consult {@link #uploadFiles}.
     */
    private Boolean uploaded;

    /**
     * Chunk numbers already stored on the server.
     * NOTE(review): the frontend's checkChunkUploadedByResponse reads
     * {@code uploadedChunks} from the JSON, but this serializes as
     * "uploadFiles" — one side must be renamed or per-chunk resume never
     * works (confirm with the frontend before changing the wire format).
     */
    private List<Integer> uploadFiles;
}
package com.jx.springbootbigfileupload.dto;

import lombok.Data;
import org.springframework.web.multipart.MultipartFile;

/**
 * Request payload the frontend uploader sends for each chunk; field names
 * match simple-uploader.js's default request parameters.
 */
@Data
public class FileChunkDto {

    /** Index of this chunk; the FIRST chunk is 1, NOT 0. */
    private Integer chunkNumber;

    /** Total number of chunks the file was split into. */
    private Integer totalChunks;

    /**
     * Nominal chunk size; together with totalSize this yields the chunk count.
     * Note the LAST chunk may be larger than this value.
     */
    private Long chunkSize;

    /** Actual size of the chunk being uploaded. */
    private Long currentChunkSize;

    /** Total file size in bytes. */
    private Long totalSize;

    /** Unique identifier of the file (MD5 checksum). */
    private String identifier;

    /** File name. */
    private String filename;

    /** Relative path of the file when a whole folder is uploaded. */
    private String relativePath;

    /** The chunk's binary content. */
    private MultipartFile file;
}
6. Controller 层
package com.jx.springbootbigfileupload.controller;import com.jx.springbootbigfileupload.dto.CheckResultVo;
import com.jx.springbootbigfileupload.dto.FileChunkDto;
import com.jx.springbootbigfileupload.response.Result;
import com.jx.springbootbigfileupload.service.FileChunkService;
import com.jx.springbootbigfileupload.service.FileStorageService;
import org.springframework.web.bind.annotation.*;import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;/*** @author* @Description:* @date 2024年12月05日 13:09* @Version: 1.0*/
@RestController
@RequestMapping("fileStorage")
public class FileStorageController {@Resourceprivate FileStorageService fileStorageService;@Resourceprivate FileChunkService fileChunkService;@GetMapping("/upload")public Result<CheckResultVo> checkUpload(FileChunkDto dto){return Result.ok(fileChunkService.checkUpload(dto));}@PostMapping("/upload")public Result<?> uploadFile(FileChunkDto dto, HttpServletResponse response){try {Boolean status=fileStorageService.uploadFile(dto);if (status){return Result.ok("文件上传成功");}else {response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);return Result.error("文件上传失败");}} catch (Exception e) {response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);return Result.error("文件上传失败");}}@GetMapping("/download/{identifier}")public void downLoadByIndentifier(HttpServletRequest request, HttpServletResponse response, @PathVariable("identifier")String identifier){try {fileStorageService.downLoadByIndentifier(request,response,identifier);} catch (Exception e) {throw new RuntimeException(e);}}}
CorsConfig
package com.jx.springbootbigfileupload.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
import org.springframework.web.filter.CorsFilter;

/**
 * Global CORS setup: every path allows any origin pattern, header and method,
 * with credentials, and preflight responses cached for one day.
 */
@Configuration
public class CorsConfig {

    /** Preflight cache duration in seconds (24 hours). */
    private static final long MAX_AGE = 24 * 60 * 60;

    @Bean
    public CorsFilter corsFilter() {
        CorsConfiguration config = new CorsConfiguration();
        config.setAllowCredentials(true);
        config.addAllowedOriginPattern("*");
        config.addAllowedHeader("*");
        config.addAllowedMethod("*");
        config.setMaxAge(MAX_AGE);

        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
        source.registerCorsConfiguration("/**", config);
        return new CorsFilter(source);
    }
}
package com.jx.springbootbigfileupload.response;

import lombok.Data;

import java.io.Serializable;

/**
 * Generic API response wrapper: {@code success}/{@code code}/{@code msg} plus a
 * typed payload.
 * <p>
 * BUGFIX: {@code Result.error(String)} built the response through the
 * (code, msg) constructor, which never set {@code success} — error responses
 * serialized {@code "success": null} while successes serialized {@code true}.
 * The error paths now set {@code success = false} explicitly.
 */
@Data
public class Result<T> implements Serializable {

    private static final long serialVersionUID = 1L;

    private Boolean success;
    private Integer code;
    private String msg;
    private T data;

    /** Default: a 200/成功 success envelope. */
    public Result() {
        this.success = true;
        this.code = 200;
        this.msg = "成功";
    }

    public Result(Integer code, T data) {
        this.code = code;
        this.data = data;
    }

    public Result(Integer code, String msg, T data) {
        this.code = code;
        this.msg = msg;
        this.data = data;
    }

    public Result(Integer code, String msg) {
        this.code = code;
        this.msg = msg;
    }

    /** Turn this instance into an error in place (fluent). */
    public Result<?> error(Integer code, String msg) {
        this.code = code;
        this.msg = msg;
        this.success = false;
        return this;
    }

    /** 500 error with a message; success is explicitly false (see class doc). */
    public static Result error(String msg) {
        Result result = new Result(500, msg);
        result.setSuccess(false);
        return result;
    }

    /** 200 success carrying a data payload. */
    public static Result ok(Object data) {
        Result result = new Result();
        result.setData(data);
        return result;
    }

    /** 200 success carrying only a human-readable message. */
    public static Result ok(String msg) {
        Result result = new Result();
        result.setData(null);
        result.setCode(200);
        result.setMsg(msg);
        return result;
    }
}
前端安装的依赖;
npm install axios core-js element-ui jquery simple-uploader.js spark-md5 vue-simple-uploader vue-router
main.js
// The Vue build version to load with the `import` command
// (runtime-only or standalone) has been set in webpack.base.conf with an alias.
import Vue from 'vue'
import App from './App'
import router from './router'
import ElementUI from 'element-ui'
import 'element-ui/lib/theme-chalk/index.css'
import uploader from 'vue-simple-uploader'

// FIX: the original interleaved Vue.use(...) calls between import statements
// (it only worked because ES imports are hoisted) and duplicated the build
// comment twice. Register all global plugins here, after the imports.
Vue.use(uploader)
Vue.use(ElementUI)

Vue.config.productionTip = false

/* eslint-disable no-new */
new Vue({
  el: '#app',
  router,
  components: { App },
  template: '<App/>'
})
Upload.vue
<template>
  <div>
    <uploader
      ref="uploader"
      :options="options"
      :autoStart="false"
      :file-status-text="fileStatusText"
      @file-added="onFileAdded"
      @file-success="onFileSuccess"
      @file-error="onFileError"
      @file-progress="onFileProgress"
      class="uploader-example"
    >
      <uploader-drop>
        <p>拖动文件到这里上传</p>
        <uploader-btn>选择文件</uploader-btn>
      </uploader-drop>
      <uploader-list>
        <el-collapse v-model="activeName" accordion>
          <el-collapse-item title="文件列表" name="1">
            <ul class="file-list">
              <li v-for="file in uploadFileList" :key="file.id">
                <uploader-file :file="file" :list="true" ref="uploaderFile">
                  <div slot-scope="props" style="display: flex;align-items: center;height: 100%;">
                    <el-progress
                      style="width: 85%"
                      :stroke-width="18"
                      :show-text="true"
                      :text-inside="true"
                      :format="e=> showDetail(e,props)"
                      :percentage="percentage(props)"
                      :color="e=>progressColor(e,props)"
                    ></el-progress>
                    <el-button :icon="icon" circle v-if="props.paused || props.isUploading"
                               @click="pause(file)" size="mini"></el-button>
                    <el-button icon="el-icon-close" circle @click="remove(file)"
                               size="mini"></el-button>
                    <el-button icon="el-icon-download" circle v-if="props.isComplete"
                               @click="download(file)" size="mini"></el-button>
                  </div>
                </uploader-file>
              </li>
              <div class="no-file" v-if="!uploadFileList.length">
                <i class="icon icon-empty-file"></i> 暂无待上传文件
              </div>
            </ul>
          </el-collapse-item>
        </el-collapse>
      </uploader-list>
    </uploader>
  </div>
</template>
<script>
// Chunk size MUST match the backend's `file.chunk-size`, otherwise the server's
// RandomAccessFile offsets are wrong and the assembled file is corrupted.
const CHUNK_SIZE = 1024 * 1024 * 20

import SparkMD5 from 'spark-md5';

export default {
  name: 'Upload',
  data () {
    return {
      options: {
        target: 'http://192.168.1.87:9001/fileStorage/upload',
        testChunks: true,
        uploadMethod: 'POST',
        chunkSize: CHUNK_SIZE,
        simultaneousUploads: 3,
        /**
         * Decide from the GET /upload response whether a chunk is already
         * stored. `uploaded === true/false` answers for the whole file;
         * otherwise look the chunk number up in the uploaded-chunks list.
         */
        checkChunkUploadedByResponse: (chunk, message) => {
          console.log("message", message)
          let dataObj = JSON.parse(message);
          if (dataObj.uploaded != null) {
            return dataObj.uploaded;
          }
          // NOTE(review): the backend VO serializes this list as "uploadFiles",
          // not "uploadedChunks" — as written the list is always empty and a
          // resumed upload re-sends every chunk. Align the field names
          // (confirm with the backend before changing).
          return (dataObj.uploadedChunks || []).indexOf(chunk.offset + 1) >= 0;
        },
        parseTimeRemaining: function (timeRemaining, parsedTimeRemaining) {
          // BUGFIX: the "days" pattern was /\days?/ — "\d" is the DIGIT class,
          // so " day(s)" was never translated. It must be /\sdays?/ like the
          // other units.
          return parsedTimeRemaining
            .replace(/\syears?/, '年')
            .replace(/\sdays?/, '天')
            .replace(/\shours?/, '小时')
            .replace(/\sminutes?/, '分钟')
            .replace(/\sseconds?/, '秒')
        }
      },
      // Human-readable labels for the uploader's status values.
      fileStatusTextObj: {
        success: "上传成功",
        error: "上传错误",
        uploading: "正在上传",
        paused: "停止上传",
        waiting: "等待中",
      },
      uploadFileList: [],
      collapse: true,
      activeName: 1,
      icon: `el-icon-video-pause`
    }
  },
  methods: {
    onFileProgress(rootfile, file, chunk) {
      console.log(`当前进度:${Math.ceil(file._prevProgress * 100)}`);
    },
    onFileError(rootfile, file, message, chunk) {
      console.log("上传出错:" + rootfile, file, message, chunk);
    },
    onFileSuccess(rootfile, file, response, chunk) {
      console.log("上传成功", rootfile, file, response, chunk);
    },
    // Download the completed file by its MD5 identifier.
    download(file, id) {
      console.log("file:>> ", file);
      window.location.href = `http://192.168.1.87:9001/fileStorage/download/${file.uniqueIdentifier}`;
    },
    // Progress-bar color: red on error, blue while progressing.
    progressColor(e, props) {
      if (props.error) {
        return "#f56c62"
      }
      if (e > 0) {
        return "#1989fa"
      }
    },
    // Toggle pause/resume for one file and swap the button icon accordingly.
    pause(file, id) {
      console.log("file:>>", file);
      console.log("id:>>", id);
      if (file.paused) {
        file.resume();
        this.icon = `el-icon-video-pause`
      } else {
        file.pause();
        this.icon = `el-icon-video-play`
      }
    },
    // Remove a file from the local list.
    // BUGFIX: the original used findIndex as a forEach (its callback never
    // returned a boolean) — use it properly and splice the matched index.
    remove(file) {
      const index = this.uploadFileList.findIndex(item => item.id === file.id);
      if (index !== -1) {
        this.$nextTick(() => {
          this.uploadFileList.splice(index, 1);
        });
      }
    },
    // Build the multi-line label shown inside the progress bar.
    showDetail(e, props) {
      let flieName = props.file.name;
      let isComplete = props.isComplete;
      let formatUpload = this.formatFileSize(props.uploadedSize, 2);
      let fileSize = `${props.formatedSize}`;
      let timeRemaining = !isComplete ? `剩余时间:${props.formatedTimeRemaining}` : "";
      let uploaded = !isComplete ? `已上传:${formatUpload}/${fileSize}` : `大小:${fileSize}`;
      let speed = !isComplete ? `速度:${props.formatedSpeed}/s` : "";
      if (props.error) {
        return `${flieName}上传失败`
      } else {
        return `${flieName}\n${uploaded}\n${speed}\n${timeRemaining} \n 进度:${e}`
      }
    },
    // Percentage for el-progress; clamps tiny values to 0.
    percentage(props) {
      let progress = props.progress.toFixed(2) * 100;
      return progress - 1 < 0 ? 0 : progress;
    },
    // Format a byte count as a human-readable string, e.g. "1.5MB".
    // BUGFIX: the original wrapped the result in parseFloat(), which parsed the
    // leading number and threw the unit suffix away.
    formatFileSize(bytes, decimalPoint = 2) {
      if (bytes == 0) return "0 Bytes";
      let k = 1000,
        sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
        i = Math.floor(Math.log(bytes) / Math.log(k));
      return parseFloat((bytes / Math.pow(k, i)).toFixed(decimalPoint)) + sizes[i];
    },
    // New file picked/dropped: queue it, hash it, then resume the upload with
    // the MD5 as its unique identifier.
    onFileAdded(file, event) {
      console.log("eeeee", event)
      this.uploadFileList.push(file);
      console.log("file :>> ", file);
      // fileType is sometimes empty; fall back to parsing the name if needed.
      console.log("文件类型:" + file.fileType + "文件大小:" + file.size + "B");
      // 1. TODO: validate that the file type is allowed before uploading.
      // 2. Compute the MD5 so the backend can dedupe / resume by checksum.
      console.log("校验MD5");
      this.getFileMD5(file, (md5) => {
        if (md5 !== "") {
          // The MD5 becomes the uploader's unique identifier.
          file.uniqueIdentifier = md5;
          file.resume();
        }
      });
    },
    /**
     * Compute the file's MD5 incrementally (chunk by chunk via SparkMD5) and
     * pass it to `callback`. The upload is paused while hashing; the caller
     * resumes it.
     */
    getFileMD5(file, callback) {
      let spark = new SparkMD5.ArrayBuffer();
      let fileReader = new FileReader();
      // Blob.slice with vendor-prefixed fallbacks for older browsers.
      let blobSlice =
        File.prototype.slice ||
        File.prototype.mozSlice ||
        File.prototype.webkitSlice;
      let currentChunk = 0;
      let chunks = Math.ceil(file.size / CHUNK_SIZE);
      let startTime = new Date().getTime();
      file.pause();
      loadNext();
      // readAsArrayBuffer triggers onload once per slice.
      fileReader.onload = function (e) {
        spark.append(e.target.result);
        // BUGFIX: increment BEFORE the bounds check (canonical SparkMD5 loop).
        // The original incremented inside the branch, always doing one extra,
        // empty read past the end of the file.
        currentChunk++;
        if (currentChunk < chunks) {
          loadNext();
        } else {
          let md5 = spark.end();
          console.log(`MD5计算完毕:${md5},耗时:${new Date().getTime() - startTime} ms.`);
          callback(md5);
        }
      };
      // BUGFIX: arrow function so `this` is the component; in the original
      // plain function `this` was the FileReader and `this.$message` threw.
      fileReader.onerror = () => {
        this.$message.error("文件读取错误");
        file.cancel();
      };
      // Read the next slice [start, end) of the file.
      function loadNext() {
        const start = currentChunk * CHUNK_SIZE;
        const end = start + CHUNK_SIZE >= file.size ? file.size : start + CHUNK_SIZE;
        fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
      }
    },
    // Status label; "md5" is our synthetic state while hashing.
    fileStatusText(status, response) {
      if (status === "md5") {
        return "校验MD5";
      } else {
        return this.fileStatusTextObj[status];
      }
    }
  }
}
</script>
router.js
import Vue from 'vue'
import Router from 'vue-router'
import Upload from '@/view/Upload'
import Index from '@/view/Index'

Vue.use(Router)

// Two routes: the landing page and the chunked-upload demo page.
export default new Router({
  routes: [
    {
      path: '/',
      name: 'Index',
      component: Index
    },
    {
      path: '/upload',
      name: 'Upload',
      component: Upload
    }
  ]
})