JavaScript Chrome 内存问题 - 文件 API + Angular
我有一个web应用程序需要将大型文件上载到Azure BLOB存储。我的解决方案使用HTML5文件API将块分割成块,然后作为blob块放置,块的ID存储在数组中,然后块作为blob提交 该解决方案在IE中运行良好。在64位Chrome上,我已成功上载4Gb文件,但内存使用量非常大(2Gb+)。在32位Chrome上,特定的Chrome进程将达到500-550Mb左右,然后崩溃 我看不到任何明显的内存泄漏,也看不到任何可以改变以帮助垃圾收集的东西。我将块ID存储在一个数组中,因此显然会有一些内存,但这不应该太大。这几乎就像文件API保存着它所分割到内存中的整个文件一样 它是作为控制器调用的角度服务编写的,我认为服务代码是相关的:Javascript Chrome内存问题-文件API+;安格拉斯,javascript,html,google-chrome,azure,fileapi,Javascript,Html,Google Chrome,Azure,Fileapi,我有一个web应用程序需要将大型文件上载到Azure BLOB存储。我的解决方案使用HTML5文件API将块分割成块,然后作为blob块放置,块的ID存储在数组中,然后块作为blob提交 该解决方案在IE中运行良好。在64位Chrome上,我已成功上载4Gb文件,但内存使用量非常大(2Gb+)。在32位Chrome上,特定的Chrome进程将达到500-550Mb左右,然后崩溃 我看不到任何明显的内存泄漏,也看不到任何可以改变以帮助垃圾收集的东西。我将块ID存储在一个数组中,因此显然会有一些内存
(function() {
'use strict';
angular
.module('app.core')
.factory('blobUploadService',
[
'$http', 'stringUtilities',
blobUploadService
]);
function blobUploadService($http, stringUtilities) {
var defaultBlockSize = 1024 * 1024; // Default to 1024KB
var stopWatch = {};
var state = {};
/**
 * Builds the mutable per-upload state object for a chunked block-blob upload.
 *
 * config: {
 *   file:      File (HTML5 File API) to upload,
 *   baseUrl:   blob URI (account/container/blobname),
 *   sasToken:  SAS querystring prefixed with '?',
 *   blockSize: optional override of defaultBlockSize (bytes),
 *   progress / complete / error: optional callbacks
 * }
 *
 * Returns the state consumed by upload()/uploadFileInBlocks()/commitBlockList().
 */
var initializeState = function(config) {
    // Caller may override the default block size; the reference to
    // defaultBlockSize is only evaluated when no override is supplied.
    var blockSize = config.blockSize ? config.blockSize : defaultBlockSize;
    var file = config.file;
    var fileSize = file.size;
    // A block never needs to be larger than the file itself.
    var maxBlockSize = fileSize < blockSize ? fileSize : blockSize;
    // Math.ceil covers both the exact-multiple and the remainder case in one
    // expression. The > 0 guard fixes the original bug where a zero-byte file
    // produced 0 % 0 === NaN and therefore numberOfBlocks = NaN.
    var numberOfBlocks = maxBlockSize > 0 ? Math.ceil(fileSize / maxBlockSize) : 0;
    return {
        maxBlockSize: maxBlockSize,
        numberOfBlocks: numberOfBlocks,
        totalBytesRemaining: fileSize,
        currentFilePointer: 0,
        blockIds: [],
        blockIdPrefix: 'block-',
        bytesUploaded: 0,
        submitUri: null,
        file: file,
        baseUrl: config.baseUrl,
        sasToken: config.sasToken,
        fileUrl: config.baseUrl + config.sasToken,
        progress: config.progress,
        complete: config.complete,
        error: config.error,
        cancelled: false
    };
};
/* config: {
baseUrl: // baseUrl for blob file uri (i.e. http://<accountName>.blob.core.windows.net/<container>/<blobname>),
sasToken: // Shared access signature querystring key/value prefixed with ?,
file: // File object using the HTML5 File API,
progress: // progress callback function,
complete: // complete callback function,
error: // error callback function,
blockSize: // Use this to override the defaultBlockSize
} */
/* Entry point: starts a chunked upload of config.file to Azure blob storage.
Each chunk is read with a FileReader, PUT as a block, and the onloadend
handler re-enters uploadFileInBlocks to schedule the next chunk.
Returns a handle exposing cancel().
NOTE(review): `state` is a factory-level shared variable, so two concurrent
upload() calls would clobber each other's state — confirm callers serialize
uploads. Also, every Blob produced by File.slice() stays referenced until
the browser collects it, which is presumably the source of the reported
Chrome memory growth — TODO confirm. */
var upload = function(config) {
state = initializeState(config);
var reader = new FileReader();
// Fired after each readAsArrayBuffer() issued by uploadFileInBlocks.
reader.onloadend = function(evt) {
if (evt.target.readyState === FileReader.DONE && !state.cancelled) { // DONE === 2
// The block id for this chunk was pushed by uploadFileInBlocks just
// before the read was started, so it is the last entry in blockIds.
var uri = state.fileUrl + '&comp=block&blockid=' + state.blockIds[state.blockIds.length - 1];
var requestData = new Uint8Array(evt.target.result);
// transformRequest: [] prevents Angular from JSON-serializing the bytes.
$http.put(uri,
requestData,
{
headers: {
'x-ms-blob-type': 'BlockBlob',
'Content-Type': state.file.type
},
transformRequest: []
})
.success(function(data, status, headers, config) {
state.bytesUploaded += requestData.length;
// Percentage with two decimals, reported via the progress callback.
var percentComplete = ((parseFloat(state.bytesUploaded) / parseFloat(state.file.size)) * 100
).toFixed(2);
if (state.progress) state.progress(percentComplete, data, status, headers, config);
// Recurse: schedule the next block (or commit when none remain).
uploadFileInBlocks(reader, state);
})
.error(function(data, status, headers, config) {
if (state.error) state.error(data, status, headers, config);
});
}
};
// Kick off the first block read; subsequent blocks chain via onloadend.
uploadFileInBlocks(reader, state);
// Per-upload cancel handle (see also the factory-level cancel()).
return {
cancel: function() {
state.cancelled = true;
}
};
};
/**
 * Cancels the current upload: flags the shared upload state so pending
 * FileReader/$http callbacks bail out, and discards all stopwatch timings.
 * Always returns true.
 */
function cancel() {
    state.cancelled = true;
    stopWatch = {};
    return true;
}
/**
 * Records the start time for the given stopwatch handle.
 * Starting an already-running handle is a no-op (the original start wins).
 */
function startStopWatch(handle) {
    if (stopWatch[handle] !== undefined) return;
    stopWatch[handle] = { start: Date.now() };
}
/**
 * Stops the stopwatch for the given handle and returns the elapsed time
 * in milliseconds, removing the handle's entry.
 * Returns undefined if the handle was never started (the original code
 * threw a TypeError reading `.stop` of undefined in that case).
 */
function stopStopWatch(handle) {
    var entry = stopWatch[handle];
    if (entry === undefined) return undefined;
    entry.stop = Date.now();
    var duration = entry.stop - entry.start;
    delete stopWatch[handle];
    return duration;
}
/**
 * Commits the uploaded blocks: PUTs the Block List XML (one <Latest> entry
 * per accumulated block id, in upload order) to finalize the blob, then
 * invokes state.complete or state.error.
 */
var commitBlockList = function(state) {
    var uri = state.fileUrl + '&comp=blocklist';
    // Assemble the Put Block List payload from the accumulated block ids.
    var latestTags = state.blockIds.map(function(blockId) {
        return '<Latest>' + blockId + '</Latest>';
    });
    var requestBody =
        '<?xml version="1.0" encoding="utf-8"?><BlockList>' +
        latestTags.join('') +
        '</BlockList>';
    $http.put(uri, requestBody, {
        headers: {
            'x-ms-blob-content-type': state.file.type
        }
    })
    .success(function(data, status, headers, config) {
        // Surface completion to the caller when a callback was supplied.
        if (state.complete) state.complete(data, status, headers, config);
    })
    .error(function(data, status, headers, config) {
        // Called when the commit fails or the server returns an error status.
        if (state.error) state.error(data, status, headers, config);
    });
};
/**
 * Schedules the next block of state.file for upload, or commits the block
 * list once every byte has been sliced. Mutates state: appends the next
 * base64 block id, advances the file pointer, and shrinks maxBlockSize for
 * the final partial block. Does nothing after cancellation.
 */
var uploadFileInBlocks = function(reader, state) {
    // Cancelled uploads stop scheduling work entirely.
    if (state.cancelled) return;
    if (state.totalBytesRemaining <= 0) {
        // All blocks read and sent: finalize the blob.
        commitBlockList(state);
        return;
    }
    // Slice the next chunk off the File and start an async read; the
    // reader's onloadend handler PUTs the block and re-enters this function.
    var chunk = state.file.slice(state.currentFilePointer,
        state.currentFilePointer + state.maxBlockSize);
    var blockId = state.blockIdPrefix + stringUtilities.pad(state.blockIds.length, 6);
    state.blockIds.push(btoa(blockId));
    reader.readAsArrayBuffer(chunk);
    state.currentFilePointer += state.maxBlockSize;
    state.totalBytesRemaining -= state.maxBlockSize;
    // Shrink the last block to the bytes actually remaining.
    if (state.totalBytesRemaining < state.maxBlockSize) {
        state.maxBlockSize = state.totalBytesRemaining;
    }
};
return {
upload: upload,
cancel: cancel,
startStopWatch: startStopWatch,
stopStopWatch: stopStopWatch
};
};
})();
(函数(){
'use strict';
有棱角的
.module('app.core'))
.工厂(“blobUploadService”,
[
“$http”,“stringUtilities”,
blobUploadService
]);
函数blobUploadService($http,stringUtilities){
var defaultBlockSize=1024*1024;//默认值为1024KB
var秒表={};
var state={};
var initializeState=函数(配置){
var blockSize=defaultBlockSize;
如果(config.blockSize)blockSize=config.blockSize;
var maxBlockSize=块大小;
var numberOfBlocks=1;
var file=config.file;
var fileSize=file.size;
如果(文件大小<块大小){
maxBlockSize=文件大小;
}
如果(文件大小%maxBlockSize==0){
numberOfBlocks=文件大小/maxBlockSize;
}否则{
numberOfBlocks=parseInt(fileSize/maxBlockSize,10)+1;
}
返回{
maxBlockSize:maxBlockSize,
numberOfBlocks:numberOfBlocks,
totalBytesRemaining:文件大小,
currentFilePointer:0,
BlockId:新数组(),
blockIdPrefix:“块-”,
字节:0,
submitUri:null,
档案:档案,
baseUrl:config.baseUrl,
sasToken:config.sasToken,
fileUrl:config.baseUrl+config.sasToken,
progress:config.progress,
complete:config.complete,
错误:config.error,
取消:假
};
};
/*配置:{
baseUrl://blob文件uri的baseUrl(即http://
我看不到任何明显的内存泄漏或我可以改变的东西来帮助
垃圾收集。我将块ID存储在一个数组中,非常明显
会有一些记忆信条,但这不应该是巨大的,它是巨大的
几乎就好像文件API保存着它所分割的整个文件一样
记忆
您是对的。由 .slice() 创建的新 Blob 会被保存在内存中。
解决方案是在处理完 Blob 或 File 对象后,在 Blob 引用上调用 Blob.prototype.close()。
(注:Blob.prototype.close() 后来已从 File API 规范中移除,现代浏览器并未实现——请自行确认。)
另外请注意,如果多次调用upload
函数,则在问题中的javascript
也会创建FileReader
的新实例
slice()
方法返回一个新的Blob
对象,其字节范围为
从可选的start
参数到但不包括
可选的end
参数,并带有作为
可选contentType
参数的值
Blob
实例在document
的生命周期内存在。但是Blob
一旦从Blob URL存储中删除,就应该进行垃圾收集
注意:用户代理可以免费对从中删除的资源进行垃圾收集
这个
每个Blob
必须有一个内部,必须
最初设置为基础存储的状态(如果有)
基础存储已存在,必须通过
。快照状态的进一步规范性定义可以
可以找到文件
s
close()
方法被称为一个Blob
,必须充当
如下:
如果上下文对象的属性为,则终止此算法
否则,将上下文对象的可读性状态
设置为关闭
如果上下文对象在中有条目,请删除与上下文对象相对应的条目
如果Blob
对象被传递到URL.createObjectURL()
,则在Blob
或文件
对象上调用URL.revokeObjectURL()
,然后调用.close()
静态法
通过从Blob url存储中删除相应的条目来撤销字符串url
中提供的。此方法必须执行
详情如下:
1.如果引用的Blob
具有CLOSED
的可读性状态
,或者如果为url
参数提供的值为
不是blobURL
,或者如果为URL
参数提供的值
在Blob URL存储中没有条目,此方法调用没有
无。用户代理可能会在错误控制台上显示消息。
2.否则,对于URL
,用户代理必须从Blob URL存储区
您可以通过打开来查看这些调用的结果
chrome://blob-internals
查看创建Blob
并关闭Blob
的前后调用的详细信息
例如,从
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
Refcount: 1
Content Type: text/plain
Type: data
Length: 3
到
以下调用.close()
。类似于
blob:http://example.com/c2823f75-de26-46f9-a4e5-95f57b8230bd
Uuid: 29e430a6-f093-40c2-bc70-2b6838a713bc
另一种方法是以ArrayBuffer
或数组缓冲区块的形式发送文件
blob:http://example.com/c2823f75-de26-46f9-a4e5-95f57b8230bd
Uuid: 29e430a6-f093-40c2-bc70-2b6838a713bc
<!-- Demo page: reads a selected file as an ArrayBuffer, re-chunks it through
     a ReadableStream (CHUNK bytes at a time) while driving a <progress>
     element via CustomEvents, then reassembles the chunks into a Blob.
     NOTE(review): blob.close() is guarded by hasOwnProperty because the
     method was never widely implemented — confirm target browsers. -->
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<input id="file" type="file">
<br>
<progress value="0"></progress>
<br>
<output for="file"><img alt="preview"></output>
<script type="text/javascript">
// DOM references, a shared FileReader, error handler and chunk size (1 MiB),
// bound in a single parallel destructuring assignment.
const [input, output, img, progress, fr, handleError, CHUNK] = [
document.querySelector("input[type='file']")
, document.querySelector("output[for='file']")
, document.querySelector("output img")
, document.querySelector("progress")
, new FileReader
, (err) => console.log(err)
, 1024 * 1024
];
// Custom "progress" events carry the current byte count and a resolver that
// lets the stream consumer continue only after the UI has been updated.
progress.addEventListener("progress", e => {
progress.value = e.detail.value;
e.detail.promise();
});
// Shared mutable state: collected chunks, stream read offset, bytes counted
// for progress, and the object URL / Blob from the previous selection.
// (url and blob are intentionally left undefined until first use.)
let [chunks, NEXT, CURR, url, blob] = [Array(), 0, 0];
input.onchange = () => {
// Reset all counters and drop chunks from any previous file.
NEXT = CURR = progress.value = progress.max = chunks.length = 0;
if (url) {
// Release the previous preview's object URL (and Blob, where supported)
// so the browser can reclaim the memory.
URL.revokeObjectURL(url);
if (blob.hasOwnProperty("close")) {
blob.close();
}
}
if (input.files.length) {
console.log(input.files[0]);
progress.max = input.files[0].size;
progress.step = progress.max / CHUNK;
// Read the whole file into memory; chunking happens in fr.onload below.
fr.readAsArrayBuffer(input.files[0]);
}
}
fr.onload = () => {
const VIEW = new Uint8Array(fr.result);
const LEN = VIEW.byteLength;
const {type, name:filename} = input.files[0];
// Pull-based stream that hands out CHUNK-sized subarray views of VIEW.
const stream = new ReadableStream({
pull(controller) {
if (NEXT < LEN) {
controller
.enqueue(VIEW.subarray(NEXT, !NEXT ? CHUNK : CHUNK + NEXT));
NEXT += CHUNK;
} else {
controller.close();
}
},
cancel(reason) {
console.log(reason);
throw new Error(reason);
}
});
// processData recurses over reader.read() results: collect each chunk,
// wait for the progress event's resolver, then read the next chunk.
const [reader, processData] = [
stream.getReader()
, ({value, done}) => {
if (done) {
// Stream exhausted: resolve with the collected chunks.
return reader.closed.then(() => chunks);
}
chunks.push(value);
return new Promise(resolve => {
progress.dispatchEvent(
new CustomEvent("progress", {
detail:{
value:CURR += value.byteLength,
promise:resolve
}
})
);
})
.then(() => reader.read().then(data => processData(data)))
.catch(e => reader.cancel(e))
}
];
reader.read()
.then(data => processData(data))
.then(data => {
// Reassemble the chunks into a single Blob of the original type.
blob = new Blob(data, {type});
console.log("complete", data, blob);
if (/image/.test(type)) {
// Show an inline preview for images via an object URL.
url = URL.createObjectURL(blob);
img.onload = () => {
img.title = filename;
input.value = "";
}
img.src = url;
} else {
input.value = "";
}
})
.catch(e => handleError(e))
}
</script>
</body>
</html>
// Example: PUT the reassembled Blob to a server in a single request.
// NOTE(review): the returned Promise is unhandled here — attach .then/.catch
// (and check response.ok) in real code.
fetch(new Request("/path/to/server/", {method:"PUT", body:blob}))