JavaScript: need advice about this code, because it kills my program

Let's get started. I want to implement the following idea: connect to another user on a different computer via WebRTC (exchanging video and audio data) and then recognize their emotions. For this project I use node-webrtc (the link was here). I downloaded the examples and tested the video-compositing one; everything worked fine. The next part is recognizing facial emotions; for that task I use face-api.js, which I have already tested. I won't attach screenshots, because right now I'm on Ubuntu, but I tested it on Windows and, trust me, everything was fine. So now it's time to combine the two modules. Since I use the node-webrtc examples as the base of my project, all further explanation revolves around that module. To run the result, copy the weights folder from face-api.js into the node-webrtc/examples/video-compositing folder, then replace the contents of node-webrtc/examples/video-compositing/server.js with the code below:
'use strict';

require('@tensorflow/tfjs-node');

const tf = require('@tensorflow/tfjs');
const nodeFetch = require('node-fetch');
const fapi = require('face-api.js');
const path = require('path');
const { createCanvas, createImageData } = require('canvas');
const { RTCVideoSink, RTCVideoSource, i420ToRgba, rgbaToI420 } = require('wrtc').nonstandard;

fapi.env.monkeyPatch({ fetch: nodeFetch });

const MODELS_URL = path.join(__dirname, '/weights');
const width = 640;
const height = 480;

Promise.all([
  fapi.nets.tinyFaceDetector.loadFromDisk(MODELS_URL),
  fapi.nets.faceLandmark68Net.loadFromDisk(MODELS_URL),
  fapi.nets.faceRecognitionNet.loadFromDisk(MODELS_URL),
  fapi.nets.faceExpressionNet.loadFromDisk(MODELS_URL)
]);

function beforeOffer(peerConnection) {
  const source = new RTCVideoSource();
  const track = source.createTrack();
  const transceiver = peerConnection.addTransceiver(track);
  const sink = new RTCVideoSink(transceiver.receiver.track);

  let lastFrame = null;

  function onFrame({ frame }) {
    lastFrame = frame;
  }

  sink.addEventListener('frame', onFrame);

  // TODO(mroberts): Is pixelFormat really necessary?
  const canvas = createCanvas(width, height);
  const context = canvas.getContext('2d', { pixelFormat: 'RGBA24' });
  context.fillStyle = 'white';
  context.fillRect(0, 0, width, height);

  let emotion = '';

  const interval = setInterval(() => {
    if (lastFrame) {
      const lastFrameCanvas = createCanvas(lastFrame.width, lastFrame.height);
      const lastFrameContext = lastFrameCanvas.getContext('2d', { pixelFormat: 'RGBA24' });
      const rgba = new Uint8ClampedArray(lastFrame.width * lastFrame.height * 4);
      const rgbaFrame = createImageData(rgba, lastFrame.width, lastFrame.height);
      i420ToRgba(lastFrame, rgbaFrame);
      lastFrameContext.putImageData(rgbaFrame, 0, 0);
      context.drawImage(lastFrameCanvas, 0, 0);

      const emotionsArr = { 0: 'neutral', 1: 'happy', 2: 'sad', 3: 'angry', 4: 'fearful', 5: 'disgusted', 6: 'surprised' };

      async function detectEmotion() {
        let frameTensor3D = tf.browser.fromPixels(lastFrameCanvas);
        let face = await fapi.detectSingleFace(frameTensor3D, new fapi.TinyFaceDetectorOptions()).withFaceExpressions();
        //console.log(face);

        function getEmotion(face) {
          try {
            let mostLikelyEmotion = emotionsArr[0];
            let predictionArruracy = face.expressions[emotionsArr[0]];
            for (let i = 0; i < Object.keys(face.expressions).length; i++) {
              if (face.expressions[emotionsArr[i]] > predictionArruracy && face.expressions[emotionsArr[i]] < 1) {
                mostLikelyEmotion = emotionsArr[i];
                predictionArruracy = face.expressions[emotionsArr[i]];
              }
            }
            return mostLikelyEmotion;
          } catch (e) {
            return '';
          }
        }

        let emot = getEmotion(face);
        return emot;
      }

      detectEmotion().then(function(res) {
        emotion = res;
      });
    } else {
      context.fillStyle = 'rgba(255, 255, 255, 0.025)';
      context.fillRect(0, 0, width, height);
    }

    if (emotion != '') {
      context.font = '60px Sans-serif';
      context.strokeStyle = 'black';
      context.lineWidth = 1;
      context.fillStyle = `rgba(${Math.round(255)}, ${Math.round(255)}, ${Math.round(255)}, 1)`;
      context.textAlign = 'center';
      context.save();
      context.translate(width / 2, height);
      context.strokeText(emotion, 0, 0);
      context.fillText(emotion, 0, 0);
      context.restore();
    }

    const rgbaFrame = context.getImageData(0, 0, width, height);
    const i420Frame = {
      width,
      height,
      data: new Uint8ClampedArray(1.5 * width * height)
    };
    rgbaToI420(rgbaFrame, i420Frame);
    source.onFrame(i420Frame);
  });

  const { close } = peerConnection;
  peerConnection.close = function() {
    clearInterval(interval);
    sink.stop();
    track.stop();
    return close.apply(this, arguments);
  };
}

module.exports = { beforeOffer };
And that's it, everything works fine)... well, actually no. After 2-3 minutes my computer stops responding to anything, I can't even move the mouse, and then the process dies with the error "Killed" in the terminal. I read up on this error, and since I only changed one script in the project, I suspect there is a memory leak somewhere in the code and my RAM fills up over time. Can someone help me figure out why the program ends with the process being killed? In case anyone wants to test it themselves, I'm leaving the package.json so all the requirements are easy to install:
{
  "name": "node-webrtc-examples",
  "version": "0.1.0",
  "description": "This project presents a few example applications using node-webrtc.",
  "private": true,
  "main": "index.js",
  "scripts": {
    "lint": "eslint index.js examples lib test",
    "start": "node index.js",
    "test": "npm run test:unit && npm run test:integration",
    "test:unit": "tape 'test/unit/**/*.js'",
    "test:integration": "tape 'test/integration/**/*.js'"
  },
  "keywords": [
    "Web",
    "Audio"
  ],
  "author": "Mark Andrus Roberts <markandrusroberts@gmail.com>",
  "license": "BSD-3-Clause",
  "dependencies": {
    "@tensorflow/tfjs": "^1.2.9",
    "@tensorflow/tfjs-core": "^1.2.9",
    "@tensorflow/tfjs-node": "^1.2.9",
    "Scope": "github:kevincennis/Scope",
    "body-parser": "^1.18.3",
    "browserify-middleware": "^8.1.1",
    "canvas": "^2.6.0",
    "color-space": "^1.16.0",
    "express": "^4.16.4",
    "face-api.js": "^0.21.0",
    "node-fetch": "^2.3.0",
    "uuid": "^3.3.2",
    "wrtc": "^0.4.1"
  },
  "devDependencies": {
    "eslint": "^5.15.1",
    "tape": "^4.10.0"
  }
}
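Before the fix, here is a quick way to confirm that it is TensorFlow.js tensors, not ordinary JavaScript objects, that are leaking. tf.memory() reports the number of live tensors and the bytes they occupy; the snippet below is only an illustrative diagnostic (it is not part of the original example) that you could drop into server.js after the requires. If numTensors climbs steadily while frames are processed, a tensor is being created on every frame and never disposed.

// Diagnostic sketch (not in the original example): log TensorFlow.js memory
// statistics every 5 seconds. A steadily growing numTensors indicates a
// per-frame tensor leak.
const tf = require('@tensorflow/tfjs');

setInterval(() => {
  const { numTensors, numBytes } = tf.memory();
  console.log(`live tensors: ${numTensors}, bytes in use: ${numBytes}`);
}, 5000);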
The problem is a memory leak: tf.browser.fromPixels creates a new tensor on every tick of the interval, and nothing ever releases it, so memory grows until the operating system kills the process. The fix is to dispose of the tensor as soon as detection is finished:

frameTensor3D.dispose();

Below is the complete corrected server.js. The tensor is now created and disposed inside detectEmotion (which takes the frame canvas as a parameter), and the detector runs with a reduced inputSize of 160:
"use strict";
require("@tensorflow/tfjs-node");
const tf = require("@tensorflow/tfjs");
const nodeFetch = require("node-fetch");
const fapi = require("face-api.js");
const path = require("path");
const { createCanvas, createImageData } = require("canvas");
const {
RTCVideoSink,
RTCVideoSource,
i420ToRgba,
rgbaToI420
} = require("wrtc").nonstandard;
fapi.env.monkeyPatch({ fetch: nodeFetch });
const MODELS_URL = path.join(__dirname, "/weights");
const width = 640;
const height = 480;
Promise.all([
fapi.nets.tinyFaceDetector.loadFromDisk(MODELS_URL),
fapi.nets.faceLandmark68Net.loadFromDisk(MODELS_URL),
fapi.nets.faceRecognitionNet.loadFromDisk(MODELS_URL),
fapi.nets.faceExpressionNet.loadFromDisk(MODELS_URL)
]);
function beforeOffer(peerConnection) {
const source = new RTCVideoSource();
const track = source.createTrack();
const transceiver = peerConnection.addTransceiver(track);
const sink = new RTCVideoSink(transceiver.receiver.track);
let lastFrame = null;
function onFrame({ frame }) {
lastFrame = frame;
}
sink.addEventListener("frame", onFrame);
// TODO(mroberts): Is pixelFormat really necessary?
const canvas = createCanvas(width, height);
const context = canvas.getContext("2d", { pixelFormat: "RGBA24" });
context.fillStyle = "white";
context.fillRect(0, 0, width, height);
const emotionsArr = {
0: "neutral",
1: "happy",
2: "sad",
3: "angry",
4: "fearful",
5: "disgusted",
6: "surprised"
};
async function detectEmotion(lastFrameCanvas) {
const frameTensor3D = tf.browser.fromPixels(lastFrameCanvas);
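    // A reduced inputSize (160) makes the tiny face detector cheaper per
    // frame; this trades some detection accuracy for speed.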
    const face = await fapi
      .detectSingleFace(
        frameTensor3D,
        new fapi.TinyFaceDetectorOptions({ inputSize: 160 })
      )
      .withFaceExpressions();
    //console.log(face);
    const emo = getEmotion(face);
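    // Release the tensor created by tf.browser.fromPixels above. Without this
    // call one tensor leaks per processed frame, and memory grows until the
    // OS kills the process.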
    frameTensor3D.dispose();
    return emo;
  }

  function getEmotion(face) {
    try {
      let mostLikelyEmotion = emotionsArr[0];
      let predictionArruracy = face.expressions[emotionsArr[0]];
      for (let i = 0; i < Object.keys(face.expressions).length; i++) {
        if (
          face.expressions[emotionsArr[i]] > predictionArruracy &&
          face.expressions[emotionsArr[i]] < 1
        ) {
          mostLikelyEmotion = emotionsArr[i];
          predictionArruracy = face.expressions[emotionsArr[i]];
        }
      }
      //console.log(mostLikelyEmotion);
      return mostLikelyEmotion;
    } catch (e) {
      return "";
    }
  }

  let emotion = "";

  const interval = setInterval(() => {
    if (lastFrame) {
      const lastFrameCanvas = createCanvas(lastFrame.width, lastFrame.height);
      const lastFrameContext = lastFrameCanvas.getContext("2d", {
        pixelFormat: "RGBA24"
      });
      const rgba = new Uint8ClampedArray(
        lastFrame.width * lastFrame.height * 4
      );
      const rgbaFrame = createImageData(
        rgba,
        lastFrame.width,
        lastFrame.height
      );
      i420ToRgba(lastFrame, rgbaFrame);
      lastFrameContext.putImageData(rgbaFrame, 0, 0);
      context.drawImage(lastFrameCanvas, 0, 0);
      detectEmotion(lastFrameCanvas).then(function(res) {
        emotion = res;
      });
    } else {
      context.fillStyle = "rgba(255, 255, 255, 0.025)";
      context.fillRect(0, 0, width, height);
    }

    if (emotion != "") {
      context.font = "60px Sans-serif";
      context.strokeStyle = "black";
      context.lineWidth = 1;
      context.fillStyle = `rgba(${Math.round(255)}, ${Math.round(
        255
      )}, ${Math.round(255)}, 1)`;
      context.textAlign = "center";
      context.save();
      context.translate(width / 2, height);
      context.strokeText(emotion, 0, 0);
      context.fillText(emotion, 0, 0);
      context.restore();
    }

    const rgbaFrame = context.getImageData(0, 0, width, height);
    const i420Frame = {
      width,
      height,
      data: new Uint8ClampedArray(1.5 * width * height)
    };
    rgbaToI420(rgbaFrame, i420Frame);
    source.onFrame(i420Frame);
  });

  const { close } = peerConnection;
  peerConnection.close = function() {
    clearInterval(interval);
    sink.stop();
    track.stop();
    return close.apply(this, arguments);
  };
}

module.exports = { beforeOffer };
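A note on the design choice: tf.tidy() is the usual way to have intermediate tensors released automatically, but it throws if its callback returns a Promise, so it cannot wrap the awaited detectSingleFace call; that is why the explicit dispose() is the right tool here. For purely synchronous tensor work, a tidy-based version would look like this sketch (illustration only, reusing lastFrameCanvas from the example above; not from the original post):

// Inside tf.tidy(), every intermediate tensor is disposed automatically when
// the callback returns; only the returned tensor survives. tf.tidy() cannot
// be used with async callbacks, hence the explicit dispose() above.
const mean = tf.tidy(() => {
  const img = tf.browser.fromPixels(lastFrameCanvas); // disposed automatically
  return img.mean(); // survives the tidy scope
});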