Javascript 网络摄像头视频不能在浏览器中启动（Rails 5）
您好，我在 Rails 5 中有一个使用 face-api.js 人脸识别库的应用。但在浏览器中网络摄像头的视频无法工作，控制台中也没有显示任何错误。我不明白发生了什么。这是我的视图代码：
<head>
<%# Load the face-api.js library scripts FIRST: face_recognition.js references
    the global `faceapi`, so including it before the library leaves `faceapi`
    undefined and the script dies silently — matching the reported symptom.
    NOTE(review): face-api.min.js and face-api.js are the same library twice;
    only one of them should be needed — confirm and drop the other. %>
<%= javascript_include_tag "face-api.js/dist/face-api.min.js" %>
<%= javascript_include_tag "face-api.js/dist/face-api.js" %>
<%= javascript_include_tag 'face_api'%>
<%= javascript_include_tag 'face_recognition.js'%>
<style>
/* Pull the detection canvas up and right so it overlays the video element. */
canvas{
position: relative;
top: -420px;
left: 350px;
}
</style>
</head>
<body>
<video id="camfr" autoplay="autoplay" width="400" height="400" muted></video>
</body>
在我的 lib/assets/javascripts 中，我有一个 face_recognition.js 文件，其中加载相机、画布和 face-api.js 的模型：
// The <video> element that displays the webcam stream.
const camfr = document.getElementById('camfr')

// Request the user's webcam and attach the resulting stream to the <video>
// element; playback starts once the stream metadata has loaded.
const startVideo = () => {
  const constraints = { audio: false, video: { width: 1280, height: 720 } };
  navigator.mediaDevices.getUserMedia(constraints)
    .then(function (mediaStream) {
      const video = document.querySelector('video');
      video.srcObject = mediaStream;
      video.onloadedmetadata = function () {
        video.play();
      };
    })
    .catch(function (err) {
      // Surface permission/device/insecure-context errors instead of letting
      // the rejection float away silently.
      console.error('getUserMedia failed:', err);
    });
}
// Load every face-api.js model before starting the camera.
// FIX: the tiny_face_detector path read 'vendorface-api.js/...' — missing the
// separator after 'vendor' and inconsistent with the sibling paths — so the
// manifest request 404'd, Promise.all rejected, and startVideo never ran.
// The .catch makes any future load failure visible in the console.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri("<%= asset_path('face-api.js/models/tiny_face_detector/tiny_face_detector_model-weights_manifest.json') %>"),
  faceapi.nets.faceLandmark68Net.loadFromUri("<%= asset_path('face-api.js/models/face_landmark_68/face_landmark_68_model-weights_manifest.json') %>"), // draws facial landmarks
  faceapi.nets.faceRecognitionNet.loadFromUri("<%= asset_path('face-api.js/models/face_recognition/face_recognition_model-weights_manifest.json') %>"), // face recognition
  faceapi.nets.faceExpressionNet.loadFromUri("<%= asset_path('face-api.js/models/face_expression/face_expression_model-weights_manifest.json') %>"), // detects expressions
  faceapi.nets.ageGenderNet.loadFromUri("<%= asset_path('face-api.js/models/age_gender_model/age_gender_model-weights_manifest.json') %>"), // age and gender
  faceapi.nets.ssdMobilenetv1.loadFromUri("<%= asset_path('face-api.js/models/ssd_mobilenetv1/ssd_mobilenetv1_model-weights_manifest.json') %>") // face detection
]).then(startVideo)
  .catch((err) => console.error('Failed to load face-api.js models:', err))
// Detection loop state: 'play' can fire more than once (pause/resume), and the
// original code leaked a fresh canvas + a never-cleared setInterval each time.
let faceDetectionIntervalId = null
let faceOverlayCanvas = null

// On playback start, (re)use one overlay canvas and one 100 ms detection
// interval, drawing boxes, landmarks, expressions, age and gender.
camfr.addEventListener('play', async () => {
  if (faceDetectionIntervalId !== null) clearInterval(faceDetectionIntervalId)
  if (faceOverlayCanvas === null) {
    faceOverlayCanvas = faceapi.createCanvasFromMedia(camfr)
    document.body.appendChild(faceOverlayCanvas)
  }
  const canvas = faceOverlayCanvas
  // NOTE(review): camfr.width/height are the 400x400 element attributes, not
  // the 1280x720 stream size — confirm this is the intended overlay size.
  const canvasSize = {
    width: camfr.width,
    height: camfr.height
  }
  faceapi.matchDimensions(canvas, canvasSize)
  faceDetectionIntervalId = setInterval(async () => {
    const detections = await faceapi
      .detectAllFaces(
        camfr,
        new faceapi.TinyFaceDetectorOptions()
      )
      .withFaceLandmarks()
      .withFaceExpressions()
      .withAgeAndGender()
    // Scale results from the video's intrinsic size to the overlay size.
    const resizedDetections = faceapi.resizeResults(detections, canvasSize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    resizedDetections.forEach((detection) => {
      const { age, gender, genderProbability } = detection
      // Math.trunc replaces parseInt-on-a-number (same result for the
      // positive values produced here, without the number->string round trip).
      new faceapi.draw.DrawTextField([
        `${Math.trunc(age)} years`,
        `${gender} (${Math.trunc(genderProbability * 100)})`
      ], detection.detection.box.topRight).draw(canvas)
    })
  }, 100)
})
const camfr = document.getElementById('camfr')
const startVideo = () => {
var constraints = { audio: false, video: { width: 1280, height: 720 } };
navigator.mediaDevices.getUserMedia(constraints)
.then(function(mediaStream) {
var video = document.querySelector('video');
video.srcObject = mediaStream;
video.onloadedmetadata = function(e) {
video.play();
};
})
}
Promise.all([
faceapi.nets.tinyFaceDetector.loadFromUri("<%= asset_path('vendorface-api.js/models/tiny_face_detector/tiny_face_detector_model-weights_manifest.json') %>"),
faceapi.nets.faceLandmark68Net.loadFromUri("<%= asset_path('face-api.js/models/face_landmark_68/face_landmark_68_model-weights_manifest.json') %>"), //desenha os traços do rosto
faceapi.nets.faceRecognitionNet.loadFromUri("<%= asset_path('face-api.js/models/face_recognition/face_recognition_model-weights_manifest.json') %>"),//faz o conhecimento do rosto
faceapi.nets.faceExpressionNet.loadFromUri("<%= asset_path('face-api.js/models/face_expression/face_expression_model-weights_manifest.json') %>"),//detecta expressoes
faceapi.nets.ageGenderNet.loadFromUri("<%= asset_path('face-api.js/models/age_gender_model/age_gender_model-weights_manifest.json') %>"),//idade e genero
faceapi.nets.ssdMobilenetv1.loadFromUri("<%= asset_path('face-api.js/models/ssd_mobilenetv1/ssd_mobilenetv1_model-weights_manifest.json') %>") // usada para detectar rosto
]).then(startVideo)
camfr.addEventListener('play', async () => {
const canvas = faceapi.createCanvasFromMedia(camfr)
const canvasSize = {
width: camfr.width,
height: camfr.height
}
faceapi.matchDimensions(canvas, canvasSize)
document.body.appendChild(canvas)
setInterval(async () => {
const detections = await faceapi
.detectAllFaces(
camfr,
new faceapi.TinyFaceDetectorOptions()
)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
const resizedDetections = faceapi.resizeResults(detections, canvasSize)
canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
faceapi.draw.drawDetections(canvas, resizedDetections)
faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
resizedDetections.forEach(detection => {
const {age, gender, genderProbability} = detection
new faceapi.draw.DrawTextField([
`${parseInt(age, 10)} years`,
`${gender} (${ parseInt(genderProbability * 100, 10)})`
], detection.detection.box.topRight).draw(canvas)
})
}, 100)
})
您在 HTTPS 上试过吗？如果页面不是通过安全连接（HTTPS 或 localhost）提供的，WebRTC（摄像头访问）在 Chrome 上无法工作。
// Grab the <video> element used to display the webcam feed.
const camfr = document.getElementById('camfr')

// Ask the browser for camera access and begin playback as soon as the
// stream's metadata is available.
const startVideo = () => {
  const constraints = { audio: false, video: { width: 1280, height: 720 } };
  navigator.mediaDevices.getUserMedia(constraints).then((mediaStream) => {
    const video = document.querySelector('video');
    video.srcObject = mediaStream;
    video.onloadedmetadata = () => {
      video.play();
    };
  })
}
// Load every face-api.js model, then start the camera.
// FIX: the tiny_face_detector path read 'vendorface-api.js/...' (separator
// missing after 'vendor'), so that manifest 404'd, Promise.all rejected
// silently, and startVideo was never called.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri("<%= asset_path('face-api.js/models/tiny_face_detector/tiny_face_detector_model-weights_manifest.json') %>"),
  faceapi.nets.faceLandmark68Net.loadFromUri("<%= asset_path('face-api.js/models/face_landmark_68/face_landmark_68_model-weights_manifest.json') %>"), // draws facial landmarks
  faceapi.nets.faceRecognitionNet.loadFromUri("<%= asset_path('face-api.js/models/face_recognition/face_recognition_model-weights_manifest.json') %>"), // face recognition
  faceapi.nets.faceExpressionNet.loadFromUri("<%= asset_path('face-api.js/models/face_expression/face_expression_model-weights_manifest.json') %>"), // detects expressions
  faceapi.nets.ageGenderNet.loadFromUri("<%= asset_path('face-api.js/models/age_gender_model/age_gender_model-weights_manifest.json') %>"), // age and gender
  faceapi.nets.ssdMobilenetv1.loadFromUri("<%= asset_path('face-api.js/models/ssd_mobilenetv1/ssd_mobilenetv1_model-weights_manifest.json') %>") // face detection
]).then(startVideo)
  .catch((err) => console.error('Failed to load face-api.js models:', err))
// When playback starts, overlay a canvas on the video and run face detection
// every 100 ms, drawing boxes, landmarks, expressions, age and gender.
camfr.addEventListener('play', async () => {
  const overlay = faceapi.createCanvasFromMedia(camfr)
  const displaySize = { width: camfr.width, height: camfr.height }
  faceapi.matchDimensions(overlay, displaySize)
  document.body.appendChild(overlay)
  setInterval(async () => {
    const options = new faceapi.TinyFaceDetectorOptions()
    const results = await faceapi
      .detectAllFaces(camfr, options)
      .withFaceLandmarks()
      .withFaceExpressions()
      .withAgeAndGender()
    // Rescale from the video's intrinsic size to the overlay size.
    const resized = faceapi.resizeResults(results, displaySize)
    const ctx = overlay.getContext('2d')
    ctx.clearRect(0, 0, overlay.width, overlay.height)
    faceapi.draw.drawDetections(overlay, resized)
    faceapi.draw.drawFaceLandmarks(overlay, resized)
    faceapi.draw.drawFaceExpressions(overlay, resized)
    for (const result of resized) {
      const { age, gender, genderProbability } = result
      const label = [
        `${parseInt(age, 10)} years`,
        `${gender} (${ parseInt(genderProbability * 100, 10)})`
      ]
      new faceapi.draw.DrawTextField(label, result.detection.box.topRight).draw(overlay)
    }
  }, 100)
})