C# 如何通过提供Windows.Media.FaceAnalysis DetectedFace列表,使用Microsoft认知服务检测人脸属性?
标签: C#, UWP, Microsoft Cognitive, Face API, Azure Cognitive Services
Windows.Media.FaceAnalysis检测到的人脸对象列表。现在,我想将这些人脸传递给Microsoft认知服务API,以检测人脸并获取人脸属性。我该怎么做
// Holds the faces found by the on-device (Windows.Media.FaceAnalysis) detector.
IList<DetectedFace> faces = null;
// Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
// GetPreviewFrame will convert the native webcam frame into this format.
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
{
// Grab the current webcam frame into previewFrame (converted to NV12 by MediaCapture).
await this.mediaCapture.GetPreviewFrameAsync(previewFrame);
// The returned VideoFrame should be in the supported NV12 format but we need to verify this.
if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
{
// Local, on-device detection: returns bounding boxes only, no attributes.
faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);
// Now pass this faces to Cognitive services API
// faceClient.DetectAsync
}
}
IList<DetectedFace> faces = null;
//创建一个视频帧对象,指定我们希望捕获图像的像素格式(本例中为NV12位图)。
//GetPreviewFrame将本机网络摄像头帧转换为此格式。
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
{
await this.mediaCapture.GetPreviewFrameAsync(previewFrame);
//返回的视频帧应为支持的NV12格式,但我们需要对此进行验证。
if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
{
faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);
//现在将检测到的人脸传递给认知服务API
//faceClient.DetectAsync
}
}
DetectedFace 对象包含实际人脸的边界框(FaceBox)。因此,您可以利用这些信息为每张人脸创建内存流,并将其发送到认知服务的 Face API:
private async Task DetectAsync()
{
IList<DetectedFace> faces = null;
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
using (VideoFrame destinationPreviewFrame = new VideoFrame(InputPixelFormat, 640, 480))
{
await this._mediaCapture.GetPreviewFrameAsync(destinationPreviewFrame);
if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
{
faces = await this.faceDetector.DetectFacesAsync(destinationPreviewFrame.SoftwareBitmap);
foreach (var face in faces)
{
//将NV12转换为RGBA16格式
SoftwareBitmap convertedBitmap = SoftwareBitmap.Convert(destinationPreviewFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16);
//获取检测到的人脸的原始字节
byte[] rawBytes = await GetBytesFromBitmap(convertedBitmap, BitmapEncoder.BmpEncoderId, face.FaceBox);
//读取位图并将其发送到face客户端
using (Stream stream = rawBytes.AsBuffer().AsStream())
{
var faceAttributesToReturn = new List<FaceAttributeType>()
{
FaceAttributeType.Age,
FaceAttributeType.Emotion,
FaceAttributeType.Hair
};
Face[] detectedFaces = await this.faceClient.DetectAsync(stream, true, true, faceAttributesToReturn);
Debug.Assert(detectedFaces.Length > 0);
}
}
}
}
}
private async Task<byte[]> GetBytesFromBitmap(SoftwareBitmap soft, Guid encoderId, BitmapBounds bounds)
{
byte[] array = null;
using (var ms = new InMemoryRandomAccessStream())
{
BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, ms);
encoder.SetSoftwareBitmap(soft);
//应用人脸的边界
encoder.BitmapTransform.Bounds = bounds;
await encoder.FlushAsync();
array = new byte[ms.Size];
await ms.ReadAsync(array.AsBuffer(), (uint)ms.Size, InputStreamOptions.None);
}
return array;
}
Hi@Maria Ines Parnisari,谢谢你的回答。你的回答涉及创建一个临时文件。我们有没有办法避免创建一个临时文件。我们可以在没有临时文件帮助的情况下创建一个解决方案?
/// <summary>
/// Grabs one preview frame from the webcam, runs the local (on-device)
/// FaceDetector to find face bounding boxes, then sends each cropped face
/// to the Cognitive Services Face API for attribute analysis (age, emotion, hair).
/// </summary>
private async Task DetectAsync()
{
IList<DetectedFace> faces = null;
// The on-device FaceDetector requires an NV12 bitmap as input.
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
using (VideoFrame destinationPreviewFrame = new VideoFrame(InputPixelFormat, 640, 480))
{
await this._mediaCapture.GetPreviewFrameAsync(destinationPreviewFrame);
if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
{
faces = await this.faceDetector.DetectFacesAsync(destinationPreviewFrame.SoftwareBitmap);
// Convert NV12 -> RGBA16 once for the whole frame. The original code
// re-converted inside the loop (loop-invariant work) and never disposed
// the converted SoftwareBitmap, leaking one native bitmap per face.
using (SoftwareBitmap convertedBitmap = SoftwareBitmap.Convert(destinationPreviewFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16))
{
foreach (var face in faces)
{
// Crop to the face's bounding box and encode it as BMP bytes.
byte[] rawBytes = await GetBytesFromBitmap(convertedBitmap, BitmapEncoder.BmpEncoderId, face.FaceBox);
// Wrap the encoded bytes in a Stream and send them to the Face API.
using (Stream stream = rawBytes.AsBuffer().AsStream())
{
var faceAttributesToReturn = new List<FaceAttributeType>()
{
FaceAttributeType.Age,
FaceAttributeType.Emotion,
FaceAttributeType.Hair
};
// returnFaceId = true, returnFaceLandmarks = true.
Face[] detectedFaces = await this.faceClient.DetectAsync(stream, true, true, faceAttributesToReturn);
Debug.Assert(detectedFaces.Length > 0);
}
}
}
}
}
}
/// <summary>
/// Encodes the given SoftwareBitmap with the specified encoder (e.g. BMP),
/// cropped to <paramref name="bounds"/> (the detected face's box), and
/// returns the encoded image as a byte array.
/// </summary>
/// <param name="soft">Source bitmap (must be in a format the encoder accepts).</param>
/// <param name="encoderId">A BitmapEncoder codec id, e.g. BitmapEncoder.BmpEncoderId.</param>
/// <param name="bounds">Crop rectangle applied via BitmapTransform.Bounds.</param>
/// <returns>The encoded image bytes.</returns>
private async Task<byte[]> GetBytesFromBitmap(SoftwareBitmap soft, Guid encoderId, BitmapBounds bounds)
{
byte[] array = null;
using (var ms = new InMemoryRandomAccessStream())
{
BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, ms);
encoder.SetSoftwareBitmap(soft);
// apply the bounds of the face (crop performed during encode)
encoder.BitmapTransform.Bounds = bounds;
await encoder.FlushAsync();
// BUG FIX: FlushAsync leaves the stream positioned at the end of the
// encoded data; reading from there yields zero bytes. Rewind first.
ms.Seek(0);
array = new byte[ms.Size];
await ms.ReadAsync(array.AsBuffer(), (uint)ms.Size, InputStreamOptions.None);
}
return array;
}