C# Kinect深度检测
我知道如何在WPF中实现这一点,但在winforms应用程序中捕获深度时遇到了问题 我发现一些代码如下:C# Kinect深度检测,c#,winforms,kinect,C#,Winforms,Kinect,我知道如何在WPF中实现这一点,但在winforms应用程序中捕获深度时遇到了问题 我发现一些代码如下: private void Kinect_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e) { using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) { if (depthFrame != null)
/// <summary>
/// DepthFrameReady handler: converts the raw 16-bit depth samples into an
/// 8-bit grayscale 32bpp image and displays it in pictureBox2.
/// </summary>
/// <param name="sender">The KinectSensor raising the event.</param>
/// <param name="e">Event args providing access to the new depth frame.</param>
private void Kinect_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame != null)
        {
            Bitmap DepthBitmap = new Bitmap(depthFrame.Width, depthFrame.Height, PixelFormat.Format32bppRgb);
            if (_depthPixels.Length != depthFrame.PixelDataLength)
            {
                _depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
                _mappedDepthLocations = new ColorImagePoint[depthFrame.PixelDataLength];
            }

            var _pixelData = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo(_pixelData);

            // BUG FIX: the original code Marshal.Copy'd the raw short[] directly
            // into a 32bpp bitmap. That writes only 2 of the 4 bytes per pixel
            // and interprets depth bits as colour channels — the "purple" image.
            // Convert each depth sample to an 8-bit intensity first.
            const int MinimumDistance = 800;   // Kinect default range floor (mm)
            const int MaximumDistance = 4096;  // Kinect default range ceiling (mm)
            byte[] gray = new byte[depthFrame.PixelDataLength * 4];
            for (int i = 0; i < _pixelData.Length; i++)
            {
                // Low 3 bits are the player index; shift them off to get mm.
                int depth = _pixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                byte intensity = 0;
                if (depth > MinimumDistance)
                {
                    int clamped = Math.Min(depth, MaximumDistance);
                    // White = close, black = far.
                    intensity = (byte)(255 - ((clamped - MinimumDistance) * 255 / (MaximumDistance - MinimumDistance)));
                }
                int o = i * 4;           // Format32bppRgb stores B, G, R, unused
                gray[o] = intensity;     // B
                gray[o + 1] = intensity; // G
                gray[o + 2] = intensity; // R
            }

            BitmapData bmapdata = DepthBitmap.LockBits(
                new Rectangle(0, 0, depthFrame.Width, depthFrame.Height),
                ImageLockMode.WriteOnly, DepthBitmap.PixelFormat);
            Marshal.Copy(gray, 0, bmapdata.Scan0, gray.Length);
            DepthBitmap.UnlockBits(bmapdata);
            pictureBox2.Image = DepthBitmap;
        }
    }
}
但这并没有给我灰度的深度,而是紫色的。有什么改进或帮助吗?因此我认为rgb框架在这种情况下适合您: 首先,要启用深度摄影机,您需要调用:
// NOTE: this is native C++ (Kinect NUI COM API), not C# — pseudo-code from the
// answer; "all stuff you use also" stands for the other init flags you need.
sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH|all stuff you use also);
if (int(streams&_Kinect_zed)) ret=sensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_DEPTH, // Depth camera or rgb camera?
NUI_IMAGE_RESOLUTION_640x480, // Image resolution
NUI_IMAGE_STREAM_FLAG_DISTINCT_OVERFLOW_DEPTH_VALUES, // Image stream flags // NUI_IMAGE_STREAM_FLAG_ENABLE_NEAR_MODE does not work !!!
2, // Number of frames to buffer
NULL, // Event handle
&stream_hzed); else stream_hzed=NULL;
第二,要开始流式传输,您需要调用:
// NOTE: native C++ (Kinect NUI COM API) pseudo-code, duplicated from the
// snippet above by the page scraper; kept verbatim apart from comments.
sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH|all stuff you use also);
if (int(streams&_Kinect_zed)) ret=sensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_DEPTH, // Depth camera or rgb camera?
NUI_IMAGE_RESOLUTION_640x480, // Image resolution
NUI_IMAGE_STREAM_FLAG_DISTINCT_OVERFLOW_DEPTH_VALUES, // Image stream flags // NUI_IMAGE_STREAM_FLAG_ENABLE_NEAR_MODE does not work !!!
2, // Number of frames to buffer
NULL, // Event handle
&stream_hzed); else stream_hzed=NULL;
- 注意,并非所有分辨率/标志组合都适用于所有型号的 Kinect。
- 上面这个组合是安全的,即使是像我这样的老型号也可用。
- 其他所有函数(必须)都是通过 SDK 头文件中的 COM 接口获取的。
- 如果您自己声明并链接这些函数,那么您将无法连接到物理 Kinect。
/// <summary>
/// DepthFrameReady handler: copies the incoming depth frame, converts it to a
/// 32bpp grayscale buffer via ConvertDepthFrame, and shows the result in
/// pictureBox2.
/// </summary>
/// <param name="sender">The KinectSensor raising the event (cast to reach its DepthStream).</param>
/// <param name="e">Event args providing access to the new depth frame.</param>
void Kinect_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame == null)
        {
            return; // No frame available (e.g. event raced frame disposal).
        }

        int width = depthFrame.Width;
        int height = depthFrame.Height;

        // Allocate fresh buffers for this frame: 4 output bytes per depth sample.
        this.depthFrame32 = new byte[width * height * 4];
        this.depthPixelData = new short[depthFrame.PixelDataLength];
        depthFrame.CopyPixelDataTo(this.depthPixelData);

        byte[] pixels32 = this.ConvertDepthFrame(this.depthPixelData, ((KinectSensor)sender).DepthStream);

        // Blit the converted pixels into a GDI+ bitmap for WinForms display.
        Bitmap frameBitmap = new Bitmap(width, height, PixelFormat.Format32bppRgb);
        Rectangle bounds = new Rectangle(0, 0, width, height);
        BitmapData locked = frameBitmap.LockBits(bounds, ImageLockMode.WriteOnly, frameBitmap.PixelFormat);
        Marshal.Copy(pixels32, 0, locked.Scan0, 4 * depthFrame.PixelDataLength);
        frameBitmap.UnlockBits(locked);

        pictureBox2.Image = frameBitmap;
    }
}
/// <summary>
/// Converts raw 16-bit Kinect depth samples into the 32bpp grayscale buffer
/// this.depthFrame32 (white = close, black = far) and returns that buffer.
/// </summary>
/// <param name="depthFrame">Raw depth samples; low 3 bits are the player index.</param>
/// <param name="depthStream">The sensor's depth stream (unused; kept for signature compatibility).</param>
/// <returns>this.depthFrame32, filled with B/G/R bytes per pixel.</returns>
private byte[] ConvertDepthFrame(short[] depthFrame, DepthImageStream depthStream)
{
    // BUG FIX: the original snippet referenced RedIndex/GreenIndex/BlueIndex
    // without defining them. Format32bppRgb memory layout is B, G, R, unused.
    const int BlueIndex = 0;
    const int GreenIndex = 1;
    const int RedIndex = 2;

    // XBox Kinects (default) report reliable depth between 800mm and 4096mm.
    const int MinimumDistance = 800;
    const int MaximumDistance = 4096;

    for (int i16 = 0, i32 = 0; i16 < depthFrame.Length && i32 < this.depthFrame32.Length; i16++, i32 += 4)
    {
        // We don't care about the player index here: shift it off so we are
        // left with 13 bits of depth in millimetres.
        int realDepth = depthFrame[i16] >> DepthImageFrame.PlayerIndexBitmaskWidth;

        if (realDepth > MinimumDistance)
        {
            // BUG FIX: clamp to MaximumDistance — without it, depths beyond
            // 4096mm made the expression negative and the byte cast wrapped.
            int clamped = Math.Min(realDepth, MaximumDistance);
            // Map [Minimum, Maximum] linearly onto [255, 0]: white = close, black = far.
            // (Drop the "255 -" to invert: white = far.)
            byte distance = (byte)(255 - ((clamped - MinimumDistance) * 255 / (MaximumDistance - MinimumDistance)));
            // Same value in R, G and B gives a gray ramp.
            this.depthFrame32[i32 + RedIndex] = distance;
            this.depthFrame32[i32 + GreenIndex] = distance;
            this.depthFrame32[i32 + BlueIndex] = distance;
        }
        else
        {
            // Closer than 800mm the sensor is unreliable — paint black.
            // (The original comment said "red" but the code painted black;
            // behavior kept, comment corrected.)
            this.depthFrame32[i32 + RedIndex] = 0;
            this.depthFrame32[i32 + GreenIndex] = 0;
            this.depthFrame32[i32 + BlueIndex] = 0;
        }
    }

    // BUG FIX: the original snippet was missing this return statement and the
    // method's closing brace (it would not compile).
    return this.depthFrame32;
}
void Kinect\u DepthFrameReady(对象发送方,DepthImageFrameReadyEventArgs e)
{
使用(DepthImageFrame depthFrame=e.OpenDepthImageFrame())
{
if(depthFrame!=null)
{
this.depthFrame32=新字节[depthFrame.Width*depthFrame.Height*4];
//将图像更新为新格式
this.depthPixelData=新短[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo(this.depthPixelData);
字节[]convertedDepthBits=this.ConvertedDepthFrame(this.depthPixelData,((KinectSensor)发送方).DepthStream);
位图bmap=新位图(depthFrame.Width、depthFrame.Height、PixelFormat.Format32bppRgb);
BitmapData bmapdata=bmap.LockBits(新矩形(0,0,depthFrame.Width,depthFrame.Height),ImageLockMode.WriteOnly,bmap.PixelFormat);
IntPtr ptr=bmapdata.Scan0;
Marshal.Copy(convertedDepthBits,0,ptr,4*depthFrame.PixelDataLength);
bmap.解锁位(bmapdata);
pictureBox2.Image=bmap;
}
}
}
专用字节[]ConvertDepthFrame(短[]depthFrame,DepthImageStream depthStream)
{
//穿过深度框,在两个阵列之间建立关联
对于(int i16=0,i32=0;i16>DepthImageFrame.PlayerIndexBitmaskWidth;
//剩下的13位深度信息需要转换成每个像素的8位数字。
//有数百种方法可以做到这一点。这只是最简单的一种。
//让我们创建一个名为Distance的字节变量。
//我们将为这个变量分配一个数字,该数字将来自这13位的转换。
字节距离=0;
//XBox Kinect(默认)限制在800毫米和4096毫米之间。
int最小距离=800;
int最大距离=4096;
//XBox Kinect(默认)在800毫米附近不可靠,所以让我们去掉这些无用的测量值。
//如果该像素上的距离大于800mm,我们将以其等效的灰色绘制它
if(真实深度>最小距离)
{
//将realDepth转换为实际距离的0到255范围。
//仅使用以下距离指定之一
//白色=远
//黑色=接近
//距离=(字节)((realDepth–MinimumDistance)*255/(MaximumDistance MinimumDistance));
//白色=接近
//黑=远
距离=(字节)(255-((实深度-最小距离)*255/(最大距离-最小距离));
//使用距离绘制当前像素的每一层(R&G)。
//将R、G和B涂成相同的颜色会使其从黑色变为灰色
这个.depthFrame32[i32+RedIndex]=(字节)(距离);
this.depthFrame32[i32+GreenIndex]=(字节)(距离);
this.depthFrame32[i32+BlueIndex]=(字节)(距离);
}
//如果我们的距离小于800mm,就把它涂成红色,这样我们就知道这个像素没有给出一个好的值
其他的
{
此.depthFrame32[i32+RedIndex]=0;
此.depthFrame32[i32+绿色索引]=0;
this.depthFrame32[i32+BlueIndex]=0;
}
}
基本上,Kinect SDK 主要是面向 WPF 应用程序开发的。在 Windows 窗体中,您必须把深度数据的 short 数组转换为位图,才能在 PictureBox 上显示。根据我的经验,用 WPF 进行 Kinect 编程更合适。
下面是我用来将深度帧转换为位图以在图片框中显示的函数
/// <summary>
/// Copies the raw 16-bit depth samples of a DepthImageFrame into a
/// Format16bppRgb555 Bitmap for display in a WinForms PictureBox.
/// Note: this is a false-colour rendering (raw depth bits interpreted as
/// RGB555), not a calibrated grayscale image.
/// </summary>
/// <param name="Image">The depth frame to render; not disposed here.</param>
/// <returns>A new Bitmap the caller owns (and should dispose).</returns>
private Bitmap ImageToBitmap(DepthImageFrame Image)
{
    short[] pixeldata = new short[Image.PixelDataLength];
    Image.CopyPixelDataTo(pixeldata);
    // FIX: removed the unused `stride` local the original computed and ignored.
    // Copying contiguously into Scan0 assumes the bitmap has no row padding,
    // which holds because Kinect depth widths (640/320/80) are even.
    Bitmap bmap = new Bitmap(Image.Width, Image.Height, PixelFormat.Format16bppRgb555);
    BitmapData bmapdata = bmap.LockBits(
        new Rectangle(0, 0, Image.Width, Image.Height),
        ImageLockMode.WriteOnly, bmap.PixelFormat);
    Marshal.Copy(pixeldata, 0, bmapdata.Scan0, Image.PixelDataLength);
    bmap.UnlockBits(bmapdata);
    return bmap;
}
你可以这样称呼它:
// Usage: grab the next depth frame and render it.
// FIX: removed the `pixelS` array the original allocated but never used,
// and dispose the frame once the bitmap has been built (DepthImageFrame
// holds SDK resources; the Bitmap does not reference it afterwards).
DepthImageFrame VFrame = e.OpenDepthImageFrame();
if (VFrame == null) return;
Bitmap bmap = ImageToBitmap(VFrame);
VFrame.Dispose();
上面那段代码并不能直接使用:没有 return 语句,也没有定义 RedIndex 等常量。我花了几分钟整理,希望这是一个更好的答案,因为它确实可以运行。
// Usage (duplicate of the snippet above, kept by the page): grab the next
// depth frame and render it.
// FIX: removed the unused `pixelS` allocation and dispose the frame after use.
DepthImageFrame VFrame = e.OpenDepthImageFrame();
if (VFrame == null) return;
Bitmap bmap = ImageToBitmap(VFrame);
VFrame.Dispose();