Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/csharp/281.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C# 从深度图像获取三维点云_C#_C++_Kinect_Point Clouds - Fatal编程技术网

C# 从深度图像获取三维点云

C# 从深度图像获取三维点云,c#,c++,kinect,point-clouds,C#,C++,Kinect,Point Clouds,我希望开发一个程序来获得kinect深度图像,并将其转换为3D,作为我最后一年的项目 我必须写一个程序来保存这些深度图像到项目的bin目录。但我无法将这些图像转换为3d点云 如果有人有一个关于如何实施这个或任何工作项目的想法,请帮助我 我建议你退房。这是一个3D点云处理的开放项目,并且有很好的文档记录。有很多教程,但是对于您的任务,您应该看: (使用PCL和OpenNI库快速访问点云) 有关如何制作点云,请参见。如果你只是想做一个而不理解它(我强烈建议你不要这样做),下面是代码: XAML:

我希望开发一个程序来获得kinect深度图像,并将其转换为3D,作为我最后一年的项目

我必须写一个程序来保存这些深度图像到项目的bin目录。但我无法将这些图像转换为3d点云


如果有人有一个关于如何实施这个或任何工作项目的想法,请帮助我

我建议你看看 PCL（Point Cloud Library）。这是一个3D点云处理的开源项目，并且有很好的文档记录。有很多教程，但是对于您的任务，您应该看：

  • (使用PCL和OpenNI库快速访问点云)
有关如何制作点云,请参见。如果你只是想做一个而不理解它(我强烈建议你不要这样做),下面是代码:
XAML:


C#:

使用系统;
使用System.Collections.Generic;
使用System.Linq;
使用系统文本;
使用System.Windows;
使用System.Windows.Controls;
使用System.Windows.Data;
使用System.Windows.Documents;
使用System.Windows.Input;
使用System.Windows.Media;
使用System.Windows.Media.Imaging;
使用System.Windows.Navigation;
使用System.Windows.Shapes;
使用System.Windows.Media.Media3D;
使用Microsoft.Kinect;
命名空间点云WPF
{
/// 
///MainWindow.xaml的交互逻辑
/// 
公共部分类主窗口:窗口
{
GeometryModel3D[]点=新的GeometryModel3D[320*240];
int s=4;
运动传感器;
公共主窗口()
{
初始化组件();
}
void DepthFrameReady(对象发送方,DepthImageFrameReadyEventArgs e)
{
DepthImageFrame imageFrame=e.OpenDepthImageFrame();
如果(imageFrame!=null)
{
short[]pixelData=新的short[imageFrame.PixelDataLength];
imageFrame.CopyPixelDataTo(pixelData);
内部温度=0;
int i=0;
对于(int y=0;y<240;y+=s)
对于(int x=0;x<320;x+=s)
{
温度=((ushort)像素数据[x+y*320])>>3;
((TranslateTransform3D)点[i].变换).OffsetZ=temp;
i++;
}
}
}
专用几何模型3D三角形(双x、双y、双s、SolidColorBrush颜色)
{
Point3DCollection角点=新的Point3DCollection();
添加(新的点3D(x,y,0));
添加(新的点3D(x,y+s,0));
添加(新的点3D(x+s,y+s,0));
Int32Collection三角形=新的Int32Collection();
三角形。添加(0);
三角形。添加(1);
三角形。添加(2);
MeshGeometry3D tmesh=新MeshGeometry3D();
t啮合位置=转角;
tmesh.TriangleIndices=三角形;
添加(新矢量3D(0,0,-1));
GeometryModel3D msheet=新的GeometryModel3D();
msheet.Geometry=tmesh;
msheet.Material=新扩散材料(颜色);
返回msheet;
}
已加载私有无效窗口(对象发送器、路由目标)
{
DirectionalLight DirLight1=新的DirectionalLight();
DirLight1.Color=Colors.White;
DirLight1.方向=新矢量3D(1,1,1);
透视摄像头摄像头1=新的透视摄像头();
Camera1.FarPlaneDistance=8000;
Camera1.NearPlaneDistance=100;
Camera1.FieldOfView=10;
Camera1.位置=新点3D(160、120、-1000);
Camera1.LookDirection=新矢量3D(0,0,1);
Camera1.UpDirection=新矢量3D(0,-1,0);
Model3DGroup modelGroup=新的Model3DGroup();
int i=0;
对于(int y=0;y<240;y+=s)
{
对于(int x=0;x<320;x+=s)
{
点[i]=三角形(x,y,s,新SolidColorBrush(Colors.White));
//点[i]=MCube(x,y);
点[i]。变换=新的TranslateTransform3D(0,0,0);
modelGroup.Children.Add(点[i]);
i++;
}
}
modelGroup.Children.Add(DirLight1);
ModelVisual3D modelsVisual=新的ModelVisual3D();
modelsVisual.Content=modelGroup;
Viewport3D myViewport=新的Viewport3D();
myViewport.ishitestvisible=false;
myViewport.Camera=Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height=canvas1.Height;
myViewport.Width=canvas1.Width;
Canvas.SetTop(myViewport,0);
Canvas.SetLeft(myViewport,0);
传感器=Kinect传感器。Kinect传感器[0];
sensor.SkeletonStream.Enable();
sensor.depthream.Enable(DepthImageFormat.Resolution320x240Fps30);
sensor.DepthFrameReady+=DepthFrameReady;
sensor.Start();
}
}
}

您需要向我们提供更多细节。您已经尝试了什么,为什么无法转换它们?
<!--
  Main window for the Kinect point-cloud demo. The Viewport3D is built in
  code-behind (Window_Loaded) and injected into canvas1 at runtime.
  NOTE(review): the Grid (1626x1130) is larger than the Window (993x653),
  so part of it is clipped at runtime; confirm this is intentional.
  NOTE(review): the closing Window tag is not visible in this snippet;
  verify against the full file.
-->
<Window x:Class="PointCloudWPF.MainWindow"
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    Title="Point Cloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
<Grid Height="1130" Width="1626">
    <!-- Host surface for the dynamically created Viewport3D -->
    <Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top" Width="967" Background="Black" />
</Grid>
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using System.Windows.Media.Media3D;
using Microsoft.Kinect;


namespace PointCloudWPF
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
/// <summary>
/// Interaction logic for MainWindow.xaml.
/// Renders Kinect depth frames as a grid of small triangles whose Z offsets
/// follow the measured depth, producing a crude live point-cloud view.
/// </summary>
public partial class MainWindow : Window
{
    // One GeometryModel3D per sampled depth pixel. Sized for the full
    // 320x240 frame even though only every s-th pixel gets a triangle.
    GeometryModel3D[] points = new GeometryModel3D[320 * 240];

    // Sampling step in pixels: a triangle is created every s-th row/column.
    int s = 4;

    KinectSensor sensor;

    public MainWindow()
    {
        InitializeComponent();
    }

    /// <summary>
    /// Depth-stream callback. Copies the frame's pixel data and pushes each
    /// triangle along Z by the depth value at its sample position.
    /// </summary>
    void DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
    {
        // DepthImageFrame is IDisposable; the original code never disposed
        // it, leaking one frame object per callback (up to 30 per second).
        using (DepthImageFrame imageFrame = e.OpenDepthImageFrame())
        {
            // Null when the frame was skipped or is no longer available.
            if (imageFrame == null)
                return;

            short[] pixelData = new short[imageFrame.PixelDataLength];
            imageFrame.CopyPixelDataTo(pixelData);

            int i = 0;
            for (int y = 0; y < 240; y += s)
            {
                for (int x = 0; x < 320; x += s)
                {
                    // Kinect v1 packs a 3-bit player index into the low bits
                    // of each depth pixel; shifting it out leaves the depth
                    // in millimetres.
                    int depth = ((ushort)pixelData[x + y * 320]) >> 3;
                    ((TranslateTransform3D)points[i].Transform).OffsetZ = depth;
                    i++;
                }
            }
        }
    }

    /// <summary>
    /// Builds a right triangle with legs of length <paramref name="s"/>,
    /// cornered at (<paramref name="x"/>, <paramref name="y"/>) in the Z = 0
    /// plane, facing the camera (normal 0,0,-1) and filled with
    /// <paramref name="color"/>.
    /// </summary>
    private GeometryModel3D Triangle(double x, double y, double s, SolidColorBrush color)
    {
        Point3DCollection corners = new Point3DCollection();
        corners.Add(new Point3D(x, y, 0));
        corners.Add(new Point3D(x, y + s, 0));
        corners.Add(new Point3D(x + s, y + s, 0));

        Int32Collection triangles = new Int32Collection();
        triangles.Add(0);
        triangles.Add(1);
        triangles.Add(2);

        MeshGeometry3D mesh = new MeshGeometry3D();
        mesh.Positions = corners;
        mesh.TriangleIndices = triangles;
        mesh.Normals.Add(new Vector3D(0, 0, -1));

        GeometryModel3D model = new GeometryModel3D();
        model.Geometry = mesh;
        model.Material = new DiffuseMaterial(color);
        return model;
    }

    /// <summary>
    /// Sets up the light, camera, triangle grid and viewport, then starts the
    /// first attached Kinect sensor streaming 320x240 depth frames.
    /// </summary>
    private void Window_Loaded(object sender, RoutedEventArgs e)
    {
        DirectionalLight dirLight = new DirectionalLight();
        dirLight.Color = Colors.White;
        dirLight.Direction = new Vector3D(1, 1, 1);

        // Camera sits in front of the grid looking down +Z; UpDirection is
        // flipped so image rows (top-down) map to screen rows.
        PerspectiveCamera camera = new PerspectiveCamera();
        camera.FarPlaneDistance = 8000;
        camera.NearPlaneDistance = 100;
        camera.FieldOfView = 10;
        camera.Position = new Point3D(160, 120, -1000);
        camera.LookDirection = new Vector3D(0, 0, 1);
        camera.UpDirection = new Vector3D(0, -1, 0);

        Model3DGroup modelGroup = new Model3DGroup();

        int i = 0;
        for (int y = 0; y < 240; y += s)
        {
            for (int x = 0; x < 320; x += s)
            {
                points[i] = Triangle(x, y, s, new SolidColorBrush(Colors.White));
                // Each triangle gets its own transform; DepthFrameReady
                // mutates OffsetZ per frame.
                points[i].Transform = new TranslateTransform3D(0, 0, 0);
                modelGroup.Children.Add(points[i]);
                i++;
            }
        }

        modelGroup.Children.Add(dirLight);

        ModelVisual3D modelsVisual = new ModelVisual3D();
        modelsVisual.Content = modelGroup;

        Viewport3D viewport = new Viewport3D();
        // Thousands of models: hit-testing them would be needlessly costly.
        viewport.IsHitTestVisible = false;
        viewport.Camera = camera;
        viewport.Children.Add(modelsVisual);

        canvas1.Children.Add(viewport);
        viewport.Height = canvas1.Height;
        viewport.Width = canvas1.Width;
        Canvas.SetTop(viewport, 0);
        Canvas.SetLeft(viewport, 0);

        // Guard: the original indexed KinectSensors[0] unconditionally and
        // threw when no sensor was attached.
        if (KinectSensor.KinectSensors.Count == 0)
        {
            MessageBox.Show("No Kinect sensor detected.");
            return;
        }

        sensor = KinectSensor.KinectSensors[0];
        // NOTE(review): skeleton data is enabled but never read; kept only to
        // preserve the original runtime behavior — consider removing.
        sensor.SkeletonStream.Enable();
        sensor.DepthStream.Enable(DepthImageFormat.Resolution320x240Fps30);
        sensor.DepthFrameReady += DepthFrameReady;
        sensor.Start();
    }
}
}