OpenKinect and Processing – Can't display Z coordinate

I have this code from Daniel Shiffman (below). I'm trying to read out the Z coordinate, but I have no idea how to do it, so any help would be much appreciated.

AveragePointTracking.pde

// Daniel Shiffman
// Tracking the average location beyond a given depth threshold
// Thanks to Dan O'Sullivan
// http://www.shiffman.net
// https://github.com/shiffman/libfreenect/tree/master/wrappers/java/processing

import org.openkinect.*;
import org.openkinect.processing.*;

// Showing how we can farm all the kinect stuff out to a separate class
KinectTracker tracker;
// Kinect Library object
Kinect kinect;

void setup() {
  size(640,600);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
}

void draw() {
  background(255);

  // Run the tracking analysis
  tracker.track();
  // Show the image
  tracker.display();

  // Let's draw the raw location
  PVector v1 = tracker.getPos();
  fill(50,100,250,200);
  noStroke();
  ellipse(v1.x,v1.y,10,10);

  // Let's draw the "lerped" location
  //PVector v2 = tracker.getLerpedPos();
  //fill(100,250,50,200);
  //noStroke();
  //ellipse(v2.x,v2.y,20,20);

  // Display some info
  int t = tracker.getThreshold();
  fill(0);
  text("Location-X: " + v1.x,10,500);
  text("Location-Y: " + v1.y,10,530);
  text("Location-Z: ",10,560);
  text("threshold: " + t,10,590);
}

void stop() {
  tracker.quit();
  super.stop();
}
KinectTracker.pde

class KinectTracker {

  // Size of kinect image
  int kw = 640;
  int kh = 480;
  int threshold = 500;

  // Raw location
  PVector loc;

  // Interpolated location
  PVector lerpedLoc;

  // Depth data
  int[] depth;


  PImage display;

  KinectTracker() {
    kinect.start();
    kinect.enableDepth(true);

    // We could skip processing the grayscale image for efficiency
    // but this example is just demonstrating everything
    kinect.processDepthImage(true);

    display = createImage(kw,kh,PConstants.RGB);

    loc = new PVector(0,0);
    lerpedLoc = new PVector(0,0);
  }

  void track() {

    // Get the raw depth as array of integers
    depth = kinect.getRawDepth();

    // Being overly cautious here
    if (depth == null) return;

    float sumX = 0;
    float sumY = 0;
    float count = 0;

    for(int x = 0; x < kw; x++) {
      for(int y = 0; y < kh; y++) {
        // Mirroring the image
        int offset = kw-x-1+y*kw;
        // Grabbing the raw depth
        int rawDepth = depth[offset];

        // Testing against threshold
        if (rawDepth < threshold) {
          sumX += x;
          sumY += y;
          count++;
        }
      }
    }
    // As long as we found something
    if (count != 0) {
      loc = new PVector(sumX/count,sumY/count);
    }

    // Interpolating the location, doing it arbitrarily for now
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
  }

  PVector getLerpedPos() {
    return lerpedLoc;
  }

  PVector getPos() {
    return loc;
  }

  void display() {
    PImage img = kinect.getDepthImage();

    // Being overly cautious here
    if (depth == null || img == null) return;

    // Going to rewrite the depth image to show which pixels are in threshold
    // A lot of this is redundant, but this is just for demonstration purposes
    display.loadPixels();
    for(int x = 0; x < kw; x++) {
      for(int y = 0; y < kh; y++) {
        // mirroring image
        int offset = kw-x-1+y*kw;
        // Raw depth
        int rawDepth = depth[offset];

        int pix = x+y*display.width;
        if (rawDepth < threshold) {
          // A red color instead
          display.pixels[pix] = color(245,100,100);
        } 
        else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();

    // Draw the image
    image(display,0,0);
  }

  void quit() {
    kinect.quit();
  }

  int getThreshold() {
    return threshold;
  }

  void setThreshold(int t) {
    threshold =  t;
  }
}
There are two ways you could go about this.

The way Daniel's code accesses the coordinates right now is through a 2D vector (i.e. X and Y). You could change that to a 3D vector (so it also stores a Z coordinate), and the OpenKinect library should return the Z coordinate the same way it does X and Y... I think ;-) (you'd have to check its source). Bear in mind, though, that the raw depth array holds a Z value for every single pixel, so you'd have to loop over all of them, which is both cumbersome and computationally expensive.
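
As a minimal sketch of that first approach (my adaptation, not Daniel's code; the sumZ accumulator is a name introduced here), track() could average the raw depth of the thresholded pixels and store it in the vector's z slot, since Processing's PVector is three-dimensional anyway:

    void track() {
      // Get the raw depth as an array of integers (one Z value per pixel)
      depth = kinect.getRawDepth();
      if (depth == null) return;

      float sumX = 0;
      float sumY = 0;
      float sumZ = 0; // hypothetical: accumulates the raw depth of matching pixels
      float count = 0;

      for (int x = 0; x < kw; x++) {
        for (int y = 0; y < kh; y++) {
          int offset = kw-x-1+y*kw;   // mirrored index into the 1D depth array
          int rawDepth = depth[offset];
          if (rawDepth < threshold) {
            sumX += x;
            sumY += y;
            sumZ += rawDepth;         // the raw depth *is* the per-pixel Z
            count++;
          }
        }
      }
      if (count != 0) {
        // PVector has a z field, so the average depth can ride along
        loc = new PVector(sumX/count, sumY/count, sumZ/count);
      }
    }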


Now, the way Daniel actually does it in this example is to look up the depth at a given XY position and report it back if it passes a certain threshold... that's the rawDepth integer you see in KinectTracker... it tests whether that value is below the threshold (which you can change), and if it is, it colours those pixels and writes them into an image buffer... you can then ask that image for XY coordinates, pass it to a blob-detection routine, and so on.
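
Stripped down, that lookup is just an array access; as a hypothetical helper (depthAt is a name introduced here, not part of the original class) it would look like this:

    // Raw depth at a 2D position, using the same mirrored indexing as track()
    int depthAt(int x, int y) {
      int offset = kw - x - 1 + y * kw;
      return depth[offset];
    }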

There are two main steps:

  • Getting the depth data (KinectTracker already does this in the track() method).
  • Using an offset to get the current pixel's depth, i.e. mapping the 2D position (x, y) to a position in the 1D depth array (again, this is done in the track() method):

    int offset = kw-x-1+y*kw;

  • Note that the coordinates are mirrored here; the index is normally computed as

    index = y*width+x

    as explained in the Processing reference (see the small comparison below).
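
To make the two indexing schemes concrete, here is a tiny self-contained comparison (the numbers are just examples):

    int w = 640;           // image width
    int x = 10, y = 5;
    int normalIndex   = y * w + x;           // standard row-major index
    int mirroredIndex = w - x - 1 + y * w;   // horizontally flipped variant
    println(normalIndex + " " + mirroredIndex);  // prints: 3210 3829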

In theory, all we then need to do is add one line at the end of the track() method, like so:

    void track() {
    
        // Get the raw depth as array of integers
        depth = kinect.getRawDepth();
    
        // Being overly cautious here
        if (depth == null) return;
    
        float sumX = 0;
        float sumY = 0;
        float count = 0;
    
        for(int x = 0; x < kw; x++) {
          for(int y = 0; y < kh; y++) {
            // Mirroring the image
            int offset = kw-x-1+y*kw;
            // Grabbing the raw depth
            int rawDepth = depth[offset];
    
            // Testing against threshold
            if (rawDepth < threshold) {
              sumX += x;
              sumY += y;
              count++;
            }
          }
        }
        // As long as we found something
        if (count != 0) {
          loc = new PVector(sumX/count,sumY/count);
        }
    
        // Interpolating the location, doing it arbitrarily for now
        lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
        lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
        lerpedLoc.z = depth[kw-((int)lerpedLoc.x)-1+((int)lerpedLoc.y)*kw]; // raw depth at the (mirrored) lerped x,y
      }
    

Adding that line at the end of void track() worked. I then changed the last block in void draw() to the following, to read out the Z value:

    // Display some info
    int t = tracker.getThreshold();
    PVector v2 = tracker.getLerpedPos(); // v2 must be declared: uncomment the "lerped" block above, or fetch it here
    fill(0);
    text("Location-X: " + v1.x,10,500);
    text("Location-Y: " + v1.y,10,530);
    text("Location-Z: " + v2.z,10,560);  // << Adding this worked!
    text("threshold: " + t,10,590);
    
Comments:

David: I'm a real newbie when it comes to Processing - is there any chance you could write an example where I can read out a Z coordinate?

David: Hi George, do you know how I could make it multi-touch? So that it effectively recognises the averages of two separate breaches of the threshold? This video might explain it better:

George: Hey @DavidIngledow :) Glad this worked. Sure - once the z is added in the track() method, it's available from outside via getPos() and the x, y, z properties. As for the two separate points: I don't think you mean the threshold, since that implies the closest depth point and the points behind it based on the z value. What you're describing is two positions with similar z values but different x, y values. Daniel's tracker is meant to handle a single closest point, not several. You could try passing the depth image to an OpenCV wrapper, thresholding it there, and then doing blob detection, which would give you multiple x, y points with similar z values at once (it's worth wiring the z threshold to a slider or keyboard shortcut so you can quickly tweak it and find the "sweet spot"). Once you've got that, ...