Warning: file_get_contents(/data/phpspider/zhask/data//catemap/8/logging/2.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Processing daniel shiffmans openkinect是否有命令参考库_Processing_Kinect_Openkinect - Fatal编程技术网

Processing daniel shiffmans openkinect是否有命令参考库

Processing daniel shiffmans openkinect是否有命令参考库,processing,kinect,openkinect,Processing,Kinect,Openkinect,我正在尝试更新路易斯·卡萨达在2010年制作的剧本。他修改了Daniel Shiffman的点云示例文件,将点云数据收集为文本文件。这是教程 问题是所有的命令似乎都过时了,例如Kinect.enabledepth(true);更改为kinect2后,initDepth(true)返回错误“不适用于参数(布尔)”,或者如果我删除了此错误,则下一行返回错误,表示“函数processDepthImage(布尔)不存在”,并且在示例文件中找不到等效的命令 是否有某种参考库,在那里我可以看到所有可能的命令

我正在尝试更新路易斯·卡萨达在2010年制作的脚本。他修改了Daniel Shiffman的点云示例文件，将点云数据收集为文本文件。这是教程。

问题是所有的命令似乎都过时了,例如Kinect.enabledepth(true);更改为kinect2后,initDepth(true)返回错误“不适用于参数(布尔)”,或者如果我删除了此错误,则下一行返回错误,表示“函数processDepthImage(布尔)不存在”,并且在示例文件中找不到等效的命令

是否有某种参考库,在那里我可以看到所有可能的命令(甚至更好的是那些命令等价于预处理器),这样我就可以通过脚本更新命令来使用新的openkinect库,如果没有,那么作为一名处理新手,这对我来说是一项可能完成的任务吗


以下是一些有帮助的事情:

  • 阅读
  • 如果这还不够,并且javadocs/reference不可用,请查看中的公共方法并阅读上面的注释
  • 在Processing 3中,您可以使用auto complete查看整个代码(包括库)中可用的方法和属性。您甚至可以启用Ctrl+Space自动完成(如果您在“首选项”中设置它)
  • 这可能会揭示函数
    initDepth(true)
    为什么“不适用于参数(布尔值)”。使用Processing 3 completion功能,您可以看到实际上有一个
    initDepth()
    函数，但它不接受任何参数(因此删除
    true
    即可)

    使用相同的方法,您可以很容易地发现Kinect(v1)中的一些函数在Kinect 2类中缺失(如
    processDepthImage()
    getDepthFPS()
    ),您应该删除这些函数,而其他函数具有相同的功能,但名称/签名不同(例如Kinect(v1)中的
    quit()
    )，在Kinect2中对应的是
    stopDevice()
    dispose()

    这是一个粗略的代码重构版本,应该可以编译,但可能无法100%工作,因为我没有Kinect 2可供使用:

    import org.openkinect.freenect.*;
    import org.openkinect.freenect2.*;
    import org.openkinect.processing.*;
    import org.openkinect.tests.*;
    
    // Daniel Shiffman
    // Kinect Point Cloud example
    // http://www.shiffman.net
    // https://github.com/shiffman/libfreenect/tree/master/wrappers/java/processing
    
    import java.util.*;
    import java.io.*;
    
    // Kinect library handle, created in setup().
    Kinect2 kinect2;
    
    // Current rotation angle of the point cloud (radians).
    float a = 0;
    
    // Size of kinect image.
    // NOTE(review): these are Kinect v1 depth dimensions; a Kinect2 depth
    // frame is kinect2.depthWidth x kinect2.depthHeight -- confirm before use.
    int w = 640;
    int h = 480;
    
    // writing state indicator: true while frames are being recorded
    boolean write = false;
    
    // threshold filter initial value: raw depth readings above this are
    // replaced with the 2047 "no data" sentinel in draw()
    int fltValue = 950;
    
    
    // "recording" object. each vector element holds one frame's PVector[]
    // coordinate map, appended once per draw() call while `write` is true
    Vector <Object> recording = new Vector<Object>(); 
    
    
    // We'll use a lookup table so that we don't have to repeat the math over and over
    // (index = raw depth 0..2047, value = meters; filled in setup())
    float[] depthLookUp = new float[2048];
    
    // Sketch initialization: open the Kinect2 device, enable the depth
    // stream, and precompute the raw-depth -> meters lookup table.
    void setup() {
      size(800, 600, P3D);
      kinect2 = new Kinect2(this);
      // NOTE(review): this init()/initDevice()/initDepth() call sequence
      // differs from the library examples elsewhere in this file (which call
      // initDepth() before initDevice()) -- confirm against the
      // OpenKinect-for-Processing documentation.
      kinect2.init();
      kinect2.initDevice();
      kinect2.initDepth();
      // We don't need the grayscale image in this example
      // so this makes it more efficient
      //kinect2.processDepthImage(false);
    
    
      // Lookup table for all possible depth values (0 - 2047)
      // NOTE(review): 2048 entries match the Kinect v1 11-bit range; Kinect2
      // raw readings may exceed 2047 -- verify before indexing with them.
      for (int i = 0; i < depthLookUp.length; i++) {
        depthLookUp[i] = rawDepthToMeters(i);
      }
    }
    
    // Render the current depth frame as a rotated 3D point cloud and, while
    // recording, append a snapshot of the world-space points to `recording`.
    void draw() {
    
      background(0);
      fill(255);
      textMode(SCREEN);
      text("Processing FR: " + (int)frameRate, 10, 16);
    
      // Get the raw depth as array of integers
      int[] depth = kinect2.getRawDepth();
    
      // BUG FIX: the original iterated over the Kinect v1 resolution
      // (w=640, h=480), but a Kinect2 depth frame is
      // kinect2.depthWidth x kinect2.depthHeight (512x424), so
      // depth[offset] ran past the end of the array. Derive the loop
      // bounds from the device instead.
      int dw = kinect2.depthWidth;
      int dh = kinect2.depthHeight;
    
      // We're just going to calculate and draw every 4th pixel
      int skip = 4;
    
      // Translate and rotate
      translate(width/2, height/2, -50);
      rotateY(a);
    
      int index = 0;
    
      // One slot per sampled pixel: ceil(dw/skip) * ceil(dh/skip).
      PVector[] frame = new PVector[((dw + skip - 1) / skip) * ((dh + skip - 1) / skip)];
    
      for (int x = 0; x < dw; x += skip) {
        for (int y = 0; y < dh; y += skip) {
          int offset = x + y * dw;
    
          int rawDepth = depth[offset];
    
          // Threshold filter: readings beyond fltValue become the 2047
          // "no data" sentinel (which maps to 0 m). Clamping to 2047 also
          // guards the 2048-entry depthLookUp table inside depthToWorld()
          // against Kinect2 readings above the Kinect v1 11-bit range --
          // the original indexed the table with the raw value and could
          // overrun it.
          int d = (rawDepth > fltValue) ? 2047 : min(rawDepth, 2047);
    
          // Convert kinect data to world xyz coordinate
          PVector v = depthToWorld(x, y, d);
    
          frame[index++] = v;
    
          // Brightness encodes the raw depth reading.
          stroke(map(rawDepth, 0, 2048, 0, 256));
          pushMatrix();
          // Scale up by 400 to screen units
          float factor = 400;
          translate(v.x*factor, v.y*factor, factor-v.z*factor);
          //sphere(1);
          point(0, 0);
          //line (0,0,1,1);
          popMatrix();
        }
      }
    
      if (write == true) {
        recording.add(frame);
      }
    
      // Rotate
      //a += 0.015f;
    }
    
    // These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
    // Convert an 11-bit Kinect raw depth reading to meters.
    // Calibration constants from: http://graphics.stanford.edu/~mdfisher/Kinect.html
    // Readings of 2047 and above mean "no data" and map to 0.
    float rawDepthToMeters(int depthValue) {
      if (depthValue >= 2047) {
        return 0.0f;
      }
      double meters = 1.0 / ((double) depthValue * -0.0030711016 + 3.3309495161);
      return (float) meters;
    }
    
    // Project a depth-image pixel (x, y, raw depth) into camera-space
    // coordinates in meters, using the Kinect v1 depth-camera intrinsics
    // from http://graphics.stanford.edu/~mdfisher/Kinect.html.
    PVector depthToWorld(int x, int y, int depthValue) {
      // Inverse focal lengths and principal point of the depth camera.
      final double invFx = 1.0 / 5.9421434211923247e+02;
      final double invFy = 1.0 / 5.9104053696870778e+02;
      final double centerX = 3.3930780975300314e+02;
      final double centerY = 2.4273913761751615e+02;
    
      // Depth in meters comes from the precomputed lookup table
      // (equivalent to calling rawDepthToMeters(depthValue)).
      double meters = depthLookUp[depthValue];
    
      PVector world = new PVector();
      world.x = (float) ((x - centerX) * meters * invFx);
      world.y = (float) ((y - centerY) * meters * invFy);
      world.z = (float) meters;
      return world;
    }
    
    // Sketch shutdown hook: release the Kinect2 device before Processing
    // tears the sketch down.
    // NOTE(review): stop() is deprecated in Processing 3 in favor of
    // dispose() -- confirm this override is still invoked.
    void stop() {
      kinect2.stopDevice();
      kinect2.dispose();
      super.stop();
    }
    
    
    // Index of the next recording output directory ("out<N>") to write.
    int currentFile = 0;
    
    // Stub: saving is currently done inline in keyPressed(); kept as a
    // placeholder (its call site there is commented out).
    void saveFile() {
    }
    
    // Keyboard controls:
    //   '1' / '2' -- raise / lower the depth threshold filter by 50
    //   '3'       -- start recording (clears any previous frames)
    //   '4'       -- stop recording and write all frames to out<N>/frame<i>.txt
    void keyPressed() { // Press a key to save the data
    
      if (key == '1')
      {
        fltValue += 50;
        println("fltValue: " + fltValue);
      } else if (key == '2')
      {
        fltValue -= 50;
        println("fltValue: " + fltValue);
      } else if (key=='4') {
        if (write == true) {
          write = false;
    
          println( "recorded " + recording.size() + " frames.");
    
          println("Stopped Recording " + currentFile);
    
          // FIX: create the output directory once, up front -- the original
          // called mkdir() on every loop iteration (it failed silently after
          // the first pass, and the unused `success` flag hid that).
          new File("out" + currentFile).mkdir();
    
          // One text file per recorded frame; each line is "index, x, y, z".
          int i = 0;
          for (Object recorded : recording) {
            PVector[] frame = (PVector[]) recorded;
            PrintWriter output = createWriter("out"+currentFile+"/frame" + i++ +".txt");
    
            for (int j = 0; j < frame.length; j++) {
              output.println(j + ", " + frame[j].x + ", " + frame[j].y + ", " + frame[j].z );
            }
            output.flush(); // Write the remaining data
            output.close();
          }
          currentFile++;
        }
      } else if (key == '3') {
        println("Started Recording "+currentFile);
        recording.clear();
    
        write = true;
      }
    }
    
    // Daniel Shiffman //<>//
    // Thomas Sanchez Lengeling
    // Kinect Point Cloud example
    
    // https://github.com/shiffman/OpenKinect-for-Processing
    // http://shiffman.net/p5/kinect/
    
    import org.openkinect.processing.*;
    import java.nio.FloatBuffer;
    
    // Kinect Library object, created in setup()
    Kinect2 kinect2;
    
    // Angle for rotation of the point cloud around the Y axis (radians)
    float a = 3.1;
    
    // depth filtering band: only raw readings in [depthNear, depthFar] are
    // drawn/recorded (sensor units; presumably millimeters -- confirm)
    float depthNear = 700;
    float depthFar = 950;
    
    // recording point clouds to disk: toggled by SPACE in keyReleased();
    // each inner list holds one frame's captured points
    boolean isRecording;
    ArrayList<ArrayList<PVector>> frames = new ArrayList<ArrayList<PVector>>();
    
    // Sketch initialization: set up the 3D renderer and open the Kinect2
    // depth stream (initDepth() before initDevice(), per the library examples).
    void setup() {
    
      // Rendering in P3D
      size(800, 600, P3D);
    
      kinect2 = new Kinect2(this);
      kinect2.initDepth();
      kinect2.initDevice();
    }
    
    
    // Draw the depth-filtered Kinect2 frame as a rotating point cloud,
    // capture the points into `frames` while recording, and overlay a HUD
    // showing frame rate, recording state, clipping band, and key bindings.
    void draw() {
      background(0);
    
      // Spin the cloud about the Y axis around the sketch center.
      pushMatrix();
      translate(width/2, height/2, 50);
      rotateY(a);
    
      // Sample every 2nd pixel in each direction.
      int step = 2;
    
      // Raw depth readings, one int per sensor pixel.
      int[] depthData = kinect2.getRawDepth();
    
      // Points captured for this frame (filled only while recording).
      ArrayList<PVector> currentFrame = new ArrayList<PVector>();
    
      stroke(255);
      beginShape(POINTS);
      for (int x = 0; x < kinect2.depthWidth; x += step) {
        for (int y = 0; y < kinect2.depthHeight; y += step) {
          float depthValue = depthData[x + y * kinect2.depthWidth];
    
          // Skip anything outside the near/far clipping band.
          if (depthValue < depthNear || depthValue > depthFar) continue;
    
          // Back-project the pixel into camera-space coordinates.
          PVector point = depthToPointCloudPos(x, y, depthValue);
    
          if (isRecording) currentFrame.add(point);
    
          vertex(point.x, point.y, point.z);
        }
      }
      endShape();
    
      popMatrix();
    
      // Store this frame while recording (even if it captured no points).
      if (isRecording) frames.add(currentFrame);
    
      // Heads-up display.
      fill(255, 0, 0);
      text((int)frameRate+"fps\nrecording: " + isRecording + "\ndepthFar: " + depthFar + " depthNear: " + depthNear
           +"\nkeys:\nSPACE - toggle recording\nN/n - increase/decrease near clipping\nF/f - increase/decrease far clipping", 50, 50);
    
      // Rotate
      //a += 0.0015f;
    }
    
    // SPACE toggles recording; stopping a recording flushes it to disk.
    // n/N and f/F nudge the near/far depth clipping planes by 10 units.
    void keyReleased(){
      if(key == ' ') {
        isRecording = !isRecording;
        if(isRecording) {
          // A new recording starts fresh: drop previously captured frames.
          frames.clear();
        } else {
          // Just stopped: persist whatever was captured.
          saveFramesToDisk();
        }
      }
      // depth filter increment
      int nudge = 10;
      if(key == 'n') depthNear -= nudge;
      if(key == 'N') depthNear += nudge;
      if(key == 'f') depthFar -= nudge;
      if(key == 'F') depthFar += nudge;
    }
    
    // Write every recorded frame to a new timestamped folder, one text file
    // per frame, one "index, x, y, z" line per point.
    void saveFramesToDisk(){
      // FIX: the original folder name used ':' between the time fields,
      // which is an illegal filename character on Windows (and awkward on
      // other platforms). Use '-'/'_' separators and zero-padded fields;
      // millis() (sketch uptime) keeps names unique within one second.
      String folderName = "rec_" + nf(year(), 4) + "-" + nf(month(), 2) + "-" + nf(day(), 2)
        + "_" + nf(hour(), 2) + "-" + nf(minute(), 2) + "-" + nf(second(), 2) + "_" + millis();
      //make a folder with that name
      new File(folderName).mkdir();
      //count the number of frames
      int numFrames = frames.size();
      //for each frame
      for(int i = 0 ; i < numFrames; i++){
        //access the list of points
        ArrayList<PVector> frame = frames.get(i);
        //nf() zero-pads the frame index so files sort correctly
        PrintWriter output = createWriter(folderName+"/frame" + nf(i,4) +".txt");
        //for each point in a frame
        for (int j = 0; j < frame.size(); j++) {
          //retrieve the point
          PVector p = frame.get(j);
          //write to file: index, x, y, z + new line character
          output.println(j + ", " + p.x + ", " + p.y + ", " + p.z );
        }
    
        output.flush(); // Write the remaining data
        output.close();
    
      }
    
      println("Wrote " + numFrames + " frames in " + folderName);
    
    }
    
    //calculte the xyz camera position based on the depth data
    // Back-project a depth pixel (x, y, depthValue) to a camera-space
    // position using the Kinect v2 intrinsics in CameraParams. Depth is
    // kept in the sensor's native units (raw reading; presumably
    // millimeters -- confirm).
    PVector depthToPointCloudPos(int x, int y, float depthValue) {
      float z = depthValue;
      float worldX = (x - CameraParams.cx) * z / CameraParams.fx;
      float worldY = (y - CameraParams.cy) * z / CameraParams.fy;
      PVector point = new PVector();
      point.x = worldX;
      point.y = worldY;
      point.z = z;
      return point;
    }
    //camera information based on the Kinect v2 hardware
    static class CameraParams {
      static float cx = 254.878f;
      static float cy = 205.395f;
      static float fx = 365.456f;
      static float fy = 365.456f;
      static float k1 = 0.0905474;
      static float k2 = -0.26819;
      static float k3 = 0.0950862;
      static float p1 = 0.0;
      static float p2 = 0.0;
    }