
Android: how to use the output of the PoseNet model in tflite


I am using the PoseNet tflite model. It takes a 1*353*257*3 input image and returns four arrays with dimensions 1*23*17*17, 1*23*17*34, 1*23*17*64 and 1*23*17*1. The model's output stride is 16. How can I get the coordinates of all 17 pose keypoints on the input image? I have already tried printing the confidence scores from the heatmap in the out1 array, but the value at every pixel is close to 0.00. The code is below:

public class MainActivity extends AppCompatActivity {
private static final int CAMERA_REQUEST = 1888;
private ImageView imageView;
private static final int MY_CAMERA_PERMISSION_CODE = 100;
Interpreter tflite = null;
private String TAG = "rohit";
//private Canvas canvas;

Map<Integer, Object> outputMap = new HashMap<>();
float[][][][] out1 = new float[1][23][17][17];
float[][][][] out2 = new float[1][23][17][34];
float[][][][] out3 = new float[1][23][17][64];
float[][][][] out4 = new float[1][23][17][1];

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    String modelFile="multi_person_mobilenet_v1_075_float.tflite";
    try {
        tflite=new Interpreter(loadModelFile(MainActivity.this,modelFile));
    } catch (IOException e) {
        e.printStackTrace();
    }
    final Tensor no = tflite.getInputTensor(0);
    Log.d(TAG, "onCreate: Input shape"+ Arrays.toString(no.shape()));

    int c = tflite.getOutputTensorCount();
    Log.d(TAG, "onCreate: Output Count" +c );
    for (int i = 0; i <4 ; i++) {
        final Tensor output = tflite.getOutputTensor(i);
        Log.d(TAG, "onCreate: Output shape" + Arrays.toString(output.shape()));
    }
    this.imageView =  this.findViewById(R.id.imageView1);
    Button photoButton = this.findViewById(R.id.button1);
    photoButton.setOnClickListener(new View.OnClickListener() {

        @Override
        public void onClick(View v) {
            if (checkSelfPermission(Manifest.permission.CAMERA)
                    != PackageManager.PERMISSION_GRANTED) {
                requestPermissions(new String[]{Manifest.permission.CAMERA},
                        MY_CAMERA_PERMISSION_CODE);
            } else {
                Intent cameraIntent = new Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE);
                startActivityForResult(cameraIntent, CAMERA_REQUEST);
            }
        }
    });
}

public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
    super.onRequestPermissionsResult(requestCode, permissions, grantResults);
    if (requestCode == MY_CAMERA_PERMISSION_CODE) {
        if (grantResults[0] == PackageManager.PERMISSION_GRANTED) {
            Toast.makeText(this, "camera permission granted", Toast.LENGTH_LONG).show();
            Intent cameraIntent = new
                    Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE);
            startActivityForResult(cameraIntent, CAMERA_REQUEST);
        } else {
            Toast.makeText(this, "camera permission denied", Toast.LENGTH_LONG).show();
        }
    }
}

protected void onActivityResult ( int requestCode, int resultCode, Intent data){
    if (requestCode == CAMERA_REQUEST && resultCode == Activity.RESULT_OK) {
        Bitmap photo = (Bitmap) data.getExtras().get("data");
        Log.d(TAG,"bhai:"+photo.getWidth()+":"+photo.getHeight());
        //imageView.setImageBitmap(photo);
        photo = Bitmap.createScaledBitmap(photo, 353, 257, false);
        photo = photo.copy(Bitmap.Config.ARGB_8888,true);
        Log.d(TAG, "onActivityResult: Bitmap resized");

        int width =photo.getWidth();
        int height = photo.getHeight();
        float[][][][] result = new float[1][width][height][3];
        int[] pixels = new int[width*height];
        photo.getPixels(pixels, 0, width, 0, 0, width, height);
        int pixelsIndex = 0;
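        // Unpack each packed ARGB pixel of the scaled bitmap into separate R, G and B float channels of the input tensor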
        for (int i = 0; i < width; i++)
        {
            for (int j = 0; j < height; j++)
            {
                // result[i][j] =  pixels[pixelsIndex];
                int p = pixels[pixelsIndex];
                result[0][i][j][0]  = (p >> 16) & 0xff;
                result[0][i][j][1]  = (p >> 8) & 0xff;
                result[0][i][j][2]  = p & 0xff;
                pixelsIndex++;
            }
        }
        Object [] inputs = {result};
        //inputs[0] = inp;

        outputMap.put(0, out1);
        outputMap.put(1, out2);
        outputMap.put(2, out3);
        outputMap.put(3, out4);

        tflite.runForMultipleInputsOutputs(inputs,outputMap);
        out1 = (float[][][][]) outputMap.get(0);
        out2 = (float[][][][]) outputMap.get(1);
        out3 = (float[][][][]) outputMap.get(2);
        out4 = (float[][][][]) outputMap.get(3);

        Canvas canvas = new Canvas(photo);
        Paint p = new Paint();
        p.setColor(Color.RED);

        float[][][] scores = new float[out1[0].length][out1[0][0].length][17];
        int[][] heatmap_pos = new int[17][2];

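        // For each of the 17 keypoints, apply a sigmoid to the heatmap logits and record the cell with the highest score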
        for(int i=0;i<17;i++)
        {
            float max = -1;

            for(int j=0;j<out1[0].length;j++)
            {
                for(int k=0;k<out1[0][0].length;k++)
                {
                  //  Log.d("mylog", "onActivityResult: "+out1[0][j][k][i]);
                        scores[j][k][i]  = sigmoid(out1[0][j][k][i]);
                        if(max<scores[j][k][i])
                        {
                            max = scores[j][k][i];
                            heatmap_pos[i][0] = j;
                            heatmap_pos[i][1] = k;
                        }
                }

            }
       //     Log.d(TAG, "onActivityResult: "+max+"    "+heatmap_pos[i][0]+"    "+heatmap_pos[i][1]);
        }

        for(int i=0;i<17;i++)
        {
            float max = -1;

            for(int j=0;j<out1[0].length;j++)
            {
                for(int k=0;k<out1[0][0].length;k++)
                {
                    Log.d("mylog", "onActivityResult: "+out1[0][j][k][i]);
                    scores[j][k][i]  = sigmoid(out1[0][j][k][i]);
                    if(max<scores[j][k][i])
                    {
                        max = scores[j][k][i];
                        heatmap_pos[i][0] = j;
                        heatmap_pos[i][1] = k;
                    }
                }

            }
            //     Log.d(TAG, "onActivityResult: "+max+"    "+heatmap_pos[i][0]+"    "+heatmap_pos[i][1]);
        }
        for(int i=0;i<17;i++)
        {
            Log.d("heatlog", "onActivityResult: "+heatmap_pos[i][0]+"    "+heatmap_pos[i][1]);
        }
        float[][] offset_vector = new float[17][2];
        float[][] keypoint_pos = new float[17][2];
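        // Map each winning heatmap cell back to image coordinates: cell index * output stride (16) plus that keypoint's offset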
        for(int i=0;i<17;i++)
        {
            offset_vector[i][0] = out2[0][heatmap_pos[i][0]][heatmap_pos[i][1]][i];
            offset_vector[i][1] = out2[0][heatmap_pos[i][0]][heatmap_pos[i][1]][i+17];
            Log.d("myoff",offset_vector[i][0]+":"+offset_vector[i][1]);
            keypoint_pos[i][0] = heatmap_pos[i][0]*16+offset_vector[i][0];
            keypoint_pos[i][1] = heatmap_pos[i][1]*16+offset_vector[i][1];
            Log.d(TAG, "onActivityResult: "+keypoint_pos[i][0]+"    "+keypoint_pos[i][1]);
            canvas.drawCircle(keypoint_pos[i][0]+353/2,keypoint_pos[i][1]-257/2,5,p);
        }

        imageView.setImageBitmap(photo);
    }
}

private MappedByteBuffer loadModelFile(Activity activity, String MODEL_FILE) throws IOException {
    AssetFileDescriptor fileDescriptor = activity.getAssets().openFd(MODEL_FILE);
    FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
    FileChannel fileChannel = inputStream.getChannel();
    long startOffset = fileDescriptor.getStartOffset();
    long declaredLength = fileDescriptor.getDeclaredLength();
    return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}

public float sigmoid(float value) {
    float p =  (float)(1.0 / (1 + Math.exp(-value)));
    return p;
}
}
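For reference, the coordinates are usually recovered with exactly the argmax-plus-offset scheme the question describes. The following is a minimal sketch of that decoding, not taken from the question: it assumes the common PoseNet layout in which heatmap rows correspond to the image y axis, columns to the x axis, and the first 17 offset channels are y offsets; the name decodeKeypoints is made up for illustration.

static float[][] decodeKeypoints(float[][][][] heatmaps, float[][][][] offsets, int outputStride) {
    int rows = heatmaps[0].length;            // 23 for a 353-pixel side with stride 16
    int cols = heatmaps[0][0].length;         // 17 for a 257-pixel side with stride 16
    int numKeypoints = heatmaps[0][0][0].length;
    float[][] keypoints = new float[numKeypoints][3]; // x, y, score

    for (int k = 0; k < numKeypoints; k++) {
        int bestRow = 0, bestCol = 0;
        float bestScore = -1;
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++) {
                float score = (float) (1.0 / (1.0 + Math.exp(-heatmaps[0][r][c][k])));
                if (score > bestScore) {
                    bestScore = score;
                    bestRow = r;
                    bestCol = c;
                }
            }
        }
        // row index maps to y, column index to x; add this keypoint's offset at the winning cell
        keypoints[k][0] = bestCol * outputStride + offsets[0][bestRow][bestCol][k + numKeypoints]; // x
        keypoints[k][1] = bestRow * outputStride + offsets[0][bestRow][bestCol][k];                // y
        keypoints[k][2] = bestScore;
    }
    return keypoints;
}

The returned x and y values are in the coordinate system of the network input, so they still have to be scaled back to the size of the bitmap that is actually drawn on.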

I think there is a problem with that tflite model file. So I tried to build a PoseNet tflite model from the model's weights myself. All of the weights can be downloaded from the tfjs model:

Then you can build the model and do all of the pre- and post-processing following this repo:

After building the PoseNet model you can export it to a .pb file or a .tflite file. I have tried this process successfully, and the PoseNet model runs with the GPU in my Android app.
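The answer does not show how the exported model is loaded, but running a .tflite file on the GPU in an Android app is normally done through the TensorFlow Lite GPU delegate. A minimal sketch, assuming the org.tensorflow:tensorflow-lite-gpu dependency is added and reusing loadModelFile from the question ("posenet.tflite" is a placeholder file name):

// requires: import org.tensorflow.lite.gpu.GpuDelegate;
GpuDelegate gpuDelegate = new GpuDelegate();
Interpreter.Options options = new Interpreter.Options();
options.addDelegate(gpuDelegate);
Interpreter tflite = new Interpreter(loadModelFile(this, "posenet.tflite"), options);
// ... run inference as in the question ...
tflite.close();
gpuDelegate.close();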

Thanks for your effort. I am out of town right now, but I will try it as soon as I can. The download URL does not work. Here is what I tried: you can use the weights from . Could you provide your tflite file for us, Ying Li? @Ramandepsing, the tflite file is here: but the input and output sizes are different.