
Java: My app crashes when trying to start. How do I fix this error message?

Tags: java, android

Context: I am trying to use TensorFlow Lite together with camera capture.

When I launch my app, it crashes and this error message shows up on my phone (there are no errors during the build step):

java.lang.NoClassDefFoundError: Failed resolution of: Lcom/google/android/things/pio/GpioCallback;
    at com.google.android.things.contrib.driver.button.ButtonInputDriver.&lt;init&gt;(…)
    at com.google.android.things.contrib.driver.rainbowhat.RainbowHat.createButtonInputDriver(…)
    at com.google.android.things.contrib.driver.rainbowhat.RainbowHat.createButtonCInputDriver(…)
    at com.example.androidthings.imageclassifier.ImageClassifierActivity.initButton(ImageClassifierActivity.java:186)
    at com.example.androidthings.imageclassifier.ImageClassifierActivity.onCreate(…)
    at android.app.Activity.performCreate(…)
    at android.app.Activity.performCreate(…)
    at android.app.Instrumentation.callActivityOnCreate(…)
    at android.app.ActivityThread.performLaunchActivity(…)
    at android.app.ActivityThread.handleLaunchActivity(…)
    at android.app.servertransaction.LaunchActivityItem.execute(…)
    at android.app.servertransaction.TransactionExecutor.executeCallbacks(…)
    at android.app.servertransaction.TransactionExecutor.execute(…)
    at android.app.ActivityThread$H.handleMessage(…)
    at android.os.Handler.dispatchMessage(Handler.java:106)
    at android.os.Looper.loop(Looper.java:201)
    at android.app.ActivityThread.main(…)
    at java.lang.reflect.Method.invoke(Native Method)
    at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(…)
    at com.android.internal.os.ZygoteInit.main(…)
Caused by: java.lang.ClassNotFoundException: Didn't find class "com.google.android.things.pio.GpioCallback" on path: DexPathList[[zip file "/system/framework/org.apache.http.legacy.boot.jar", zip file "/data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/base.apk"], nativeLibraryDirectories=[/data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/lib/arm64, /data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/base.apk!/lib/arm64-v8a, /system/lib64, /vendor/lib64]]
    at dalvik.system.BaseDexClassLoader.findClass(BaseDexClassLoader.java:171)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:379)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:312)
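
Reading the trace: the root cause is the "Caused by" line. GpioCallback is part of the Android Things Peripheral I/O API, which is supplied at runtime by the com.google.android.things shared library that exists only on Android Things devices. On a regular phone that library is absent, so the first reference to the Rainbow HAT driver classes (RainbowHat and ButtonInputDriver inside initButton(), called from onCreate in the code below) fails with NoClassDefFoundError. Here is a minimal defensive sketch, assuming the goal is just to keep the classifier usable on a phone without the GPIO button; the PackageManager.FEATURE_EMBEDDED check and the widened catch are illustrative additions, not part of the original codelab code:

private void initButton() {
    // Hypothetical guard (not in the original codelab): only touch the Android Things
    // driver classes on an embedded device, where the com.google.android.things shared
    // library actually exists. Requires: import android.content.pm.PackageManager;
    if (!getPackageManager().hasSystemFeature(PackageManager.FEATURE_EMBEDDED)) {
        Log.w(TAG, "Not an Android Things device; skipping GPIO button. Use a keyboard instead.");
        return;
    }
    try {
        mButtonDriver = RainbowHat.createButtonCInputDriver(KeyEvent.KEYCODE_ENTER);
        mButtonDriver.register();
    } catch (IOException | LinkageError e) {
        // NoClassDefFoundError extends LinkageError (an Error, not an Exception), so the
        // original IOException-only catch could never intercept it.
        Log.w(TAG, "Cannot set up button. Ignoring push button. Use a keyboard instead.", e);
    }
}

On an actual Android Things board the guard passes and the button works as before; on a phone, recognition can still be triggered with a USB keyboard or adb shell input keyevent 66, as the comment in the original initButton() notes.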

Here is ImageClassifierActivity:

package com.example.androidthings.imageclassifier;

import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.ImageReader;
import android.os.Bundle;
import android.util.Log;
import android.view.KeyEvent;
import android.view.WindowManager;
import android.widget.ImageView;
import android.widget.TextView;

import com.example.androidthings.imageclassifier.classifier.Recognition;
import com.example.androidthings.imageclassifier.classifier.TensorFlowHelper;
import com.google.android.things.contrib.driver.button.ButtonInputDriver;
import com.google.android.things.contrib.driver.rainbowhat.RainbowHat;

import org.tensorflow.lite.Interpreter;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

public class ImageClassifierActivity extends Activity {
    private static final String TAG = "ImageClassifierActivity";

    /** Camera image capture size */
    private static final int PREVIEW_IMAGE_WIDTH = 640;
    private static final int PREVIEW_IMAGE_HEIGHT = 480;
    /** Image dimensions required by TF model */
    private static final int TF_INPUT_IMAGE_WIDTH = 224;
    private static final int TF_INPUT_IMAGE_HEIGHT = 224;
    /** Dimensions of model inputs. */
    private static final int DIM_BATCH_SIZE = 1;
    private static final int DIM_PIXEL_SIZE = 3;
    /** TF model asset files */
    private static final String LABELS_FILE = "labels.txt";
    private static final String MODEL_FILE = "mobilenet_quant_v1_224.tflite";

    private ButtonInputDriver mButtonDriver;
    private boolean mProcessing;

    private ImageView mImage;
    private TextView mResultText;

    private Interpreter mTensorFlowLite;
    private List<String> mLabels;
    private CameraHandler mCameraHandler;
    private ImagePreprocessor mImagePreprocessor;

    /**
     * Initialize the classifier that will be used to process images.
     */
    private void initClassifier() {
        try {
            mTensorFlowLite = new Interpreter(TensorFlowHelper.loadModelFile(this, MODEL_FILE));
            mLabels = TensorFlowHelper.readLabels(this, LABELS_FILE);
        } catch (IOException e) {
            Log.w(TAG, "Unable to initialize TensorFlow Lite.", e);
        }
    }

    /**
     * Clean up the resources used by the classifier.
     */
    private void destroyClassifier() {
        mTensorFlowLite.close();
    }

    /**
     * Process an image and identify what is in it. When done, the method
     * {@link #onPhotoRecognitionReady(Collection)} must be called with the results of
     * the image recognition process.
     *
     * @param image Bitmap containing the image to be classified. The image can be
     *              of any size, but preprocessing might occur to resize it to the
     *              format expected by the classification process, which can be time
     *              and power consuming.
     */
    private void doRecognize(Bitmap image) {
        // Allocate space for the inference results
        byte[][] confidencePerLabel = new byte[1][mLabels.size()];
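        // (The confidences are bytes because the bundled model, mobilenet_quant_v1_224.tflite,
        // is quantized and reports each label's score as an unsigned 8-bit value.)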
        // Allocate buffer for image pixels.
        int[] intValues = new int[TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT];
        ByteBuffer imgData = ByteBuffer.allocateDirect(
                DIM_BATCH_SIZE * TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT * DIM_PIXEL_SIZE);
        imgData.order(ByteOrder.nativeOrder());

        // Read image data into buffer formatted for the TensorFlow model
        TensorFlowHelper.convertBitmapToByteBuffer(image, intValues, imgData);

        // Run inference on the network with the image bytes in imgData as input,
        // storing results on the confidencePerLabel array.
        mTensorFlowLite.run(imgData, confidencePerLabel);

        // Get the results with the highest confidence and map them to their labels
        Collection<Recognition> results = TensorFlowHelper.getBestResults(confidencePerLabel, mLabels);
        // Report the results with the highest confidence
        onPhotoRecognitionReady(results);
    }

    /**
     * Initialize the camera that will be used to capture images.
     */
    private void initCamera() {
        mImagePreprocessor = new ImagePreprocessor(PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT,
                TF_INPUT_IMAGE_WIDTH, TF_INPUT_IMAGE_HEIGHT);
        mCameraHandler = CameraHandler.getInstance();
        mCameraHandler.initializeCamera(this,
                PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT, null,
                new ImageReader.OnImageAvailableListener() {
                    @Override
                    public void onImageAvailable(ImageReader imageReader) {
                        Bitmap bitmap = mImagePreprocessor.preprocessImage(imageReader.acquireNextImage());
                        onPhotoReady(bitmap);
                    }
                });
    }

    /**
     * Clean up resources used by the camera.
     */
    private void closeCamera() {
        mCameraHandler.shutDown();
    }

    /**
     * Load the image that will be used in the classification process.
     * When done, the method {@link #onPhotoReady(Bitmap)} must be called with the image.
     */
    private void loadPhoto() {
        mCameraHandler.takePicture();
    }


    // --------------------------------------------------------------------------------------
    // NOTE: The normal codelab flow won't require you to change anything below this line,
    // although you are encouraged to read and understand it.

    @Override
    protected void onCreate(final Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

        setContentView(R.layout.activity_camera);
        mImage = findViewById(R.id.imageView);
        mResultText = findViewById(R.id.resultText);

        updateStatus(getString(R.string.initializing));
        initCamera();
        initClassifier();
        initButton();
        updateStatus(getString(R.string.help_message));
    }

    /**
     * Register a GPIO button that, when clicked, will generate the {@link KeyEvent#KEYCODE_ENTER}
     * key, to be handled by {@link #onKeyUp(int, KeyEvent)} just like any regular keyboard
     * event.
     *
     * If there's no button connected to the board, the doRecognize can still be triggered by
     * sending key events using a USB keyboard or `adb shell input keyevent 66`.
     */
    private void initButton() {
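        // This is the frame the stack trace reports (initButton, ImageClassifierActivity.java:186):
        // referencing RainbowHat forces resolution of the Android Things driver classes, which
        // fails with NoClassDefFoundError when the com.google.android.things shared library is
        // missing. That is an Error, not an IOException, so the catch below never sees it.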
        try {
            mButtonDriver = RainbowHat.createButtonCInputDriver(KeyEvent.KEYCODE_ENTER);
            mButtonDriver.register();
        } catch (IOException e) {
            Log.w(TAG, "Cannot find button. Ignoring push button. Use a keyboard instead.", e);
        }
    }

    private Bitmap getStaticBitmap() {
        Log.d(TAG, "Using sample photo in res/drawable/sampledog_224x224.png");
        return BitmapFactory.decodeResource(this.getResources(), R.drawable.sampledog_224x224);
    }

    @Override
    public boolean onKeyUp(int keyCode, KeyEvent event) {
        if (keyCode == KeyEvent.KEYCODE_ENTER) {
            if (mProcessing) {
                updateStatus("Still processing, please wait");
                return true;
            }
            updateStatus("Running photo recognition");
            mProcessing = true;
            loadPhoto();
            return true;
        }
        return super.onKeyUp(keyCode, event);
    }

    /**
     * Image capture process complete
     */
    private void onPhotoReady(Bitmap bitmap) {
        mImage.setImageBitmap(bitmap);
        doRecognize(bitmap);
    }

    /**
     * Image classification process complete
     */
    private void onPhotoRecognitionReady(Collection<Recognition> results) {
        updateStatus(formatResults(results));
        mProcessing = false;
    }

    /**
     * Format results list for display
     */
    private String formatResults(Collection<Recognition> results) {
        if (results == null || results.isEmpty()) {
            return getString(R.string.empty_result);
        } else {
            StringBuilder sb = new StringBuilder();
            Iterator<Recognition> it = results.iterator();
            int counter = 0;
            while (it.hasNext()) {
                Recognition r = it.next();
                sb.append(r.getTitle());
                counter++;
                if (counter < results.size() - 1) {
                    sb.append(", ");
                } else if (counter == results.size() - 1) {
                    sb.append(" or ");
                }
            }

            return sb.toString();
        }
    }

    /**
     * Report updates to the display and log output
     */
    private void updateStatus(String status) {
        Log.d(TAG, status);
        mResultText.setText(status);
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        try {
            destroyClassifier();
        } catch (Throwable t) {
            // close quietly
        }
        try {
            closeCamera();
        } catch (Throwable t) {
            // close quietly
        }
        try {
            if (mButtonDriver != null) mButtonDriver.close();
        } catch (Throwable t) {
            // close quietly
        }
    }
}
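
A note on why the build succeeds while the launch fails: Android Things projects typically declare the androidthings library as a compile-time only (provided) dependency, so nothing at build time verifies that the target device can actually supply those classes at runtime. Running the app on real Android Things hardware with a Rainbow HAT attached (e.g. a Raspberry Pi 3), or guarding the driver setup as sketched above, avoids the crash on a regular phone.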