Android: Cannot copy a TensorFlowLite tensor (Identity_1) with shape [1, 3087, 2] to a Java object with shape [1, 3087]
I am trying to run a YoloV4 model converted to .tflite on Android. My input shape looks fine, [1, 224, 224, 4], but the app crashes on my output shape. The code I am using comes from the Udacity course on tflite. I get the error above when running the following code:
import android.content.res.AssetManager
import android.graphics.Bitmap
import android.graphics.RectF
import android.os.Trace
import android.util.ArrayMap
import org.tensorflow.lite.Interpreter
import java.io.BufferedReader
import java.io.FileInputStream
import java.io.IOException
import java.io.InputStream
import java.io.InputStreamReader
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.util.Vector

class TFLiteObjectDetectionAPIModel private constructor() : Classifier {

    override val statString: String
        get() = TODO("not implemented") // To change initializer of created properties use File | Settings | File Templates.

    private var isModelQuantized: Boolean = false
    // Config values.
    private var inputSize: Int = 0
    // Pre-allocated buffers.
    private val labels = Vector<String>()
    private var intValues: IntArray? = null
    // outputLocations: array of shape [Batchsize, NUM_DETECTIONS, 4]
    // contains the location of detected boxes
    private var outputLocations: Array<Array<FloatArray>>? = null
    // outputClasses: array of shape [Batchsize, NUM_DETECTIONS]
    // contains the classes of detected boxes
    private var outputClasses: Array<FloatArray>? = null
    // outputScores: array of shape [Batchsize, NUM_DETECTIONS]
    // contains the scores of detected boxes
    private var outputScores: Array<FloatArray>? = null
    // numDetections: array of shape [Batchsize]
    // contains the number of detected boxes
    private var numDetections: FloatArray? = null
    private var imgData: ByteBuffer? = null
    private var tfLite: Interpreter? = null
    override fun recognizeImage(bitmap: Bitmap): List<Classifier.Recognition> {
        // Log this method so that it can be analyzed with systrace.
        Trace.beginSection("recognizeImage")
        Trace.beginSection("preprocessBitmap")
        // Preprocess the image data from 0-255 int to normalized float based
        // on the provided parameters.
        bitmap.getPixels(intValues, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height)
        imgData!!.rewind()
        for (i in 0 until inputSize) {
            for (j in 0 until inputSize) {
                val pixelValue = intValues!![i * inputSize + j]
                if (isModelQuantized) {
                    // Quantized model
                    imgData!!.put((pixelValue shr 16 and 0xFF).toByte())
                    imgData!!.put((pixelValue shr 8 and 0xFF).toByte())
                    imgData!!.put((pixelValue and 0xFF).toByte())
                } else { // Float model
                    imgData!!.putFloat(((pixelValue shr 16 and 0xFF) - IMAGE_MEAN) / IMAGE_STD)
                    imgData!!.putFloat(((pixelValue shr 8 and 0xFF) - IMAGE_MEAN) / IMAGE_STD)
                    imgData!!.putFloat(((pixelValue and 0xFF) - IMAGE_MEAN) / IMAGE_STD)
                }
            }
        }
        Trace.endSection() // preprocessBitmap
        // Copy the input data into TensorFlow.
        Trace.beginSection("feed")
        outputLocations = Array(1) { Array(NUM_DETECTIONS) { FloatArray(4) } }
        outputClasses = Array(1) { FloatArray(NUM_DETECTIONS) }
        outputScores = Array(1) { FloatArray(NUM_DETECTIONS) }
        numDetections = FloatArray(1)
        val inputArray = arrayOf<Any>(imgData!!)
        val outputMap = ArrayMap<Int, Any>()
        outputMap[0] = outputLocations!!
        outputMap[1] = outputClasses!!
        outputMap[2] = outputScores!!
        outputMap[3] = numDetections!!
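        // The four-entry output map above matches the SSD-style post-processed
        // model from the course; the errors below suggest this YOLOv4 export
        // has only two output tensors (Identity and Identity_1).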
        Trace.endSection()
        // Run the inference call.
        Trace.beginSection("run")
        tfLite!!.runForMultipleInputsOutputs(inputArray, outputMap)
        Trace.endSection()
        // Show the best detections.
        // after scaling them back to the input size.
        val recognitions = ArrayList<Classifier.Recognition>(NUM_DETECTIONS)
        for (i in 0 until NUM_DETECTIONS) {
            val detection = RectF(
                outputLocations!![0][i][1] * inputSize,
                outputLocations!![0][i][0] * inputSize,
                outputLocations!![0][i][3] * inputSize,
                outputLocations!![0][i][2] * inputSize)
            // SSD Mobilenet V1 Model assumes class 0 is background class
            // in label file and class labels start from 1 to number_of_classes+1,
            // while outputClasses correspond to class index from 0 to number_of_classes
            val labelOffset = 1
            recognitions.add(
                Classifier.Recognition(
                    "" + i,
                    labels[outputClasses!![0][i].toInt() + labelOffset],
                    outputScores!![0][i],
                    detection))
        }
        Trace.endSection() // "recognizeImage"
        return recognitions
    }

    override fun enableStatLogging(debug: Boolean) {
        // Not implemented
    }

    override fun close() {
        // Not needed.
    }

    override fun setNumThreads(numThreads: Int) {
        if (tfLite != null) tfLite!!.setNumThreads(numThreads)
    }

    override fun setUseNNAPI(isChecked: Boolean) {
        if (tfLite != null) tfLite!!.setUseNNAPI(isChecked)
    }
    companion object {
        // Only return this many results.
        private const val NUM_DETECTIONS = 3087
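        // Note: 3087 = (28*28 + 14*14 + 7*7) * 3, i.e. one prediction per grid
        // cell and anchor at strides 8/16/32 for a 224x224 input; it is the raw
        // YOLO prediction count, not a post-NMS result limit.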
        // Float model
        private const val IMAGE_MEAN = 128.0f
        private const val IMAGE_STD = 128.0f

        /** Memory-map the model file in Assets. */
        @Throws(IOException::class)
        private fun loadModelFile(assets: AssetManager, modelFilename: String): MappedByteBuffer {
            val fileDescriptor = assets.openFd(modelFilename)
            val inputStream = FileInputStream(fileDescriptor.fileDescriptor)
            val fileChannel = inputStream.channel
            val startOffset = fileDescriptor.startOffset
            val declaredLength = fileDescriptor.declaredLength
            return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength)
        }

        /**
         * Initializes a native TensorFlow session for classifying images.
         *
         * @param assetManager The asset manager to be used to load assets.
         * @param modelFilename The filepath of the model GraphDef protocol buffer.
         * @param labelFilename The filepath of label file for classes.
         * @param inputSize The size of image input
         * @param isQuantized Boolean representing model is quantized or not
         */
        @Throws(IOException::class)
        fun create(
            assetManager: AssetManager,
            modelFilename: String,
            labelFilename: String,
            inputSize: Int,
            isQuantized: Boolean): Classifier {
            val d = TFLiteObjectDetectionAPIModel()
            val labelsInput: InputStream?
            val actualFilename = labelFilename.split("file:///android_asset/".toRegex())
                .dropLastWhile { it.isEmpty() }.toTypedArray()[1]
            labelsInput = assetManager.open(actualFilename)
            val br: BufferedReader?
            br = BufferedReader(InputStreamReader(labelsInput!!))
            while (br.readLine()?.let { d.labels.add(it) } != null);
            br.close()
            d.inputSize = inputSize
            try {
                val options = Interpreter.Options()
                options.setNumThreads(4)
                d.tfLite = Interpreter(loadModelFile(assetManager, modelFilename), options)
            } catch (e: Exception) {
                throw RuntimeException(e)
            }
            d.isModelQuantized = isQuantized
            // Pre-allocate buffers.
            val numBytesPerChannel: Int = if (isQuantized) {
                1 // Quantized
            } else {
                4 // Floating point
            }
            d.imgData = ByteBuffer.allocateDirect(1 * d.inputSize * d.inputSize * 3 * numBytesPerChannel)
            d.imgData!!.order(ByteOrder.nativeOrder())
            d.intValues = IntArray(d.inputSize * d.inputSize)
            d.outputLocations = Array(1) { Array(NUM_DETECTIONS) { FloatArray(2) } }
            d.outputClasses = Array(1) { FloatArray(NUM_DETECTIONS) }
            d.outputScores = Array(1) { FloatArray(NUM_DETECTIONS) }
            d.numDetections = FloatArray(1)
            return d
        }
    }
}
I am running into the following error: Cannot copy a TensorFlowLite tensor (Identity) with shape [1, 3087, 4] to a Java object with shape [1, 3087, 2].
What are Identity and Identity_1? I have looked at my model in Netron and can see both, but I am not sure how to interpret the model.
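One way to see what Identity and Identity_1 actually are is to ask the interpreter at runtime. A minimal sketch (logOutputTensors is a hypothetical helper, not part of the course code; it assumes an already-constructed Interpreter like the tfLite field above):

import android.util.Log
import org.tensorflow.lite.Interpreter

// Prints the name, shape and data type of every output tensor so the
// Java-side arrays can be sized to match the model.
fun logOutputTensors(interpreter: Interpreter) {
    for (i in 0 until interpreter.outputTensorCount) {
        val t = interpreter.getOutputTensor(i)
        Log.d("TFLite", "output $i: ${t.name()} shape=${t.shape().contentToString()} type=${t.dataType()}")
    }
}

Judging by the two error messages, this would likely report two outputs: Identity with shape [1, 3087, 4] (box coordinates) and Identity_1 with shape [1, 3087, 2] (per-class scores for two classes), rather than the four post-processed tensors (locations, classes, scores, count) that the SSD-based course code expects.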
Can anyone help? Is there something else I can change, or is my model simply not suited to a mobile platform?
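If the model really does have just those two outputs, the Java buffers have to match their shapes exactly. A sketch only, assuming the output order is 0 = Identity and 1 = Identity_1 (outputBoxes and outputScores2d are illustrative names, not from the original code):

val outputBoxes = Array(1) { Array(NUM_DETECTIONS) { FloatArray(4) } }    // Identity: [1, 3087, 4]
val outputScores2d = Array(1) { Array(NUM_DETECTIONS) { FloatArray(2) } } // Identity_1: [1, 3087, 2]
val outputMap = mapOf<Int, Any>(0 to outputBoxes, 1 to outputScores2d)
tfLite!!.runForMultipleInputsOutputs(arrayOf<Any>(imgData!!), outputMap)
// Score thresholding and non-max suppression would then have to be done in
// app code, since a raw YOLO head does not include that post-processing.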