Python: How to modify the model and weight values after post-training quantization in TF Lite

Tags: python, tensorflow, tensorflow-lite

I am training and quantizing a simple network on MNIST with TensorFlow Lite, starting from an example in the TensorFlow Lite documentation. However, in my approach I want to modify some weights and model values after quantization and before testing the model. I know there are two methods, get_tensor and set_tensor, for reading and writing tensors, but set_tensor only seems to work for loading and modifying inputs. Is there a way to modify weight values in TF Lite? Thanks for your guidance.

The code is below. I read the tensor and call it weight1, produce a modified version of it, and then want to assign it back into the quantized model. I used set_tensor, but I got the following error:
Process finished with exit code 138 (interrupted by signal 10: SIGBUS)
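
For reference, the documented pattern I am referring to only uses set_tensor to feed the input tensor and get_tensor to read a tensor back; a minimal sketch, assuming the quantized model file written by the script below already exists:

import numpy as np
import tensorflow as tf

# Sketch of the standard input/output flow with the TF Lite Interpreter.
interpreter = tf.lite.Interpreter(
    model_path="/tmp/mnist_tflite_models/mnist_model_quant.tflite")
interpreter.allocate_tensors()

input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]

# set_tensor writes data into the input tensor.
dummy_image = np.zeros((1, 28, 28), dtype=np.float32)
interpreter.set_tensor(input_index, dummy_image)
interpreter.invoke()

# get_tensor returns a copy of any tensor's contents by index (here, the output).
probabilities = interpreter.get_tensor(output_index)
print(probabilities.argmax())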

Welcome to Stack Overflow! How about sharing your code? That might make things clearer.
Thanks, I added the code.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)

try:
  import tensorflow.compat.v2 as tf
except Exception:
  pass
tf.enable_v2_behavior()

from tensorflow import keras
import numpy as np
import pathlib

# Train and export the model
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0

# Define the model architecture
model = keras.Sequential([
  keras.layers.InputLayer(input_shape=(28, 28)),
  keras.layers.Reshape(target_shape=(28, 28, 1)),
  keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
  keras.layers.MaxPooling2D(pool_size=(2, 2)),
  keras.layers.Flatten(),
  keras.layers.Dense(10, activation=tf.nn.softmax)
])

# Train the digit classification model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(
  train_images,
  train_labels,
  epochs=1,
  validation_data=(test_images, test_labels)
)

# Convert to a TensorFlow Lite model
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)

tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)

# Convert using quantization
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
  for input_value in mnist_ds.take(100):
    yield [input_value]

converter.representative_dataset = representative_data_gen

tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)

converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8

tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant_io.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)

# Run the TensorFlow Lite models

interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()

interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
input_index_quant = interpreter_quant.get_input_details()[0]["index"]
output_index_quant = interpreter_quant.get_output_details()[0]["index"]

# # Load TFLite model and allocate tensors.
# interpreter = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
# interpreter.allocate_tensors()
# _________________________________ get_tensor______________________
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# get details for each layer
all_layers_details = interpreter_quant.get_tensor_details()
# ___________________________________________________________________
weight1 = interpreter_quant.get_tensor(2)
weight2 = interpreter_quant.get_tensor(6)

# Example of modification: zero out all weights above a threshold `th`,
# using NumPy boolean indexing so the array is changed in place.
th = 100  # placeholder threshold for the quantized weight values
weight1[weight1 > th] = 0

# Writing the modified tensor back is the step that fails with SIGBUS:
interpreter_quant.set_tensor(all_layers_details[2]['index'], weight1)

# Evaluate the models

# A helper function to evaluate the TF Lite model using "test" dataset.


def evaluate_model(interpreter):
  input_index = interpreter.get_input_details()[0]["index"]
  output_index = interpreter.get_output_details()[0]["index"]

  # Run predictions on every image in the "test" dataset.
  prediction_digits = []
  for test_image in test_images:
    # Pre-processing: add batch dimension and convert to float32 to match with
    # the model's input data format.
    test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
    interpreter.set_tensor(input_index, test_image)

    # Run inference.
    interpreter.invoke()

    # Post-processing: remove batch dimension and find the digit with highest
    # probability.
    output = interpreter.tensor(output_index)
    digit = np.argmax(output()[0])
    prediction_digits.append(digit)

  # Compare prediction results with ground truth labels to calculate accuracy.
  accurate_count = 0
  for index in range(len(prediction_digits)):
    if prediction_digits[index] == test_labels[index]:
      accurate_count += 1
  accuracy = accurate_count * 1.0 / len(prediction_digits)

  return accuracy

print(evaluate_model(interpreter))

# NOTE: Colab runs on server CPUs, and TensorFlow Lite currently
# doesn't have super optimized server CPU kernels. So this part may be
# slower than the above float interpreter. But for mobile CPUs, considerable
# speedup can be observed.

print(evaluate_model(interpreter_quant))
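
To check which tensor indices hold the weights (indices 2 and 6 above), the tensor list can be printed; a short sketch, using the interpreter_quant from the script (names and indices can differ between converter versions):

# Sketch: print index, name, shape, dtype and (scale, zero_point) for every
# tensor in interpreter_quant, to locate the Conv2D / Dense weight tensors.
for detail in interpreter_quant.get_tensor_details():
    print(detail["index"], detail["name"], detail["shape"],
          detail["dtype"], detail["quantization"])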