C++ TF Lite中完全连接的op存在问题

C++ TF Lite中完全连接的op存在问题,c++,microcontroller,tensorflow-lite,C++,Microcontroller,Tensorflow Lite,我想运行一个简单的神经网络模型,在Rasperry微控制器上使用Keras。我使用图层时遇到问题。代码定义如下: #include "main.h" #include <string.h> #include "tensorflow/lite/micro/kernels/micro_ops.h" #include "tensorflow/lite/micro/micro_error_reporter.h" #include &

我想运行一个简单的神经网络模型,在Raspberry微控制器上使用Keras。我使用全连接层时遇到问题。代码定义如下:

#include "main.h"
#include <string.h>
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/version.h"
#include "my_model.h"

/* Private variables    
TIM_HandleTypeDef htim16;

UART_HandleTypeDef huart2;

/* USER CODE BEGIN PV */

// TFLite globals
// File-local TFLite Micro state. Kept in an anonymous namespace so the
// symbols have internal linkage; pointers are wired up once in main().
namespace {
tflite::ErrorReporter* error_reporter = nullptr;   // logging sink for TFLite
const tflite::Model* model = nullptr;              // flatbuffer model mapping
tflite::MicroInterpreter* interpreter = nullptr;   // points at static_interpreter in main()
TfLiteTensor* model_input = nullptr;               // interpreter->input(0)
TfLiteTensor* model_output = nullptr;              // interpreter->output(0)

// Create an area of memory to use for input, output, and other TensorFlow
// arrays. You'll need to adjust this by compiling, running, and looking
// for errors (AllocateTensors() fails if the arena is too small).
constexpr int kTensorArenaSize = 2 * 1024;
// 16-byte alignment satisfies TFLite Micro's tensor alignment requirements.
__attribute__((aligned(16)))uint8_t tensor_arena[kTensorArenaSize];
} // namespace


/* Private function prototypes -----------------------------------------------*/
void SystemClock_Config(void);
static void MX_GPIO_Init(void);
static void MX_USART2_UART_Init(void);
static void MX_TIM16_Init(void);
/* USER CODE BEGIN PFP */

/* USER CODE END PFP */

/* Private user code ---------------------------------------------------------*/
// BUG FIX: the `/* Private user code` comment above was unterminated and
// swallowed the following USER CODE marker line until its closing `*/`.
/* USER CODE BEGIN 0 */

/**
 * @brief  Entry point: initializes the MCU, loads the TFLite Micro model,
 *         then repeatedly runs inference on a constant test input and
 *         prints the result plus inference time over UART2.
 * @return Never returns under normal operation (infinite inference loop).
 */
int main(void)
{
  /* USER CODE BEGIN 1 */
  char buf[50];
  int buf_len = 0;
  TfLiteStatus tflite_status;
  uint32_t num_elements;
  uint32_t timestamp;
  float y_val;

  /* USER CODE END 1 */

  /* MCU Configuration--------------------------------------------------------*/

  /* Reset of all peripherals, Initializes the Flash interface and the Systick. */
  HAL_Init();

  /* USER CODE BEGIN Init */

  /* USER CODE END Init */

  /* Configure the system clock */
  SystemClock_Config();

  /* USER CODE BEGIN SysInit */

  /* USER CODE END SysInit */

  /* Initialize all configured peripherals */
  MX_GPIO_Init();
  MX_USART2_UART_Init();
  MX_TIM16_Init();
  /* USER CODE BEGIN 2 */

  // Start timer/counter (used below to measure inference duration)
  HAL_TIM_Base_Start(&htim16);

  // Set up logging (modify tensorflow/lite/micro/debug_log.cc)
  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  // Say something to test error reporter
  error_reporter->Report("STM32 TensorFlow Lite test");

  // Map the model into a usable data structure
  model = tflite::GetModel(my_model);
  if (model->version() != TFLITE_SCHEMA_VERSION)
  {
    error_reporter->Report("Model version does not match Schema");
    while(1);  // halt — model and runtime are incompatible
  }

  // Pull in only needed operations (should match NN layers). Template parameter
  // <n> is number of ops to be added.
  //
  // BUG FIX: newer TFLite Micro versions removed the two-argument
  // AddBuiltin(BuiltinOperator, TfLiteRegistration*) overload (hence the
  // "no matching function" compile error). Each builtin op now has a
  // dedicated helper method on MicroMutableOpResolver.
  static tflite::MicroMutableOpResolver<1> micro_op_resolver;

  // Add dense neural network layer operation
  tflite_status = micro_op_resolver.AddFullyConnected();
  if (tflite_status != kTfLiteOk)
  {
    error_reporter->Report("Could not add FULLY CONNECTED op");
    while(1);  // halt — cannot run the model without its only op
  }

  // Build an interpreter to run the model with.
  static tflite::MicroInterpreter static_interpreter(
      model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  tflite_status = interpreter->AllocateTensors();
  if (tflite_status != kTfLiteOk)
  {
    error_reporter->Report("AllocateTensors() failed");
    while(1);  // halt — kTensorArenaSize is likely too small
  }

  // Assign model input and output buffers (tensors) to pointers
  model_input = interpreter->input(0);
  model_output = interpreter->output(0);

  // Get number of elements in input tensor.
  // NOTE(review): assumes a float32 (non-quantized) model — confirm against
  // how my_model was converted.
  num_elements = model_input->bytes / sizeof(float);
  buf_len = sprintf(buf, "Number of input elements: %lu\r\n", num_elements);
  HAL_UART_Transmit(&huart2, (uint8_t *)buf, buf_len, 100);

  /* USER CODE END 2 */

  /* Infinite loop */
  /* USER CODE BEGIN WHILE */
  while (1)
  {
    // Fill input buffer (use test value)
    for (uint32_t i = 0; i < num_elements; i++)
    {
      model_input->data.f[i] = 2.0f;
    }

    // Get current timestamp from the free-running counter
    timestamp = htim16.Instance->CNT;

    // Run inference
    tflite_status = interpreter->Invoke();
    if (tflite_status != kTfLiteOk)
    {
      error_reporter->Report("Invoke failed");
    }

    // Read output (predicted y) of neural network
    y_val = model_output->data.f[0];

    // Print output of neural network along with inference time (counter ticks).
    // NOTE(review): %f with newlib-nano requires float printf support
    // (e.g. linker flag -u _printf_float) — confirm project settings.
    buf_len = sprintf(buf,
                      "Output: %f | Duration: %lu\r\n",
                      y_val,
                      htim16.Instance->CNT - timestamp);
    HAL_UART_Transmit(&huart2, (uint8_t *)buf, buf_len, 100);

    // Wait before doing it again
    HAL_Delay(500);

    /* USER CODE END WHILE */

    /* USER CODE BEGIN 3 */
  }
  /* USER CODE END 3 */
}
#包括“main.h”
#包括
#包括“tensorflow/lite/micro/kernels/micro_ops.h”
#包括“tensorflow/lite/micro/micro_error_reporter.h”
#包括“tensorflow/lite/micro/micro_解释器.h”
#包括“tensorflow/lite/micro/micro可变旋转变压器.h”
#包括“tensorflow/lite/version.h”
#包括“my_model.h”
/*私有变量
TIM_HandleTypeDef htim16;
UART_HandleTypeDef huart2;
/*用户代码开始PV*/
//TFLite全局
名称空间{
tflite::ErrorReporter*error\u reporter=nullptr;
常量tflite::Model*Model=nullptr;
tflite::Micro解释器*解释器=nullptr;
TfLiteTensor*model_input=nullptr;
TfLiteTensor*模型_输出=nullptr;
//创建用于输入、输出和其他TensorFlow的内存区域
//数组。您需要通过编译、运行和查找来调整它
//对于错误。
constexpr int kTensorArenaSize=2*1024;
__属性(对齐(16))为8张量竞技场[kTensorArenaSize];
}//名称空间
无效系统时钟配置(无效);
静态void MX_GPIO_Init(void);
静态void MX_USART2_UART_Init(void);
静态空隙MX_TIM16_Init(空隙);
/*用户代码开始PFP*/
/*用户代码端PFP*/
/*专用用户代码
/*用户代码从0开始*/
内部主(空)
{
/*用户代码开始1*/
char-buf[50];
int buf_len=0;
TfLiteStatus tflite_状态;
单元32个单元;
uint32_t时间戳;
浮动y_val;
/*用户代码结束1*/
/*微控制器配置--------------------------------------------------------*/
/*重置所有外围设备,初始化闪存接口和Systick*/
HAL_Init();
/*用户代码开始初始化*/
/*用户代码结束初始化*/
/*配置系统时钟*/
SystemClock_Config();
/*用户代码BEGIN SysInit*/
/*用户代码结束SysInit*/
/*初始化所有配置的外围设备*/
MX_GPIO_Init();
MX_USART2_UART_Init();
MX_TIM16_Init();
/*用户代码开始2*/
//启动计时器/计数器
HAL_TIM_Base_Start(&htim16);
//设置日志记录(修改tensorflow/lite/micro/debug_log.cc)
静态tflite::MicroErrorReporter micro_error_reporter;
error\u reporter=µ\u error\u reporter;
//说点什么来测试错误报告器
错误报告器->报告(“STM32 TensorFlow Lite测试”);
//将模型映射到可用的数据结构中
model=tflite::GetModel(my_model);
if(model->version()!=TFLITE\u SCHEMA\u version)
{
错误报告器->报告(“模型版本与架构不匹配”);
而(1),;
}
//仅拉入所需操作(应匹配NN层)。模板参数
//是要添加的操作数。可用操作数:
//tensorflow/lite/micro/kernels/micro_ops.h
静态tflite::微可变溶出器micro_op_分解器;
//添加稠密神经网络层操作
tflite\u status=micro\u op\u resolver.AddBuiltin(
tflite::内置操作器完全连接,
tflite::ops::micro::Register_FULLY_CONNECTED());
如果(tflite_状态!=kTfLiteOk)
{
错误报告器->报告(“无法添加完全连接的op”);
而(1),;
}
//构建一个解释器来运行模型。
静态tflite::微解释器静态\u解释器(
模型、微型旋转变压器、张量、kTensorArenaSize、误差报告器);
解释器=&静态解释器;
//从tensor_arena为模型的张量分配内存。
tflite_status=解释器->分配传感器();
如果(tflite_状态!=kTfLiteOk)
{
错误报告器->报告(“AllocateSensors()失败”);
而(1),;
}
//将模型输入和输出缓冲区(张量)分配给指针
模型输入=解释器->输入(0);
模型输出=解释器->输出(0);
//获取输入张量中的元素数
num\u elements=model\u input->bytes/sizeof(float);
buf_len=sprintf(buf,“输入元素的数量:%lu\r\n”,num_元素);
HAL_UART_传输(和huart2,(uint8_t*)buf,buf_len,100);
/*用户代码结束2*/
/*无限循环*/
/*用户代码在*/
而(1)
{
//填充输入缓冲区(使用测试值)
对于(uint32_t i=0;i数据.f[i]=2.0f;
}
//获取当前时间戳
timestamp=htim16.Instance->CNT;
//运行推理
tflite_status=解释器->调用();
如果(tflite_状态!=kTfLiteOk)
{
错误报告器->报告(“调用失败”);
}
//神经网络的读取输出(预测y)
y_val=model_output->data.f[0];
//打印神经网络输出以及推断时间(微秒)
buf_len=sprintf(buf,
“输出:%f |持续时间:%lu\r\n”,
尤瓦尔,
htim16.Instance->CNT-时间戳);
HAL_UART_传输(和huart2,(uint8_t*)buf,buf_len,100);
//等一下再做
HAL_延迟(500);
/*用户代码结束时*/
/*用户代码开始3*/
}
/*用户代码结束3*/
}
当我尝试编译该程序时,我遇到了以下编译问题:

../Core/Src/main.cpp: In function 'int main()':
../Core/Src/main.cpp:181:57: error: no matching function for call to 'tflite::MicroMutableOpResolver<1>::AddBuiltin(tflite::BuiltinOperator, TfLiteRegistration*)'
   tflite::ops::micro::Register_FULLY_CONNECTED());
                                                 ^

In file included from ../Core/Src/main.cpp:31:0:
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_mutable_op_resolver.h:470:16: note: candidate: TfLiteStatus tflite::MicroMutableOpResolver<tOpCount>::AddBuiltin(tflite::BuiltinOperator, const TfLiteRegistration&, tflite::MicroOpResolver::BuiltinParseFunction) [with unsigned int tOpCount = 1; TfLiteStatus = TfLiteStatus; TfLiteRegistration = TfLiteRegistration; tflite::MicroOpResolver::BuiltinParseFunction = TfLiteStatus (*)(const tflite::Operator*, tflite::BuiltinOperator, tflite::ErrorReporter*, tflite::BuiltinDataAllocator*, void**)]
 TfLiteStatus AddBuiltin(tflite::BuiltinOperator op,
              ^~~~~~~~~~
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_mutable_op_resolver.h:470:16: note:   candidate expects 3 arguments, 2 provided
make: *** [Core/Src/subdir.mk:37: Core/Src/main.o] Error 1
"make -j2 all" terminated with exit code 2. Build might be incomplete.
。/Core/Src/main.cpp:在函数“int main()”中:
../Core/Src/main.cpp:181:57:错误:调用“tflite::MicroMutableOpResolver::AddBuiltin(tflite::BuiltinOperator,TfLiteRegistration*)”时没有匹配的函数
tflite::ops::micro::Register_FULLY_CONNECTED());
^
在../Core/Src/main.cpp:31:0中包含的文件中:
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_mutable_op_resolver.h:470:16:注:候选者:TfLiteStatus tflite::MicroMutableOpResolver::AddBuiltin(tflite::BuiltinOperator,const TfLiteRegistration&,tflite::MicroOpResolver::builtinparesfunction)[无符号int-tOpCount=1;TfLiteStatus=TfLiteStatus;TfLiteRegistration=TfLiteRegistration;tflite::MicroOpResolver::BuiltinParseFunction=TfLiteStatus(*)(常量tflite::Operator*,tflite::BuiltinOperator,tflite::ErrorReporter*,tflite::BuiltinDataAllocator*,void**)]
TfLiteStatus AddBuiltin(tflite::BuiltinOperator,
^~~~~~~~~~
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_可变分解器。h:470:16:注:候选
// Fix: newer TFLite Micro versions removed the two-argument
// AddBuiltin(BuiltinOperator, TfLiteRegistration*) overload shown in the
// compile error; each builtin op now has its own helper method on
// MicroMutableOpResolver. Register the dense layer like this instead:
static tflite::MicroMutableOpResolver<1> micro_op_resolver;
tflite_status = micro_op_resolver.AddFullyConnected();