C++ 使用 TensorFlow C++ API 从 SavedModel 进行推断时出现 Segmentation fault (core dumped)
我使用 TensorFlow C++ API 加载 SavedModel 并运行推断。模型可以正常加载,但在运行推断时出现以下错误:
$ ./bazel-bin/tensorflow/gan_loader/gan_loader
2020-06-21 19:29:18.669604: I tensorflow/cc/saved_model/reader.cc:31] Reading SavedModel from: /home/eduardo/Documents/GitHub/edualvarado/tensorflow/tensorflow/gan_loader/generator_model_final
2020-06-21 19:29:18.671368: I tensorflow/cc/saved_model/reader.cc:54] Reading meta graph with tags { serve }
2020-06-21 19:29:18.671385: I tensorflow/cc/saved_model/loader.cc:295] Reading SavedModel debug info (if present) from: /home/eduardo/Documents/GitHub/edualvarado/tensorflow/tensorflow/gan_loader/generator_model_final
2020-06-21 19:29:18.671474: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA
2020-06-21 19:29:18.688557: I tensorflow/cc/saved_model/loader.cc:234] Restoring SavedModel bundle.
2020-06-21 19:29:18.707707: I tensorflow/cc/saved_model/loader.cc:183] Running initialization op on SavedModel bundle at path: /home/eduardo/Documents/GitHub/edualvarado/tensorflow/tensorflow/gan_loader/generator_model_final
2020-06-21 19:29:18.714949: I tensorflow/cc/saved_model/loader.cc:364] SavedModel load for tags { serve }; Status: success: OK. Took 45356 microseconds.
Segmentation fault (core dumped)
完整的 C++ 推断代码如下所示。在文件开头,你可以找到有关该 SavedModel 的签名信息。
/* INFO ABOUT SAVEDMODEL
The given SavedModel SignatureDef contains the following input(s):
inputs['dense_1_input'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 100)
name: serving_default_dense_1_input:0
The given SavedModel SignatureDef contains the following output(s):
outputs['conv2d_2'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 28, 28, 1)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
*/
#include <fstream>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::int32;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::Tensor;
using tensorflow::tstring;
/*
TODO: Functions
*/
// Builds a [num_samples, latent_dim] float tensor of uniform noise in
// [-0.5, 0.5), sampled via drand48(), to feed the generator as its latent
// input. NOTE(review): drand48() is unseeded here, so every run produces
// the same sequence — confirm whether deterministic output is intended.
Tensor CreateLatentSpace(const int latent_dim, const int num_samples) {
  Tensor latent(tensorflow::DT_FLOAT,
                tensorflow::TensorShape({num_samples, latent_dim}));
  auto values = latent.tensor<float, 2>();
  const int rows = latent.dim_size(0);
  const int cols = latent.dim_size(1);
  for (int row = 0; row < rows; ++row) {
    for (int col = 0; col < cols; ++col) {
      values(row, col) = drand48() - 0.5;
    }
  }
  return latent;
}
int main(int argc, char* argv[]) {
// These are the command-line flags the program can understand.
// They define where the graph and input data is located, and what kind of
// input the model expects.
// To create latent space
int32 latent_dim = 100;
int32 samples_per_row = 5;
int32 num_samples = 25;
// Input/Output names
string input_layer = "serving_default_dense_1_input";
string output_layer = "StatefulPartitionedCall";
// Arguments
std::vector<Flag> flag_list = {
Flag("latent_dim", &latent_dim, "latent dimensions"),
Flag("samples_per_row", &samples_per_row, "samples per row"),
Flag("num_samples", &num_samples, "number of samples"),
Flag("input_layer", &input_layer, "name of input layer"),
Flag("output_layer", &output_layer, "name of output layer"),
};
string usage = tensorflow::Flags::Usage(argv[0], flag_list);
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
// We need to call this to set up global state for TensorFlow.
tensorflow::port::InitMain(argv[0], &argc, &argv);
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
return -1;
}
// TODO: First we load and initialize the model.
std::unique_ptr<tensorflow::Session> session;
tensorflow::SavedModelBundle model;
tensorflow::SessionOptions session_options;
tensorflow::RunOptions run_options;
const string export_dir = "/home/eduardo/Documents/GitHub/edualvarado/tensorflow/tensorflow/gan_loader/generator_model_final";
const std::unordered_set<std::string> tags = {"serve"};
auto load_graph_status = tensorflow::LoadSavedModel(session_options, run_options, export_dir, tags, &model);
if (!load_graph_status.ok()) {
std::cerr << "Failed: " << load_graph_status;
return -1;
}
// TODO: Create latent space
auto latent_space_tensor = CreateLatentSpace(100, 1);
// TODO: Run the latent space through the model
std::vector<Tensor> outputs;
Status run_status = session->Run({{input_layer, latent_space_tensor}},
{output_layer}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return -1;
}
// TODO: Save the figure
return 0;
}
/*有关SAVEDMODEL的信息
给定的SavedModel SignatureDef包含以下输入:
输入['dense_1_input']张量信息:
dtype:DT_FLOAT
形状:(-1100)
名称:服务\u默认\u密集\u 1\u输入:0
给定的SavedModel SignatureDef包含以下输出:
输出['conv2d_2']张量信息:
dtype:DT_FLOAT
形状:(-1,28,28,1)
名称:StatefulPartitionedCall:0
方法名称为:tensorflow/serving/predict
*/
#包括
#包括
#包括
#包括“tensorflow/cc/ops/const_op.h”
#包括“tensorflow/cc/ops/image_ops.h”
#包括“tensorflow/cc/ops/standard_ops.h”
#包括“tensorflow/core/framework/graph.pb.h”
#包括“tensorflow/core/framework/tensor.h”
#包括“tensorflow/core/graph/default_device.h”
#包括“tensorflow/core/graph/graph_def_builder.h”
#包括“tensorflow/core/lib/core/errors.h”
#包括“tensorflow/core/lib/core/stringpiece.h”
#包括“tensorflow/core/lib/core/threadpool.h”
#包括“tensorflow/core/lib/io/path.h”
#包括“tensorflow/core/lib/strings/str_util.h”
#包括“tensorflow/core/lib/strings/stringprintf.h”
#包括“tensorflow/core/platform/env.h”
#包括“tensorflow/core/platform/init_main.h”
#包括“tensorflow/core/platform/logging.h”
#包括“tensorflow/core/platform/types.h”
#包括“tensorflow/core/public/session.h”
#包括“tensorflow/core/util/command\u line\u flags.h”
#包括“tensorflow/cc/saved_model/loader.h”
#包括“tensorflow/cc/saved_model/tag_constants.h”
//这些都是公共类,无需名称空间即可方便地引用。
使用tensorflow::Flag;
使用tensorflow::int32;
使用tensorflow::Status;
使用tensorflow::string;
使用tensorflow::Tensor;
使用tensorflow::t字符串;
/*
TODO:功能
*/
Tensor CreateLatentSpace(const int潜伏期,const int num_样本){
张量张量(tensorflow::DT_FLOAT,tensorflow::TensorShape({num_samples,潜隐_dim}));
自动张量映射=张量.张量();
回答:在上面的代码片段中,`session` 这个指针在调用 `Run(...)` 之前从未被初始化。

`std::unique_ptr<tensorflow::Session> session;` 只是声明了一个类型为 `tensorflow::Session` 的空 unique_ptr——它不指向任何有效的内存位置,因为声明之后的代码从未给它赋值。因此 `session->Run(...)` 实际上是在解引用空指针,这正是段错误(Segmentation fault)的原因。

正确的做法不是自己构造一个新的 Session,而是使用 `LoadSavedModel` 成功加载后由 bundle 持有的那个已初始化会话,例如 `model.GetSession()->Run(...)`。希望这对你有所帮助。
std::unique_ptr<tensorflow::Session> session;
Status run_status = session->Run({{input_layer, latent_space_tensor}},
{output_layer}, {}, &outputs);
// 注意:不能写 make_unique<tensorflow::Session>()——tensorflow::Session 是抽象类,
// 而且即使能构造,新会话里也没有已恢复的图。应使用 bundle 中的会话:
tensorflow::Session* session = model.GetSession();