How do I create a random tensor of a given shape with the TensorFlow C++ API?

Tags: python, c++, tensorflow, bazel, tensorflow-serving

I have a neural network, a GAN that generates images from random noise, and it uses a random tensor as input during inference. For my saved model / GraphDef in .pb format, saved_model_cli shows the following signature for the inputs and outputs (the full SignatureDef output is included below).

In the Python inference file I have a function that creates a simple random normal vector: generate_latent_data (shown below).

Now I am trying to port my Python inference file to a C++ executable. I have already been able to build TensorFlow with Bazel, and I am currently working on the gan_loader.cc file, which is then built with Bazel. I will put the two files I have so far below so you can compare them:

inference.py

gan_loader.cc

I have already managed to initialize the variables and load the graph. However, I am still struggling with creating the random vector and running it through the model, as well as with saving the figure in C++. Could you give me some pointers or advice? Do you have any examples of how to do this?


Thank you all!

I think something like this could work:

Tensor CreateLatentSpace(const int latent_dim, const int num_samples) {
  // Allocate a float tensor of shape {num_samples, latent_dim}.
  Tensor tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({num_samples, latent_dim}));
  // Expose the tensor's data as an Eigen::TensorMap so it can be filled in place.
  auto tensor_mapped = tensor.tensor<float, 2>();
  for (int idx = 0; idx < tensor.dim_size(0); ++idx) {
    for (int i = 0; i < tensor.dim_size(1); ++i) {
      // drand48() yields uniform values in [0, 1), so this fills [-0.5, 0.5).
      tensor_mapped(idx, i) = drand48() - 0.5;
    }
  }
  return tensor;
}

Thanks for your answer. However, I get an error: 'T' was not declared in this scope. Could you also explain what that line is for? Thank you very much!

Sorry, tensor.tensor<T, 2> should be tensor.tensor<float, 2>: the object holds float values and has two dimensions. I have edited the answer. This method exposes the data owned by the tensorflow::Tensor as a different object, an Eigen::TensorMap, and makes it easy to fill/modify its values.

Thanks for your answer! Unfortunately, I now get a different error. I have asked it as a separate question, since it seems to be a different issue.
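One thing worth noting: drand48() produces uniform values, so the loop above fills the tensor with values in [-0.5, 0.5), while the Python code uses numpy's randn (standard normal). If you want to match that, here is a minimal sketch using std::normal_distribution from <random>; the name CreateNormalLatentSpace is just for illustration:

#include <random>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"

// Variant of CreateLatentSpace that draws from a standard normal distribution,
// matching numpy.random.randn in the Python inference file.
tensorflow::Tensor CreateNormalLatentSpace(const int latent_dim,
                                           const int num_samples) {
  tensorflow::Tensor tensor(
      tensorflow::DT_FLOAT,
      tensorflow::TensorShape({num_samples, latent_dim}));
  auto tensor_mapped = tensor.tensor<float, 2>();

  std::mt19937 gen(std::random_device{}());          // seed the generator
  std::normal_distribution<float> dist(0.0f, 1.0f);  // mean 0, stddev 1

  for (int idx = 0; idx < tensor.dim_size(0); ++idx) {
    for (int i = 0; i < tensor.dim_size(1); ++i) {
      tensor_mapped(idx, i) = dist(gen);
    }
  }
  return tensor;
}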
def generate_latent_data(latent_dim, num_samples):
    """
    Prepare latent dimensions for Generator.
    It creates random gaussian values for "latent_dim" dimensions.
    The number of dimensions can be changed.
    :return: random latent data
    """
    x_input_generator = randn(latent_dim * num_samples)
    x_input_generator = x_input_generator.reshape(num_samples, latent_dim)
    return x_input_generator

from pathlib import Path
from numpy.random import randn
from matplotlib import pyplot as plt
from tensorflow.keras.models import load_model

# ================ #
LATENT_DIM = 100
SAMPLES_PER_ROW = 5
# ================ #


def generate_latent_data(latent_dim, num_samples):
    """
    Prepare latent dimensions for Generator.
    It creates random gaussian values for "latent_dim" dimensions.
    The number of dimensions can be changed.
    :return: random latent data
    """
    x_input_generator = randn(latent_dim * num_samples)
    x_input_generator = x_input_generator.reshape(num_samples, latent_dim)
    return x_input_generator


def save_fig_inference(image, row_num_images=10):
    """
    Save generated "fake" images during inference in root directory when project is located.
    Each time is called, it will save a set of subplots (size: row_num_images ** 2) with grayscale generated images.
    Function used as well for the inference.
    :return: fake dataset X and fake labels Y
    """
    filename = "generated_images_inference/generated_image_inference.png"
    for i in range(row_num_images * row_num_images):
        plt.subplot(row_num_images, row_num_images, 1 + i)
        plt.axis("off")
        plt.imshow(image[i, :, :, 0], cmap="gray_r")
    plt.savefig(filename)
    plt.close()


# Create folder for images
print("[INFO] Create folder for saving images during inference...")
Path("generated_images_inference").mkdir(parents=True, exist_ok=True)

# Load pre-trained Keras model
print("[INFO] Loading pre-trained model...")
#gan_model = load_model('generator_model_015.h5')
gan_model = load_model('generator_model_final')

# Generate input for Generator
print("[INFO] Generating latent data...")
x_latent = generate_latent_data(LATENT_DIM, 25)

# Inference
print("[INFO] Creating and saving prediction...")
generated_image = gan_model.predict(x_latent)
save_fig_inference(generated_image, SAMPLES_PER_ROW)
/*
The given SavedModel SignatureDef contains the following input(s):
  inputs['dense_1_input'] tensor_info:
      dtype: DT_FLOAT
      shape: (-1, 100)
      name: serving_default_dense_1_input:0
The given SavedModel SignatureDef contains the following output(s):
  outputs['conv2d_2'] tensor_info:
      dtype: DT_FLOAT
      shape: (-1, 28, 28, 1)
      name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
*/
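A side note on the signature above: it comes from a SavedModel directory, so pointing a GraphDef loader at generator_model_final/saved_model.pb (as the label_image-style code below does) may not restore the variables. An alternative is to load the bundle with tensorflow::LoadSavedModel and feed the tensor names reported by saved_model_cli. This is only a minimal sketch under that assumption; the helper name RunGenerator is just for illustration:

#include <vector>

#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/public/session.h"

// Loads the SavedModel exported to "generator_model_final" and runs the
// serving signature once on a latent tensor.
tensorflow::Status RunGenerator(const tensorflow::Tensor& latent,
                                std::vector<tensorflow::Tensor>* outputs) {
  tensorflow::SavedModelBundle bundle;
  tensorflow::SessionOptions session_options;
  tensorflow::RunOptions run_options;
  TF_RETURN_IF_ERROR(tensorflow::LoadSavedModel(
      session_options, run_options, "generator_model_final",
      {tensorflow::kSavedModelTagServe}, &bundle));

  // Tensor names as reported by saved_model_cli above.
  return bundle.session->Run({{"serving_default_dense_1_input:0", latent}},
                             {"StatefulPartitionedCall:0"}, {}, outputs);
}

If the GraphDef route in gan_loader.cc already works for you, you can of course ignore this.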


#include <fstream>
#include <utility>
#include <vector>

#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"

// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::int32;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::Tensor;
using tensorflow::tstring;


Status CreateLatentSpace(const int latent_dim, const int num_samples) {
/*
TODO: Create random vector, equivalent to generate_latent_data in python file
*/
}


int main(int argc, char* argv[]) {
  // These are the command-line flags the program can understand.
  // They define where the graph and input data is located, and what kind of
  // input the model expects. If you train your own model, or use something
  // other than inception_v3, then you'll need to update these.

  string graph =
      "generator_model_final/saved_model.pb";

  int32 latent_dim = 100;
  int32 samples_per_row = 5;
  int32 num_samples = 25;

  string input_layer = "serving_default_dense_1_input";
  string output_layer = "StatefulPartitionedCall";
  string root_dir = "";

  std::vector<Flag> flag_list = {
      Flag("graph", &graph, "graph to be executed"),
      Flag("latent_dim", &latent_dim, "latent dimensions"),
      Flag("samples_per_row", &samples_per_row, "samples per row"),
      Flag("num_samples", &num_samples, "number of samples"),
      Flag("input_layer", &input_layer, "name of input layer"),
      Flag("output_layer", &output_layer, "name of output layer"),
      Flag("root_dir", &root_dir, "interpret image and graph file names relative to this directory"),
  };

  string usage = tensorflow::Flags::Usage(argv[0], flag_list);
  const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
  if (!parse_result) {
    LOG(ERROR) << usage;
    return -1;
  }

  // We need to call this to set up global state for TensorFlow.
  tensorflow::port::InitMain(argv[0], &argc, &argv);
  if (argc > 1) {
    LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
    return -1;
  }

  // First we load and initialize the model.
  std::unique_ptr<tensorflow::Session> session;
  string graph_path = tensorflow::io::JoinPath(root_dir, graph);
  Status load_graph_status = LoadGraph(graph_path, &session);
  if (!load_graph_status.ok()) {
    LOG(ERROR) << load_graph_status;
    return -1;
  }

  // TODO: Call function to create latent space


  // TODO: Run the latent space through the model


  // TODO: Save the figure


  return 0;
}

// From the answer: inside main(), after the graph has been loaded, create the
// latent tensor and run it through the model.
auto latent_space_tensor = CreateLatentSpace(100, 1);
std::vector<Tensor> outputs;
TF_CHECK_OK(session->Run({{"dense_1_input", latent_space_tensor}},
                         {"conv2d_2"}, {}, &outputs));
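
For the remaining TODO (saving the figure), there is no matplotlib on the C++ side. One simple option is to write each generated 28x28 sample as a binary PGM file. Below is a rough sketch; it assumes the generator output is already in [0, 1] (rescale first if your generator uses tanh and outputs [-1, 1]), and the helper name SaveImagesAsPGM is just for illustration:

#include <fstream>
#include <string>

#include "tensorflow/core/framework/tensor.h"

// Writes each sample of a (num_samples, 28, 28, 1) float tensor to its own
// binary PGM file, e.g. "<prefix>0.pgm", "<prefix>1.pgm", ...
// Pixel values are assumed to lie in [0, 1].
void SaveImagesAsPGM(const tensorflow::Tensor& images,
                     const std::string& prefix) {
  auto mapped = images.tensor<float, 4>();
  const int num_samples = images.dim_size(0);
  const int height = images.dim_size(1);
  const int width = images.dim_size(2);

  for (int n = 0; n < num_samples; ++n) {
    std::ofstream out(prefix + std::to_string(n) + ".pgm", std::ios::binary);
    out << "P5\n" << width << " " << height << "\n255\n";
    for (int y = 0; y < height; ++y) {
      for (int x = 0; x < width; ++x) {
        float v = mapped(n, y, x, 0);
        if (v < 0.0f) v = 0.0f;
        if (v > 1.0f) v = 1.0f;
        const unsigned char pixel = static_cast<unsigned char>(v * 255.0f);
        out.put(static_cast<char>(pixel));
      }
    }
  }
}

After the Run call above, this could be used as SaveImagesAsPGM(outputs[0], "generated_images_inference/generated_image_inference_"), mirroring the folder used by the Python script.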