
Python FailedPreconditionError: GetNext() fails after loading a Tensorflow saved_model


I built a dedicated class to build, train, save, and load my model. Saving is done with tf.saved_model.simple_save, and the model is later restored with tf.saved_model.loader.load.

Training and inference are both done through the Dataset API. Everything works fine when I use the trained model directly.

However, if I restore the saved model, inference breaks and throws this error:

FailedPreconditionError (see above for traceback): GetNext() failed because the iterator has not been initialized. Ensure that you have run the initializer operation for this iterator before getting the next element.

[[Node: datasets/cond/IteratorGetNext_1 = IteratorGetNext[output_shapes=[[?,?,30], [?,?,?,5]], output_types=[DT_INT32, DT_INT32], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]

I am sure the iterator is initialized (the print fires as expected, see the code below). Could this be related to which graph the variables belong to? Any other ideas? I'm stuck here.

(Simplified) code

class Model():
    def __init__(self):
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            self.features_data_ph = tf.placeholder(...)
            self.labels_data_ph = tf.placeholder(...)

    def build(self):
        with self.graph.as_default():
            self.logits = my_model(self.input_tensor)
            self.loss = my_loss(self.logits, self.labels_tensor)

    def train(self):
        my_training_procedure()

    def set_datasets(self):
        with self.graph.as_default():
            with tf.variable_scope('datasets'):
                self.dataset = tf.data.Dataset.from_tensor_slices((self.features_data_ph, self.labels_data_ph))
                self.iter = self.dataset.make_initializable_iterator()
                self.input_tensor, self.labels_tensor = self.iter.get_next()

    def initialize_iterators(self, inference_data):
        with self.graph.as_default():
            feats = inference_data
            labs = np.zeros((len(feats), self.hp.num_classes))
            self.sess.run(self.iter.initializer,
                feed_dict={self.features_data_ph: feats,
                    self.labels_data_ph: labs})
            print('Iterator ready to infer')

    def infer(self, inference_data):
        self.initialize_iterators(inference_data)
        return self.sess.run(self.logits)

    def save(self, path):
        inputs = {"features_data_ph": self.features_data_ph,
            "labels_data_ph": self.labels_data_ph}
        outputs = {"logits": self.model.logits}
        tf.saved_model.simple_save(self.sess, path)

    @staticmethod
    def restore(path):
        model = Model()
        tf.saved_model.loader.load(model.sess, [tag_constants.SERVING], path)
        model.features_data_ph = model.graph.get_tensor_by_name("features_data_ph:0")
        model.labels_data_ph = model.graph.get_tensor_by_name("labels_data_ph:0")
        model.logits = model.graph.get_tensor_by_name("model/classifier/dense/BiasAdd:0")
        model.set_datasets()
        return model
Failing routine

model1 = Model()
model1.build()
model1.train()
model1.save(model1_path)

...

model2 = Model.restore(model1_path)
model2.infer(some_numpy_array) # Error here, after print, at sess.run()

(Restoring the model does work; tensor values match between the original and the restored model.)

I ran into the same problem, and I think the issue is that you are initializing a new Dataset object instead of initializing the iterator that was saved along with the model.

Try:
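A minimal sketch of that idea under TF 1.x; the op name "datasets/MakeIterator" below is only an assumption and depends on how the graph was scoped when it was exported:

# Hypothetical: reuse the iterator ops exported with the model instead of
# calling model.set_datasets() to build a brand-new Dataset after loading.
def initialize_restored_iterator(model, feats, labs):
    init_op = model.graph.get_operation_by_name("datasets/MakeIterator")  # assumed name
    model.sess.run(init_op,
                   feed_dict={model.features_data_ph: feats,
                              model.labels_data_ph: labs})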


I solved this by changing the way I create the dataset:

iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
dataset_init_op = iterator.make_initializer(dataset, name='dataset_init')
...
# restoring
dataset_init_op = restored_graph.get_operation_by_name('dataset_init')
sess.run(
    dataset_init_op,
    feed_dict={...}
)

There is a working piece of code there ->
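For reference, a self-contained sketch of this pattern in TF 1.x; the toy model, the shapes, and the export path /tmp/toy_model are purely illustrative:

import numpy as np
import tensorflow as tf  # TF 1.x
from tensorflow.saved_model import tag_constants

export_dir = "/tmp/toy_model"  # must not exist yet; simple_save creates it

# ----- build, initialize and save -----
graph = tf.Graph()
with graph.as_default():
    features_ph = tf.placeholder(tf.float32, [None, 4], name="features_data_ph")
    labels_ph = tf.placeholder(tf.float32, [None, 2], name="labels_data_ph")

    dataset = tf.data.Dataset.from_tensor_slices((features_ph, labels_ph)).batch(2)
    # Structure-based iterator, so its init op can be given a stable name
    # and looked up again after the SavedModel is loaded.
    iterator = tf.data.Iterator.from_structure(dataset.output_types,
                                               dataset.output_shapes)
    dataset_init_op = iterator.make_initializer(dataset, name="dataset_init")
    input_tensor, _ = iterator.get_next()

    logits = tf.layers.dense(input_tensor, 2, name="classifier")

    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        tf.saved_model.simple_save(
            sess, export_dir,
            inputs={"features_data_ph": features_ph, "labels_data_ph": labels_ph},
            outputs={"logits": logits})

# ----- restore and infer -----
restored_graph = tf.Graph()
with restored_graph.as_default(), tf.Session(graph=restored_graph) as sess:
    tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_dir)
    features_ph = restored_graph.get_tensor_by_name("features_data_ph:0")
    labels_ph = restored_graph.get_tensor_by_name("labels_data_ph:0")
    logits = restored_graph.get_tensor_by_name("classifier/BiasAdd:0")
    dataset_init_op = restored_graph.get_operation_by_name("dataset_init")

    feats = np.random.rand(6, 4).astype(np.float32)
    labs = np.zeros((6, 2), dtype=np.float32)
    # Initialize the restored iterator, then pull logits through it.
    sess.run(dataset_init_op, feed_dict={features_ph: feats, labels_ph: labs})
    print(sess.run(logits))

The key point is that the initializer gets an explicit name before export, so get_operation_by_name can find it again in the restored graph.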

A simple way: before the loop, add one line of code:

tf.add_to_collection("saved_model_main_op",tf.group([train_iter], name='legacy_init_op'))
"saved_model_main_op" is fixed; it is the collection key that the SavedModel loader looks for.


train_iter is the op that initializes the iterator.
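A sketch of where that line would go, assuming a TF 1.x export path similar to the save() method above; train_iter, sess, export_dir, inputs and outputs stand for the caller's own objects:

# Hypothetical placement: register the iterator init op as the SavedModel
# main op right before exporting. tf.saved_model.loader.load() runs whatever
# is stored under the "saved_model_main_op" collection key after restoring
# the variables, so the iterator comes back initialized -- note this only
# helps when the initializer does not depend on placeholder feeds.
tf.add_to_collection("saved_model_main_op",
                     tf.group(train_iter, name="legacy_init_op"))
tf.saved_model.simple_save(sess, export_dir, inputs, outputs)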

You just reminded me that I had already solved this and should share it here. The idea is similar: I was indeed missing the iterator's init op. Cheers! Is there a way to get an op that was not named? I did not name the initializer op during training, so I have nothing to look it up by.
The make_initializer op -> dataset_init_op = restored_graph.get_operation_by_name('make_initializer')
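If the initializer was never given an explicit name, one way to locate it (a sketch; restored_graph and sess follow the snippets above) is to scan the restored graph by op type, since iterator initializers are MakeIterator ops in TF 1.x:

# List candidate iterator-initializer ops in the restored graph by type.
init_candidates = [op for op in restored_graph.get_operations()
                   if op.type == "MakeIterator"]
print([op.name for op in init_candidates])
# With a single match, run it, feeding whatever its dataset needs:
# sess.run(init_candidates[0], feed_dict={...})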