
TensorFlow model and weights not loading from checkpoint

Tags: tensorflow, tensorflow2.0, tf.keras

I am training a reinforcement learning model using OpenAI Gym's CartPole environment. Although an .h5 file for my weights and model appears in the target directory, I do not get anything back after running the following code: tf.train.get_checkpoint_state("C:/Users/dgt/Documents").

Here is my full code:

## Slightly modified from the following repository - https://github.com/gsurma/cartpole

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import random
import gym
import numpy as np
import tensorflow as tf

from collections import deque
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint


ENV_NAME = "CartPole-v1"

GAMMA = 0.95
LEARNING_RATE = 0.001

MEMORY_SIZE = 1000000
BATCH_SIZE = 20

EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.995

checkpoint_path = "training_1/cp.ckpt"


class DQNSolver:

    def __init__(self, observation_space, action_space):
        # save_dir = args.save_dir
        # self.save_dir = save_dir
        # if not os.path.exists(save_dir):
        #     os.makedirs(save_dir)
        self.exploration_rate = EXPLORATION_MAX

        self.action_space = action_space
        self.memory = deque(maxlen=MEMORY_SIZE)

        self.model = Sequential()
        self.model.add(Dense(24, input_shape=(observation_space,), activation="relu"))
        self.model.add(Dense(24, activation="relu"))
        self.model.add(Dense(self.action_space, activation="linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.action_space)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def experience_replay(self):
        if len(self.memory) < BATCH_SIZE:
            return
        batch = random.sample(self.memory, BATCH_SIZE)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0]))
            q_values = self.model.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.exploration_rate *= EXPLORATION_DECAY
        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)


def cartpole():
    env = gym.make(ENV_NAME)
    #score_logger = ScoreLogger(ENV_NAME)
    observation_space = env.observation_space.shape[0]
    action_space = env.action_space.n
    dqn_solver = DQNSolver(observation_space, action_space)
    
    checkpoint = tf.train.get_checkpoint_state("C:/Users/dgt/Documents")
    print('checkpoint:', checkpoint)
    if checkpoint and checkpoint.model_checkpoint_path:
        dqn_solver.model = keras.models.load_model('cartpole.h5')
        dqn_solver.model = model.load_weights('cartpole_weights.h5')        
    run = 0
    i = 0
    while i<2:
        i = i + 1
        #total = 0
        run += 1
        state = env.reset()
        state = np.reshape(state, [1, observation_space])
        step = 0
        while True:
            step += 1
            #env.render()
            action = dqn_solver.act(state)
            state_next, reward, terminal, info = env.step(action)
            #total += reward
            reward = reward if not terminal else -reward
            state_next = np.reshape(state_next, [1, observation_space])
            dqn_solver.remember(state, action, reward, state_next, terminal)
            state = state_next
            dqn_solver.model.save('cartpole.h5')
            dqn_solver.model.save_weights('cartpole_weights.h5')
            if terminal:
                print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
                #score_logger.add_score(step, run)
                break
            dqn_solver.experience_replay()


if __name__ == "__main__":
    cartpole()
First, the code will not run if the weights/model have not been saved yet. So I commented out the following lines and ran the script a first time to generate the files:

    checkpoint = tf.train.get_checkpoint_state(".")
    print('checkpoint:', checkpoint)
    if checkpoint and checkpoint.model_checkpoint_path:
        dqn_solver.model = tf.keras.models.load_model('cartpole.h5')
        dqn_solver.model.load_weights('cartpole_weights.h5')
Note that I also modified the code above; there were a few syntax errors. In particular, this line appeared in your post:

dqn_solver.model = model.load_weights('cartpole_weights.h5')
This is likely what caused the problem, because the model.load_weights('file') method modifies the model in place (it does not return a model).
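To make the in-place behaviour concrete, here is a minimal, self-contained sketch (separate from your script; the layer sizes and the demo_weights.h5 filename are just for illustration) contrasting the broken assignment with the correct call:

import tensorflow as tf

# Small stand-in model; shapes chosen arbitrarily for this demo.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(24, input_shape=(4,), activation="relu"),
    tf.keras.layers.Dense(2, activation="linear"),
])
model.compile(loss="mse", optimizer="adam")
model.save_weights('demo_weights.h5')

# Wrong: load_weights does not return a model, so this throws the model away.
broken = model.load_weights('demo_weights.h5')
print(type(broken))  # not a tf.keras.Model

# Right: call load_weights for its side effect and keep using `model`.
model.load_weights('demo_weights.h5')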

Then I tested whether the model weights were being saved and loaded correctly. To do that, you can run

dqn_solver = DQNSolver(observation_space, action_space)
dqn_solver.model.trainable_variables
to view the (randomly initialized) weights when the model is first created. Then you can load the weights with either of the following:

dqn_solver.model = tf.keras.models.load_model('cartpole.h5')
# or
dqn_solver.model.load_weights('cartpole_weights.h5')

You can then look at trainable_variables again to verify that they differ from the initial weights and match the saved ones.
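As a rough sketch of that check (reusing DQNSolver, observation_space, and action_space from the script above, and assuming the .h5 files already exist; the same_weights helper is mine, not part of the original code):

import numpy as np
import tensorflow as tf

def same_weights(ws_a, ws_b):
    # Elementwise comparison of two lists of weight arrays.
    return len(ws_a) == len(ws_b) and all(np.allclose(a, b) for a, b in zip(ws_a, ws_b))

dqn_solver = DQNSolver(observation_space, action_space)
before = dqn_solver.model.get_weights()                # randomly initialized weights
dqn_solver.model.load_weights('cartpole_weights.h5')   # loads in place
after = dqn_solver.model.get_weights()

print(same_weights(before, after))   # expected False: the random weights were replaced
print(same_weights(after, tf.keras.models.load_model('cartpole.h5').get_weights()))  # expected True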

When you save the model, it saves the complete architecture, i.e., the exact configuration of the layers. When you save the weights, it only saves the list of all the tensors you can see via trainable_variables. Note that when you load_weights, you need to load them into the exact architecture the weights were created for; otherwise it will not work properly. So if you changed the model architecture in DQNSolver and then tried to load_weights saved from the old model, it would not work correctly. load_model, on the other hand, rebuilds the model with the exact saved architecture and also sets the weights.
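To illustrate that constraint, here is a hypothetical example (not from your code): loading the saved weights into a model with a different hidden size fails, while load_model rebuilds the saved architecture on its own:

import tensorflow as tf

# Hypothetical mismatched architecture: 32 hidden units instead of the saved 24.
mismatched = tf.keras.Sequential([
    tf.keras.layers.Dense(32, input_shape=(4,), activation="relu"),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(2, activation="linear"),
])

try:
    mismatched.load_weights('cartpole_weights.h5')
except Exception as e:  # typically a shape-mismatch error
    print("load_weights failed:", e)

# load_model needs no pre-built architecture; it restores layers and weights together.
restored = tf.keras.models.load_model('cartpole.h5')
restored.summary()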

Edit: the entire modified script

## Slightly modified from the following repository - https://github.com/gsurma/cartpole

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import random
import gym
import numpy as np
import tensorflow as tf

from collections import deque
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint


ENV_NAME = "CartPole-v1"

GAMMA = 0.95
LEARNING_RATE = 0.001

MEMORY_SIZE = 1000000
BATCH_SIZE = 20

EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.995

checkpoint_path = "training_1/cp.ckpt"


class DQNSolver:

    def __init__(self, observation_space, action_space):
        # save_dir = args.save_dir
        # self.save_dir = save_dir
        # if not os.path.exists(save_dir):
        #     os.makedirs(save_dir)
        self.exploration_rate = EXPLORATION_MAX

        self.action_space = action_space
        self.memory = deque(maxlen=MEMORY_SIZE)

        self.model = Sequential()
        self.model.add(Dense(24, input_shape=(observation_space,), activation="relu"))
        self.model.add(Dense(24, activation="relu"))
        self.model.add(Dense(self.action_space, activation="linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.action_space)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def experience_replay(self):
        if len(self.memory) < BATCH_SIZE:
            return
        batch = random.sample(self.memory, BATCH_SIZE)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0]))
            q_values = self.model.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.exploration_rate *= EXPLORATION_DECAY
        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)


def cartpole():
    env = gym.make(ENV_NAME)
    #score_logger = ScoreLogger(ENV_NAME)
    observation_space = env.observation_space.shape[0]
    action_space = env.action_space.n
    dqn_solver = DQNSolver(observation_space, action_space)

    # checkpoint = tf.train.get_checkpoint_state(".")
    # print('checkpoint:', checkpoint)
    # if checkpoint and checkpoint.model_checkpoint_path:
    #     dqn_solver.model = tf.keras.models.load_model('cartpole.h5')
    #     dqn_solver.model.load_weights('cartpole_weights.h5')
    run = 0
    i = 0
    while i<2:
        i = i + 1
        #total = 0
        run += 1
        state = env.reset()
        state = np.reshape(state, [1, observation_space])
        step = 0
        while True:
            step += 1
            #env.render()
            action = dqn_solver.act(state)
            state_next, reward, terminal, info = env.step(action)
            #total += reward
            reward = reward if not terminal else -reward
            state_next = np.reshape(state_next, [1, observation_space])
            dqn_solver.remember(state, action, reward, state_next, terminal)
            state = state_next
            dqn_solver.model.save('cartpole.h5')
            dqn_solver.model.save_weights('cartpole_weights.h5')
            if terminal:
                print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
                #score_logger.add_score(step, run)
                break
            dqn_solver.experience_replay()


if __name__ == "__main__":
    cartpole()

#%%  to load saved results
env = gym.make(ENV_NAME)
#score_logger = ScoreLogger(ENV_NAME)
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
dqn_solver = DQNSolver(observation_space, action_space)

dqn_solver.model = tf.keras.models.load_model('cartpole.h5')  # or
dqn_solver.model.load_weights('cartpole_weights.h5')