Python 3.x: trying to use TensorFlow 2.3.0 & Carla

Tags: python-3.x, tensorflow, carla

I am following the self-driving car tutorial by Sentdex () on using Carla with Python 3.

I followed all the steps carefully, but when I try to run the code I get the following error:

ValueError: Calling Model.fit in graph mode is not supported when the Model instance was constructed with eager mode enabled. Please construct your Model instance in graph mode or call Model.fit with eager mode enabled.

I think this error is caused by a version difference between the TensorFlow version used in the tutorial and the version I am running (2.3.0). Any help would be greatly appreciated, as I have been stuck on this for quite a while.
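
From what I understand, the tutorial targets TensorFlow 1.x, where models are built and trained in graph mode, while TensorFlow 2.3 enables eager execution by default. A minimal sketch of the workaround I have been considering (purely an assumption on my part, not something from the tutorial) is to force graph-mode behaviour globally before the model is constructed:

import tensorflow as tf

# Assumption: disabling eager execution restores TF 1.x-style graph mode.
# This would have to run before DQNAgent() builds its Keras models.
tf.compat.v1.disable_eager_execution()

I am not sure whether this is the intended fix, or whether the with self.graph.as_default(): blocks in the code below should simply be removed instead.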

The full code I am trying to run:

import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
import tensorflow as tf
from tensorflow.python.keras import backend
from tensorflow.keras.callbacks import TensorBoard
from threading import Thread
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from collections import deque
from tqdm import tqdm

try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass

import carla

MODEL_NAME = "Gijsw"
AGGREGATE_STATS_EVERY = 10
MIN_EPSILON = 0.001
EPSILON_DECAY = 0.95
EPISODES = 100
DISCOUNT = 0.9
MIN_REWARD = -200
MEMORY_FRACTION = 0.8
UPDATE_TARGET_EVERY = 5
MINIBATCH_SIZE = 16
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
PREDICTION_BATCH_SIZE = 1
MIN_REPLAY_MEMORY_SIZE = 1_000
REPLAY_MEMORY_SIZE = 5_000 #5,000 
SECONDS_PER_EPISODE = 10.0
SHOW_PREVIEW = False

IM_WIDTH = 640
IM_HEIGHT = 480
epsilon = 1


# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.create_file_writer(self.log_dir)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)


class CarEnv:
    SHOW_CAM = SHOW_PREVIEW
    STEER_AMT = 1.0
    # Expose the image dimensions on the class so the self.IM_WIDTH /
    # self.IM_HEIGHT references below resolve correctly.
    IM_WIDTH = IM_WIDTH
    IM_HEIGHT = IM_HEIGHT
    front_camera = None

    def __init__(self):
        self.client = carla.Client("localhost", 1234)
        self.client.set_timeout(5.0)

        # Once we have a client we can retrieve the world that is currently running.
        self.world = self.client.get_world()

        # The world contains the list blueprints that we can use for adding new actors into the simulation.
        self.blueprint_library = self.world.get_blueprint_library()
        self.model_3 = self.blueprint_library.filter("model3")[0]

    def reset(self):
        self.collision_hist = []
        self.actor_list = []
        
        self.transform = random.choice(self.world.get_map().get_spawn_points())
        self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
        self.actor_list.append(self.vehicle)

        self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
        self.rgb_cam.set_attribute("image_size_x", f"{self.IM_WIDTH}")
        self.rgb_cam.set_attribute("image_size_y", f"{self.IM_HEIGHT}")
        self.rgb_cam.set_attribute("fov", "110")

        transform = carla.Transform(carla.Location(x = 2.5, z = 0.7))
        self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to = self.vehicle)
        self.actor_list.append(self.sensor)
        self.sensor.listen(lambda data: self.process_img(data))

        self.vehicle.apply_control(carla.VehicleControl(throttle = 0.0, brake = 0.0))

        # sleep to get things started and to not detect a collision when the car spawns/falls from sky.
        time.sleep(4.0) 

        colsensor = self.blueprint_library.find("sensor.other.collision")
        self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to = self.vehicle)
        self.actor_list.append(self.colsensor)
        self.colsensor.listen(lambda event: self.collision_data(event))

        while self.front_camera is None:
            time.sleep(0.01)

        self.episode_start = time.time()
        self.vehicle.apply_control(carla.VehicleControl(throttle = 0.0, brake = 0.0))

        return self.front_camera

    def collision_data(self, event):
        self.collision_hist.append(event)

    def process_img(self, image):
        '''Preprocess raw image data from the camera sensor.'''
    
        i = np.array(image.raw_data)
        i2 = i.reshape((self.IM_HEIGHT, self.IM_WIDTH, 4))
        i3 = i2[:, :, :3]

        if self.SHOW_CAM:
            cv2.imshow("", i3)
            cv2.waitKey(1)      
        
        self.front_camera = i3

    def step(self, action):
        '''
        For now let's just pass steer left, center, right?
        0, 1, 2
        '''

        if action == 0:
            self.vehicle.apply_control(carla.VehicleControl(throttle = 1.0, steer = -1*self.STEER_AMT))
        elif action == 1:
            self.vehicle.apply_control(carla.VehicleControl(throttle = 1.0, steer = 0))
        elif action == 2:
            self.vehicle.apply_control(carla.VehicleControl(throttle = 1.0, steer = 1*self.STEER_AMT))

        v = self.vehicle.get_velocity()
        kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))

        if len(self.collision_hist) != 0:
            done = True
            reward = -200

        elif kmh < 50:
            done = False
            reward = -1

        else:
            done = False
            reward = 1

        if self.episode_start + SECONDS_PER_EPISODE < time.time():
            done = True

        return self.front_camera, reward, done, None


class DQNAgent:
    def __init__(self):
        self.model = self.create_model()
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())

        self.replay_memory = deque(maxlen = REPLAY_MEMORY_SIZE)

        self.tensorboard = ModifiedTensorBoard(log_dir = f"logs/{MODEL_NAME}-{int(time.time())}")
        self.target_update_counter = 0
        self.graph = tf.compat.v1.get_default_graph()

        self.terminate  = False
        self.last_logged_episode = 0
        self.training_initialized = False

    def create_model(self):
        base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(3, activation = "linear")(x)
        model = Model(inputs = base_model.input, outputs = predictions)
        model.compile(loss = "mse", optimizer = Adam(lr = 0.001), metrics = ["accuracy"])
        
        return model

    def update_replay_memory(self, transition):
        # transition = (current_state, action, reward, new_state, done)
        self.replay_memory.append(transition)

    def train(self):
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

        current_states = np.array([transition[0] for transition in minibatch]) / 255
        with self.graph.as_default():
            current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)

        new_current_states = np.array([transition[3] for transition in minibatch]) / 255
        with self.graph.as_default():
            future_current_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)

        X = []
        y = []

        for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
            if not done:
                max_future_q = np.max(future_current_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q

            else:
                new_q = reward

            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            X.append(current_state)
            y.append(current_qs)

        log_this_step = False
        if self.tensorboard.step > self.last_logged_episode:
            log_this_step = True
            self.last_logged_episode = self.tensorboard.step

        with self.graph.as_default():
            self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)

        if log_this_step:
            self.target_update_counter += 1

        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape) / 255)[0]

    def train_in_loop(self):
        X = np.random.uniform(size = (1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
        y = np.random.uniform(size = (1,3)).astype(np.float32)
        with self.graph.as_default():
            self.model.fit(X,y, verbose = False, batch_size = 1)

        self.training_initialized = True

        while True:
            if self.terminate:
                return
            self.train()
            time.sleep(0.01)


if __name__ == '__main__':
    FPS = 20
    ep_rewards = [-200]
    random.seed(1)
    np.random.seed(1)
    tf.random.set_seed(1)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
    # gpu_options = tf.config.gpu.set_per_process_memory_fraction(MEMORY_FRACTION)
    # backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    if not os.path.isdir("models"):
        os.makedirs("models")

    agent = DQNAgent()
    env = CarEnv()

    trainer_thread = Thread(target = agent.train_in_loop, daemon = True)
    trainer_thread.start()


    while not agent.training_initialized:
        time.sleep(0.01)

    agent.get_qs(np.ones((env.IM_HEIGHT, env.IM_WIDTH, 3)))

    for episode in tqdm(range(1,EPISODES+1), ascii=True, unit="episodes"):
        env.collision_hist = []
        agent.tensorboard.step = episode
        episode_reward = 0
        step = 1
        current_state = env.reset()
        done = False
        episode_start = time.time()

        while True:
            if np.random.random() > epsilon:
                action = np.argmax(agent.get_qs(current_state))
            else:
                action = np.random.randint(0,3)
                time.sleep(1/FPS)

            new_state, reward, done, _ = env.step(action)
            episode_reward += reward
            agent.update_replay_memory((current_state, action, reward, new_state, done))
            step += 1
            
            if done:
                break

        for actor in env.actor_list:
            actor.destroy()



        # Append episode reward to a list and log stats (every given number of episodes)
        ep_rewards.append(episode_reward)
        if not episode % AGGREGATE_STATS_EVERY or episode == 1:
            average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
            min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
            max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
            agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)

            # Save model, but only when min reward is greater or equal a set value
            if min_reward >= MIN_REWARD:
                agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')

        # Decay epsilon
        if epsilon > MIN_EPSILON:
            epsilon *= EPSILON_DECAY
            epsilon = max(MIN_EPSILON, epsilon)


    # Set termination flag for training thread and wait for it to finish
    agent.terminate = True
    trainer_thread.join()
    agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
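
If it helps narrow things down, my understanding is that the part clashing with eager execution is the legacy graph context (self.graph = tf.compat.v1.get_default_graph() together with the with self.graph.as_default(): blocks) wrapped around model.fit and model.predict. Below is a stripped-down sketch of that pattern, with made-up shapes and layer sizes, which as far as I can tell raises the same ValueError:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

# The model is built while eager execution is on (the TF 2.x default)...
inputs = Input(shape=(4,))
outputs = Dense(3, activation="linear")(inputs)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss="mse", optimizer="adam")

# ...but fit() is then called inside a legacy v1 graph context, which is
# what the error message appears to complain about.
graph = tf.compat.v1.get_default_graph()
with graph.as_default():
    model.fit(np.random.rand(8, 4), np.random.rand(8, 3), verbose=0)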