为什么在终端中运行 `python ./train.py` 时会收到 "FileNotFoundError: [Errno 2] No such file or directory"?
我正在遵循一篇关于训练户外图像分类器的研究论文的作者提供的步骤。 (Github:) 但是,这是我在Ubuntu终端中遇到的错误:为什么我会收到';FileNotFoundError:[Errno 2]没有这样的文件或目录:';在终端中运行`python./train.py时`,python,machine-learning,ubuntu-18.04,Python,Machine Learning,Ubuntu 18.04,我正在遵循一篇关于训练户外图像分类器的研究论文的作者提供的步骤。 (Github:) 但是,这是我在Ubuntu终端中遇到的错误: Traceback (most recent call last): File "./train.py", line 165, in <module> main() File "./train.py", line 63, in main x_datalists = get_data_lists(args.x_data_txt_pa
Traceback (most recent call last):
File "./train.py", line 165, in <module>
main()
File "./train.py", line 63, in main
x_datalists = get_data_lists(args.x_data_txt_path) # a list of x images
File "./train.py", line 47, in get_data_lists
f = open(data_path, 'r')
FileNotFoundError: [Errno 2] No such file or directory: './datasets/x_traindata.txt'
回溯(最近一次调用在最后):
文件“/train.py”,第165行,在
main()
文件“/train.py”,第63行,主
x_datalists=get_data_list(args.x_data_txt_path)#x个图像的列表
获取数据列表中第47行的文件“/train.py”
f=打开(数据路径'r')
FileNotFoundError:[Errno 2]没有这样的文件或目录:'./datasets/x_traindata.txt'
以下是作者提供的Python培训代码:
import argparse
from datetime import datetime
from random import shuffle
import os
import sys
import time
import math
import tensorflow as tf
import numpy as np
from utils import *
from train_image_reader import *
from net import *
# Command-line configuration for training. This runs at import time, so
# `args` is a module-level global used by main(); registration order below
# is the order shown by --help.
parser = argparse.ArgumentParser(description='')
parser.add_argument("--snapshot_dir", default='./snapshots', help="path of snapshots")
parser.add_argument("--image_size", type=int, default=256, help="load image size")
# NOTE(review): the reported FileNotFoundError means these two default txt
# files must exist relative to the current working directory when running.
parser.add_argument("--x_data_txt_path", default='./datasets/x_traindata.txt', help="txt of x images")
parser.add_argument("--y_data_txt_path", default='./datasets/y_traindata.txt', help="txt of y images")
parser.add_argument("--random_seed", type=int, default=1234, help="random seed")
parser.add_argument('--base_lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--epoch', dest='epoch', type=int, default=50, help='# of epoch')
parser.add_argument('--epoch_step', dest='epoch_step', type=int, default=20, help='# of epoch to decay lr')
parser.add_argument("--lamda", type=float, default=10.0, help="L1 lamda")
parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument("--summary_pred_every", type=int, default=200, help="times to summary.")
parser.add_argument("--save_pred_every", type=int, default=8000, help="times to save.")
# Directory prefixes under which the file names listed in the txt files are
# resolved by TrainImageReader.
parser.add_argument("--x_image_forpath", default='./datasets/train/X/images/', help="forpath of x training datas.")
parser.add_argument("--x_label_forpath", default='./datasets/train/X/labels/', help="forpath of x training labels.")
parser.add_argument("--y_image_forpath", default='./datasets/train/Y/images/', help="forpath of y training datas.")
parser.add_argument("--y_label_forpath", default='./datasets/train/Y/labels/', help="forpath of y training labels.")
args = parser.parse_args()
def save(saver, sess, logdir, step):
    """Write a TensorFlow checkpoint for the current training step.

    Args:
        saver: tf.train.Saver used to serialize the session's variables.
        sess: active tf.Session whose variables are written out.
        logdir: directory for checkpoint files; created if it does not exist.
        step: global training step, appended to the checkpoint file name.
    """
    model_name = 'model'
    checkpoint_path = os.path.join(logdir, model_name)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(logdir): os.makedirs(logdir)`.
    os.makedirs(logdir, exist_ok=True)
    saver.save(sess, checkpoint_path, global_step=step)
    print('The checkpoint has been created.')
def get_data_lists(data_path):
    """Read a newline-separated list of image names from a text file.

    This is where the reported ``FileNotFoundError: './datasets/x_traindata.txt'``
    originates when the default txt file is missing from the working directory.

    Args:
        data_path: path to a txt file with one entry per line.

    Returns:
        List of lines with surrounding newline characters stripped.

    Raises:
        FileNotFoundError: if data_path does not exist.
    """
    # `with` guarantees the handle is closed; the original leaked the file
    # object (opened it and never called close()).
    with open(data_path, 'r') as f:
        return [line.strip("\n") for line in f]
def l1_loss(src, dst):
    """Mean absolute error (L1 distance) between two tensors."""
    absolute_diff = tf.abs(src - dst)
    return tf.reduce_mean(absolute_diff)
def gan_loss(src, dst):
    """Least-squares GAN loss: mean squared difference between src and dst."""
    squared_diff = (src - dst) ** 2
    return tf.reduce_mean(squared_diff)
def main():
    """Build the paired image-translation training graph and run the loop.

    Reads image-name lists from --x_data_txt_path / --y_data_txt_path (the
    reported FileNotFoundError fires here when ./datasets/x_traindata.txt is
    absent), builds two generators and two discriminators (from net.py, not
    visible in this file), then trains for args.epoch epochs, writing
    summaries and checkpoints to args.snapshot_dir.
    """
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    x_datalists = get_data_lists(args.x_data_txt_path) # a list of x images
    y_datalists = get_data_lists(args.y_data_txt_path) # a list of y images
    tf.set_random_seed(args.random_seed)
    # Fixed batch size of 1; RGB images/labels of side args.image_size.
    x_img = tf.placeholder(tf.float32,shape=[1, args.image_size, args.image_size,3],name='x_img')
    x_label = tf.placeholder(tf.float32,shape=[1, args.image_size, args.image_size,3],name='x_label')
    y_img = tf.placeholder(tf.float32,shape=[1, args.image_size, args.image_size,3],name='y_img')
    y_label = tf.placeholder(tf.float32,shape=[1, args.image_size, args.image_size,3],name='y_label')
    # Two generator directions (x->y, y->x); reuse=True shares variables.
    fake_y = generator(image=x_img, reuse=False, name='generator_x2y') # G
    fake_x_ = generator(image=fake_y, reuse=False, name='generator_y2x') # S
    fake_x = generator(image=y_img, reuse=True, name='generator_y2x') # G'
    fake_y_ = generator(image=fake_x, reuse=True, name='generator_x2y') # S'
    dy_fake = discriminator(image=fake_y, gen_label = x_label, reuse=False, name='discriminator_y') # D
    dx_fake = discriminator(image=fake_x, gen_label = y_label, reuse=False, name='discriminator_x') # D'
    dy_real = discriminator(image=y_img, gen_label = y_label, reuse=True, name='discriminator_y') # D
    dx_real = discriminator(image=x_img, gen_label = x_label, reuse=True, name='discriminator_x') #D'
    # Generator objective: two LSGAN terms plus lamda-weighted L1 cycle terms.
    final_loss = gan_loss(dy_fake, tf.ones_like(dy_fake)) + gan_loss(dx_fake, tf.ones_like(dx_fake)) + args.lamda*l1_loss(x_label, fake_x_) + args.lamda*l1_loss(y_label, fake_y_) # final objective function
    dy_loss_real = gan_loss(dy_real, tf.ones_like(dy_real))
    dy_loss_fake = gan_loss(dy_fake, tf.zeros_like(dy_fake))
    dy_loss = (dy_loss_real + dy_loss_fake) / 2
    dx_loss_real = gan_loss(dx_real, tf.ones_like(dx_real))
    dx_loss_fake = gan_loss(dx_fake, tf.zeros_like(dx_fake))
    dx_loss = (dx_loss_real + dx_loss_fake) / 2
    dis_loss = dy_loss + dx_loss # discriminator loss
    # TensorBoard scalar summaries.
    final_loss_sum = tf.summary.scalar("final_objective", final_loss)
    dx_loss_sum = tf.summary.scalar("dx_loss", dx_loss)
    dy_loss_sum = tf.summary.scalar("dy_loss", dy_loss)
    dis_loss_sum = tf.summary.scalar("dis_loss", dis_loss)
    discriminator_sum = tf.summary.merge([dx_loss_sum, dy_loss_sum, dis_loss_sum])
    # cv_inv_proc / label_proc / label_inv_proc come from utils.py (not
    # visible here); presumably they map tensors back to displayable images.
    x_images_summary = tf.py_func(cv_inv_proc, [x_img], tf.float32) #(1, 256, 256, 3) float32
    y_fake_cv2inv_images_summary = tf.py_func(cv_inv_proc, [fake_y], tf.float32) #(1, 256, 256, 3) float32
    x_label_summary = tf.py_func(label_proc, [x_label], tf.float32) #(1, 256, 256, 3) float32
    x_gen_label_summary = tf.py_func(label_inv_proc, [fake_x_], tf.float32) #(1, 256, 256, 3) float32
    image_summary = tf.summary.image('images', tf.concat(axis=2, values=[x_images_summary, y_fake_cv2inv_images_summary, x_label_summary, x_gen_label_summary]), max_outputs=3)
    summary_writer = tf.summary.FileWriter(args.snapshot_dir, graph=tf.get_default_graph())
    # Partition trainable variables by scope-name prefix so generators and
    # discriminators are updated by separate optimizers.
    g_vars = [v for v in tf.trainable_variables() if 'generator' in v.name]
    d_vars = [v for v in tf.trainable_variables() if 'discriminator' in v.name]
    lr = tf.placeholder(tf.float32, None, name='learning_rate')
    d_optim = tf.train.AdamOptimizer(lr, beta1=args.beta1)
    g_optim = tf.train.AdamOptimizer(lr, beta1=args.beta1)
    d_grads_and_vars = d_optim.compute_gradients(dis_loss, var_list=d_vars)
    d_train = d_optim.apply_gradients(d_grads_and_vars) # update weights of D and D'
    g_grads_and_vars = g_optim.compute_gradients(final_loss, var_list=g_vars)
    g_train = g_optim.apply_gradients(g_grads_and_vars) # update weights of G, G', S and S'
    train_op = tf.group(d_train, g_train)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=50)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    counter = 0 # training step
    for epoch in range(args.epoch):
        shuffle(x_datalists) # change the order of x images
        shuffle(y_datalists) # change the order of y images
        # Constant LR for the first epoch_step epochs, then linear decay to 0.
        lrate = args.base_lr if epoch < args.epoch_step else args.base_lr*(args.epoch-epoch)/(args.epoch-args.epoch_step)
        for step in range(len(x_datalists)):
            counter += 1
            # TrainImageReader comes from train_image_reader.py (not visible
            # here); it appears to load and resize one x/y image+label pair.
            x_image_resize, x_label_resize, y_image_resize, y_label_resize = TrainImageReader(args.x_image_forpath, args.x_label_forpath, args.y_image_forpath, args.y_label_forpath, x_datalists, y_datalists, step, args.image_size)
            # Prepend the batch dimension (batch size 1).
            batch_x_image = np.expand_dims(np.array(x_image_resize).astype(np.float32), axis = 0)
            batch_x_label = np.expand_dims(np.array(x_label_resize).astype(np.float32), axis = 0)
            batch_y_image = np.expand_dims(np.array(y_image_resize).astype(np.float32), axis = 0)
            batch_y_label = np.expand_dims(np.array(y_label_resize).astype(np.float32), axis = 0)
            start_time = time.time()
            feed_dict = { lr : lrate, x_img : batch_x_image, x_label : batch_x_label, y_img : batch_y_image, y_label : batch_y_label}
            # Every save_pred_every steps: checkpoint; every summary_pred_every
            # steps: write summaries; otherwise: plain training step.
            if counter % args.save_pred_every == 0:
                final_loss_value, dis_loss_value, _ = sess.run([final_loss, dis_loss, train_op], feed_dict=feed_dict)
                save(saver, sess, args.snapshot_dir, counter)
            elif counter % args.summary_pred_every == 0:
                final_loss_value, dis_loss_value, final_loss_sum_value, discriminator_sum_value, image_summary_value, _ = \
                    sess.run([final_loss, dis_loss, final_loss_sum, discriminator_sum, image_summary, train_op], feed_dict=feed_dict)
                summary_writer.add_summary(final_loss_sum_value, counter)
                summary_writer.add_summary(discriminator_sum_value, counter)
                summary_writer.add_summary(image_summary_value, counter)
            else:
                final_loss_value, dis_loss_value, _ = \
                    sess.run([final_loss, dis_loss, train_op], feed_dict=feed_dict)
            print('epoch {:d} step {:d} \t final_loss = {:.3f}, dis_loss = {:.3f}'.format(epoch, step, final_loss_value, dis_loss_value))
    coord.request_stop()
    coord.join(threads)

if __name__ == '__main__':
    main()
导入argparse
从日期时间导入日期时间
从随机导入洗牌
导入操作系统
导入系统
导入时间
输入数学
导入tensorflow作为tf
将numpy作为np导入
从utils导入*
从列车图像读取器导入*
来自净进口*
parser=argparse.ArgumentParser(description='')
add_参数(“--snapshot_dir”,默认值=”./snapshots',help=“快照路径”)
add_参数(“--image_size”,type=int,default=256,help=“load image size”)
parser.add_参数(“--x_data_txt_path”,默认值=”./datasets/x_traindata.txt',help=“x图像的txt”)
parser.add_参数(“--y_data_txt_path”,默认值=”./datasets/y_traindata.txt',help=“y图像的txt”)
add_参数(“--random_seed”,type=int,default=1234,help=“random seed”)
add_参数('--base_lr',type=float,default=0.0002,help='adam的初始学习率')
add_参数('--epoch',dest='epoch',type=int,default=50,help='of epoch')
parser.add_参数('--epoch_step',dest='epoch_step',type=int,default=20,help='#of epoch to decause lr'))
add_参数(“--lamda”,type=float,default=10.0,help=“L1 lamda”)
add_参数('--beta1',dest='beta1',type=float,default=0.5,help='momentary term of adam')
添加参数(“--summary\u pred\u every”,type=int,default=200,help=“汇总次数”)
添加参数(“--save\u pred\u every”,type=int,default=8000,help=“保存次数”)
parser.add_参数(“--x_image_forpath”,默认值=”./datasets/train/x/images/”,help=“forpath of x training data.”)
parser.add_参数(“--x_label_forpath”,默认值=”./datasets/train/x/labels/”,help=“forpath of x training labels.”)
parser.add_参数(“--y_image_forpath”,默认值=”./datasets/train/y/images/”,help=“forpath of y training data.”)
parser.add_参数(“--y_label_forpath”,默认值=”./datasets/train/y/labels/”,help=“y训练标签的路径”)
args=parser.parse_args()
def保存(保存程序、sess、logdir、步骤):
模型名称='model'
checkpoint\u path=os.path.join(logdir,model\u name)
如果操作系统路径不存在(logdir):
os.makedirs(logdir)
saver.save(sess、检查点路径、全局步骤=步骤)
打印('已创建检查点')
def get_data_列表(数据路径):
f=打开(数据路径'r')
数据=[]
对于f中的行:
数据=行.strip(“\n”)
data.append(数据)
返回数据
def l1_损失(src、dst):
返回tf.reduce_平均值(tf.abs(src-dst))
def传感器损耗(src、dst):
返回tf.reduce_平均值((src dst)**2)
def main():
如果不存在os.path.exists(args.snapshot\u dir):
os.makedirs(args.snapshot\u dir)
x_datalists=get_data_list(args.x_data_txt_path)#x个图像的列表
y_datalists=get_data_list(args.y_data_txt_path)#y图像的列表
tf.set_random_seed(args.random_seed)
x\u img=tf.placeholder(tf.float32,shape=[1,args.image\u size,args.image\u size,3],name='x\u img')
x\u label=tf.placeholder(tf.float32,shape=[1,args.image\u size,args.image\u size,3],name='x\u label')
y\u img=tf.placeholder(tf.float32,shape=[1,args.image\u size,args.image\u size,3],name='y\u img')
y\u label=tf.placeholder(tf.float32,shape=[1,args.image\u size,args.image\u size,3],name='y\u label')
fake_y=generator(image=x_img,reuse=False,name='generator_x2y')#G
假x=生成器(image=假y,reuse=False,name='generator_y2x')S
假x=generator(image=y\u img,reuse=True,name='generator_y2x')#G'
伪y=generator(image=fake,reuse=True,name='generator'ux2y')的
dy_fake=鉴别器(image=fake_y,gen_label=x_label,reuse=False,name='discriminator_y')#D
dx_fake=鉴别器(image=fake_x,gen_label=y_label,reuse=False,name='discriminator_x')#D'
dy_real=discrimator(image=y_img,gen_label=y_label,reuse=True,name='discrimator_y')#D
dx_real=鉴别器(image=x_img,gen_label=x_label,reuse=True,name='discriminator_x')#D'
最终损失=gan_损失(dy_-fake,tf.ones_-like(dy_-fake))+gan_损失(dx_-fake,tf.ones_-like(dx_-fake))+args.lamda*l1_损失(x_-label,fake_-x)+args.lamda*l1_损失(y_-label,fake_-y)#最终目标函数
dy_loss_real=gan_loss(dy_real,tf.one_like(dy_real))
dy_loss_fake=gan_loss(dy_fake,tf.zero_like(dy_fake))
损失=(真实的损失+虚假的损失)/2
dx_-loss_-real=gan_-loss(dx_-real,tf.ones_-like(dx_-real))
dx_loss_fake=gan_loss(dx_fake,tf.zeros_like(dx_fake))
dx_损失=(dx_损失真+dx_损失假)/2
dis_损耗=dy_损耗+dx_损耗#鉴别器损耗
最终损失总和=tf.summary.scalar(“最终目标”,最终损失)
dx_损耗_总和=tf.summary.scalar(“dx_损耗”,dx_损耗)
dy_loss_sum=tf.summary.scalar(“dy_loss”,dy_loss)
dis_loss_sum=tf.summary.scalar(“dis_loss”,dis_loss)
鉴别器总和=tf.summary.merge([dx\u loss\u sum,dy\u loss\u sum,dis\u loss\u sum])
x_images_summary=tf.py_func(cv_inv_proc,[x_img],tf.float32)#(1,256,256,3)float32
y_fake_cv2inv_images_summary=tf.py_func(cv_inv_proc,[fake_y],tf.float32)#(1,256,256,3)float32
x_label_summary=tf.py_func(label_proc,[x_label],tf.float32)#(1,256,256,3)float32
x_gen_label_summary=tf.py_func(label_inv_proc,[fake_x_],tf.float32)#(1,256,256,3)float32
image\u summary=tf.summary.image('images',t