Python 如何利用保留身份的条件生成对抗网络定制人脸老化训练
我目前正在使用 GAN 进行一个项目。通过搜索若干模型,我找到了一个叫条件 GAN(Conditional GAN)的模型,并在 GitHub 上找到了"保留身份的条件生成对抗网络人脸老化"(Face Aging with Identity-Preserved Conditional GANs)项目。该项目的 README.md 描述了如何下载数据以及如何训练模型。使用该项目提供的数据和预训练模型查看结果是成功的,但我没能用我自己的数据进行训练。我已经尝试了很多方法,比如修改数据路径等等,但都没有成功,我不确定漏掉了什么。(标签:python, tensorflow, machine-learning, deep-learning)下面是我的代码:
import os.path
import os
# BUG FIX: the variable CUDA reads is CUDA_DEVICE_ORDER (singular "DEVICE");
# the original misspelled it as CUDA_DEVICES_ORDER, so the setting was
# silently ignored and device numbering could differ from nvidia-smi's
# PCI bus order.
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # expose only GPU 0 to TensorFlow
import numpy as np
import tensorflow as tf
from datetime import datetime
from models import FaceAging
import sys
sys.path.append('./tools/')
from source_input import load_source_batch3
from utils import save_images, save_source
from data_generator import ImageDataGenerator
# Command-line flags for the LSGAN transfer-training script.
flags = tf.app.flags
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_integer("batch_size", 32, "The size of batch images")
flags.DEFINE_integer("image_size", 128, "the size of the generated image")
flags.DEFINE_integer("noise_dim", 256, "the length of the noise vector")
flags.DEFINE_integer("feature_size", 128, "image size after stride 2 conv")
flags.DEFINE_integer("age_groups", 5, "the number of different age groups")
flags.DEFINE_integer('max_steps', 200000, 'Number of batches to run')
# BUG FIX: both pretrained-model flags carried a copy-pasted description
# ("Directory name to save the checkpoints"); they are paths of models to
# *restore*, not checkpoint output directories.
flags.DEFINE_string("alexnet_pretrained_model", "pre_trained/alexnet.model-292000",
                    "Path of the pretrained AlexNet model to restore")
flags.DEFINE_string("age_pretrained_model", "pre_trained/age_classifier.model-300000",
                    "Path of the pretrained age classifier model to restore")
flags.DEFINE_integer('model_index', None, 'the index of trained model')
flags.DEFINE_float("gan_loss_weight", 75, "gan_loss_weight")
flags.DEFINE_float("fea_loss_weight", 0.5e-4, "fea_loss_weight")
flags.DEFINE_float("age_loss_weight", 30, "age_loss_weight")
# BUG FIX: description said "face_loss_weight" for the tv_loss_weight flag.
flags.DEFINE_float("tv_loss_weight", None, "tv_loss_weight")
flags.DEFINE_string("checkpoint_dir", "checkpoints/age/0_conv5_lsgan_transfer_g75_0.5f-4_a30",
                    "Directory name to save the checkpoints")
flags.DEFINE_string("source_checkpoint_dir", ' ', "Directory name to save the checkpoints")
# BUG FIX: the default sample_dir ended with a trailing space, which created
# a directory named "...a30 " on disk.
flags.DEFINE_string("sample_dir", "age/0_conv5_lsgan_transfer_g75_0.5f-4_a30",
                    "Directory name to save the sample images")
flags.DEFINE_string("fea_layer_name", 'conv5', "which layer to use for fea_loss")
# NOTE(review): source_file must be a *text index file* whose lines are
# "img_name label" pairs (see get_imgAndlabel_list2 in source_input.py).
# Passing a directory such as "train/" makes open() fail with
# PermissionError (Windows) / IsADirectoryError (Linux).
flags.DEFINE_string("source_file", "train/", "source file path")
flags.DEFINE_string("root_folder", "CACD_cropped_400/", "folder that contains images")
FLAGS = flags.FLAGS
# How often to run a batch through the validation model.
VAL_INTERVAL = 5000
# How often to save a model checkpoint
SAVE_INTERVAL = 10000

# Number of discriminator / generator updates performed per training step.
d_iter = 1
g_iter = 1

# Grow GPU memory on demand instead of reserving the whole card up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Initalize the data generator seperately for the training and validation set
train_generator = ImageDataGenerator(batch_size=FLAGS.batch_size, height=FLAGS.feature_size, width=FLAGS.feature_size,
                                     z_dim=FLAGS.noise_dim, scale_size=(FLAGS.image_size, FLAGS.image_size), mode='train')
def my_train():
    """Build the FaceAging graph and run the LSGAN transfer-training loop.

    Restores the pretrained AlexNet and age-classifier weights, optionally
    resumes GAN weights from FLAGS.checkpoint_dir, then alternates
    discriminator/generator updates for FLAGS.max_steps batches, saving
    checkpoints every SAVE_INTERVAL steps and sample images every
    VAL_INTERVAL steps.
    """
    with tf.Graph().as_default():
        sess = tf.Session(config=config)
        model = FaceAging(sess=sess, lr=FLAGS.learning_rate, keep_prob=1., model_num=FLAGS.model_index,
                          batch_size=FLAGS.batch_size,
                          age_loss_weight=FLAGS.age_loss_weight, gan_loss_weight=FLAGS.gan_loss_weight,
                          fea_loss_weight=FLAGS.fea_loss_weight, tv_loss_weight=FLAGS.tv_loss_weight)

        # Placeholders fed from the python-side ImageDataGenerator.
        imgs = tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3])
        true_label_features_128 = tf.placeholder(tf.float32, [FLAGS.batch_size, 128, 128, FLAGS.age_groups])
        true_label_features_64 = tf.placeholder(tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        false_label_features_64 = tf.placeholder(tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        age_label = tf.placeholder(tf.int32, [FLAGS.batch_size])

        # Source images arrive through a TF input queue (see source_input.py).
        source_img_227, source_img_128, face_label = load_source_batch3(
            FLAGS.source_file, FLAGS.root_folder, FLAGS.batch_size)

        model.train_age_lsgan_transfer(source_img_227, source_img_128, imgs, true_label_features_128,
                                       true_label_features_64, false_label_features_64,
                                       FLAGS.fea_layer_name, age_label)

        ge_samples = model.generate_images(imgs, true_label_features_128, reuse=True, mode='train')

        # Savers: GAN weights, frozen AlexNet features, frozen age classifier.
        model.saver = tf.train.Saver(model.save_d_vars + model.save_g_vars, max_to_keep=200)
        model.alexnet_saver = tf.train.Saver(model.alexnet_vars)
        model.age_saver = tf.train.Saver(model.age_vars)

        # Undo the loss weighting so the printed numbers are comparable.
        d_error = model.d_loss / model.gan_loss_weight
        g_error = model.g_loss / model.gan_loss_weight
        fea_error = model.fea_loss / model.fea_loss_weight
        age_error = model.age_loss / model.age_loss_weight

        # Start running operations on the Graph.
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess)

        model.alexnet_saver.restore(sess, FLAGS.alexnet_pretrained_model)
        model.age_saver.restore(sess, FLAGS.age_pretrained_model)

        if model.load(FLAGS.checkpoint_dir, model.saver):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        # BUG FIX: the original printed the literal "{} Start training..."
        # because the placeholder was never formatted.
        print("{} Start training...".format(datetime.now()))

        # Loop over max_steps
        for step in range(FLAGS.max_steps):
            images, t_label_features_128, t_label_features_64, f_label_features_64, age_labels = \
                train_generator.next_target_batch_transfer2()
            # Renamed from `dict` to avoid shadowing the builtin.
            feed = {imgs: images,
                    true_label_features_128: t_label_features_128,
                    true_label_features_64: t_label_features_64,
                    false_label_features_64: f_label_features_64,
                    age_label: age_labels}

            for _ in range(d_iter):
                _, d_loss = sess.run([model.d_optim, d_error], feed_dict=feed)
            for _ in range(g_iter):
                _, g_loss, fea_loss, age_loss = sess.run([model.g_optim, g_error, fea_error, age_error],
                                                         feed_dict=feed)

            format_str = ('%s: step %d, d_loss = %.3f, g_loss = %.3f, fea_loss=%.3f, age_loss=%.3f')
            print(format_str % (datetime.now(), step, d_loss, g_loss, fea_loss, age_loss))

            # Save the model checkpoint periodically.
            if step % SAVE_INTERVAL == SAVE_INTERVAL - 1 or (step + 1) == FLAGS.max_steps:
                model.save(FLAGS.checkpoint_dir, step, 'acgan')

            # Periodically dump one source batch and a generated sample per age class.
            if step % VAL_INTERVAL == VAL_INTERVAL - 1:
                if not os.path.exists(FLAGS.sample_dir):
                    os.makedirs(FLAGS.sample_dir)
                path = os.path.join(FLAGS.sample_dir, str(step))
                if not os.path.exists(path):
                    os.makedirs(path)

                source = sess.run(source_img_128)
                save_source(source, [4, 8], os.path.join(path, 'source.jpg'))
                for j in range(train_generator.n_classes):
                    true_label_fea = train_generator.label_features_128[j]
                    feed = {
                        imgs: source,
                        true_label_features_128: true_label_fea
                    }
                    samples = sess.run(ge_samples, feed_dict=feed)
                    save_images(samples, [4, 8], './{}/test_{:01d}.jpg'.format(path, j))
def main(argv=None):
    """Entry point handed to tf.app.run(); argv is parsed by absl and unused here."""
    my_train()


if __name__ == '__main__':
    tf.app.run()
这是错误消息
Traceback (most recent call last):
File "C:/Users/admin/Desktop/Face/Face-Aging-with-Identity-Preserved-Conditional-Generative-Adversarial-Networks-master/age_lsgan_transfer.py", line 167, in <module>
tf.app.run()
File "C:\Users\admin\PycharmProjects\Deep\venv\lib\site-packages\tensorflow\python\platform\app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "C:\Users\admin\PycharmProjects\Deep\venv\lib\site-packages\absl\app.py", line 300, in run
_run_main(main, args)
File "C:\Users\admin\PycharmProjects\Deep\venv\lib\site-packages\absl\app.py", line 251, in _run_main
sys.exit(main(argv))
File "C:/Users/admin/Desktop/Face/Face-Aging-with-Identity-Preserved-Conditional-Generative-Adversarial-Networks-master/age_lsgan_transfer.py", line 163, in main
my_train()
File "C:/Users/admin/Desktop/Face/Face-Aging-with-Identity-Preserved-Conditional-Generative-Adversarial-Networks-master/age_lsgan_transfer.py", line 88, in my_train
source_img_227, source_img_128, face_label = load_source_batch3(FLAGS.source_file, FLAGS.root_folder, FLAGS.batch_size)
File "C:\Users\admin\Desktop\Face\Face-Aging-with-Identity-Preserved-Conditional-Generative-Adversarial-Networks-master\source_input.py", line 128, in load_source_batch3
img_list, label_list = get_imgAndlabel_list2(filename, img_folder)
File "C:\Users\admin\Desktop\Face\Face-Aging-with-Identity-Preserved-Conditional-Generative-Adversarial-Networks-master\source_input.py", line 173, in get_imgAndlabel_list2
f = open(filename, 'r')
PermissionError: [Errno 13] Permission denied: 'train/'
这个问题可能与已有问题重复。我已经检查了该错误并更改了数据路径,但还是同样的错误。`f=open(filename,'r')` 表示它正在尝试读取一个文件,但
train/
不是文件,而是一个目录。你能在问题中补充 source_input.py
文件的代码吗?train 文件夹中已经有大约 30 个图像文件了,图像扩展名是 jpg。我附上了 source_input.py
。
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
#from read_image import *
from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
import numpy as np
import scipy.io as scio
from tensorflow.python.framework import ops
from PIL import Image
FLAGS = flags.FLAGS

# NOTE(review): T appears unused in this chunk -- confirm before removing.
T = 1
# Raw on-disk image geometry; every decoded image is reshaped to this
# before being resized for the networks.
IM_HEIGHT = 400
IM_WIDTH = 400
IM_CHANNELS = 3
def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def read_images(filename_queue, new_height=None, new_width=None):
    """Decode one image from the queue, optionally resize, subtract the channel mean.

    Reads a whole file, decodes it as JPEG (the decoder also accepts PNG),
    reshapes it to the fixed on-disk geometry, and returns a float32 tensor
    with the per-channel mean [104, 117, 124] removed.
    """
    _, raw = tf.WholeFileReader().read(filename_queue)
    img = tf.image.decode_jpeg(raw)
    img = tf.reshape(img, [IM_HEIGHT, IM_WIDTH, IM_CHANNELS])
    if new_height and new_width:
        img = tf.image.resize_images(img, [new_height, new_width])
    channel_mean = np.array([104., 117., 124.])
    return tf.cast(img, tf.float32) - channel_mean
def read_images2(filename_queue):
    """Decode one image and return mean-subtracted 227x227 and 128x128 float versions."""
    _, raw = tf.WholeFileReader().read(filename_queue)
    decoded = tf.image.decode_jpeg(raw)  # decoder also handles PNG input
    decoded = tf.reshape(decoded, [IM_HEIGHT, IM_WIDTH, IM_CHANNELS])
    channel_mean = np.array([104., 117., 124.])
    # One resized, mean-subtracted copy per target side length.
    outputs = []
    for side in (227, 128):
        resized = tf.image.resize_images(decoded, [side, side])
        outputs.append(tf.cast(resized, tf.float32) - channel_mean)
    return outputs[0], outputs[1]
def read_images3(input_queue):
    """Read one (path, label) pair from a slice queue.

    Returns mean-subtracted 227x227 and 128x128 float images plus the label.
    """
    path, label = input_queue[0], input_queue[1]
    contents = tf.read_file(path)
    decoded = tf.image.decode_image(contents, channels=3)
    decoded = tf.reshape(decoded, [IM_HEIGHT, IM_WIDTH, IM_CHANNELS])
    channel_mean = np.array([104., 117., 124.])
    image_227 = tf.cast(tf.image.resize_images(decoded, [227, 227]), tf.float32) - channel_mean
    image_128 = tf.cast(tf.image.resize_images(decoded, [128, 128]), tf.float32) - channel_mean
    return image_227, image_128, label
def load_source_batch(filename, img_folder, batch_size, img_size, shuffle=True):
    """Build a shuffled batch op of img_size x img_size source images.

    :param filename: index file; each line starts with an image name
    :param img_folder: folder prepended to every image name
    :param batch_size: number of images per batch
    :param img_size: side length the images are resized to
    :param shuffle: whether the filename queue is shuffled
    :return: a [batch_size, img_size, img_size, 3] float batch tensor
    :raises RuntimeError: if the index file lists no images
    """
    filenames = get_imgAndlabel_list(filename, img_folder)
    print('%d images to train' % (len(filenames)))
    if not filenames:
        raise RuntimeError('No data files found.')

    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)
        # Even when reading in multiple threads, share the filename queue.
        image = read_images(filename_queue, new_height=img_size, new_width=img_size)
        # (Removed a dead commented-out tf.train.batch alternative here.)
        image_batch = tf.train.shuffle_batch(
            [image],
            batch_size=batch_size,
            num_threads=4,
            capacity=1280,
            min_after_dequeue=640)
    return image_batch
def load_source_batch2(filename, img_folder, batch_size, shuffle=True):
    """Build a shuffled batch op yielding paired 227x227 and 128x128 source images."""
    filenames = get_imgAndlabel_list(filename, img_folder)
    print('%d images to train' % (len(filenames)))
    if not filenames:
        raise RuntimeError('No data files found.')

    with tf.name_scope('input'):
        queue = tf.train.string_input_producer(filenames, shuffle=shuffle)
        # Even when reading in multiple threads, share the filename queue.
        img_227, img_128 = read_images2(queue)
        return tf.train.shuffle_batch(
            [img_227, img_128],
            batch_size=batch_size,
            num_threads=4,
            capacity=1280,
            min_after_dequeue=640)
def load_source_batch3(filename, img_folder, batch_size, shuffle=True):
    """Build a shuffled batch op yielding (227 images, 128 images, labels) triples."""
    img_list, label_list = get_imgAndlabel_list2(filename, img_folder)
    print('%d images to train' % (len(img_list)))

    image_paths = ops.convert_to_tensor(img_list, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)

    # The slice queue hands one (path, label) pair at a time to the reader.
    input_queue = tf.train.slice_input_producer([image_paths, labels], shuffle=shuffle)
    image_227, image_128, label = read_images3(input_queue)

    return tf.train.shuffle_batch(
        [image_227, image_128, label],
        batch_size=batch_size,
        num_threads=4,
        capacity=1280,
        min_after_dequeue=640)
def get_imgAndlabel_list(filename, img_folder):
    """Read an index file and return the listed image paths.

    :param filename:
        text file; each non-empty line is "img_name[ label ...]" -- only
        the first whitespace-separated field is used
    :param img_folder: folder prepended to every image name
    :return: list of full image paths
    """
    # `with` guarantees the file handle is closed even if a read fails
    # (the original leaked the handle on exception); blank lines are
    # skipped instead of raising IndexError.
    with open(filename, 'r') as f:
        lines = f.readlines()
    return [os.path.join(img_folder, line.split()[0])
            for line in lines if line.strip()]
def get_imgAndlabel_list2(filename, img_folder):
    """Read an index file and return parallel image-path and label lists.

    :param filename:
        text file; each non-empty line is "img_name label"
    :param img_folder: folder prepended to every image name
    :return: (list of full image paths, list of int labels)
    :raises IOError: if filename is not an existing regular file
    """
    # Fail early with a clear message when a directory (e.g. "train/") is
    # passed instead of the index *file* -- opening a directory raises an
    # unhelpful PermissionError on Windows / IsADirectoryError on Linux.
    if not os.path.isfile(filename):
        raise IOError(
            "'%s' is not an index file. Pass the text file listing "
            "'img_name label' pairs, not the image directory." % filename)

    with open(filename, 'r') as f:
        lines = f.readlines()

    imgname_lists = []
    label_lists = []
    for line in lines:
        if not line.strip():  # tolerate blank lines (e.g. trailing newline)
            continue
        img_name, label = line.split()
        imgname_lists.append(os.path.join(img_folder, img_name))
        label_lists.append(int(label))
    return imgname_lists, label_lists