Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/python/288.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python 需要np.整形的数据大小的GAN形状误差(X_,(-1,17,10,1))_Python_Python 3.x_Deep Learning_Generative Adversarial Network - Fatal编程技术网

Python 需要np.整形的数据大小的GAN形状误差(X_,(-1,17,10,1))

Python 需要np.整形的数据大小的GAN形状误差(X_,(-1,17,10,1)),python,python-3.x,deep-learning,generative-adversarial-network,Python,Python 3.x,Deep Learning,Generative Adversarial Network,我正在努力解决在运行GAN模型时不断遇到的形状错误。我曾尝试遵循以下指南,但由于我对Python和GANs的知识有限,我遇到了一个障碍。我有一种感觉,这是因为模型需要与数据形状匹配,但确实不能确定 我搜索过其他帖子,但它们都涉及图像数据,因此没有找到一篇讨论随机形状错误的帖子,所以我恳请您提供帮助。以下是完整的代码: # define the standalone discriminator model def define_discriminator(input_shape): mod

我正在努力解决在运行GAN模型时不断遇到的形状错误。我曾尝试遵循以下指南,但由于我对Python和GANs的知识有限,我遇到了一个障碍。我有一种感觉,这是因为模型需要与数据形状匹配,但确实不能确定

我搜索过其他帖子,但它们都涉及图像数据,因此没有找到一篇讨论随机形状错误的帖子,所以我恳请您提供帮助。以下是完整的代码:

# define the standalone discriminator model
def define_discriminator(input_shape):
    """Build and compile the discriminator CNN.

    Args:
        input_shape: tuple (rows, cols, channels) of a single sample.

    Returns:
        A compiled keras Sequential model mapping a sample to a
        real/fake probability.
    """
    model = Sequential()
    # BUGFIX: the original line was missing its closing parenthesis and
    # hard-coded input_shape=(17,10,1), ignoring the function argument.
    model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same',
                     input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Flatten())
    # sigmoid output -> probability in (0,1), matching binary_crossentropy
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

# define the standalone generator model
def define_generator(latent_dim):
    """Map a latent vector of size ``latent_dim`` to a 28x28x1 image."""
    layers = [
        # project the latent vector onto a 7x7 feature-map foundation
        Dense(128 * 7 * 7, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        Reshape((7, 7, 128)),
        # upsample 7x7 -> 14x14
        Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
        LeakyReLU(alpha=0.2),
        # upsample 14x14 -> 28x28
        Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
        LeakyReLU(alpha=0.2),
        # collapse to a single channel in [0, 1]
        Conv2D(1, (7, 7), activation='sigmoid', padding='same'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model

# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model):
    """Stack generator + (frozen) discriminator into one trainable model."""
    # freeze the discriminator so only the generator learns through this model
    d_model.trainable = False
    combined = Sequential()
    combined.add(g_model)
    combined.add(d_model)
    combined.compile(loss='binary_crossentropy',
                     optimizer=Adam(lr=0.0002, beta_1=0.5))
    return combined

# load and prepare training samples from CSV
def load_real_samples():
    """Read ./data/test.csv and return float32 samples shaped
    (-1, 17, 10, 1), scaled from [0, 255] to [0, 1]."""
    frame = pd.read_csv('./data/test.csv')
    # columns 3..172 hold the 170 values of each sample (17 * 10)
    values = frame.iloc[0:, 3:173].to_numpy()
    samples = np.reshape(values, (-1, 17, 10, 1)).astype('float32')
    # scale from [0,255] to [0,1]
    return samples / 255.0

# select real samples
def generate_real_samples(dataset, n_samples):
    """Draw ``n_samples`` random rows from ``dataset`` with 'real' labels (1)."""
    picks = randint(0, dataset.shape[0], n_samples)
    labels = ones((n_samples, 1))
    return dataset[picks], labels

# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Sample ``n_samples`` standard-normal vectors of size ``latent_dim``."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)

# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Run ``g_model`` on random latent points; label the outputs fake (0)."""
    latent = generate_latent_points(latent_dim, n_samples)
    fakes = g_model.predict(latent)
    return fakes, zeros((n_samples, 1))

# create and save a plot of generated images (reversed grayscale)
def save_plot(examples, epoch, n=10):
    """Save an n-by-n grid of generated samples as a PNG for ``epoch``."""
    for idx in range(n * n):
        pyplot.subplot(n, n, idx + 1)
        pyplot.axis('off')
        # draw the single channel in reversed grayscale
        pyplot.imshow(examples[idx, :, :, 0], cmap='gray_r')
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()

# evaluate the discriminator, plot generated images, save generator model
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Periodic checkpoint: print discriminator accuracy on real and fake
    batches, save a plot of generated samples, and save the generator."""
    # prepare real samples
    X_real, y_real = generate_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    # save plot
    save_plot(x_fake, epoch)
    # save the generator model to an .h5 checkpoint file
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)

# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=256):
    """Alternating GAN training loop.

    Each batch: train the discriminator on half real / half fake samples,
    then train the generator (through gan_model) against inverted labels.
    Every 10 epochs summarize_performance() checkpoints the models.
    """
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    # manually enumerate epochs
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # create training set for the discriminator
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            # update discriminator model weights
            d_loss, _ = d_model.train_on_batch(X, y)
            # prepare points in latent space as input for the generator
            X_gan = generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples (the generator is
            # rewarded when the discriminator calls its output "real")
            y_gan = ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        # evaluate the model performance, sometimes
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)



# # load mnist dataset
# (trainX, _), (_, _) = load_data()
# # expand to 3d, e.g. add channels dimension
# X = expand_dims(trainX, axis=-1)
# # convert from unsigned ints to floats
# X = X.astype('float32')
# # scale from [0,255] to [0,1]
# X = X / 255.0




# size of the latent space
latent_dim = 100
# create the discriminator
# NOTE(review): the (17,10,1) shape must match the (-1,17,10,1) reshape in
# load_real_samples(), and must also match the generator's output shape or
# define_gan() will fail when stacking the two models.
d_model = define_discriminator((17,10,1))
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data
dataset = load_real_samples()
# train model
train(g_model, d_model, gan_model, dataset, latent_dim)
# example of training a gan on bed_posture 21.02.04
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy import vstack
from numpy.random import randn
from numpy.random import randint

from keras.datasets.mnist import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Dropout
from matplotlib import pyplot
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
# reset any prior Keras session state before building new models
keras.backend.clear_session()


## Keras GPU availability check ###
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from keras import backend as K
# NOTE(review): tensorflow_backend is a private attribute of keras 2.x on a
# TF1 backend; confirm it exists in the installed keras version.
K.tensorflow_backend._get_available_gpus()

###

# define the standalone discriminator model
def define_discriminator(input_shape = (16,10,1)):
    """Build and compile the discriminator for 16x10 single-channel samples.

    Args:
        input_shape: shape of one sample, default (16, 10, 1).

    Returns:
        Compiled keras Sequential model producing a real/fake probability.
    """
    model = Sequential()
    model.add(Conv2D(4, (2,2), strides=(2,2), padding='same', input_shape=input_shape))
    model.add(Dropout(0.4))
    model.add(Flatten())
    # BUGFIX: the output layer used activation='relu', which is unbounded
    # and incompatible with binary_crossentropy (takes log of values that
    # must lie in (0,1)); sigmoid yields a proper probability.
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    print(model.summary())
    return model

# define the standalone generator model
def define_generator(latent_dim):
    """Map a latent vector to a 16x10x1 sample (8x5 foundation, upsampled 2x)."""
    model = Sequential()
    # dense foundation reshaped into an 8x5 grid of 32 feature maps
    model.add(Dense(8 * 5 * 32, input_dim=latent_dim))
    model.add(Reshape((8, 5, 32)))
    # upsample 8x5 -> 16x10
    model.add(Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same'))
    # single-channel output; relu keeps values non-negative
    model.add(Conv2D(1, (2, 2), activation='relu', padding='same'))
    print(model.summary())
    return model

# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model):
    """Stack generator + (frozen) discriminator into one trainable model."""
    # freeze the discriminator so only the generator learns through this model
    d_model.trainable = False
    combined = Sequential()
    combined.add(g_model)
    combined.add(d_model)
    combined.compile(loss='binary_crossentropy',
                     optimizer=Adam(lr=0.0002, beta_1=0.5))
    print(combined.summary())
    return combined

# load and prepare training samples from CSV
def load_real_samples():
    """Load ./data/bed_posture_GAN.csv, replicate it 2001x, keep only rows
    whose 'posture' column equals the module-level ``pos``, and return them
    shaped (-1, 16, 10, 1) as float32.

    Note: relies on the global variable ``pos`` being defined first.
    """
    df = pd.read_csv('./data/bed_posture_GAN.csv')
    # BUGFIX/perf: the original grew the frame with 2000 DataFrame.append
    # calls (quadratic copying; .append was removed in pandas >= 2.0).
    # pd.concat builds the same 2001-copy frame in one pass.
    combined = pd.concat([df] * 2001, ignore_index=True)
    print(combined.shape)
    # posture filter (valid classes in this dataset: 0, 1, 2, 8)
    filtered = combined[combined['posture'] == pos]
    # columns 3..162 hold the 160 pressure values per sample (16 * 10)
    values = filtered.iloc[0:, 3:163].to_numpy()
    # convert from ints to floats (the original cast twice; once suffices)
    X = np.reshape(values, (-1, 16, 10, 1)).astype('float32')
    # scaling to [0,1] intentionally left disabled, as in the original
    # X = X / 255.0
    return X

# select real samples
def generate_real_samples(dataset, n_samples):
    """Draw ``n_samples`` random rows from ``dataset`` with 'real' labels (1)."""
    picks = randint(0, dataset.shape[0], n_samples)
    labels = ones((n_samples, 1))
    return dataset[picks], labels

# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Sample ``n_samples`` standard-normal vectors of size ``latent_dim``."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)

# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Run ``g_model`` on random latent points; label the outputs fake (0)."""
    latent = generate_latent_points(latent_dim, n_samples)
    fakes = g_model.predict(latent)
    return fakes, zeros((n_samples, 1))

# create and save a plot of generated images (reversed grayscale)
def save_plot(examples, epoch, n=10):
    """Save an n-by-n grid of generated samples as a PNG for ``epoch``."""
    for idx in range(n * n):
        pyplot.subplot(n, n, idx + 1)
        pyplot.axis('off')
        # draw the single channel in reversed grayscale
        pyplot.imshow(examples[idx, :, :, 0], cmap='gray_r')
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()

# evaluate the discriminator, plot generated images, save generator model
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Periodic checkpoint: print discriminator accuracy on real and fake
    batches, save a plot of generated samples, and save the generator."""
    # prepare real samples
    X_real, y_real = generate_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    # save plot
    save_plot(x_fake, epoch)
    # save the generator model to an .h5 checkpoint file
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)

# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=256):
    """Alternating GAN training loop.

    Each batch: train the discriminator on half real / half fake samples,
    then train the generator (through gan_model) against inverted labels.
    Every 10 epochs summarize_performance() checkpoints the models.
    """
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    # manually enumerate epochs
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # create training set for the discriminator
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            # update discriminator model weights
            d_loss, _ = d_model.train_on_batch(X, y)
            # prepare points in latent space as input for the generator
            X_gan = generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples (the generator is
            # rewarded when the discriminator calls its output "real")
            y_gan = ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        # evaluate the model performance, sometimes
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)



# # load mnist dataset
# (trainX, _), (_, _) = load_data()
# # expand to 3d, e.g. add channels dimension
# X = expand_dims(trainX, axis=-1)
# # convert from unsigned ints to floats
# X = X.astype('float32')
# # scale from [0,255] to [0,1]
# X = X / 255.0

# valid posture class labels in the dataset: 0, 1, 2, 8

# posture class to generate; read as a global by load_real_samples()
pos = 0
# size of the latent space
latent_dim = 100
# create the discriminator (default input shape (16, 10, 1))
d_model = define_discriminator()
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data
dataset = load_real_samples()
# train model
train(g_model, d_model, gan_model, dataset, latent_dim)
以下是我收到的错误代码:

Traceback (most recent call last):
  File "C:\Users\eo\Anaconda3\envs\eo\lib\site-packages\IPython\core\interactiveshell.py", line 3418, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-3-5f1576c5a8d3>", line 222, in <module>
    train(g_model, d_model, gan_model, dataset, latent_dim)
  File "<ipython-input-3-5f1576c5a8d3>", line 195, in train
    summarize_performance(i, g_model, d_model, dataset, latent_dim)
  File "<ipython-input-3-5f1576c5a8d3>", line 160, in summarize_performance
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
  File "C:\Users\eo\Anaconda3\envs\eo\lib\site-packages\keras\engine\training.py", line 1349, in evaluate
    batch_size=batch_size)
  File "C:\Users\eo\Anaconda3\envs\eo\lib\site-packages\keras\engine\training.py", line 579, in _standardize_user_data
    exception_prefix='input')
  File "C:\Users\eo\Anaconda3\envs\eo\lib\site-packages\keras\engine\training_utils.py", line 145, in standardize_input_data
    str(data_shape))
ValueError: Error when checking input: expected conv2d_1_input to have shape (17, 10, 1) but got array with shape (28, 28, 1)
回溯(最近一次呼叫最后一次):
文件“C:\Users\eo\Anaconda3\envs\eo\lib\site packages\IPython\core\interactiveshell.py”,第3418行,运行代码
exec(代码对象、self.user\u全局、self.user\n)
文件“”,第222行,在
训练(g_模型、d_模型、gan_模型、数据集、潜在维度)
文件“”,第195行,列车中
总结绩效(i、g、d、数据集、潜在维度)
文件“”,第160行,在“性能”中
_,acc_-fake=d_模型。评估(x_-fake,y_-fake,verbose=0)
文件“C:\Users\eo\Anaconda3\envs\eo\lib\site packages\keras\engine\training.py”,第1349行,在evaluate中
批次大小=批次大小)
文件“C:\Users\eo\Anaconda3\envs\eo\lib\site packages\keras\engine\training.py”,第579行,在用户数据中
异常(前缀为“输入”)
文件“C:\Users\eo\Anaconda3\envs\eo\lib\site packages\keras\engine\training\u utils.py”,第145行,标准化输入数据
str(数据形状))
ValueError:检查输入时出错:预期conv2d_1_输入具有形状(17,10,1),但获得具有形状(28,28,1)的数组

很抱歉,我缺乏基础知识,但由于截止日期,我已经没有选择了。

以下是我们为非图像数据实现此GAN找到的解决方案。结果表明，在组合模型时应使用16维而不是17维：由于生成器和判别器都使用步长为2的上/下采样，空间维度必须是偶数，奇数维度会导致生成器输出形状与判别器输入形状不匹配（现在想来这是有道理的）。

以下是完整的代码:

# define the standalone discriminator model
def define_discriminator(input_shape):
    """Build and compile the discriminator CNN.

    Args:
        input_shape: tuple (rows, cols, channels) of a single sample.

    Returns:
        A compiled keras Sequential model mapping a sample to a
        real/fake probability.
    """
    model = Sequential()
    # BUGFIX: the original line was missing its closing parenthesis and
    # hard-coded input_shape=(17,10,1), ignoring the function argument.
    model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same',
                     input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Flatten())
    # sigmoid output -> probability in (0,1), matching binary_crossentropy
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

# define the standalone generator model
def define_generator(latent_dim):
    """Map a latent vector of size ``latent_dim`` to a 28x28x1 image."""
    layers = [
        # project the latent vector onto a 7x7 feature-map foundation
        Dense(128 * 7 * 7, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        Reshape((7, 7, 128)),
        # upsample 7x7 -> 14x14
        Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
        LeakyReLU(alpha=0.2),
        # upsample 14x14 -> 28x28
        Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
        LeakyReLU(alpha=0.2),
        # collapse to a single channel in [0, 1]
        Conv2D(1, (7, 7), activation='sigmoid', padding='same'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model

# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model):
    """Stack generator + (frozen) discriminator into one trainable model."""
    # freeze the discriminator so only the generator learns through this model
    d_model.trainable = False
    combined = Sequential()
    combined.add(g_model)
    combined.add(d_model)
    combined.compile(loss='binary_crossentropy',
                     optimizer=Adam(lr=0.0002, beta_1=0.5))
    return combined

# load and prepare training samples from CSV
def load_real_samples():
    """Read ./data/test.csv and return float32 samples shaped
    (-1, 17, 10, 1), scaled from [0, 255] to [0, 1]."""
    frame = pd.read_csv('./data/test.csv')
    # columns 3..172 hold the 170 values of each sample (17 * 10)
    values = frame.iloc[0:, 3:173].to_numpy()
    samples = np.reshape(values, (-1, 17, 10, 1)).astype('float32')
    # scale from [0,255] to [0,1]
    return samples / 255.0

# select real samples
def generate_real_samples(dataset, n_samples):
    """Draw ``n_samples`` random rows from ``dataset`` with 'real' labels (1)."""
    picks = randint(0, dataset.shape[0], n_samples)
    labels = ones((n_samples, 1))
    return dataset[picks], labels

# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Sample ``n_samples`` standard-normal vectors of size ``latent_dim``."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)

# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Run ``g_model`` on random latent points; label the outputs fake (0)."""
    latent = generate_latent_points(latent_dim, n_samples)
    fakes = g_model.predict(latent)
    return fakes, zeros((n_samples, 1))

# create and save a plot of generated images (reversed grayscale)
def save_plot(examples, epoch, n=10):
    """Save an n-by-n grid of generated samples as a PNG for ``epoch``."""
    for idx in range(n * n):
        pyplot.subplot(n, n, idx + 1)
        pyplot.axis('off')
        # draw the single channel in reversed grayscale
        pyplot.imshow(examples[idx, :, :, 0], cmap='gray_r')
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()

# evaluate the discriminator, plot generated images, save generator model
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Periodic checkpoint: print discriminator accuracy on real and fake
    batches, save a plot of generated samples, and save the generator."""
    # prepare real samples
    X_real, y_real = generate_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    # save plot
    save_plot(x_fake, epoch)
    # save the generator model to an .h5 checkpoint file
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)

# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=256):
    """Alternating GAN training loop.

    Each batch: train the discriminator on half real / half fake samples,
    then train the generator (through gan_model) against inverted labels.
    Every 10 epochs summarize_performance() checkpoints the models.
    """
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    # manually enumerate epochs
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # create training set for the discriminator
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            # update discriminator model weights
            d_loss, _ = d_model.train_on_batch(X, y)
            # prepare points in latent space as input for the generator
            X_gan = generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples (the generator is
            # rewarded when the discriminator calls its output "real")
            y_gan = ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        # evaluate the model performance, sometimes
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)



# # load mnist dataset
# (trainX, _), (_, _) = load_data()
# # expand to 3d, e.g. add channels dimension
# X = expand_dims(trainX, axis=-1)
# # convert from unsigned ints to floats
# X = X.astype('float32')
# # scale from [0,255] to [0,1]
# X = X / 255.0




# size of the latent space
latent_dim = 100
# create the discriminator
# NOTE(review): the (17,10,1) shape must match the (-1,17,10,1) reshape in
# load_real_samples(), and must also match the generator's output shape or
# define_gan() will fail when stacking the two models.
d_model = define_discriminator((17,10,1))
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data
dataset = load_real_samples()
# train model
train(g_model, d_model, gan_model, dataset, latent_dim)
# example of training a gan on bed_posture 21.02.04
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy import vstack
from numpy.random import randn
from numpy.random import randint

from keras.datasets.mnist import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Dropout
from matplotlib import pyplot
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
# reset any prior Keras session state before building new models
keras.backend.clear_session()


## Keras GPU availability check ###
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from keras import backend as K
# NOTE(review): tensorflow_backend is a private attribute of keras 2.x on a
# TF1 backend; confirm it exists in the installed keras version.
K.tensorflow_backend._get_available_gpus()

###

# define the standalone discriminator model
def define_discriminator(input_shape = (16,10,1)):
    """Build and compile the discriminator for 16x10 single-channel samples.

    Args:
        input_shape: shape of one sample, default (16, 10, 1).

    Returns:
        Compiled keras Sequential model producing a real/fake probability.
    """
    model = Sequential()
    model.add(Conv2D(4, (2,2), strides=(2,2), padding='same', input_shape=input_shape))
    model.add(Dropout(0.4))
    model.add(Flatten())
    # BUGFIX: the output layer used activation='relu', which is unbounded
    # and incompatible with binary_crossentropy (takes log of values that
    # must lie in (0,1)); sigmoid yields a proper probability.
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    print(model.summary())
    return model

# define the standalone generator model
def define_generator(latent_dim):
    """Map a latent vector to a 16x10x1 sample (8x5 foundation, upsampled 2x)."""
    model = Sequential()
    # dense foundation reshaped into an 8x5 grid of 32 feature maps
    model.add(Dense(8 * 5 * 32, input_dim=latent_dim))
    model.add(Reshape((8, 5, 32)))
    # upsample 8x5 -> 16x10
    model.add(Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same'))
    # single-channel output; relu keeps values non-negative
    model.add(Conv2D(1, (2, 2), activation='relu', padding='same'))
    print(model.summary())
    return model

# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model):
    """Stack generator + (frozen) discriminator into one trainable model."""
    # freeze the discriminator so only the generator learns through this model
    d_model.trainable = False
    combined = Sequential()
    combined.add(g_model)
    combined.add(d_model)
    combined.compile(loss='binary_crossentropy',
                     optimizer=Adam(lr=0.0002, beta_1=0.5))
    print(combined.summary())
    return combined

# load and prepare training samples from CSV
def load_real_samples():
    """Load ./data/bed_posture_GAN.csv, replicate it 2001x, keep only rows
    whose 'posture' column equals the module-level ``pos``, and return them
    shaped (-1, 16, 10, 1) as float32.

    Note: relies on the global variable ``pos`` being defined first.
    """
    df = pd.read_csv('./data/bed_posture_GAN.csv')
    # BUGFIX/perf: the original grew the frame with 2000 DataFrame.append
    # calls (quadratic copying; .append was removed in pandas >= 2.0).
    # pd.concat builds the same 2001-copy frame in one pass.
    combined = pd.concat([df] * 2001, ignore_index=True)
    print(combined.shape)
    # posture filter (valid classes in this dataset: 0, 1, 2, 8)
    filtered = combined[combined['posture'] == pos]
    # columns 3..162 hold the 160 pressure values per sample (16 * 10)
    values = filtered.iloc[0:, 3:163].to_numpy()
    # convert from ints to floats (the original cast twice; once suffices)
    X = np.reshape(values, (-1, 16, 10, 1)).astype('float32')
    # scaling to [0,1] intentionally left disabled, as in the original
    # X = X / 255.0
    return X

# select real samples
def generate_real_samples(dataset, n_samples):
    """Draw ``n_samples`` random rows from ``dataset`` with 'real' labels (1)."""
    picks = randint(0, dataset.shape[0], n_samples)
    labels = ones((n_samples, 1))
    return dataset[picks], labels

# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    """Sample ``n_samples`` standard-normal vectors of size ``latent_dim``."""
    flat = randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)

# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Run ``g_model`` on random latent points; label the outputs fake (0)."""
    latent = generate_latent_points(latent_dim, n_samples)
    fakes = g_model.predict(latent)
    return fakes, zeros((n_samples, 1))

# create and save a plot of generated images (reversed grayscale)
def save_plot(examples, epoch, n=10):
    """Save an n-by-n grid of generated samples as a PNG for ``epoch``."""
    for idx in range(n * n):
        pyplot.subplot(n, n, idx + 1)
        pyplot.axis('off')
        # draw the single channel in reversed grayscale
        pyplot.imshow(examples[idx, :, :, 0], cmap='gray_r')
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()

# evaluate the discriminator, plot generated images, save generator model
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Periodic checkpoint: print discriminator accuracy on real and fake
    batches, save a plot of generated samples, and save the generator."""
    # prepare real samples
    X_real, y_real = generate_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    # save plot
    save_plot(x_fake, epoch)
    # save the generator model to an .h5 checkpoint file
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)

# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=256):
    """Alternating GAN training loop.

    Each batch: train the discriminator on half real / half fake samples,
    then train the generator (through gan_model) against inverted labels.
    Every 10 epochs summarize_performance() checkpoints the models.
    """
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    # manually enumerate epochs
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # create training set for the discriminator
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            # update discriminator model weights
            d_loss, _ = d_model.train_on_batch(X, y)
            # prepare points in latent space as input for the generator
            X_gan = generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples (the generator is
            # rewarded when the discriminator calls its output "real")
            y_gan = ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        # evaluate the model performance, sometimes
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)



# # load mnist dataset
# (trainX, _), (_, _) = load_data()
# # expand to 3d, e.g. add channels dimension
# X = expand_dims(trainX, axis=-1)
# # convert from unsigned ints to floats
# X = X.astype('float32')
# # scale from [0,255] to [0,1]
# X = X / 255.0

# valid posture class labels in the dataset: 0, 1, 2, 8

# posture class to generate; read as a global by load_real_samples()
pos = 0
# size of the latent space
latent_dim = 100
# create the discriminator (default input shape (16, 10, 1))
d_model = define_discriminator()
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data
dataset = load_real_samples()
# train model
train(g_model, d_model, gan_model, dataset, latent_dim)