
Python 3.x: Running the same model twice in one script produces different results in TensorFlow

Tags: python-3.x, tensorflow, lstm, recurrent-neural-network, random-seed

I have an LSTM that I run twice within the same script (so I get two sets of predictions). The predictions I get in the second iteration differ from the ones I get in the first iteration. However, when I run the model twice manually, starting the second run after the first run finishes, both runs give the same results.

Question: I want the first and second iterations to produce the same predictions. How can I achieve this? My code is shown below.

import tensorflow as tf
import matplotlib as mplt
mplt.use('agg')  # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import csv
import atexit
from time import time, strftime, localtime
from datetime import timedelta
np.random.seed(1)
tf.set_random_seed(1)


class RNNConfig():

    input_size = 1
    noTimesToRun = 2
    # fileNames = ['store2_1.csv', 'store85_1.csv', 'store259_1.csv', 'store519_1.csv', 'store725_1.csv',
    #              'store749_1.csv',
    #              'store934_1.csv', 'store1019_1.csv']
    # column_min_max_all = [[[0, 11000], [1, 7]], [[0, 17000], [1, 7]], [[0, 23000], [1, 7]], [[0, 14000], [1, 7]],
    #                       [[0, 14000], [1, 7]], [[0, 15000], [1, 7]], [[0, 17000], [1, 7]], [[0, 25000], [1, 7]]]

    columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo', 'lagged_Open', 'lagged_promo', 'lagged_SchoolHoliday']

    # fileNames = ['store85_1', 'store519_1', 'store725_1', 'store749_1','store165_1','store925_1','store1089_1','store335_1']
    # column_min_max_all = [[[0, 17000], [1, 7]],  [[0, 14000], [1, 7]], [[0, 14000], [1, 7]], [[0, 15000], [1, 7]],[[0, 9000], [1, 7]], [[0, 15000], [1, 7]], [[0, 21000], [1, 7]], [[0, 33000], [1, 7]]]

    fileNames = ['store85_1']
    column_min_max_all = [[[0, 17000], [1, 7]]]

    features = len(columns)

    num_steps = None
    lstm_size = None
    batch_size = None
    init_learning_rate = None
    learning_rate_decay = None
    init_epoch = None
    max_epoch = None
    hidden1_nodes = None
    hidden2_nodes = None
    dropout_rate = None
    hidden1_activation = None
    hidden2_activation = None
    lstm_activation = None
    fileName = None
    column_min_max = None
    # plotname = None
    writename = None
    RMSE = None
    MAE = None
    MAPE = None
    RMSPE = None




config = RNNConfig()



def secondsToStr(elapsed=None):
    if elapsed is None:
        return strftime("%Y-%m-%d %H:%M:%S", localtime())
    else:
        return str(timedelta(seconds=elapsed))

def log(s, elapsed=None):
    line = "="*40
    print(line)
    print(secondsToStr(), '-', s)
    if elapsed:
        print("Elapsed time:", elapsed)
    print(line)
    print()

def endlog():
    end = time()
    elapsed = end-start
    log("End Program", secondsToStr(elapsed))


def segmentation(data):

    seq = [price for tup in data[config.columns].values for price in tup]

    seq = np.array(seq)

    # split into items of features
    seq = [np.array(seq[i * config.features: (i + 1) * config.features])
           for i in range(len(seq) // config.features)]

    # split into groups of num_steps
    X = np.array([seq[i: i + config.num_steps] for i in range(len(seq) -  config.num_steps)])

    y = np.array([seq[i +  config.num_steps] for i in range(len(seq) -  config.num_steps)])

    # get only sales value
    y = [[y[i][0]] for i in range(len(y))]

    y = np.asarray(y)

    print(y)

    return X, y

def scale(data):

    for i in range (len(config.column_min_max)):
        data[config.columns[i]] = (data[config.columns[i]] - config.column_min_max[i][0]) / ((config.column_min_max[i][1]) - (config.column_min_max[i][0]))

    return data

def rescale(test_pred):

    prediction = [(pred * (config.column_min_max[0][1] - config.column_min_max[0][0])) + config.column_min_max[0][0] for pred in test_pred]

    return prediction


def pre_process():


    store_data = pd.read_csv(config.fileName)

    store_data['lagged_Open'] = store_data['lagged_Open'].astype(int)
    store_data['lagged_promo'] = store_data['lagged_promo'].astype(int)
    store_data['lagged_SchoolHoliday'] = store_data['lagged_SchoolHoliday'].astype(int)

    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)

    # ---for segmenting original data --------------------------------
    # original_data = store_data.copy()

    ## train_size = int(len(store_data) * (1.0 - test_ratio))

    # test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    # train_size = int(len(store_data) - (test_len))
    #
    # train_data = store_data[:train_size]
    # test_data = store_data[(train_size - config.num_steps):]
    # original_test_data = test_data.copy()
    #
    # # -------------- processing train data---------------------------------------
    # scaled_train_data = scale(train_data)
    # train_X, train_y = segmentation(scaled_train_data)
    #
    # # -------------- processing test data---------------------------------------
    # scaled_test_data = scale(test_data)
    # test_X, test_y = segmentation(scaled_test_data)
    #
    # # ----segmenting original test data---------------------------------------------
    # nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)

    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))

    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - config.num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - config.num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()

    # -------------- processing train data---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)

    # -------------- processing validation data---------------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)

    # -------------- processing test data---------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)

    # ----segmenting original validation data-----------------------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)

    # ----segmenting original test data-----------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)

    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y


def generate_batches(train_X, train_y, batch_size):
    num_batches = int(len(train_X)) // batch_size
    if batch_size * num_batches < len(train_X):
        num_batches += 1

    batch_indices = range(num_batches)
    for j in batch_indices:
        batch_X = train_X[j * batch_size: (j + 1) * batch_size]
        batch_y = train_y[j * batch_size: (j + 1) * batch_size]
        assert set(map(len, batch_X)) == {config.num_steps}
        yield batch_X, batch_y

def mean_absolute_percentage_error(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    itemindex = np.where(y_true == 0)
    y_true = np.delete(y_true, itemindex)
    y_pred = np.delete(y_pred, itemindex)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

def RMSPE(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    itemindex = np.where(y_true == 0)
    y_true = np.delete(y_true, itemindex)
    y_pred = np.delete(y_pred, itemindex)
    return np.sqrt(np.mean(np.square(((y_true - y_pred) / y_true)), axis=0))

# def plot(true_vals,pred_vals,name):
#     fig = plt.figure()
#     fig = plt.figure(dpi=100, figsize=(20, 7))
#     days = range(len(true_vals))
#     plt.plot(days, pred_vals, label='pred sales')
#     plt.plot(days, true_vals, label='truth sales')
#     plt.legend(loc='upper left', frameon=False)
#     plt.xlabel("day")
#     plt.ylabel("sales")
#     plt.grid(ls='--')
#     plt.savefig(name, format='png', bbox_inches='tight', transparent=False)
#     plt.close()

def write_results(true_vals,pred_vals,name):

    print("write method")

    # with open(name, "w") as f:
    #     writer = csv.writer(f)
    #     writer.writerows(zip(true_vals, pred_vals))


def train_test():
    train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y  = pre_process()

    # tf.set_random_seed(1)

    inputs = tf.placeholder(tf.float32, [None, config.num_steps, config.features], name="inputs")
    targets = tf.placeholder(tf.float32, [None, config.input_size], name="targets")
    model_learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
    model_dropout_rate = tf.placeholder_with_default(0.0, shape=())
    global_step = tf.Variable(0, trainable=False)

    model_learning_rate = tf.train.exponential_decay(learning_rate=model_learning_rate, global_step=global_step,
                                                     decay_rate=config.learning_rate_decay,
                                                     decay_steps=config.init_epoch, staircase=False)

    cell = tf.contrib.rnn.LSTMCell(config.lstm_size, state_is_tuple=True, activation=config.lstm_activation,
                                   use_peepholes=True)

    val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)

    val = tf.transpose(val1, [1, 0, 2])

    last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")

    # hidden layer
    hidden1 = tf.layers.dense(last, units=config.hidden1_nodes, activation=config.hidden2_activation)
    hidden2 = tf.layers.dense(hidden1, units=config.hidden2_nodes, activation=config.hidden1_activation)

    dropout = tf.layers.dropout(hidden2, rate=model_dropout_rate, training=True,seed=1)

    weight = tf.Variable(tf.truncated_normal([config.hidden2_nodes, config.input_size]))
    bias = tf.Variable(tf.constant(0.1, shape=[config.input_size]))

    prediction = tf.nn.relu(tf.matmul(dropout, weight) + bias)

    loss = tf.losses.mean_squared_error(targets, prediction)
    optimizer = tf.train.AdamOptimizer(model_learning_rate)
    minimize = optimizer.minimize(loss, global_step=global_step)


    # --------------------training------------------------------------------------------
    tf.set_random_seed(1)
    sess = tf.Session()

    sess.run(tf.global_variables_initializer())

    iteration = 1

    for epoch_step in range(config.max_epoch):

        for batch_X, batch_y in generate_batches(train_X, train_y, config.batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                model_learning_rate: config.init_learning_rate,
                model_dropout_rate: config.dropout_rate
            }

            train_loss, _, value = sess.run([loss, minimize, val1], train_data_feed)

            if iteration % 5 == 0:
                print("Epoch: {}/{}".format(epoch_step, config.max_epoch),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.6f}".format(train_loss))
            iteration += 1

    saver = tf.train.Saver()
    saver.save(sess, "checkpoints_sales/sales_pred.ckpt")

    # --------------------testing------------------------------------------------------
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints_sales'))

    test_data_feed = {
        inputs: test_X,
    }

    test_pred = sess.run(prediction, test_data_feed)

    # rmsse = sess.run(correct_prediction, test_data_feed)

    pred_vals = rescale(test_pred)

    pred_vals = np.array(pred_vals)

    pred_vals = (np.round(pred_vals, 0)).astype(np.int32)

    pred_vals = pred_vals.flatten()

    pred_vals = pred_vals.tolist()

    nonescaled_y = nonescaled_test_y.flatten()

    nonescaled_y = nonescaled_y.tolist()

    # plot(nonescaled_y, pred_vals, config.plotname)
    # write_results(nonescaled_y, pred_vals, config.writename)

    meanSquaredError = mean_squared_error(nonescaled_y, pred_vals)
    rootMeanSquaredError = sqrt(meanSquaredError)
    print("RMSE:", rootMeanSquaredError)
    mae = mean_absolute_error(nonescaled_y, pred_vals)
    print("MAE:", mae)
    mape = mean_absolute_percentage_error(nonescaled_y, pred_vals)
    print("MAPE:", mape)
    rmse_val = RMSPE(nonescaled_y, pred_vals)
    print("RMSPE:", rmse_val)

    config.RMSE = rootMeanSquaredError
    config.MAE = mae
    config.MAPE = mape
    config.RMSPE = rmse_val

    # sess.close()
    # tf.reset_default_graph()


if __name__ == '__main__':

    start = time()

    for i in range(len(config.fileNames)):
        for j in range (config.noTimesToRun):

            config.fileName = '{}{}{}'.format('/home/suleka/Documents/sales_prediction/', config.fileNames[i], '.csv')
            # /home/suleka/Documents/sales_prediction/
            # '/home/wso2/suleka/salesPred/

            # config.plotname = '{}{}{}'.format('Sales_Prediction_testset_with_zero_bsl_plot_', config.fileNames[i],
            #                                   '.png')

            config.writename = '{}{}{}{}{}'.format('prediction_data/Sales_Prediction_testset_with_zero_bsl_results_',j ,'_',config.fileNames[i],'.csv')

            write_file = '{}{}{}{}{}'.format('test_results/test__data_',j,'_' ,config.fileNames[i], '.csv')

            config.column_min_max = config.column_min_max_all[i]

            hyperparameters = pd.read_csv('allStores_test.csv', header=None, float_precision='round_trip')

            config.num_steps = hyperparameters.iloc[i:, 1].get_values()[0].astype(np.int32)
            config.lstm_size = hyperparameters.iloc[i:, 2].get_values()[0].astype(np.int32)
            config.hidden2_nodes = hyperparameters.iloc[i:, 3].get_values()[0].astype(np.int32)
            config.hidden2_activation = hyperparameters.iloc[i:, 4].get_values()[0]
            config.hidden1_activation = hyperparameters.iloc[i:, 5].get_values()[0]
            config.hidden1_nodes = hyperparameters.iloc[i:, 6].get_values()[0].astype(np.int32)
            config.lstm_activation = hyperparameters.iloc[i:, 7].get_values()[0]
            config.init_epoch = hyperparameters.iloc[i:, 8].get_values()[0].astype(np.int32)
            config.max_epoch = hyperparameters.iloc[i:, 9].get_values()[0].astype(np.int32)
            config.learning_rate_decay = hyperparameters.iloc[i:, 10].get_values()[0].astype(np.float32)
            config.dropout_rate = hyperparameters.iloc[i:, 11].get_values()[0].astype(np.float32)
            config.batch_size = hyperparameters.iloc[i:, 12].get_values()[0].astype(np.int32)
            config.init_learning_rate = hyperparameters.iloc[i:, 13].get_values()[0].astype(np.float32)



            config.hidden1_activation = eval(config.hidden1_activation)
            config.hidden2_activation = eval(config.hidden2_activation)
            config.lstm_activation = eval(config.lstm_activation)

            train_test()
            tf.reset_default_graph()



    atexit.register(endlog)
    log("Start Program")
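In TF1, tf.set_random_seed sets a graph-level seed on the current default graph, and each random op derives its own seed from that graph-level seed plus the order in which ops are added to the graph. tf.reset_default_graph() discards the seed together with the graph, so the seed set once at import time only covers the first iteration of the loop above. To make every iteration deterministic, re-seed the fresh graph before creating any ops, and create the ops in the same order each time (forcing the LSTM cell's variables to be created at a fixed point is what the explicit cell.build call below is for):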
# Reset the default graph
tf.reset_default_graph()
# Set the random seed
tf.set_random_seed(seed)
# Build the graph
# ....
# After creating the cell, make sure to initialize it
cell.build(inputs_shape)
# Initialize all variables in the graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Train the model
# ....
# Evaluate the model
# ....
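
As a sanity check, here is a minimal, self-contained sketch of that per-run pattern. The toy placeholder graph and input data are hypothetical stand-ins rather than the question's model, but under TF 1.x two consecutive calls return identical outputs:

import numpy as np
import tensorflow as tf

def run_once(seed=1):
    # Fresh graph per run; this also discards the previous graph-level seed
    tf.reset_default_graph()
    # Re-seed the new graph before any ops are created
    tf.set_random_seed(seed)
    np.random.seed(seed)

    inputs = tf.placeholder(tf.float32, [None, 4], name="inputs")
    # The variable initializer is a random op; it draws its seed from the graph seed
    weight = tf.Variable(tf.truncated_normal([4, 1]))
    output = tf.matmul(inputs, weight)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(output, {inputs: np.ones((2, 4), dtype=np.float32)})

print(np.allclose(run_once(), run_once()))  # True: consecutive runs match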