
Too many parameters in a Tensorflow RNN


I have 50 stations measuring weather and pollution values (6 parameters per station). After one-hot encoding one column (wind direction), 8 new columns were added and the original column was dropped. In the end I have 50 * 13 = 650 columns and 35,000 records (one measurement per hour, essentially the last 4 years). Feeding all of this to an RNN model built as shown below crashes the machine by exhausting its hardware resources. I have already split the data along the time axis with TimeseriesGenerator, creating batches of 128 sequences of 24 hours each (so the batch size is 128 and 24 is the sequence length).
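The one-hot step could look like the following sketch (the DataFrame and column name are hypothetical, assuming 8 wind-direction sectors and pandas):

import pandas as pd

# Hypothetical illustration: pd.get_dummies replaces the categorical
# wind-direction column (8 sectors) with 8 indicator columns and drops the original.
df = pd.get_dummies(df, columns=['wind_direction'])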

Is there a way to run training, validation, and testing feeding one station's measurements at a time (so batches with 13 columns each)? I know this is not very clear; feel free to ask for more information.
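For the per-station idea, a minimal sketch, assuming the 650 columns are laid out station by station (13 contiguous columns each) in a NumPy array like x_train_scaled:

def station_slices(x, n_stations=50, cols_per_station=13):
    # Yield one (n_records, 13) view per station; assumes the columns
    # are grouped station by station.
    for s in range(n_stations):
        yield x[:, s * cols_per_station:(s + 1) * cols_per_station]

# e.g. build one TimeseriesGenerator per station instead of one over all 650 columns:
# for station_x in station_slices(x_train_scaled):
#     gen = TimeseriesGenerator(station_x, y_train_scaled,
#                               length=sequence_length, batch_size=128)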

from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator

# One single batch covering the whole training set
# (num_train - sequence_length + 1 sequences at once).
all_data_generator = iter(TimeseriesGenerator(data = x_train_scaled, 
                                            targets = y_train_scaled, 
                                            length = sequence_length, 
                                            shuffle=False, 
                                            batch_size=(num_train - sequence_length + 1)))

train_data_x, train_data_y = next(all_data_generator)
train_data_size = train_data_x.shape[0]

validation_data_size = int(train_data_size * validation_split)
val_data_x = train_data_x[train_data_size-validation_data_size:]
val_data_y = train_data_y[train_data_size-validation_data_size:]

train_data_x = train_data_x[0:train_data_size-validation_data_size]
train_data_y = train_data_y[0:train_data_size-validation_data_size]
train_data_size = train_data_x.shape[0]


train_generator = getBatch(x = train_data_x, y = train_data_y, batch_size = batch_size)

validation_data = (val_data_x, val_data_y)
validation_generator = getBatch(x = val_data_x, y = val_data_y, batch_size = batch_size)

test_generator = iter(TimeseriesGenerator(data=x_test_scaled, 
                                          targets=y_test_scaled, 
                                          length= sequence_length, 
                                          shuffle=False, 
                                          batch_size=(num_test - sequence_length + 1)))

test_data_x, test_data_y = next(test_generator)
test_generator = getBatch(test_data_x, test_data_y, batch_size = batch_size)
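getBatch is not shown in the question; something like the following sketch would match how it is called here (a hypothetical infinite generator that cycles through the arrays in fixed-size chunks, as fit_generator expects):

def getBatch(x, y, batch_size):
    # Hypothetical helper: yield (x, y) chunks of batch_size forever.
    n = x.shape[0]
    while True:
        for i in range(0, n, batch_size):
            yield x[i:i + batch_size], y[i:i + batch_size]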

 #### MODEL TYPE DECLARATION AND CONFIGURATION ######################################

code_name = {-1: "linear", 0: "ann_base_single", 1 : "ann_base_multi", 
             2 : "gru_single_layer", 3 : "gru_single_layer_w_dropout", 4 : "gru_multi_layer", 5 : "gru_multi_layer_2",
             6 : "gru_single_layer_w_state", 7 : "gru_single_layer_w_dropout_w_state", 8 : "gru_multi_layer_w_state", 9 : "gru_multi_2_w_state",
             10 : "lstm_single_layer", 11 : "lstm_single_layer_w_dropout", 12 : "lstm_multi_layer", 13 : "lstm_multi_layer_2",
             14 : "lstm_single_layer_w_state", 15 : "lstm_single_layer_w_dropout_w_state", 16 : "lstm_multi_layer_w_state", 17 : "lstm_multi_2_w_state", 18 : "test_conv"}
model_type = 5

from tensorflow.keras.models import Sequential
from tensorflow.keras import layers, metrics
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import Adam

model = Sequential()
if model_type == -1:
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='linear'))
elif model_type == 0:
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 1:
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 2:
    model.add(layers.GRU(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 3:
    model.add(layers.GRU(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 4:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(32, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 5:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(32, activation='relu', return_sequences=True))
    model.add(layers.GRU(16, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 6:
    model.add(layers.GRU(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 7:
    model.add(layers.GRU(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 8:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.GRU(32, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 9:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.GRU(32, activation='relu', return_sequences=True, stateful=True))
    model.add(layers.GRU(16, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 10:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 11:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 12:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.LSTM(32, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 13:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.LSTM(32, activation='relu', return_sequences=True))
    model.add(layers.LSTM(16, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 14:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 15:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 16:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.LSTM(32, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 17:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.LSTM(32, activation='relu', return_sequences=True, stateful=True))
    model.add(layers.LSTM(16, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 18:
    # Conv1D, not Conv2D: the input here is 3-D (batch, time, features),
    # and the output must stay 3-D to feed the GRU below.
    model.add(layers.Conv1D(filters=x_data.shape[1], kernel_size=1, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))


model.compile(optimizer=Adam(), loss='mae', metrics=[metrics.mae, 'accuracy'])
model.summary()
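For scale, the parameter count of the first GRU layer in model_type 5 can be checked by hand. Using the classic Keras GRU formula (three gates, each with input, recurrent, and bias weights; newer versions with reset_after=True add a second bias vector per gate):

units, input_dim = 64, 650
gru_params = 3 * (units * input_dim + units * units + units)
print(gru_params)  # 137280

So the weights themselves are modest; as the comments below point out, it is the data batches, not the parameters, that dominate memory use.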
Running on an Asus N550 with an i7-4700HQ and 8 GB of RAM.

Edit: here is a summary of the data:

[5 rows x 713 columns]
Station    2000003                             ...               Time
Parameter        0        1             2      ...               H_21          H_22          H_23
count      43824.0  43824.0  43824.000000      ...       43824.000000  43824.000000  43824.000000
mean           0.0      0.0     25.085629      ...           0.041667      0.041667      0.041667
std            0.0      0.0     16.950108      ...           0.199829      0.199829      0.199829
min            0.0      0.0      1.000000      ...           0.000000      0.000000      0.000000
25%            0.0      0.0     11.000000      ...           0.000000      0.000000      0.000000
50%            0.0      0.0     21.000000      ...           0.000000      0.000000      0.000000
75%            0.0      0.0     35.000000      ...           0.000000      0.000000      0.000000
max            0.0      0.0    134.000000      ...           1.000000      1.000000      1.000000

Given that you only have 8 GB of RAM, this may well be a hard task for your current hardware.

I would start by stripping the model down as much as possible and seeing whether it can cope, perhaps beginning by removing the two middle GRU layers, as sketched below.
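A minimal sketch of such a stripped-down variant of model_type 5 (a single small GRU; the unit count here is just a guess):

model = Sequential()
model.add(layers.GRU(32, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
model.add(layers.Dense(num_y_signals, activation='sigmoid'))
model.compile(optimizer=Adam(), loss='mae', metrics=[metrics.mae, 'accuracy'])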


You can also try setting a small batch size in model.fit; 8, 16, or 32 could be good numbers to start with.
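For example, using the arrays built above (with the generator pipeline, the equivalent knob is the batch_size passed to getBatch):

model.fit(train_data_x, train_data_y,
          batch_size=16,  # try 8, 16 or 32
          epochs=10,      # arbitrary, for illustration
          validation_data=(val_data_x, val_data_y))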

Comments:

If you are crashing due to hardware limitations, I suggest you add your specs to the question.

Added, even though I think the problem is having too many features at the same time.

Yes, definitely. I suppose you don't have a GPU you can use? Can you also show model.summary()?

Reduce the batch size, it is the key driver of memory usage. Also, don't use a laptop for deep learning :)

I know a laptop is not the best choice, but it is what I have, ahah.

I reduced the batch size; it seems the out-of-memory happens before training, in the TimeseriesGenerator step.
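That last comment matches the code above: batch_size=(num_train - sequence_length + 1) asks TimeseriesGenerator for the whole training set as one batch, i.e. an array of shape (num_train - sequence_length + 1, 24, 650). With float64 data that is roughly 35,000 * 24 * 650 * 8 bytes ≈ 4.4 GB, over half the available RAM, before training even starts. A sketch of the lazier alternative, feeding the generator straight to training with a small batch size (assuming the older fit_generator API; newer model.fit also accepts generators):

train_generator = TimeseriesGenerator(data=x_train_scaled,
                                      targets=y_train_scaled,
                                      length=sequence_length,
                                      shuffle=False,
                                      batch_size=16)  # small batches, built lazily per step

model.fit_generator(train_generator,
                    epochs=10,  # arbitrary, for illustration
                    validation_data=validation_data)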