Tensorflow 参数必须是1个整数的元组。Received或TypeError:int()参数必须是字符串、类似字节的对象或数字,而不是';列表';
我尝试使用fit_生成器和Talos(用于超参数调优)。早些时候,当我使用fit方法时,我得到了MemoryError,所以当我在这里搜索时,人们说我应该尝试使用fit_生成器。之前我给了太多的参数,所以即使使用fit_generator,我也得到了MemoryError,现在我减少了参数的数量,现在我得到了不同的错误。请在下面查找代码和错误 代码:Tensorflow 参数必须是1个整数的元组。Received或TypeError:int()参数必须是字符串、类似字节的对象或数字,而不是';列表';,tensorflow,machine-learning,keras,speech-recognition,talos,Tensorflow,Machine Learning,Keras,Speech Recognition,Talos,我尝试使用fit_生成器和Talos(用于超参数调优)。早些时候,当我使用fit方法时,我得到了MemoryError,所以当我在这里搜索时,人们说我应该尝试使用fit_生成器。之前我给了太多的参数,所以即使使用fit_generator,我也得到了MemoryError,现在我减少了参数的数量,现在我得到了不同的错误。请在下面查找代码和错误 代码: def yield_arrays_train(array_x_train_feat1=xtrain_np_img1,array_x_train_f
def yield_arrays_train(array_x_train_feat1=xtrain_np_img1,array_x_train_feat2=xtrain_np_img2,array_y_train=y_train_numpy,batch_size=6):
while 1:
for i in range(14886):
X_feat1_train = (array_x_train_feat1[i:i+batch_size,:,:].astype(np.float16))
X_feat2_train = (array_x_train_feat2[i:i+batch_size,:,:].astype(np.float16))
Y_train = (array_y_train[i:i+batch_size].astype(np.float16))
yield ([(np.array(X_feat1_train)),(np.array(X_feat2_train))],(np.array(Y_train)))
def yield_arrays_val(array_x_test_feat1,array_x_test_feat2,array_y_test,batch_size):
while 1:
for i in range(60):
X_feat1_test = (array_x_test_feat1[i:i+batch_size,:,:].astype(np.float16))
X_feat2_test = (array_x_test_feat2[i:i+batch_size,:,:].astype(np.float16))
Y_test = (array_y_test[i:i+batch_size].astype(np.float16))
yield ([(np.array(X_feat1_test)),(np.array(X_feat2_test))],(np.array(Y_test)))
def siamese (array_x_train_feat1=xtrain_np_img1,array_x_train_feat2=xtrain_np_img2,array_y_train=y_train_numpy,array_x_test_feat1=xtest_np_img1,array_x_test_feat2=xtest_np_img2,array_y_test=y_test_numpy):
W_init = tf.keras.initializers.he_normal(seed=100)
b_init = tf.keras.initializers.he_normal(seed=50)
input_shape = (24,939)
left_input = Input(input_shape)
right_input = Input(input_shape)
encoder = Sequential()
encoder.add(Conv1D(filters=8,kernel_size=6, padding='same', activation='relu',input_shape=input_shape,kernel_initializer=W_init, bias_initializer=b_init))
encoder.add(BatchNormalization())
encoder.add(Dropout(.1))
encoder.add(MaxPool1D())
encoder.add(Conv1D(filters=6,kernel_size=4, padding='same', activation='relu'))
encoder.add(BatchNormalization())
encoder.add(Dropout(.1))
encoder.add(MaxPool1D())
encoder.add(Conv1D(filters=4,kernel_size=4, padding='same', activation='relu'))
encoder.add(BatchNormalization())
encoder.add(Dropout(.1))
encoder.add(MaxPool1D())
encoder.add(Flatten())
encoder.add(Dense(10,activation='relu'))
encoder.add(Dropout(.1))
encoded_l = encoder(left_input)
encoded_r = encoder(right_input)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([encoded_l, encoded_r])
adam = optimizers.Adam(lr=.1, beta_1=0.1, beta_2=0.999,decay=.1, amsgrad=False)
earlyStopping = EarlyStopping(monitor='loss',min_delta=0,patience=3,verbose=1,restore_best_weights=False)
callback_early_stop_reduceLROnPlateau=[earlyStopping]
model = Model([left_input, right_input], distance)
model.compile(loss=contrastive_loss, optimizer=adam,metrics=[accuracy])
model.summary()
#history = model.fit([(x_train[:,:,:,0]).astype(np.float32),(x_train[:,:,:,1]).astype(np.float32)],y_train, validation_data=([(x_val[:,:,:,0]).astype(np.float32),(x_val[:,:,:,1]).astype(np.float32)], y_val) ,batch_size=params['batch_size'],epochs=params['epochs'],callbacks=callback_early_stop_reduceLROnPlateau)
history=model.fit_generator(generator=yield_arrays_train(array_x_train_feat1,array_x_train_feat2,array_y_train,6),validation_data=yield_arrays_val(array_x_test_feat1,array_x_test_feat2,array_y_test,6),steps_per_epoch=2481,epochs=5, validation_steps=1000,verbose=1,callbacks=callback_early_stop_reduceLROnPlateau,use_multiprocessing=False,workers=0)
return history,model
siamese (xtrain_np_img1,xtrain_np_img2,y_train_numpy,xtest_np_img1,xtest_np_img2,y_test_numpy)
输出:
WARNING:tensorflow:From C:\Users\DELL\AppData\Roaming\Python\Python37\site-packages\keras\backend\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 24, 939) 0
__________________________________________________________________________________________________
input_2 (InputLayer) (None, 24, 939) 0
__________________________________________________________________________________________________
sequential_1 (Sequential) (None, 10) 45580 input_1[0][0]
input_2[0][0]
__________________________________________________________________________________________________
lambda_1 (Lambda) (None, 1) 0 sequential_1[1][0]
sequential_1[2][0]
==================================================================================================
Total params: 45,580
Trainable params: 45,544
Non-trainable params: 36
__________________________________________________________________________________________________
WARNING:tensorflow:From C:\Users\DELL\anaconda3\envs\MyEnv\lib\site-packages\tensorflow\python\ops\math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From C:\Users\DELL\AppData\Roaming\Python\Python37\site-packages\keras\backend\tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
Epoch 1/5
2481/2481 [==============================] - 30s 12ms/step - loss: 0.0024 - accuracy: 0.9992 - val_loss: 0.8333 - val_accuracy: 0.1667
Epoch 2/5
2481/2481 [==============================] - 28s 11ms/step - loss: 6.9194e-05 - accuracy: 0.9999 - val_loss: 0.8333 - val_accuracy: 0.1667
Epoch 3/5
2481/2481 [==============================] - 28s 11ms/step - loss: nan - accuracy: 0.9993 - val_loss: nan - val_accuracy: 0.8333
Epoch 4/5
2481/2481 [==============================] - 28s 11ms/step - loss: nan - accuracy: 1.0000 - val_loss: nan - val_accuracy: 0.8333
Epoch 5/5
2481/2481 [==============================] - 28s 11ms/step - loss: nan - accuracy: 1.0000 - val_loss: nan - val_accuracy: 0.8333oss: nan - acc
Epoch 00005: early stopping
(<keras.callbacks.callbacks.History at 0x26cf45c6ec8>,
<keras.engine.training.Model at 0x26cf3e364c8>)
错误
错误:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-31-4c923301ede6> in <module>
1 #t=ta.Scan(x=xtrain_np_img1_img2,y=y_train_numpy,x_val=xtest_np_img1_img2,y_val=y_test_numpy,model=siamese,params=p,experiment_name='exp_1')
2
----> 3 t=ta.Scan(x=[xtrain_np_img1.astype(np.float16),xtrain_np_img2.astype(np.float16)],y=y_train_numpy,x_val=[xtest_np_img1,xtest_np_img2],y_val=y_test_numpy,model=siamese,params=p,experiment_name='exp_1')
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\Scan.py in __init__(self, x, y, params, model, experiment_name, x_val, y_val, val_split, random_method, seed, performance_target, fraction_limit, round_limit, time_limit, boolean_limit, reduction_method, reduction_interval, reduction_window, reduction_threshold, reduction_metric, minimize_loss, disable_progress_bar, print_params, clear_session, save_weights)
194 # start runtime
195 from .scan_run import scan_run
--> 196 scan_run(self)
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\scan_run.py in scan_run(self)
24 # otherwise proceed with next permutation
25 from .scan_round import scan_round
---> 26 self = scan_round(self)
27 self.pbar.update(1)
28
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\scan_round.py in scan_round(self)
17 # fit the model
18 from ..model.ingest_model import ingest_model
---> 19 self.model_history, self.round_model = ingest_model(self)
20 self.round_history.append(self.model_history.history)
21
~\anaconda3\envs\MyEnv\lib\site-packages\talos\model\ingest_model.py in ingest_model(self)
8 self.x_val,
9 self.y_val,
---> 10 self.round_params)
<ipython-input-30-fe409e1ff506> in siamese(array_x_train_feat1, array_x_train_feat2, array_y_train, array_x_test_feat1, array_x_test_feat2, array_y_test, params)
11
12 encoder = Sequential()
---> 13 encoder.add(Conv1D(filters=(params['filter1']),kernel_size=(params['kernel_size1']), padding='same', activation='relu',input_shape=input_shape,kernel_initializer=W_init, bias_initializer=b_init))
14 encoder.add(BatchNormalization())
15 encoder.add(Dropout((params['droprate1'])))
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\sequential.py in add(self, layer)
164 # and create the node connecting the current layer
165 # to the input layer we just created.
--> 166 layer(x)
167 set_inputs = True
168 else:
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\base_layer.py in __call__(self, inputs, **kwargs)
461 'You can build it manually via: '
462 '`layer.build(batch_input_shape)`')
--> 463 self.build(unpack_singleton(input_shapes))
464 self.built = True
465
~\AppData\Roaming\Python\Python37\site-packages\keras\layers\convolutional.py in build(self, input_shape)
139 name='kernel',
140 regularizer=self.kernel_regularizer,
--> 141 constraint=self.kernel_constraint)
142 if self.use_bias:
143 self.bias = self.add_weight(shape=(self.filters,),
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\base_layer.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint)
277 if dtype is None:
278 dtype = self.dtype
--> 279 weight = K.variable(initializer(shape, dtype=dtype),
280 dtype=dtype,
281 name=name,
~\anaconda3\envs\MyEnv\lib\site-packages\tensorflow\python\ops\init_ops.py in __call__(self, shape, dtype, partition_info)
513 if partition_info is not None:
514 scale_shape = partition_info.full_shape
--> 515 fan_in, fan_out = _compute_fans(scale_shape)
516 if self.mode == "fan_in":
517 scale /= max(1., fan_in)
~\anaconda3\envs\MyEnv\lib\site-packages\tensorflow\python\ops\init_ops.py in _compute_fans(shape)
1445 fan_in = shape[-2] * receptive_field_size
1446 fan_out = shape[-1] * receptive_field_size
-> 1447 return int(fan_in), int(fan_out)
1448
1449
TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'
输出
print (type((params['epochs'])))
<class 'list'>
print(类型((参数['epochs']))
因此,即使在这个博客中,我也看到输出是列表,所以我不明白为什么会出现错误。如果我执行----------print(type((p['filter1']))-----------------输出是------------所以这意味着我给出了列表,但python正在寻找int。要么我读了不正确的talos文档,要么我不能正确理解文档。似乎我无法正确编写参数字典,或者可能是其他原因。我很少看到其他的博客,我也这样写了参数字典,但他们没有得到任何错误。想知道我犯了什么错误
p = {
'filter1':[6],
'kernel_size1':[4],
'filter3' : [4],
'kernel_size3' : [6],
'decay' :[.1],
'droprate1' :[.1],
'filter2':[4],
'kernel_size2':[8],
'droprate4' : [.1],
'droprate2' :[.1],
'unit1': [10],
'droprate3': [.1],
'lr' :[.1]
}
def siamese (array_x_train_feat1=xtrain_np_img1,array_x_train_feat2=xtrain_np_img2,array_y_train=y_train_numpy,array_x_test_feat1=xtest_np_img1,array_x_test_feat2=xtest_np_img2,array_y_test=y_test_numpy,params=p):
W_init = tf.keras.initializers.he_normal(seed=100)
b_init = tf.keras.initializers.he_normal(seed=50)
input_shape = (24,939)
left_input = Input(input_shape)
right_input = Input(input_shape)
encoder = Sequential()
encoder.add(Conv1D(filters=(params['filter1']),kernel_size=(params['kernel_size1']), padding='same', activation='relu',input_shape=input_shape,kernel_initializer=W_init, bias_initializer=b_init))
encoder.add(BatchNormalization())
encoder.add(Dropout((params['droprate1'])))
encoder.add(MaxPool1D())
encoder.add(Conv1D(filters=(params['filter2']),kernel_size=(params['kernel_size2']), padding='same', activation='relu'))
encoder.add(BatchNormalization())
encoder.add(Dropout((params['droprate2'])))
encoder.add(MaxPool1D())
encoder.add(Conv1D(filters=(params['filter3']),kernel_size=(params['kernel_size3']), padding='same', activation='relu'))
encoder.add(BatchNormalization())
encoder.add(Dropout((params['droprate3'])))
encoder.add(MaxPool1D())
encoder.add(Flatten())
encoder.add(Dense((params['unit1']),activation='relu'))
encoder.add(Dropout((params['droprate4'])))
encoded_l = encoder(left_input)
encoded_r = encoder(right_input)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([encoded_l, encoded_r])
adam = optimizers.Adam(lr=params['lr'], beta_1=0.1, beta_2=0.999,decay=.1, amsgrad=False)
earlyStopping = EarlyStopping(monitor='loss',min_delta=0,patience=3,verbose=1,restore_best_weights=False)
callback_early_stop_reduceLROnPlateau=[earlyStopping]
model = Model([left_input, right_input], distance)
model.compile(loss=contrastive_loss, optimizer=adam,metrics=[accuracy])
model.summary()
history=model.fit_generator(generator=yield_arrays_train(array_x_train_feat1,array_x_train_feat2,array_y_train,6),validation_data=yield_arrays_val(array_x_test_feat1,array_x_test_feat2,array_y_test,6),steps_per_epoch=2481,epochs=5, validation_steps=1000,verbose=1,callbacks=callback_early_stop_reduceLROnPlateau,use_multiprocessing=False,workers=0)
return history,model
t=ta.Scan(x=[xtrain_np_img1.astype(np.float16),xtrain_np_img2.astype(np.float16)],y=y_train_numpy,x_val=[xtest_np_img1,xtest_np_img2],y_val=y_test_numpy,model=siamese,params=p,experiment_name='exp_1')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-31-4c923301ede6> in <module>
1 #t=ta.Scan(x=xtrain_np_img1_img2,y=y_train_numpy,x_val=xtest_np_img1_img2,y_val=y_test_numpy,model=siamese,params=p,experiment_name='exp_1')
2
----> 3 t=ta.Scan(x=[xtrain_np_img1.astype(np.float16),xtrain_np_img2.astype(np.float16)],y=y_train_numpy,x_val=[xtest_np_img1,xtest_np_img2],y_val=y_test_numpy,model=siamese,params=p,experiment_name='exp_1')
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\Scan.py in __init__(self, x, y, params, model, experiment_name, x_val, y_val, val_split, random_method, seed, performance_target, fraction_limit, round_limit, time_limit, boolean_limit, reduction_method, reduction_interval, reduction_window, reduction_threshold, reduction_metric, minimize_loss, disable_progress_bar, print_params, clear_session, save_weights)
194 # start runtime
195 from .scan_run import scan_run
--> 196 scan_run(self)
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\scan_run.py in scan_run(self)
24 # otherwise proceed with next permutation
25 from .scan_round import scan_round
---> 26 self = scan_round(self)
27 self.pbar.update(1)
28
~\anaconda3\envs\MyEnv\lib\site-packages\talos\scan\scan_round.py in scan_round(self)
17 # fit the model
18 from ..model.ingest_model import ingest_model
---> 19 self.model_history, self.round_model = ingest_model(self)
20 self.round_history.append(self.model_history.history)
21
~\anaconda3\envs\MyEnv\lib\site-packages\talos\model\ingest_model.py in ingest_model(self)
8 self.x_val,
9 self.y_val,
---> 10 self.round_params)
<ipython-input-30-fe409e1ff506> in siamese(array_x_train_feat1, array_x_train_feat2, array_y_train, array_x_test_feat1, array_x_test_feat2, array_y_test, params)
11
12 encoder = Sequential()
---> 13 encoder.add(Conv1D(filters=(params['filter1']),kernel_size=(params['kernel_size1']), padding='same', activation='relu',input_shape=input_shape,kernel_initializer=W_init, bias_initializer=b_init))
14 encoder.add(BatchNormalization())
15 encoder.add(Dropout((params['droprate1'])))
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\sequential.py in add(self, layer)
164 # and create the node connecting the current layer
165 # to the input layer we just created.
--> 166 layer(x)
167 set_inputs = True
168 else:
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\base_layer.py in __call__(self, inputs, **kwargs)
461 'You can build it manually via: '
462 '`layer.build(batch_input_shape)`')
--> 463 self.build(unpack_singleton(input_shapes))
464 self.built = True
465
~\AppData\Roaming\Python\Python37\site-packages\keras\layers\convolutional.py in build(self, input_shape)
139 name='kernel',
140 regularizer=self.kernel_regularizer,
--> 141 constraint=self.kernel_constraint)
142 if self.use_bias:
143 self.bias = self.add_weight(shape=(self.filters,),
~\AppData\Roaming\Python\Python37\site-packages\keras\engine\base_layer.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint)
277 if dtype is None:
278 dtype = self.dtype
--> 279 weight = K.variable(initializer(shape, dtype=dtype),
280 dtype=dtype,
281 name=name,
~\anaconda3\envs\MyEnv\lib\site-packages\tensorflow\python\ops\init_ops.py in __call__(self, shape, dtype, partition_info)
513 if partition_info is not None:
514 scale_shape = partition_info.full_shape
--> 515 fan_in, fan_out = _compute_fans(scale_shape)
516 if self.mode == "fan_in":
517 scale /= max(1., fan_in)
~\anaconda3\envs\MyEnv\lib\site-packages\tensorflow\python\ops\init_ops.py in _compute_fans(shape)
1445 fan_in = shape[-2] * receptive_field_size
1446 fan_out = shape[-1] * receptive_field_size
-> 1447 return int(fan_in), int(fan_out)
1448
1449
TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'
params = {'lr': (0.1, 0.01,1 ),
'epochs': [10,5,15],
'dropout': (0, 0.40, 0.8),
'optimizer': ["Adam","Adagrad","sgd"],
'loss': ["binary_crossentropy","mean_squared_error","mean_absolute_error"],
'last_activation': ["softmax","sigmoid"],
'activation' :["relu","selu","linear"],
'clipnorm':(0.0,0.5,1),
'decay':(1e-6,1e-4,1e-2),
'momentum':(0.9,0.5,0.2),
'l1': (0.01,0.001,0.0001),
'l2': (0.01,0.001,0.0001),
'No_of_CONV_and_Maxpool_layers':[1,2],
'No_of_Dense_Layers': [2,3,4],
'No_of_Units_in_dense_layers':[64,32],
'Kernal_Size':[(3,3),(5,5)],
'Conv2d_filters':[60,40,80,120],
'pool_size':[(3,3),(5,5)],
'padding':["valid","same"]
}
lr = params['lr']
epochs=params['epochs']
dropout_rate=params['dropout']
optimizer=params['optimizer']
loss=params['loss']
last_activation=params['last_activation']
activation=params['activation']
clipnorm=params['clipnorm']
decay=params['decay']
momentum=params['momentum']
l1=params['l1']
l2=params['l2']
No_of_CONV_and_Maxpool_layers=params['No_of_CONV_and_Maxpool_layers']
No_of_Dense_Layers =params['No_of_Dense_Layers']
No_of_Units_in_dense_layers=params['No_of_Units_in_dense_layers']
Kernal_Size=params['Kernal_Size']
Conv2d_filters=params['Conv2d_filters']
pool_size_p=params['pool_size']
padding_p=params['padding']
print (type((params['epochs'])))
<class 'list'>