Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/python/315.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181

Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/tensorflow/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python 在重训练vgg-16模型上实现LRP对真实图像和篡改图像进行预测和分类_Python_Tensorflow_Keras_Vgg Net - Fatal编程技术网

Python 在重训练vgg-16模型上实现LRP对真实图像和篡改图像进行预测和分类

Python 在重训练vgg-16模型上实现LRP对真实图像和篡改图像进行预测和分类,python,tensorflow,keras,vgg-net,Python,Tensorflow,Keras,Vgg Net,我们正在尝试在重新培训的VGG-16模型上实现LRP 我们模型的总结 这是我们的LRP代码,上面的代码中有两个错误 TypeError:无法将符号Keras输入/输出转换为numpy数组。此错误可能表示您试图将符号值传递给NumPy调用,这是不受支持的。或者,您可能试图将Keras符号输入/输出传递给不注册分派的tfapi,从而阻止Keras自动将API调用转换为功能模型中的lambda层 及 >在backprop_conv2d中(self、w、b、a、r、跨步) 70 b_p=K.最大值(b

我们正在尝试在重新培训的VGG-16模型上实现LRP

我们模型的总结

这是我们的LRP代码,上面的代码中有两个错误

TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.

> in backprop_conv2d(self, w, b, a, r, strides)
     70     b_p = K.maximum(b, 0.)
     71     z_p = K.conv2d(a, kernel=w_p, strides=strides[1:-1], padding='same') + b_p + self.epsilon
---> 72     s_p = r / z_p
     73     c_p = conv2d_backprop_input(K.shape(a), w_p, s_p, strides, padding='SAME')
ValueError: Dimensions must be equal, but are 2 and 64 for '{{node tf.math.truediv_1/truediv}} = RealDiv[T=DT_FLOAT](Placeholder, Placeholder_1)' with input shapes: [?,2], [?,128,128,64].

我们无法理解为什么会出现这个错误,因为我们对LRP实现还不熟悉

class LayerwiseRelevancePropagation:
    """Layer-wise Relevance Propagation (alpha-beta rule) for a Keras CNN.

    Builds a symbolic relevance tensor by walking the model's layers from
    the output back toward the input, redistributing relevance through
    dense, flatten, max-pool and conv2d layers with the alpha-beta
    decomposition (alpha + beta = 1).

    NOTE(review): this assumes ``utilss.get_model_params`` returns
    ``names`` / ``activations`` / ``weights`` ordered so that index
    ``i + 1`` is the layer whose input activation is ``activations[i]``
    when walking from the output backward — confirm against that helper.
    The reported ValueError ([?,2] divided by [?,128,128,64]) means the
    starting relevance (``model.output``, shape [?,2]) reached a conv
    layer's pre-activation directly, i.e. the ordering/indexing returned
    by the helper does not match the model's actual layer order.
    """

    def __init__(self, model, alpha=2, epsilon=1e-7):
        """Store the model, derive beta = 1 - alpha, and pre-build the LRP graph.

        :param model: trained Keras model (e.g. retrained VGG-16).
        :param alpha: weight of the positive-contribution term; beta is
            derived so that alpha + beta == 1.
        :param epsilon: small stabilizer added to denominators to avoid
            division by zero.
        """
        self.model = model
        self.alpha = alpha
        self.beta = 1 - alpha
        self.epsilon = epsilon

        # utilss is a project-local helper that extracts per-layer
        # names, activation tensors and weight lists from the model.
        self.names, self.activations, self.weights = utilss.get_model_params(self.model)
        self.num_layers = len(self.names)

        # Build the symbolic relevance graph once; lrp_runner evaluates
        # it for concrete input images.
        self.relevance = self.compute_relevances()
        self.lrp_runner = K.function(inputs=[self.model.input, ], outputs=[self.relevance, ])

    def compute_relevances(self):
        """Walk the layers and back-propagate relevance from the model output.

        :returns: symbolic relevance tensor at the model input.
        :raises Exception: when a layer type is not handled.
        """
        r = self.model.output
        for i in range(self.num_layers - 1):
            name = self.names[i + 1]
            if 'dense' in name:
                r = self.backprop_fc(self.weights[i + 1][0], self.weights[i + 1][1],
                                     self.activations[i], r)
            elif 'flatten' in name:
                r = self.backprop_flatten(self.activations[i], r)
            elif 'pool' in name:
                r = self.backprop_max_pool2d(self.activations[i], r)
            elif 'conv' in name:
                r = self.backprop_conv2d(self.weights[i + 1][0], self.weights[i + 1][1],
                                         self.activations[i], r)
            elif 'dropout' in name:
                # Dropout is identity at inference time: relevance passes
                # through unchanged.  (Replaced a stray debug print.)
                pass
            else:
                # BUG FIX: the original had an unreachable ``sys.exit()``
                # after this raise; raising alone terminates the loop.
                raise Exception('Layer not recognized!')
        return r

    def backprop_fc(self, w, b, a, r):
        """Alpha-beta LRP rule through a fully-connected layer.

        :param w: layer kernel, shape (in_dim, out_dim).
        :param b: layer bias, shape (out_dim,).
        :param a: input activation of the layer, shape (batch, in_dim).
        :param r: relevance arriving at the layer output, shape (batch, out_dim).
        """
        # Positive contributions.
        w_p = K.maximum(w, 0.)
        b_p = K.maximum(b, 0.)
        z_p = K.dot(a, w_p) + b_p + self.epsilon
        s_p = r / z_p
        c_p = K.dot(s_p, K.transpose(w_p))

        # Negative contributions.
        w_n = K.minimum(w, 0.)
        b_n = K.minimum(b, 0.)
        z_n = K.dot(a, w_n) + b_n - self.epsilon
        s_n = r / z_n
        c_n = K.dot(s_n, K.transpose(w_n))

        return a * (self.alpha * c_p + self.beta * c_n)

    def backprop_flatten(self, a, r):
        """Reshape flat relevance back to the spatial shape of activation ``a``."""
        shape = a.get_shape().as_list()
        shape[0] = -1  # keep the batch dimension dynamic
        return K.reshape(r, shape)

    def backprop_max_pool2d(self, a, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
        """Redistribute relevance through a max-pool layer (winner-take-all).

        BUG FIX: the original body referenced undefined names ``X``, ``F``
        and ``padding`` (copy-pasted from another implementation); it now
        uses the actual parameters ``a`` (layer input activation) and
        ``r`` (incoming relevance), with 'SAME' padding to match the
        forward pass used in ``backprop_conv2d``.
        """
        a_shape = a.get_shape().as_list()
        r_shape = r.get_shape().as_list()
        if len(a_shape) != len(r_shape):
            # Incoming relevance was flattened upstream; restore the
            # pooled spatial layout ('SAME' padding => ceil division).
            r = tf.reshape(r, (-1,
                               int(np.ceil(a_shape[1] / 2.0)),
                               int(np.ceil(a_shape[2] / 2.0)),
                               a_shape[3]))
        z = tf.nn.max_pool(a, ksize=ksize, strides=strides, padding='SAME') + 1e-9
        s = r / z
        c = gen_nn_ops._max_pool_grad(a, z, s, ksize, strides, 'SAME')
        return a * c

    def backprop_conv2d(self, w, b, a, r, strides=(1, 1, 1, 1)):
        """Alpha-beta LRP rule through a conv2d layer.

        :param w: conv kernel, shape (kh, kw, in_ch, out_ch).
        :param b: bias, shape (out_ch,).
        :param a: input activation, shape (batch, H, W, in_ch).
        :param r: relevance at the layer output — must match the forward
            conv output shape (the reported ValueError fires here when it
            does not; see the class docstring).
        """
        # Positive contributions.
        w_p = K.maximum(w, 0.)
        b_p = K.maximum(b, 0.)
        z_p = K.conv2d(a, kernel=w_p, strides=strides[1:-1], padding='same') + b_p + self.epsilon
        s_p = r / z_p
        c_p = conv2d_backprop_input(K.shape(a), w_p, s_p, strides, padding='SAME')

        # Negative contributions.
        w_n = K.minimum(w, 0.)
        b_n = K.minimum(b, 0.)
        z_n = K.conv2d(a, kernel=w_n, strides=strides[1:-1], padding='same') + b_n - self.epsilon
        s_n = r / z_n
        c_n = conv2d_backprop_input(K.shape(a), w_n, s_n, strides, padding='SAME')

        return a * (self.alpha * c_p + self.beta * c_n)

    def predict_labels(self, images):
        """Delegate to the module-level ``predict_labels`` helper."""
        return predict_labels(self.model, images)

    def run_lrp(self, images):
        """Evaluate the pre-built LRP graph on a batch of images."""
        print("Running LRP on {0} images...".format(len(images)))
        return self.lrp_runner([images, ])[0]

    def compute_heatmaps(self, images, g=0.2, cmap_type='rainbow', **kwargs):
        """Run LRP, gamma-correct the relevances, and render heatmaps."""
        lrps = self.run_lrp(images)
        print("LRP run successfully...")
        gammas = get_gammas(lrps, g=g, **kwargs)
        print("Gamma Correction completed...")
        heatmaps = get_heatmaps(gammas, cmap_type=cmap_type, **kwargs)
        return heatmaps
> <ipython-input-49-0e52a9054e67> in backprop_conv2d(self, w, b, a, r, strides)
      70     b_p = K.maximum(b, 0.)
      71     z_p = K.conv2d(a, kernel=w_p, strides=strides[1:-1], padding='same') + b_p + self.epsilon
 ---> 72     s_p = r / z_p
      73     c_p = conv2d_backprop_input(K.shape(a), w_p, s_p, strides, padding='SAME')