Unpooling in Keras/tf


I am trying to implement unpooling for an autoencoder based on VGG. Unpooling essentially records which positions the encoder's max-pooling selected and then restores the values to those same positions in the decoder. An example is illustrated in the figure.
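
To make the idea concrete, here is a minimal NumPy sketch (my addition, not part of the original post) of the mask trick on a single 2x2 pooling window: the mask is 1 only where the original value equals the pooled maximum, so multiplying the upsampled map by the mask puts the max back at its original position.

import numpy as np

window = np.array([[1., 3.],
                   [4., 2.]])                    # one 2x2 window before max-pooling
pooled = window.max()                            # 2x2 max-pool -> 4.0
upsampled = np.full_like(window, pooled)         # nearest-neighbour upsample back to 2x2
mask = (window >= upsampled).astype(np.float32)  # 1.0 only at the argmax position
print(mask * upsampled)
# [[0. 0.]
#  [4. 0.]]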

Below is a simple implementation I have written:

from keras.layers import Input, Conv2D, MaxPooling2D, GlobalMaxPooling2D, UpSampling2D, Lambda
from keras.models import Model
from keras.utils import plot_model
from keras.utils.data_utils import get_file
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf

def unpool(args):
    # Element-wise multiply the upsampled activations by the pooling mask.
    mask, x = args
    return keras.layers.multiply([mask, x])

def mask_make(xt):
    # Pool and immediately upsample, then mark the positions whose value
    # equals the local maximum.
    t = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(xt)
    t = UpSampling2D()(t)

    bool_mask = K.greater_equal(xt, t)
    mask = K.cast(bool_mask, dtype='float32')

    mask_input = Input(tensor=mask)  # Wraps the mask as a Keras tensor to use as input
    return mask_input

def vgg_layers(inputs, target_layer):
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    m1 = mask_make(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    m2 = mask_make(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
    m3 = mask_make(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
    m4 = mask_make(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)

    # Decoder

    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='decoder_block5_conv1')(x)

    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='decoder_block4_conv4')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='decoder_block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='decoder_block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='decoder_block4_conv1')(x)
    x = UpSampling2D((2, 2), name='decoder_block4_upsample')(x)
    x = Lambda(unpool)([m4, x])

    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='decoder_block3_conv4')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='decoder_block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='decoder_block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='decoder_block3_conv1')(x)
    x = UpSampling2D((2, 2), name='decoder_block3_upsample')(x)
    x = Lambda(unpool)([m3, x])

    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='decoder_block2_conv2')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='decoder_block2_conv1')(x)
    x = UpSampling2D((2, 2), name='decoder_block2_upsample')(x)
    x = Lambda(unpool)([m2, x])

    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='decoder_block1_conv2')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='decoder_block1_conv1')(x)
    x = UpSampling2D((2, 2), name='decoder_block1_upsample')(x)
    x = Lambda(unpool)([m1, x])

    masks = [m1, m2, m3, m4]
    return x, masks

def VGG19(input_tensor=None, input_shape=(32,32,3), target_layer=1):
    """
    VGG19, up to the target layer (1 for relu1_1, 2 for relu2_1, etc.)
    """
    if input_tensor is None:
        inputs = Input(shape=input_shape)
    else:
        inputs = Input(tensor=input_tensor, shape=input_shape)
    layers, masks = vgg_layers(inputs, target_layer)
    model = Model(inputs, layers, name='vgg19')
    plot_model(model, to_file='model.png')
    #load_weights(model)
    #return model, masks

VGG19()
However, I get "ValueError: Graph disconnected:".


Please help me solve this.

Solved it using Concatenate: instead of wrapping the mask in a separate Input (which leaves it disconnected from the model's input), the pre-pool tensor and the upsampled post-pool tensor are concatenated and compared inside Lambda layers, so every mask stays part of the same graph.

from keras.layers import Reshape, Concatenate  # used below, missing from the imports above
import h5py                                    # needed by load_weights

def mask_make(x, orig):
    # Upsample the pooled map back to the pre-pool size and compare it with the
    # original activations: positions that equal the pooled maximum become 1.0.
    t = UpSampling2D()(x)
    _, a, b, c = orig.shape

    # Stack the original and upsampled tensors so they can be compared inside a
    # single Lambda layer; this keeps the mask connected to the model's graph.
    xReshaped = Reshape((1, a * b * c))(t)
    origReshaped = Reshape((1, a * b * c))(orig)
    together = Concatenate(axis=-1)([origReshaped, xReshaped])
    togReshaped = Reshape((2, a, b, c))(together)

    bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]))(togReshaped)
    mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
    return mask

def vgg_layers(inputs, target_layer):
    masks = []
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    if target_layer == 1:
        return x
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    orig = x 
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    masks.append(mask_make(x, orig))

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    if target_layer == 2:
        return x
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    orig = x 
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    masks.append(mask_make(x, orig))

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    if target_layer == 3:
        return x
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
    orig = x 
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    masks.append(mask_make(x, orig))

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    if target_layer == 4:
        return x
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
    orig = x 
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    masks.append(mask_make(x, orig))

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    return x, masks


def load_weights(model):
    # WEIGHTS_PATH should point to a pretrained VGG19 weights file in HDF5 format.
    f = h5py.File(WEIGHTS_PATH, 'r')
    layer_names = [name for name in f.attrs['layer_names']]

    # Copy pretrained weights into layers whose names appear in the file, then freeze them.
    for layer in model.layers:
        b_name = layer.name.encode()
        if b_name in layer_names:
            g = f[b_name]
            weights = [g[name] for name in g.attrs['weight_names']]
            layer.set_weights(weights)
            layer.trainable = False

    f.close()


def VGG19(input_tensor=None, input_shape=None, target_layer=1):
    """
    VGG19, up to the target layer (1 for relu1_1, 2 for relu2_1, etc.)
    """
    if input_tensor is None:
        inputs = Input(shape=input_shape)
    else:
        inputs = Input(tensor=input_tensor, shape=input_shape)
    layers, masks = vgg_layers(inputs, target_layer)
    model = Model(inputs, layers, name='vgg19')
    load_weights(model)
    return model, masks
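
For completeness, here is a hedged sketch (my addition, not from the answer) of how the returned masks could be consumed by a decoder: after each UpSampling2D, multiply by the matching mask so that only the positions selected by the encoder's max-pool keep their values. The decoder_block helper and the target_layer=5 call are illustrative, not part of the original code.

from keras.layers import Multiply

def decoder_block(x, mask, filters, block_id):
    # Upsample, re-apply the encoder's pooling mask, then convolve.
    x = UpSampling2D((2, 2), name='decoder_block%d_upsample' % block_id)(x)
    x = Multiply(name='decoder_block%d_unpool' % block_id)([mask, x])
    x = Conv2D(filters, (3, 3), activation='relu', padding='same',
               name='decoder_block%d_conv1' % block_id)(x)
    return x

# Illustrative usage (weights file and h5py setup omitted):
# encoder, masks = VGG19(input_shape=(32, 32, 3), target_layer=5)
# y = decoder_block(encoder.output, masks[-1], 512, block_id=4)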


Have you tried debugging? Where exactly does the error occur?