Combining a CNN with an attention network in Keras


Here is my attention layer:

from keras import backend as K
from keras import initializers
from keras.engine.topology import Layer

class Attention(Layer):
    def __init__(self, **kwargs):
        self.init = initializers.get('normal')
        self.supports_masking = True
        self.attention_dim = 50
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = K.variable(self.init((input_shape[-1], self.attention_dim)))
        self.b = K.variable(self.init((self.attention_dim, )))
        self.u = K.variable(self.init((self.attention_dim, 1)))
        self.trainable_weights = [self.W, self.b, self.u]
        super(Attention, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        return mask

    def call(self, x, mask=None):
        uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
        ait = K.dot(uit, self.u)
        ait = K.squeeze(ait, -1)
        ait = K.exp(ait)

        if mask is not None:
            ait *= K.cast(mask, K.floatx())

        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)
        weighted_input = x * ait
        output = K.sum(weighted_input, axis=1)
        return output

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
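
For reference, this layer collapses a 3D input of shape (batch, timesteps, features) into a 2D output of shape (batch, features) through a softmax-normalised weighted sum over the timestep axis. A minimal standalone sketch of how it can be exercised on its own; the shapes and layer sizes below are illustrative assumptions, chosen to mirror the BiGRU + Dense pattern used further down:

# Standalone usage sketch (shapes are assumptions, not taken from the question).
from keras.layers import Input, Dense, GRU, Bidirectional
from keras.models import Model

words_in = Input(shape=(200, 300))                            # 200 timesteps of 300-d vectors
h = Bidirectional(GRU(50, return_sequences=True))(words_in)   # -> (batch, 200, 100)
h = Dense(100, activation='relu')(h)                          # -> (batch, 200, 100)
doc_vector = Attention(name='toy_word_attention')(h)          # -> (batch, 100)
Model(words_in, doc_vector).summary()
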
I am trying to combine a CNN with an attention network for text classification. Here is my code in Keras:

def inputs_and_embeddings(features, config):
    inputs, embeddings = [], []
    for f in features:
        E = Embedding if not config.fixed_embedding else FixedEmbedding
        # i = Input(shape=(config.doc_size,), dtype='int32', name=f.name)
        i = Input(shape=(config.doc_size,), dtype='int32', name=f.name)
        e = E(f.input_dim, f.output_dim, weights=[f.weights],
              input_length=config.doc_size)(i)
        inputs.append(i)
        embeddings.append(e)
    return inputs, embeddings

inputs, embeddings = inputs_and_embeddings(features, config)

# calculating the size of documents and all features
seq = concat(embeddings)
cshape = (config.doc_size, sum(f.output_dim for f in features))
seq = Reshape((1,) + cshape)(seq)

# seq = Reshape((1, config.doc_size, w2v.output_dim))(embeddings)  # old way of doing the above

# seq = Bidirectional()
# Convolution(s)
convLayers = []
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
    seq2 = Convolution2D(
        filter_num,
        filter_size,
        cshape[1],
        border_mode='valid',
        activation='relu',
        dim_ordering='th'
    )(seq)
    seq2 = MaxPooling2D(
        pool_size=(config.doc_size - filter_size + 1, 1),
        dim_ordering='th'
    )(seq2)
    # seq2 = Flatten()(seq2)
    convLayers.append(seq2)

seq = Concatenate(axis=1)(convLayers)
if config.drop_prob:
    seq = Dropout(config.drop_prob)(seq)
for s in config.hidden_sizes:
    seq = Dense(s, activation='relu')(seq)

# need reshaping here
seq = Reshape((200, 3))(seq)
word_encoder = Bidirectional(GRU(50, return_sequences=True))(seq)
rnn_type = 'GRU'

dense_transform_word = Dense(
    100,
    activation='relu', kernel_regularizer=l2_reg,
    name='dense_transform_word')(word_encoder)

# word attention
attention_weighted_sentence = Model(
    inputs, Attention(name="word_attention")(dense_transform_word))

word_attention_model = attention_weighted_sentence

attention_weighted_sentence.summary()

# sentence-attention-weighted document scores
texts_in = Input(shape=(MAX_SEQ_LEN, config.doc_size), dtype='int32', name="input_2")

attention_weighted_sentences = TimeDistributed(attention_weighted_sentence)(texts_in)

if rnn_type == 'GRU':
    # sentence_encoder = Bidirectional(GRU(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.2))(attention_weighted_sentences)
    dropout = Dropout(0.1)(attention_weighted_sentences)
    sentence_encoder = Bidirectional(GRU(50, return_sequences=True))(dropout)
else:
    sentence_encoder = Bidirectional(LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.2))(attention_weighted_sentences)

dense_transform_sentence = Dense(
    100,
    activation='relu',
    name='dense_transform_sentence',
    kernel_regularizer=l2_reg)(sentence_encoder)

# sentence attention
attention_weighted_text = Attention(name="sentence_attention")(dense_transform_sentence)

prediction = Dense(19, activation='sigmoid')(attention_weighted_text)

model = Model(inputs, prediction)
model.summary()


When I initialize the model with the inputs and prediction shown in the code, I get a "Graph disconnected" error. From what I have read, this error occurs when there is no connection between a model's inputs and its outputs, but I cannot work out what the inputs of my model should be. Could someone help me?
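
For context, Keras raises "Graph disconnected" when some tensor needed to compute the model's outputs was produced by an Input layer that is not listed among the model's inputs. A minimal sketch of the failure mode, using toy names that are not from the code above:

# Toy illustration: `out` depends on the Input tensor `y`, but `y` is not
# declared as a model input, so Keras cannot trace a complete path from the
# declared inputs to the output.
from keras.layers import Input, Dense, concatenate
from keras.models import Model

x = Input(shape=(10,), name='x')
y = Input(shape=(10,), name='y')
out = Dense(1)(concatenate([Dense(5)(x), Dense(5)(y)]))

# Model(x, out)             # raises "Graph disconnected": y feeds out but is missing
model = Model([x, y], out)  # works: every Input on the path to out is declared

In the code above, prediction is reached from texts_in, an Input tensor that does not appear in inputs, which matches this failure mode.
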

Since the line inputs, embeddings = inputs_and_embeddings(features, config) appears in your code, the function defined by def inputs_and_embeddings should contain a return statement, but I could not find one.

Sorry, I had not pasted it correctly before; please have a look now. @guorui, could you advise me?

Yes, I will try. Please try the updated code below; if there is any error, comment here.

It gives the following error: AttributeError: 'Tensor' object has no attribute 'built', at the line attention_weighted_sentences = TimeDistributed(outputs)(texts_in).

Hi, I am heading back to my dormitory now since it is already quite late in China; I will look into the error you reported tomorrow.

I found the root cause of the AttributeError: in the attention part I am passing, as the argument to the constructor, an object that derives from keras.engine.topology.Layer, and the built-in attributes this Layer adds automatically are not compatible with TimeDistributed. However, I have not found a solution yet.
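
For what it is worth, that error message is consistent with TimeDistributed being handed a Keras tensor (outputs) rather than a layer: the wrapper tries to read the built attribute of the object it wraps, and a plain tensor has none. A minimal, self-contained sketch of the distinction (toy sizes, not taken from the model below):

# Toy illustration: TimeDistributed must wrap a Layer or a Model
# (a Model is itself a Layer), never a plain Keras tensor.
from keras.layers import Input, Dense, TimeDistributed
from keras.models import Model

step_in = Input(shape=(8,))
step_out = Dense(4)(step_in)             # `step_out` is a Tensor, not a Layer
step_model = Model(step_in, step_out)    # a Model can be wrapped

seq_in = Input(shape=(5, 8))             # 5 timesteps of 8 features
# TimeDistributed(step_out)(seq_in)      # AttributeError: 'Tensor' object has no attribute 'built'
seq_out = TimeDistributed(step_model)(seq_in)   # applies the sub-model to every timestep

Wrapping the whole word-level Model, as the question's original TimeDistributed(attention_weighted_sentence) line does, is the usual way to apply the same sub-network to every sentence.
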
def inputs_and_embeddings(features, config):
    inputs, embeddings = [], []
    for f in features:
        E = Embedding if not config.fixed_embedding else FixedEmbedding
        # i = Input(shape=(config.doc_size,), dtype='int32', name=f.name)
        i = Input(shape=(config.doc_size,), dtype='int32', name=f.name)
        e = E(f.input_dim, 
              f.output_dim, 
              weights=[f.weights],
              input_length=config.doc_size)(i)
        inputs.append(i)
        embeddings.append(e)
    return inputs, embeddings

inputs, embeddings = inputs_and_embeddings(features, config)
#calculating the size of documents and all features.
seq = concat(embeddings)
cshape = (config.doc_size, sum(f.output_dim for f in features)) 
seq = Reshape((1,)+cshape)(seq)

#seq = Reshape((1, config.doc_size, w2v.output_dim))(embeddings) #old way of doing the above

# seq = Bidirectional()
# Convolution(s)
convLayers = []
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
    seq2 = Convolution2D(
        filter_num,
        filter_size,
        cshape[1],
        border_mode='valid',
        activation='relu',
        dim_ordering='th'
    )(seq)
    seq2 = MaxPooling2D(
        pool_size=(config.doc_size-filter_size+1, 1),
        dim_ordering='th'
    )(seq2)
    # seq2 = Flatten()(seq2)
    convLayers.append(seq2)


seq = Concatenate(axis=1)(convLayers)
if config.drop_prob:
    seq = Dropout(config.drop_prob)(seq)
for s in config.hidden_sizes:
    seq = Dense(s, activation='relu')(seq)

#need reshaping here
seq = Reshape((200,3))(seq)
word_encoder = Bidirectional(GRU(50, return_sequences=True))(seq) 
rnn_type = 'GRU'  

dense_transform_word = Dense(
        100, 
        activation='relu', kernel_regularizer=l2_reg,
        name='dense_transform_word')(word_encoder)


outputs = Attention(name="word_attention")(dense_transform_word)
# word attention
attention_weighted_sentence = Model(
        inputs, outputs)

word_attention_model = attention_weighted_sentence

attention_weighted_sentence.summary()

# sentence-attention-weighted document scores

texts_in = Input(shape=(MAX_SEQ_LEN,config.doc_size), dtype='int32', name="input_2")

attention_weighted_sentences = TimeDistributed(outputs)(texts_in)



if rnn_type == 'GRU':
    # sentence_encoder = Bidirectional(GRU(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.2))(attention_weighted_sentences)
    dropout = Dropout(0.1)(attention_weighted_sentences)
    sentence_encoder = Bidirectional(GRU(50, return_sequences=True))(dropout)
else:
    sentence_encoder = Bidirectional(LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.2))(attention_weighted_sentences)


dense_transform_sentence = Dense(
        100, 
        activation='relu', 
        name='dense_transform_sentence',
        kernel_regularizer=l2_reg)(sentence_encoder)

# sentence attention
attention_weighted_text = Attention(name="sentence_attention")(dense_transform_sentence)


prediction = Dense(19, activation='sigmoid')(attention_weighted_text)

model = Model([inputs, texts_in], prediction)
model.summary()
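
One more detail worth checking in the final line, offered as an observation on the Keras functional API rather than something settled in the thread above: inputs returned by inputs_and_embeddings is already a Python list of Input tensors, and Model expects a single flat list of inputs rather than a nested one, so the combined model would normally be built as:

# Assumes `inputs` is the list of Input tensors returned by inputs_and_embeddings.
model = Model(inputs + [texts_in], prediction)   # flat list of all Input tensors
model.summary()
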