Converging an LSTM network with TensorFlow in Python

I am trying to detect micro-events in a long time series, and for that purpose I am going to train an LSTM network.

Data. The input for each time sample is 11 different features, somewhat normalized to fit 0-1. The output is either one of two classes.
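(As a point of reference, a minimal min-max scaling of that kind might look as follows; the array name and dummy data are purely illustrative.)

    import numpy as np

    features = np.random.rand(1000, 11) * 50.0   # dummy stand-in for the real feature matrix
    mins = features.min(axis=0)
    maxs = features.max(axis=0)
    features = (features - mins) / (maxs - mins + 1e-8)   # every column now lies in [0, 1]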

Batching. Due to the huge class imbalance I have extracted the data in batches of 60 time samples each, of which at least 5 will always be class 1 and the rest class 0. In this way the class imbalance is reduced from roughly 150:1 to about 12:1, after which I have randomized the order of all my batches.
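(One way such 60-sample batches could be extracted is sketched below; this is only an illustration with hypothetical names, since the asker's actual reader, ar_reader.ArousalData, is not shown in the post.)

    import random
    import numpy as np

    def extract_batches(features, labels, batch_size=60, min_minority=5, seed=0):
        # features: (num_samples, num_features); labels: (num_samples,) with values 0/1.
        # Keep only windows of 60 consecutive samples that contain at least 5
        # class-1 samples, then randomize the order of the kept windows.
        batches = []
        for start in range(0, len(labels) - batch_size + 1, batch_size):
            window = slice(start, start + batch_size)
            if labels[window].sum() >= min_minority:
                batches.append((features[window], labels[window]))
        random.Random(seed).shuffle(batches)
        return batches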

Model. I am attempting to train an LSTM, with an initial configuration of 3 different cells with 5 delay steps. I expect the micro-events to arrive in sequences of at least 3 time steps.

Problem: When I try to train the network, it quickly converges towards saying that EVERYTHING belongs to the majority class. When I implement a weighted loss function, past a certain threshold it flips to saying that EVERYTHING belongs to the minority class. I suspect (without being an expert) that my LSTM cells are not learning, or that my configuration is off.

Below is the code for my implementation. I am hoping that someone can tell me:

  • Is my implementation correct?
  • What other reasons could there be for this kind of behaviour?
ar_model.py

import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn
import ar_config

config = ar_config.get_config()


class ARModel(object):

    def __init__(self, is_training=False, config=None):

        # Config
        if config is None:
            config = ar_config.get_config()

        # Placeholders
        self._features = tf.placeholder(tf.float32, [None, config.num_features], name='ModelInput')
        self._targets = tf.placeholder(tf.float32, [None, config.num_classes], name='ModelOutput')

        # Hidden layer
        with tf.variable_scope('lstm') as scope:
            lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.num_hidden, forget_bias=0.0)
            cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_delays)
            self._initial_state = cell.zero_state(config.batch_size, dtype=tf.float32)
            outputs, state = rnn.rnn(cell, [self._features], dtype=tf.float32)

        # Output layer
        output = outputs[-1]
        softmax_w = tf.get_variable('softmax_w', [config.num_hidden, config.num_classes], tf.float32)
        softmax_b = tf.get_variable('softmax_b', [config.num_classes], tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b

        # Evaluate
        ratio = (60.00 / 5.00)
        class_weights = tf.constant([ratio, 1 - ratio])
        weighted_logits = tf.mul(logits, class_weights)
        loss = tf.nn.softmax_cross_entropy_with_logits(weighted_logits, self._targets)
        self._cost = cost = tf.reduce_mean(loss)
        self._predict = tf.argmax(tf.nn.softmax(logits), 1)
        self._correct = tf.equal(tf.argmax(logits, 1), tf.argmax(self._targets, 1))
        self._accuracy = tf.reduce_mean(tf.cast(self._correct, tf.float32))
        self._final_state = state

        if not is_training:
            return

        # Optimize
        optimizer = tf.train.AdamOptimizer()
        self._train_op = optimizer.minimize(cost)


    @property
    def features(self):
        return self._features

    @property
    def targets(self):
        return self._targets

    @property
    def cost(self):
        return self._cost

    @property
    def accuracy(self):
        return self._accuracy

    @property
    def train_op(self):
        return self._train_op

    @property
    def predict(self):
        return self._predict

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def final_state(self):
        return self._final_state
ar_train.py

import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import ar_network
import ar_config
import ar_reader

config = ar_config.get_config()


def main(argv=None):

    if gfile.Exists(config.train_dir):
        gfile.DeleteRecursively(config.train_dir)
        gfile.MakeDirs(config.train_dir)

    train()

def train():
    train_data = ar_reader.ArousalData(config.train_data, num_steps=config.max_steps)
    test_data = ar_reader.ArousalData(config.test_data, num_steps=config.max_steps)

    with tf.Graph().as_default(), tf.Session() as session, tf.device('/cpu:0'):
        initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)

        with tf.variable_scope('model', reuse=False, initializer=initializer):
            m = ar_network.ARModel(is_training=True)
            s = tf.train.Saver(tf.all_variables())

        tf.initialize_all_variables().run()

        for batch_input, batch_target in train_data:
            step = train_data.iter_steps

            dict = {
                m.features: batch_input,
                m.targets: batch_target
            }

            session.run(m.train_op, feed_dict=dict)
            state, cost, accuracy = session.run([m.final_state, m.cost, m.accuracy], feed_dict=dict)

            if not step % 10:
                test_input, test_target = test_data.next()
                test_accuracy = session.run(m.accuracy, feed_dict={
                    m.features: test_input,
                    m.targets: test_target
                })
                now = datetime.now().time()
                print ('%s | Iter %4d | Loss= %.5f | Train= %.5f | Test= %.3f' % (now, step, cost, accuracy, test_accuracy))

            if not step % 1000:
                destination = os.path.join(config.train_dir, 'ar_model.ckpt')
                s.save(session, destination)

if __name__ == '__main__':
    tf.app.run()
ar_config.py

class Config(object):

    # Directories
    train_dir = '...'
    ckpt_dir = '...'
    train_data = '...'
    test_data = '...'

    # Data
    num_features = 13
    num_classes = 2
    batch_size = 60

    # Model
    num_hidden = 3
    num_delays = 5

    # Training
    max_steps = 100000


def get_config():
    return Config()
Updated architecture:

# Placeholders
self._features = tf.placeholder(tf.float32, [None, config.num_features, config.num_delays], name='ModelInput')
self._targets = tf.placeholder(tf.float32, [None, config.num_output], name='ModelOutput')

# Weights
weights = {
    'hidden': tf.get_variable('w_hidden', [config.num_features, config.num_hidden], tf.float32),
    'out': tf.get_variable('w_out', [config.num_hidden, config.num_classes], tf.float32)
}
biases = {
    'hidden': tf.get_variable('b_hidden', [config.num_hidden], tf.float32),
    'out': tf.get_variable('b_out', [config.num_classes], tf.float32)
}

#Layer in
with tf.variable_scope('input_hidden') as scope:
    inputs = self._features
    inputs = tf.transpose(inputs, perm=[2, 0, 1])  # (BatchSize,NumFeatures,TimeSteps) -> (TimeSteps,BatchSize,NumFeatures)
    inputs = tf.reshape(inputs, shape=[-1, config.num_features]) # (TimeSteps,BatchSize,NumFeatures) -> (TimeSteps*BatchSize,NumFeatures)
    inputs = tf.add(tf.matmul(inputs, weights['hidden']), biases['hidden'])

#Layer hidden
with tf.variable_scope('hidden_hidden') as scope:
    inputs = tf.split(0, config.num_delays, inputs) # -> n_steps * (batchsize, features)
    cell = tf.nn.rnn_cell.BasicLSTMCell(config.num_hidden, forget_bias=0.0)
    self._initial_state = cell.zero_state(config.batch_size, dtype=tf.float32)
    outputs, state = rnn.rnn(cell, inputs, dtype=tf.float32)

#Layer out
with tf.variable_scope('hidden_output') as scope:
    output = outputs[-1]
    logits = tf.add(tf.matmul(output, weights['out']), biases['out'])
Odd elements

Weighted loss

I am not sure your "weighted loss" does what you want it to do:

    ratio = (60.00 / 5.00)
    class_weights = tf.constant([ratio, 1 - ratio])
    weighted_logits = tf.mul(logits, class_weights)
This is applied before calculating the loss function (and I think you wanted an element-wise multiplication anyway? Also, your ratio is above 1, which makes the second part negative?), so it forces your predictions to behave in a certain way before the softmax is applied.

If you want a weighted loss, you should instead apply the weights after calculating

loss = tf.nn.softmax_cross_entropy_with_logits(logits, self._targets)
through an element-wise multiplication with your weights:

loss = loss * weights
where your weights have a shape like [2,].
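Note that loss above has shape [batch_size], so the [2,] class weights first have to be mapped to one weight per example. A minimal sketch in the same TF-0.x-era API as the question, reusing its logits and self._targets (the weight values are illustrative):

    # One weight per example, picked out by the one-hot targets (shape [batch, 2]).
    class_weights = tf.constant([1.0, 12.0])   # [weight for class 0, weight for class 1]
    example_weights = tf.reduce_sum(self._targets * class_weights, reduction_indices=[1])
    loss = tf.nn.softmax_cross_entropy_with_logits(logits, self._targets)   # shape [batch]
    cost = tf.reduce_mean(loss * example_weights)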

However, I would not recommend using a weighted loss. Maybe try increasing the ratio even further than 1:6.

Architecture

As far as I can tell, you are using 5 stacked LSTMs with 3 hidden units per layer?

Try removing the MultiRNNCell and just using a single LSTM/GRU (maybe even just a vanilla RNN), and jack the hidden units up to ~100-1000; a sketch of that change follows.
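A hedged sketch of that simplification, mirroring the question's TF-0.x-era code (128 units is an arbitrary choice within that range; inputs stands for the question's list of per-step (batch, features) tensors):

    # Single LSTM layer, no MultiRNNCell stacking.
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(128, forget_bias=1.0)
    self._initial_state = lstm_cell.zero_state(config.batch_size, dtype=tf.float32)
    outputs, state = rnn.rnn(lstm_cell, inputs, dtype=tf.float32)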

Debugging

Often, when you are facing issues with an odd-behaving network, it can be a good idea to:

Print everything. Literally print the shape and value of every tensor in your model: fetch it with the session, then print it. Your input data, the first hidden representation, your predictions, your losses, and so on.
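For example, dropped into the training loop from ar_train.py above (the tensors fetched here are just examples):

    # Fetch and print shapes/values of a few tensors with the existing session.
    predictions, cost_value = session.run([m.predict, m.cost], feed_dict=dict)
    print('input batch shape:', np.asarray(batch_input).shape)
    print('predictions:', predictions, '| cost:', cost_value)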

You can also use TensorFlow's tf.Print:

x_tensor = tf.Print(x_tensor, [tf.shape(x_tensor)])

Use TensorBoard

Summaries of your gradients, accuracy metrics, and histograms can reveal patterns in your data that explain certain behaviour, such as what led to exploding weights. For example, your forget bias might be going to infinity, or you might not be tracking the gradient through a certain layer.
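A sketch using the summary ops of that era's API (these were later renamed to tf.summary.*; the log directory is illustrative):

    # At graph-construction time:
    tf.scalar_summary('cost', m.cost)
    tf.scalar_summary('accuracy', m.accuracy)
    summary_op = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter('/tmp/ar_logs', session.graph)

    # Inside the training loop:
    summary_str = session.run(summary_op, feed_dict=dict)
    writer.add_summary(summary_str, step)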

Other questions

  • How large is your dataset?

  • How long are your sequences?

  • Are the 13 features categorical or continuous? You should not normalize categorical variables or represent them as integers; instead, you should use one-hot encoding (see the sketch after this list).
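A minimal one-hot sketch in NumPy (the category codes are made up):

    import numpy as np

    codes = np.array([0, 2, 1, 3, 2])   # a categorical feature with 4 levels, encoded 0..3
    one_hot = np.eye(4)[codes]          # shape (5, 4): one indicator column per category
    print(one_hot)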

Gunnar has already made lots of good suggestions. A few more small things worth paying attention to for this kind of architecture:

  • Try tweaking the Adam learning rate. You should determine the proper learning rate via cross-validation; as a rough start, you can simply check whether a smaller learning rate keeps your model from collapsing on the training data (see the sketch after this list).
  • You should use more hidden units. It is cheap to try larger networks when you are first working with a dataset; go as large as you need to.
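For instance, a sketch of a smaller-than-default step size (Adam's default learning rate is 1e-3; 1e-4 is shown purely as a starting point to try):

    # Replace the default-constructed optimizer in ARModel with an explicit rate.
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    self._train_op = optimizer.minimize(cost)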