Python: TensorFlow implementation is 2x slower than Torch's

I am trying to implement a stacked hourglass network in TensorFlow, while a Torch implementation already exists.

I tested the Torch implementation with a Titan X Pascal in the default configuration (batch size = 6); the average training iteration takes about 343 ms.

I benchmarked the TensorFlow implementation with random inputs/outputs:

import tensorflow as tf

class stacked_hourglass():
    def __init__(self, nb_stack, name='stacked_hourglass'):
        self.nb_stack = nb_stack
        self.name = name

    def __call__(self, x):
        with tf.name_scope(self.name) as scope:
            padding = tf.pad(x, [[0,0],[3,3],[3,3],[0,0]], name='padding')
            with tf.name_scope("preprocessing") as sc:
                conv1 = self._conv(padding, 64, 7, 2, 'VALID', 'conv1')
                norm1 = tf.contrib.layers.batch_norm(conv1, 0.9, epsilon=1e-5, 
                                    activation_fn=tf.nn.relu, scope=sc)
                r1 = self._residual_block(norm1, 128, 'r1')
                pool = tf.contrib.layers.max_pool2d(r1, [2,2], [2,2], 'VALID', scope=scope)
                r2 = self._residual_block(pool, 128, 'r2')
                r3 = self._residual_block(r2, 256, 'r3')
            hg = [None] * self.nb_stack
            ll = [None] * self.nb_stack
            ll_ = [None] * self.nb_stack
            out = [None] * self.nb_stack
            out_ = [None] * self.nb_stack
            sum_ = [None] * self.nb_stack
            with tf.name_scope('_hourglass_0_with_supervision') as sc:
                hg[0] = self._hourglass(r3, 4, 256, '_hourglass')
                ll[0] = self._conv_bn_relu(hg[0], 256, name='conv_1')
                ll_[0] = self._conv(ll[0],256,1,1,'VALID','ll')
                out[0] = self._conv(ll[0],16,1,1,'VALID','out')
                out_[0] = self._conv(out[0],256,1,1,'VALID','out_')
                sum_[0] = tf.add_n([ll_[0], out_[0], r3])
            for i in range(1, self.nb_stack - 1):
                with tf.name_scope('_hourglass_' + str(i) + '_with_supervision') as sc:
                    hg[i] = self._hourglass(sum_[i-1], 4, 256, '_hourglass')
                    ll[i] = self._conv_bn_relu(hg[i], 256, name='conv_1')
                    ll_[i] = self._conv(ll[i],256,1,1,'VALID','ll')
                    out[i] = self._conv(ll[i],16,1,1,'VALID','out')
                    out_[i] = self._conv(out[i],256,1,1,'VALID','out_')
                    sum_[i] = tf.add_n([ll_[i], out_[i], sum_[i-1]])
            with tf.name_scope('_hourglass_' + str(self.nb_stack - 1) + '_with_supervision') as sc:
                hg[self.nb_stack-1] = self._hourglass(sum_[self.nb_stack - 2], 4, 256, '_hourglass')
                ll[self.nb_stack-1] = self._conv_bn_relu(hg[self.nb_stack - 1], 256, name='conv_1')
                out[self.nb_stack-1] = self._conv(ll[self.nb_stack-1],16,1,1,'VALID','out')
            return tf.stack(out)

    def _conv(self, inputs, nb_filter, kernel_size=1, strides=1, pad='VALID', name='conv'):
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,\
                                    kernel_size,inputs.get_shape().as_list()[3],nb_filter]), name='weights')
            conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding=pad, data_format='NHWC')
            return conv

    def _conv_bn_relu(self, inputs, nb_filter, kernel_size=1, strides=1, name=None):
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,\
                                    kernel_size,inputs.get_shape().as_list()[3],nb_filter]), name='weights')
            conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding='SAME', data_format='NHWC')
            norm = tf.contrib.layers.batch_norm(conv, 0.9, epsilon=1e-5, activation_fn=tf.nn.relu, scope=scope)
            return norm

    def _conv_block(self, inputs, nb_filter_out, name='_conv_block'):
        with tf.name_scope(name) as scope:
            with tf.name_scope('norm_conv1') as sc:
                norm1 = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, 
                                    activation_fn=tf.nn.relu, scope=sc)
                conv1 = self._conv(norm1, nb_filter_out / 2, 1, 1, 'SAME', name='conv1')
            with tf.name_scope('norm_conv2') as sc:
                norm2 = tf.contrib.layers.batch_norm(conv1, 0.9, epsilon=1e-5, 
                                    activation_fn=tf.nn.relu, scope=sc)
                conv2 = self._conv(norm2, nb_filter_out / 2, 3, 1, 'SAME', name='conv2')
            with tf.name_scope('norm_conv3') as sc:
                norm3 = tf.contrib.layers.batch_norm(conv2, 0.9, epsilon=1e-5, 
                                    activation_fn=tf.nn.relu, scope=sc)
                conv3 = self._conv(norm3, nb_filter_out, 1, 1, 'SAME', name='conv3')
            return conv3

    def _skip_layer(self, inputs, nb_filter_out, name='_skip_layer'):
        if inputs.get_shape()[3].__eq__(tf.Dimension(nb_filter_out)):
            return inputs
        else:
            with tf.name_scope(name) as scope:
                conv = self._conv(inputs, nb_filter_out, 1, 1, 'SAME', name='conv')
                return conv

    def _residual_block(self, inputs, nb_filter_out, name='_residual_block'):
        with tf.name_scope(name) as scope:
            _conv_block = self._conv_block(inputs, nb_filter_out)
            _skip_layer = self._skip_layer(inputs, nb_filter_out)
            return tf.add(_skip_layer, _conv_block)

    def _hourglass(self, inputs, n, nb_filter_res, name='_hourglass'):
        with tf.name_scope(name) as scope:
            # Upper branch
            up1 = self._residual_block(inputs, nb_filter_res, 'up1')
            # Lower branch
            pool = tf.contrib.layers.max_pool2d(inputs, [2,2], [2,2], 'VALID', scope=scope)
            low1 = self._residual_block(pool, nb_filter_res, 'low1')
            if n > 1:
                low2 = self._hourglass(low1, n-1, nb_filter_res, 'low2')
            else:
                low2 = self._residual_block(low1, nb_filter_res, 'low2')
            low3 = self._residual_block(low2, nb_filter_res, 'low3')
            low4 = tf.image.resize_nearest_neighbor(low3, tf.shape(low3)[1:3] * 2,
                                                    name='upsampling')
            if n < 4:
                return tf.add(up1, low4, name='merge')
            else:
                return self._residual_block(tf.add(up1, low4), nb_filter_res, 'low4')

if __name__ == "__main__":
    import os
    import sys
    import numpy as np
    import time
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) 
    with tf.Graph().as_default():
        DEVICE = '/gpu:0'
        with tf.device(DEVICE):
            print "start build model..."
            _x = tf.placeholder(tf.float32, [None, 256, 256, 3])
            y = tf.placeholder(tf.float32, [8, None, 64, 64, 16])
            output = stacked_hourglass(8, 'stacked_hourglass')(_x)
            loss = tf.reduce_mean(tf.square(output - y))
            rmsprop = tf.train.RMSPropOptimizer(2.5e-4)
            print "build finished..."
        train_step = tf.Variable(0, name='global_step', trainable=False)
        with tf.device(DEVICE):
            train_rmsprop = rmsprop.minimize(loss, train_step)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            with tf.device(DEVICE):
                sess.run(init)
            print "test..."
            xarr = np.random.rand(100, 6, 256, 256, 3)
            yarr = np.random.rand(100, 8, 6, 64, 64, 16)
            _time = time.clock()
            with tf.device(DEVICE):
                for u in range(0, 100):
                    sess.run(train_rmsprop, feed_dict={_x:xarr[u], y:yarr[u]})
            print "test:", time.clock() - _time
Which comes down to an average of about 717 ms per training iteration, twice as slow as the Torch implementation.

I know TensorFlow was expected to be somewhat slower, but a lot of work has gone into closing the gap (given all of that, it should be quite close by now).

Do you have any idea what makes my implementation this slow?

How do the forward-pass timings compare? Historically, TensorFlow has been slower than Torch on backprop because its automatic differentiation operates on a finer-grained graph (individual math ops rather than Torch layers), so more ops are generated for the backward pass. In some cases this has been mitigated by adding fused versions of important ops and of their gradients.
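
For instance, a rough sketch of timing only the forward pass with the same random inputs (reusing output, _x, xarr, sess and DEVICE from the script above; the numbers are only indicative):

    forward_time = time.clock()
    with tf.device(DEVICE):
        for u in range(0, 100):
            # forward graph only, no gradient computation or optimizer update
            sess.run(output, feed_dict={_x: xarr[u]})
    print "forward only:", time.clock() - forward_time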

Some ideas:

  • Make sure you are using tf.fused_batch_norm under the hood (i.e. the fused=True argument).
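
    For example, the tf.contrib.layers.batch_norm calls in the model accept a fused keyword; a minimal sketch, changing only that argument relative to the question's code:

    norm = tf.contrib.layers.batch_norm(conv, 0.9, epsilon=1e-5,
                                        activation_fn=tf.nn.relu,
                                        fused=True,  # request the fused batch-norm kernel
                                        scope=scope)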

  • Use queues instead of feed_dict. feed_dict introduces an extra copy from the Python runtime into the TensorFlow runtime, so you actually end up doing two copies: Python -> TensorFlow CPU, then TensorFlow CPU -> TensorFlow GPU. There is also an extra step available to absorb the CPU -> GPU transfer latency.
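
    A minimal sketch of a queue-based pipeline for this model (shapes taken from the placeholders in the question; the feeding thread is only hinted at in the comments):

    queue = tf.FIFOQueue(capacity=10,
                         dtypes=[tf.float32, tf.float32],
                         shapes=[[6, 256, 256, 3], [8, 6, 64, 64, 16]])
    enqueue_op = queue.enqueue([_x, y])   # run from a separate Python thread with feed_dict
    x_batch, y_batch = queue.dequeue()    # build the model on these instead of _x and y
    # the training loop then calls sess.run(train_rmsprop) with no feed_dict at all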

  • Take a look at a timeline trace of a training step; it can tell you which part is too slow.
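
    In TF 1.x one way to get such a trace is the Chrome-trace timeline (a sketch using the session and tensors from the script above):

    from tensorflow.python.client import timeline

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run(train_rmsprop, feed_dict={_x: xarr[0], y: yarr[0]},
             options=run_options, run_metadata=run_metadata)
    # write a trace viewable in chrome://tracing
    with open('timeline.json', 'w') as f:
        f.write(timeline.Timeline(run_metadata.step_stats).generate_chrome_trace_format())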

  • tcmalloc and C protobufs:

    sudo apt-get install google-perftools
    export LD_PRELOAD="/usr/lib/libtcmalloc.so.4" 
    pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/protobuf-3.0.0-cp27-none-linux_x86_64.whl
    

By the way, this is not exactly the same stacked hourglass as the Torch one: in the hourglass function, the upper and lower branches always use only a single residual block instead of 3.
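
If matching the Torch architecture is the goal, a hedged sketch of chaining three residual blocks on a branch inside _hourglass could look like this (the names up1_0..up1_2 are hypothetical):

    # hypothetical: three chained residual blocks on the upper branch instead of one
    up1 = inputs
    for i in range(3):
        up1 = self._residual_block(up1, nb_filter_res, 'up1_%d' % i)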