Graph optimizations on a tf.Estimator-based TensorFlow servable


Context

I have a simple tf.Estimator-based classifier that takes text and outputs probabilities over intent labels. I am able to train the model, export it as a servable, and serve the servable with TensorFlow Serving. The problem is that the servable is too large (around 1 GB), so I would like to try some approaches to reduce the size of the files being served.
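
Not from the question, but for concreteness, a minimal sketch of the kind of Estimator setup assumed here (the feature column, label count, and paths are placeholders):

import tensorflow as tf

# Hash raw text into buckets and embed it; any text feature column works here.
text_column = tf.feature_column.categorical_column_with_hash_bucket(
    "text", hash_bucket_size=100000)
embedded_column = tf.feature_column.embedding_column(text_column, dimension=128)

classifier = tf.estimator.DNNClassifier(
    feature_columns=[embedded_column],
    hidden_units=[256, 64],
    n_classes=10,  # placeholder number of intent labels
    model_dir="/model/checkpoints")

# classifier.train(input_fn=...)  # training elided

def serving_input_fn():
    # Serve raw strings; this placeholder becomes the graph's input node.
    text = tf.placeholder(tf.string, shape=[None], name="Placeholder")
    return tf.estimator.export.ServingInputReceiver({"text": text},
                                                    {"inputs": text})

# Exports the ~1 GB servable referred to above.
classifier.export_savedmodel("/model/path", serving_input_fn)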

Question

I understand how to take the saved_model.pb and create a new .pb file that the transforms can be invoked on. The result of those transforms (also a .pb file) is not a servable and cannot be used with TensorFlow Serving.

How does a developer go from:

saved model -> graph transforms -> back to a servable
There is evidence that this is certainly possible, but it is not intuitive from the docs how to accomplish it.
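
One practical sub-problem is discovering the input and output node names to feed to the graph transforms. A small sketch of how those can be read from the SavedModel's serving signature (my own helper, not from the question):

import tensorflow as tf
from tensorflow.saved_model import signature_constants
from tensorflow.saved_model import tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph_def = tf.saved_model.loader.load(
        sess, [tag_constants.SERVING], "/model/path")
    # The default serving signature maps logical names to tensor names
    # such as "Placeholder:0" and "dnn/head/predictions/probabilities:0".
    sig = meta_graph_def.signature_def[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    print({k: v.name for k, v in sig.inputs.items()})
    print({k: v.name for k, v in sig.outputs.items()})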

What I have tried

import tensorflow as tf

from tensorflow.saved_model import simple_save
from tensorflow.saved_model import signature_constants
from tensorflow.saved_model import tag_constants
from tensorflow.tools.graph_transforms import TransformGraph


with tf.Session(graph=tf.Graph()) as sess_meta:
    meta_graph_def = tf.saved_model.loader.load(
        sess_meta,
        [tag_constants.SERVING],
        "/model/path")

    graph_def = meta_graph_def.graph_def

    other_graph_def = TransformGraph(
        graph_def,
        ["Placeholder"],
        ["dnn/head/predictions/probabilities"],
        ["quantize_weights"])


    with tf.Graph().as_default():
        graph = tf.get_default_graph()
        tf.import_graph_def(other_graph_def)
        in_tensor = graph.get_tensor_by_name(
            "import/Placeholder:0")
        out_tensor = graph.get_tensor_by_name(
            "import/dnn/head/predictions/probabilities:0")

        inputs = {"inputs": in_tensor}
        outputs = {"outputs": out_tensor}

        simple_save(sess_meta, "./new", inputs, outputs)
My idea was to load the servable, extract the graph_def from the meta_graph_def, transform the graph_def, and then try to recreate the servable. This seems to be the incorrect approach.
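
One concrete problem with the snippet above (my reading, not a confirmed answer): simple_save is called with sess_meta, whose graph is the originally loaded graph, while in_tensor and out_tensor live in the freshly created graph. A sketch of the tail rewritten so the session matches the new graph:

with tf.Graph().as_default() as graph:
    tf.import_graph_def(other_graph_def)
    in_tensor = graph.get_tensor_by_name("import/Placeholder:0")
    out_tensor = graph.get_tensor_by_name(
        "import/dnn/head/predictions/probabilities:0")

    # quantize_weights leaves the weights inline as constants, so no
    # variables need restoring before the export.
    with tf.Session(graph=graph) as sess_new:
        simple_save(sess_new,
                    "./new",
                    inputs={"inputs": in_tensor},
                    outputs={"outputs": out_tensor})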

Is there a method to successfully perform transforms on the graph from an exported servable (to reduce its file size at inference time) and then recreate a servable with the transformed graph?

Thanks.

Update (2018-08-28)

Found something that looks promising.

Update (2018-12-03)


A related GitHub issue I opened appears to have been resolved in a detailed blog post, which is listed at the end of the ticket.

We can optimize or reduce the size of a TensorFlow model using the methods mentioned below:

  • Freezing: Convert the variables stored in the SavedModel's checkpoint files into constants stored directly in the model graph. This reduces the overall size of the model.

  • Pruning: Strip unused nodes in the prediction path and the outputs of the graph, merge duplicate nodes, and clean up other node ops such as summaries and identities.

  • Constant folding: Look for any subgraphs in the model that always evaluate to constant expressions and replace them with those constants. Folding batch norms: Fold the multiplications introduced by batch normalization into the weight multiplications of the preceding layer.

  • Quantization: Convert weights from floating point to a lower precision, such as 16 or 8 bits.

  • The code for freezing the graph is described below:

    import os

    from tensorflow.python.tools import freeze_graph
    from tensorflow.saved_model import tag_constants

    # `saved_model_dir` is the directory of the exported SavedModel; the
    # output names below match the invocations further down.
    output_filename = 'frozen_model.pb'
    output_node_names = 'head/predictions/class_ids'
    output_graph_filename = os.path.join(saved_model_dir, output_filename)
    initializer_nodes = ''

    freeze_graph.freeze_graph(input_saved_model_dir=saved_model_dir,
          output_graph=output_graph_filename,
          saved_model_tags=tag_constants.SERVING,
          output_node_names=output_node_names,
          initializer_nodes=initializer_nodes,
          input_graph=None, input_saver=False, input_binary=False,
          input_checkpoint=None, restore_op_name=None, filename_tensor_name=None,
          clear_devices=False, input_meta_graph=False)
    
    The code for pruning and constant folding is described below:

    from tensorflow.tools.graph_transforms import TransformGraph
    
    def get_graph_def_from_file(graph_filepath):
      # Read a (frozen) GraphDef from a binary .pb file.
      with tf.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, 'rb') as f:
          graph_def = tf.GraphDef()
          graph_def.ParseFromString(f.read())
          return graph_def
    
    def optimize_graph(model_dir, graph_filename, transforms, output_node):
      input_names = []
      output_names = [output_node]
      if graph_filename is None:
        graph_def = get_graph_def_from_saved_model(model_dir)
      else:
        graph_def = get_graph_def_from_file(
            os.path.join(model_dir, graph_filename))
      optimized_graph_def = TransformGraph(
          graph_def, input_names, output_names, transforms)
      tf.train.write_graph(optimized_graph_def, logdir=model_dir,
          as_text=False, name='optimized_model.pb')
      print('Graph optimized!')
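
    Note that optimize_graph also calls a helper, get_graph_def_from_saved_model, which never appears in the post; a sketch reconstructed from its call site (my assumption, not the author's exact code) could be:

    def get_graph_def_from_saved_model(saved_model_dir):
      # Load the SavedModel only to pull out its GraphDef (assumed helper,
      # reconstructed from the call in optimize_graph above).
      with tf.Session(graph=tf.Graph()) as session:
        meta_graph_def = tf.saved_model.loader.load(
            session,
            tags=[tag_constants.SERVING],
            export_dir=saved_model_dir)
      return meta_graph_def.graph_def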
    
    
    We invoke this code on our model by passing a list of the desired optimizations, like so:

    transforms = ['remove_nodes(op=Identity)', 'merge_duplicate_nodes',
                  'strip_unused_nodes', 'fold_constants(ignore_errors=true)',
                  'fold_batch_norms']

    optimize_graph(saved_model_dir, 'frozen_model.pb', transforms,
                   'head/predictions/class_ids')
    
    The code for quantization is described below:

    transforms = ['quantize_nodes', 'quantize_weights',]
    optimize_graph(saved_model_dir, None, transforms, 'head/predictions/class_ids')
    
    Once the optimizations are applied, we need to convert the optimized graph (a GraphDef) back into a SavedModel. The code for that is shown below:

    def convert_graph_def_to_saved_model(export_dir, graph_filepath):
      if tf.gfile.Exists(export_dir):
        tf.gfile.DeleteRecursively(export_dir)
      graph_def = get_graph_def_from_file(graph_filepath)
      with tf.Session(graph=tf.Graph()) as session:
        tf.import_graph_def(graph_def, name='')
        tf.saved_model.simple_save(
            session,
            export_dir,
            inputs={
                node.name: session.graph.get_tensor_by_name(
                    '{}:0'.format(node.name))
                for node in graph_def.node if node.op=='Placeholder'},
            outputs={'class_ids': session.graph.get_tensor_by_name(
                'head/predictions/class_ids:0')}
        )
        print('Optimized graph converted to SavedModel!')
    
    optimized_export_dir = os.path.join(export_dir, 'optimized')
    optimized_filepath = os.path.join(saved_model_dir, 'optimized_model.pb')
    convert_graph_def_to_saved_model(optimized_export_dir, optimized_filepath)
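
    As a quick sanity check (my addition, not part of the original post), the optimized SavedModel can be reloaded to confirm it still exposes a serving signature before pointing TensorFlow Serving at it:

    with tf.Session(graph=tf.Graph()) as session:
      meta_graph_def = tf.saved_model.loader.load(
          session, [tag_constants.SERVING], optimized_export_dir)
      # Print the signatures TensorFlow Serving will see.
      for key, sig in meta_graph_def.signature_def.items():
        print(key,
              {k: v.name for k, v in sig.inputs.items()},
              {k: v.name for k, v in sig.outputs.items()})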
    
    
    For more information, refer to the following links mentioned by @gobrewers14:


I have done something similar before. I always convert to a SavedModel instead of a frozen model when I want to try some optimizations. After hours of searching, I wrote a script for converting saved_model -> frozen_model and frozen_model -> saved_model.
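
A minimal sketch of the two conversions such a script would perform (helper names and signatures are my assumptions; the author's actual script is not included in the post):

import tensorflow as tf
from tensorflow.saved_model import simple_save
from tensorflow.saved_model import tag_constants

def saved_model_to_frozen_model(saved_model_dir, frozen_path, output_nodes):
    # Load the SavedModel and inline its variables as constants.
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, [tag_constants.SERVING],
                                   saved_model_dir)
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, output_nodes)
    with tf.gfile.GFile(frozen_path, 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())

def frozen_model_to_saved_model(frozen_path, export_dir, inputs, outputs):
    # Re-import the frozen GraphDef and re-export it with a serving signature.
    # `inputs`/`outputs` map signature keys to tensor names, e.g.
    # {"inputs": "Placeholder:0"}.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name='')
        simple_save(
            sess, export_dir,
            inputs={k: sess.graph.get_tensor_by_name(v)
                    for k, v in inputs.items()},
            outputs={k: sess.graph.get_tensor_by_name(v)
                     for k, v in outputs.items()})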