Python: How to display custom images (e.g. Matplotlib plots) in TensorBoard?


A section of the TensorBoard README says:

Since the image dashboard supports arbitrary PNGs, you can use this to embed custom visualizations (e.g. matplotlib scatter plots) into TensorBoard.


I see how a pyplot image could be written to file, read back in as a tensor, and then used with tf.image_summary() to write it to TensorBoard, but this statement from the README suggests there is a more direct way. Is there? If so, is there any further documentation and/or examples of how to do this efficiently?
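For context, the roundabout file-based route mentioned above looks roughly like this (a sketch only; 'plot.png' is an arbitrary temporary file name and the TF 1.x graph-mode API is assumed):

import matplotlib.pyplot as plt
import tensorflow as tf

# Write the pyplot figure to disk ...
plt.plot([1, 2])
plt.savefig('plot.png')

# ... then read it back, decode it into an image tensor and attach an image summary
png_bytes = tf.read_file('plot.png')
image = tf.expand_dims(tf.image.decode_png(png_bytes, channels=4), 0)
summary_op = tf.summary.image('plot', image)  # tf.image_summary() in older releases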

This is quite easy to do if you have the image in a memory buffer. Below, I show an example where a pyplot figure is saved to a buffer, converted to a TF image representation, and then sent to an image summary:

import io
import matplotlib.pyplot as plt
import tensorflow as tf


def gen_plot():
    """Create a pyplot plot and save to buffer."""
    plt.figure()
    plt.plot([1, 2])
    plt.title("test")
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf


# Prepare the plot
plot_buf = gen_plot()

# Convert PNG buffer to TF image
image = tf.image.decode_png(plot_buf.getvalue(), channels=4)

# Add the batch dimension
image = tf.expand_dims(image, 0)

# Add image summary
summary_op = tf.summary.image("plot", image)

# Session
with tf.Session() as sess:
    # Run
    summary = sess.run(summary_op)
    # Write summary
    writer = tf.summary.FileWriter('./logs')
    writer.add_summary(summary)
    writer.close()
This gives the following TensorBoard visualization:


The next script does not use intermediate RGB/PNG encoding. It also solves the problem of additional op construction during execution: a single summary is reused.

The size of the figure is expected to remain constant during execution.

Working solution:

import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np

def get_figure():
  fig = plt.figure(num=0, figsize=(6, 4), dpi=300)
  fig.clf()
  return fig


def fig2rgb_array(fig, expand=True):
  fig.canvas.draw()
  buf = fig.canvas.tostring_rgb()
  ncols, nrows = fig.canvas.get_width_height()
  shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
  return np.fromstring(buf, dtype=np.uint8).reshape(shape)


def figure_to_summary(fig):
  image = fig2rgb_array(fig)
  summary_writer.add_summary(
    vis_summary.eval(feed_dict={vis_placeholder: image}))


if __name__ == '__main__':
      # construct graph
      x = tf.Variable(initial_value=tf.random_uniform((2, 10)))
      inc = x.assign(x + 1)

      # construct summary
      fig = get_figure()
      vis_placeholder = tf.placeholder(tf.uint8, fig2rgb_array(fig).shape)
      vis_summary = tf.summary.image('custom', vis_placeholder)

      with tf.Session() as sess:
        tf.global_variables_initializer().run()
        summary_writer = tf.summary.FileWriter('./tmp', sess.graph)

        for i in range(100):
          # execute step
          _, values = sess.run([inc, x])
          # draw on the plot
          fig = get_figure()
          plt.subplot(111).scatter(values[0], values[1])
          # save the summary
          figure_to_summary(fig)

This is meant to complete Andrzej Pronobis' answer. Following his nice post closely, I set up this minimal working example:

    plt.figure()
    plt.plot([1, 2])
    plt.title("test")
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    image = tf.expand_dims(image, 0)
    summary = tf.summary.image("test", image, max_outputs=1)
    writer.add_summary(summary, step)
where writer is an instance of tf.summary.FileWriter. This gave me the following error: AttributeError: 'Tensor' object has no attribute 'value'. The fix: the summary has to be evaluated (converted into a string) before it is added to the writer. So my working code looks as follows (simply add a .eval() call to the last line):
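A minimal sketch of the corrected final lines (assuming writer, image and step are defined as in the snippet above, and that this runs inside an active default session so that .eval() works):

    summary = tf.summary.image("test", image, max_outputs=1)
    # Evaluate the summary op to a serialized string before handing it to the writer
    writer.add_summary(summary.eval(), step)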


This might be short enough to be a comment on his answer, but those are easily missed (and I may have been doing something else differently anyway), so here it is, in the hope that it helps.

A bit late with my answer. With tf-matplotlib a simple scatter plot boils down to:

import tensorflow as tf
import numpy as np

import tfmpl

@tfmpl.figure_tensor
def draw_scatter(scaled, colors): 
    '''Draw scatter plots. One for each color.'''  
    figs = tfmpl.create_figures(len(colors), figsize=(4,4))
    for idx, f in enumerate(figs):
        ax = f.add_subplot(111)
        ax.axis('off')
        ax.scatter(scaled[:, 0], scaled[:, 1], c=colors[idx])
        f.tight_layout()

    return figs

with tf.Session(graph=tf.Graph()) as sess:

    # A point cloud that can be scaled by the user
    points = tf.constant(
        np.random.normal(loc=0.0, scale=1.0, size=(100, 2)).astype(np.float32)
    )
    scale = tf.placeholder(tf.float32)        
    scaled = points*scale

    # Note, `scaled` above is a tensor. It's being passed to `draw_scatter` below.
    # However, when `draw_scatter` is invoked, the tensor will be evaluated and a
    # numpy array representing its content is provided.   
    image_tensor = draw_scatter(scaled, ['r', 'g'])
    image_summary = tf.summary.image('scatter', image_tensor)      
    all_summaries = tf.summary.merge_all() 

    writer = tf.summary.FileWriter('log', sess.graph)
    summary = sess.run(all_summaries, feed_dict={scale: 2.})
    writer.add_summary(summary, global_step=0)
When executed, this produces the following plot inside TensorBoard:

Note that tf-matplotlib takes care of evaluating any tensor inputs, avoids pyplot threading issues, and supports blitting for runtime-critical plotting.

Finally, "Logging arbitrary image data" is demonstrated using an image created with matplotlib as the example. They write:

In the following code, you'll log the first 25 images as a nice grid using matplotlib's subplot() function. You'll then view the grid in TensorBoard:

# Clear out prior logging data.
!rm -rf logs/plots

logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(logdir)

def plot_to_image(figure):
  """Converts the matplotlib plot specified by 'figure' to a PNG image and
  returns it. The supplied figure is closed and inaccessible after this call."""
  # Save the plot to a PNG in memory.
  buf = io.BytesIO()
  plt.savefig(buf, format='png')
  # Closing the figure prevents it from being displayed directly inside
  # the notebook.
  plt.close(figure)
  buf.seek(0)
  # Convert PNG buffer to TF image
  image = tf.image.decode_png(buf.getvalue(), channels=4)
  # Add the batch dimension
  image = tf.expand_dims(image, 0)
  return image

def image_grid():
  """Return a 5x5 grid of the MNIST images as a matplotlib figure."""
  # Create a figure to contain the plot.
  figure = plt.figure(figsize=(10,10))
  for i in range(25):
    # Start next subplot.
    plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]])
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
  
  return figure

# Prepare the plot
figure = image_grid()
# Convert to image and log
with file_writer.as_default():
  tf.summary.image("Training data", plot_to_image(figure), step=0)

%tensorboard --logdir logs/plots
A PyTorch solution:

  • Use a matplotlib figure
  • Draw it onto its canvas
  • Then convert it to a numpy array (see the sketch below):
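A minimal sketch of those steps (assumptions: an arbitrary example plot, a log directory named runs/example, and a writer called tensorboard so the call below works unchanged; the array is transposed to the CHW layout that add_image expects by default):

import numpy as np
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter

# Build an example figure
fig = plt.figure(figsize=(4, 4))
plt.plot([0, 1, 2], [0, 1, 4])

# Draw it onto the canvas, then grab the RGB pixel buffer as a numpy array
fig.canvas.draw()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))  # HWC
image = image.transpose(2, 0, 1)  # CHW, the default layout for add_image

tensorboard = SummaryWriter('runs/example')
global_step = 0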
The result can then be added directly to TensorBoard:

tensorboard.add_image('name', image, global_step)

A solution for PyTorch Lightning:

This is not the complete class, but only what you have to add to make it work within the framework:

import io

import pytorch_lightning as pl
import seaborn as sn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torchvision
from typing import Optional
from PIL import Image
from pytorch_lightning.loggers import TensorBoardLogger

def __init__(self, config, trained_vae, latent_dim):
    self.val_confusion = pl.metrics.classification.ConfusionMatrix(num_classes=self._config.n_clusters)
    self.logger: Optional[TensorBoardLogger] = None

def forward(self, x):
    ...
    return log_probs

def validation_step(self, batch, batch_index):
    if self._config.dataset == "mnist":
        orig_batch, label_batch = batch
        orig_batch = orig_batch.reshape(-1, 28 * 28)

    log_probs = self.forward(orig_batch)
    loss = self._criterion(log_probs, label_batch)

    self.val_confusion.update(log_probs, label_batch)
    return {"loss": loss, "labels": label_batch}

def validation_step_end(self, outputs):
    return outputs

def validation_epoch_end(self, outs):
    tb = self.logger.experiment

    # confusion matrix
    conf_mat = self.val_confusion.compute().detach().cpu().numpy().astype(int)
    df_cm = pd.DataFrame(
        conf_mat,
        index=np.arange(self._config.n_clusters),
        columns=np.arange(self._config.n_clusters))
    plt.figure()
    sn.set(font_scale=1.2)
    sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, fmt='d')
    buf = io.BytesIO()
    
    plt.savefig(buf, format='jpeg')
    buf.seek(0)
    im = Image.open(buf)
    im = torchvision.transforms.ToTensor()(im)
    tb.add_image("val_confusion_matrix", im, global_step=self.current_epoch)
And the call:

logger = TensorBoardLogger(save_dir=tb_logs_folder, name='Classifier')
trainer = Trainer(
    default_root_dir=classifier_checkpoints_path,
    logger=logger,
)

Matplotlib plots can be added directly to TensorBoard with SummaryWriter.add_figure, as follows:

import numpy as np
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter

# Example plot
x = np.linspace(0, 10)
plt.plot(x, np.sin(x))

# Add the plot to TensorBoard
with SummaryWriter('runs/SO_test') as writer:
    writer.add_figure('Fig1', plt.gcf())

Thanks, your example indeed works. But for some reason, when I integrate the same approach into my actual script (which contains other summaries etc.), the solution does not seem stable. It writes one or two images to the summary file and then fails with the error message: "tensorflow.python.framework.errors.NotFoundError: FetchOutputs node ImageSummary_2:0: not found". Perhaps some kind of timing issue. Any ideas?

I am not sure why this happens. It is hard to say without seeing the code.

tf.image_summary is now deprecated and the API has changed; use tf.summary.image instead.

Updated the answer accordingly. SummaryWriter is now deprecated as well; use writer = tf.summary.FileWriter('./logs') instead.

While this link may answer the question, it is better to include the essential parts of the answer here and provide the link for reference. Link-only answers can become invalid if the linked page changes.