Python concatenate multiple images into a single image


This function is supposed to receive a list of numpy arrays made up of cropped parts of an image. The crops are all of the same size, except for the right-most and bottom-most images, which may be smaller.

predictions[2] would return the 3rd sub-image cropped from the original image. Each crop is a numpy array. There are W x H crops, enumerated from left to right, top to bottom (so if 4 sub-images make up the width, the 5th image in predictions would be the first sub-image on the left of the 2nd row of sub-images).

crops contains the information needed to find how many horizontal and vertical images make up the reconstructed image. crops[2][3] would contain the 3rd image from the top and the 4th from the left.

The images contained in crops are smaller than those contained in predictions (I'm basically making a model that increases the resolution of images). The image is reconstructed from the images in predictions, arranged in the same order as those in crops.
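For concreteness, here is a small toy sketch of that layout (made-up sizes and values, not my actual data):

import numpy as np

# Toy sketch of the layout described above: 2 rows x 4 columns of tiles.
# crops is nested row-major; predictions holds the same tiles flattened
# left to right, top to bottom.
crops = [[np.full((8, 8), 10 * r + c) for c in range(4)] for r in range(2)]
predictions = [tile for row in crops for tile in row]

W = len(crops[0])                                   # 4 sub-images make up the width
assert np.array_equal(predictions[4], crops[1][0])  # 5th flat image = 1st tile of the 2nd row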

def reconstruct(predictions, crops):
    if len(crops) != 0:
        print("use crops")

    # TODO: properly extract the size of the full image
    width_length = 0
    height_length = 0

    full_image = np.empty(shape=(height_length, width_length))
    print(full_image.shape)

    # TODO: properly merge the crops back into a single image
    for height in range(len(predictions[0])):
        for width in range(len(predictions)):
            # concatenate here
            print(height, width)

    return full_image
I was going to use np.concatenate, but according to other answers I've seen on SO, that isn't an efficient way to do it (apparently numpy just creates a new array in memory, copies the old one over, and then appends the new data, and so on). So now I'm left wondering how to properly merge my multiple images into a single image. My current idea is to create a container of the proper shape and progressively fill it with each numpy array's data, but even then I'm not sure that's the right approach.
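To illustrate what I mean, a minimal toy comparison (made-up tile sizes) of repeated concatenation versus copying into a preallocated array:

import numpy as np

# Toy comparison: growing a row by repeated concatenation reallocates and
# copies everything accumulated so far on every iteration...
tiles = [np.ones((64, 64, 3), dtype=np.float32) for _ in range(16)]
row = tiles[0]
for t in tiles[1:]:
    row = np.concatenate((row, t), axis=1)

# ...whereas preallocating the final array once and copying each tile
# into its slot writes each pixel only a single time.
h, w, d = tiles[0].shape
out = np.empty((h, w * len(tiles), d), dtype=tiles[0].dtype)
for k, t in enumerate(tiles):
    out[:, k * w:(k + 1) * w] = t

assert np.array_equal(row, out)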

Here is the kind of bundle of images I'm trying to concatenate into a single image:

And here is the expected result:

And to help you understand what else is available, here is some more code:

import numpy as np
import skimage.io
from keras.models import load_model  # assuming a Keras-style model; model.predict below suggests it
# save_dir, tests_path, input_width, input_height and float_im come from the project's 'constants' module

def predict(args):
    model = load_model(save_dir + '/' + args.model)
    image = skimage.io.imread(tests_path + args.image)

    predictions = []
    images = []

    crops = seq_crop(image)  # crops into multiple sub-parts the image based on 'input_' constants

    for i in range(len(crops)):  # amount of vertical crops
        for j in range(len(crops[0])):  # amount of horizontal crops
            current_image = crops[i][j]
            images.append(current_image)

    # Hack because GPU can only handle one image at a time
    for p in range(len(images)):
        input_img = np.expand_dims(images[p], 0)          # add the image to a batch where it's the only member
        predictions.append(model.predict(input_img)[0])   # returns one prediction per image in the batch; keep the only one

    return predictions, image, crops


# adapted from: https://stackoverflow.com/a/52463034/9768291
def seq_crop(img):
    """
    To crop the whole image in a list of sub-images of the same size.
    Size comes from "input_" variables in the 'constants' (Evaluation).
    Padding with 0 the Bottom and Right image.

    :param img: input image
    :return: list of sub-images with defined size
    """
    width_shape = ceildiv(img.shape[1], input_width)
    height_shape = ceildiv(img.shape[0], input_height)
    sub_images = []  # will contain all the cropped sub-parts of the image

    for j in range(height_shape):
        horizontal = []
        for i in range(width_shape):
            horizontal.append(crop_precise(img, i*input_width, j*input_height, input_width, input_height))
        sub_images.append(horizontal)

    return sub_images

def crop_precise(img, coord_x, coord_y, width_length, height_length):
    """
    To crop a precise portion of an image.
    When trying to crop outside of the boundaries, the input is padded with zeros.

    :param img: image to crop
    :param coord_x: width coordinate (top left point)
    :param coord_y: height coordinate (top left point)
    :param width_length: width of the cropped portion starting from coord_x
    :param height_length: height of the cropped portion starting from coord_y
    :return: the cropped part of the image
    """

    tmp_img = img[coord_y:coord_y + height_length, coord_x:coord_x + width_length]

    return float_im(tmp_img)  # From [0,255] to [0.,1.]

# from  https://stackoverflow.com/a/17511341/9768291
def ceildiv(a, b):
    """
    To get the ceiling of a division
    :param a:
    :param b:
    :return:
    """
    return -(-a // b)

if __name__ == '__main__':
    preds, original, crops = predict(args)  # returns the predictions along with the original

    # TODO: reconstruct image
    enhanced = reconstruct(preds, crops)  # reconstructs the enhanced image from predictions
EDIT:

The answer worked. Here is the version I've used:

# adapted from  https://stackoverflow.com/a/52733370/9768291
def reconstruct(predictions, crops):

    # unflatten predictions
    def nest(data, template):
        data = iter(data)
        return [[next(data) for _ in row] for row in template]

    predictions = nest(predictions, crops)

    H = np.cumsum([x[0].shape[0] for x in predictions])
    W = np.cumsum([x.shape[1] for x in predictions[0]])
    D = predictions[0][0]
    recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype)
    for rd, rs in zip(np.split(recon, H[:-1], 0), predictions):
        for d, s in zip(np.split(rd, W[:-1], 1), rs):
            d[...] = s
    return recon
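For reference, a toy usage sketch of this reconstruct (synthetic tiles with made-up sizes, not my actual model output):

import numpy as np

# Toy usage: 2 rows x 3 columns of 8x8 RGB "predictions" (flat list), plus a
# matching nested `crops` template used only for its row/column layout.
# Assumes the reconstruct() defined just above.
crops = [[np.zeros((4, 4, 3), dtype=np.uint8) for _ in range(3)] for _ in range(2)]
preds = [np.full((8, 8, 3), k, dtype=np.uint8) for k in range(6)]

full = reconstruct(preds, crops)
print(full.shape)   # (16, 24, 3): 2 rows of 8 pixels, 3 columns of 8 pixels, RGB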

Most convenient is probably np.block:

import numpy as np
from scipy import misc
from PIL import Image

# get example picture
data = misc.face()
# chop it up
I, J = map(np.arange, (200, 200), data.shape[:2], (200, 200))
chops = [np.split(row, J, axis=1) for row in np.split(data, I, axis=0)]

# do something with the bits

predictions = [chop-(i+j)*(chop>>3) for j, row in enumerate(chops) for i, chop in enumerate(row)]

# unflatten predictions
def nest(data, template):
    data = iter(data)
    return [[next(data) for _ in row] for row in template]

pred_lol = nest(predictions, chops)

# almost builtin reconstruction
def np_block_2D(chops):
    return np.block([[[x] for x in row] for row in chops])

recon = np_block_2D(pred_lol)
Image.fromarray(recon).save('demo.png')
Reconstructed manipulated image:

But we can speed things up a bit by avoiding intermediary arrays. Instead, we copy into a preallocated array:

def speed_block_2D(chops):
    H = np.cumsum([x[0].shape[0] for x in chops])
    W = np.cumsum([x.shape[1] for x in chops[0]])
    D = chops[0][0]
    recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype)
    for rd, rs in zip(np.split(recon, H[:-1], 0), chops):
        for d, s in zip(np.split(rd, W[:-1], 1), rs):
            d[...] = s
    return recon
Timings, also including a general ND-ready variant of each method:

numpy 2D:               0.991 ms
prealloc 2D:            0.389 ms
numpy general:          1.021 ms
prealloc general:       0.448 ms
Code for the general case and the timings:

def np_block(chops):
    d = 0
    tl = chops
    while isinstance(tl, list):
        tl = tl[0]
        d += 1
    if d < tl.ndim:
        def adjust_depth(L):
            if isinstance(L, list):
                return [adjust_depth(l) for l in L]
            else:
                ret = L
                for j in range(d, tl.ndim):
                    ret = [ret]
                return ret
        chops = adjust_depth(chops)
    return np.block(chops)

def speed_block(chops):
    def line(src, i):
        while isinstance(src, list):
            src = src[0]
        return src.shape[i]
    def hyper(src, i):
        src = iter(src)
        fst = next(src)
        if isinstance(fst, list):
            res, dtype, szs = hyper(fst, i+1)
            szs.append([res[i], *(line(s, i) for s in src)])
            res[i] = sum(szs[-1])
            return res, dtype, szs
        res = np.array(fst.shape)
        szs = [res[i], *(s.shape[i] for s in src)]
        res[i] = sum(szs)
        return res, fst.dtype, [szs]
    shape, dtype, szs = hyper(chops, 0)
    recon = np.empty(shape, dtype)
    def cpchp(dst, src, i, szs=None):
        szs = np.array(hyper(src, i)[2]) if szs is None else szs
        dst = np.split(dst, np.cumsum(szs[-1][:-1]), i)
        if isinstance(src[0], list):
            szs = szs[:-1]
            for ds, sr in zip(dst, src):
                cpchp(ds, sr, i+1, szs)
                szs = None
        else:
            for ds, sr in zip(dst, src):
                ds[...] = sr
    cpchp(recon, chops, 0, np.array(szs))
    return recon

from timeit import timeit

T = (timeit(lambda: speed_block(pred_lol), number=1000),
     timeit(lambda: np_block(pred_lol), number=1000),
     timeit(lambda: speed_block_2D(pred_lol), number=1000),
     timeit(lambda: np_block_2D(pred_lol), number=1000))

assert (np.all(speed_block(pred_lol)==np_block(pred_lol)) and
        np.all(speed_block_2D(pred_lol)==np_block(pred_lol)) and
        np.all(speed_block(pred_lol)==np_block_2D(pred_lol)))

print(f"""
numpy 2D:          {T[3]:10.3f} ms
prealloc 2D:       {T[2]:10.3f} ms
numpy general:     {T[1]:10.3f} ms
prealloc general:  {T[0]:10.3f} ms
""")
Nice answer. I do, however, get an error from this line: recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype). The error is: IndexError: tuple index out of range. More specifically, I think it comes from D.shape[2]. I'll try to fix it and get back to you.

Well, I can't edit the comment after 5 minutes. I changed it to D.shape[1] and thus got 3 (since my images are RGB). However, the d[...] = s line gives me this error: ValueError: could not broadcast input array from shape (512,3) into shape (512,3,3). I'm having trouble debugging it because I don't quite understand that notation.

I just realized I made a mistake in my OP: I've edited it. predictions is a list of arrays containing the multiple sub-images cropped from the original image... I had forgotten the first index, which is the cropped_image number (so if the original image was cropped into 12 images, the first index of predictions ranges from 0 to 11). It's a list of all the cropped parts (all numpy arrays) of the original image. So if the original image was cropped into 12 smaller images, it's a list of 12 elements.

@payne OK, the quickest fix would be to structure predictions the same way as chops/crops --- see the function nest in the revised code. Let me know whether that works for you.
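To make the mismatch from these comments concrete, a small toy sketch (hypothetical 512x512 RGB tiles): when the flat predictions list is passed where the nested chops layout is expected, indexing one level too deep yields a single pixel row instead of a whole tile, which explains both errors above.

import numpy as np

# Toy sketch of the mismatch (made-up shapes, not the real model output):
tile = np.zeros((512, 512, 3), dtype=np.uint8)
flat_predictions = [tile, tile, tile, tile]     # flat list, as in the original OP

# speed_block_2D-style code expects rows of tiles, so chops[0][0] is a tile.
# With a flat list, indexing twice picks out a single pixel row instead:
D = flat_predictions[0][0]
print(D.shape)                  # (512, 3) -> D.shape[2] raises IndexError

# Nesting the flat list first (what nest() does) restores the expected layout:
nested = [flat_predictions[:2], flat_predictions[2:]]
print(nested[0][0].shape)       # (512, 512, 3)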