Python 将RGB阵列转换为HSL

Python 将RGB阵列转换为HSL,python,numpy,rgb,hsl,Python,Numpy,Rgb,Hsl,首先是免责声明,我不是很精通Python,我很钦佩你们 我的问题是: 我需要从具有各种色调和亮度的模板(128px x 128px)生成10k+图像 我加载图像并将其转换为数组 image = Image.open(dir + "/" + file).convert('RGBA') arr=np.array(np.asarray(image).astype('float')) 据我所知,以这种方式处理numpy阵列比在每个像素上循环并使用colorsys快得多 现在,我偶然发现了几个将rgb转

首先是免责声明,我不是很精通Python,我很钦佩你们

我的问题是: 我需要从具有各种色调和亮度的模板(128px x 128px)生成10k+图像

我加载图像并将其转换为数组

image = Image.open(dir + "/" + file).convert('RGBA')
arr=np.array(np.asarray(image).astype('float'))
据我所知,以这种方式处理numpy阵列比在每个像素上循环并使用colorsys快得多

现在,我偶然发现了几个将rgb转换为hsv的函数。 这有助于我生成具有不同色调的图像,但我还需要使用亮度,这样一些图像可以是黑色的,而另一些图像可以是白色的

def rgb_to_hsv(rgb):
    """Vectorised RGB(A) -> HSV(A) conversion.

    Translated from the source of colorsys.rgb_to_hsv.  The last axis
    holds R, G, B in its first three slots; any extra channels (e.g.
    alpha) are copied through untouched.  Channels are assumed to share
    one scale (e.g. all in [0, 1]); H and S come back in [0, 1].
    """
    hsv = np.empty_like(rgb)
    hsv[..., 3:] = rgb[..., 3:]          # pass alpha (etc.) through
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    # BUG FIX: the slice must span all three colour channels ([:3]);
    # the original [:2] made max/min ignore the blue channel entirely.
    maxc = np.max(rgb[..., :3], axis=-1)
    minc = np.min(rgb[..., :3], axis=-1)
    hsv[..., 2] = maxc
    # Grey/black pixels divide by zero here; suppress the warnings and
    # overwrite those entries below where minc == maxc.
    with np.errstate(divide='ignore', invalid='ignore'):
        hsv[..., 1] = (maxc - minc) / maxc
        rc = (maxc - r) / (maxc - minc)
        gc = (maxc - g) / (maxc - minc)
        bc = (maxc - b) / (maxc - minc)
        hsv[..., 0] = np.select([r == maxc, g == maxc],
                                [bc - gc, 2.0 + rc - bc],
                                default=4.0 + gc - rc)
        hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
    idx = (minc == maxc)                 # achromatic: hue/sat defined as 0
    hsv[..., 0][idx] = 0.0
    hsv[..., 1][idx] = 0.0
    return hsv

def hsv_to_rgb(hsv):
    """Vectorised HSV(A) -> RGB(A) conversion, mirroring colorsys.hsv_to_rgb.

    The last axis holds H, S, V in its first three slots; any further
    channels (e.g. alpha) are copied through untouched.  H is expected
    in [0, 1].
    """
    out = np.empty_like(hsv)
    out[..., 3:] = hsv[..., 3:]
    hue, sat, val = hsv[..., 0], hsv[..., 1], hsv[..., 2]
    # Which sixth of the colour wheel each hue falls in, plus the
    # fractional position inside that sector.
    sector = (hue * 6.0).astype('uint8')
    frac = (hue * 6.0) - sector
    lo = val * (1.0 - sat)                  # "p" in colorsys
    dn = val * (1.0 - sat * frac)           # "q" in colorsys
    up = val * (1.0 - sat * (1.0 - frac))   # "t" in colorsys
    sector = sector % 6
    # First condition handles the achromatic (s == 0) case; the rest
    # pick sectors 1..5, with sector 0 covered by the default branch.
    conds = [sat == 0.0] + [sector == k for k in range(1, 6)]
    out[..., 0] = np.select(conds, [val, dn, lo, lo, up, val], default=val)
    out[..., 1] = np.select(conds, [val, val, val, dn, lo, lo], default=up)
    out[..., 2] = np.select(conds, [val, lo, up, val, val, dn], default=lo)
    return out
将这些函数修改为转换到 HSL、再从 HSL 转换回来，会有多容易？有什么技巧可以把 HSV 转换成 HSL 吗


非常感谢您提供的任何信息,谢谢

是的,
numpy
,即矢量化代码,可以加速颜色转换

此外,对于10k+位图的大规模制作,如果与首选亮度模型不完全匹配,则可能需要重新使用现成的专业转换,或将其细分

计算机视觉库OpenCV目前作为一个
cv2
模块可用于python,它可以在不进行任何额外编码的情况下处理颜色系统转换,只需执行以下操作:

一个现成的单行转换（one-liner）
cv2
中可用的一些颜色系统的列表(您可能会注意到
RGB
被称为
BRG
,因为OpenCV约定图像的Blue-Red-G绿色颜色平面的顺序不同)

(对称性适用于
COLOR_YCR_CB2BGR
COLOR_BGR2YCR_CB
未显示所有对)

我做了一些亮度转换的原型(基于>>>)

但没有测试发布

def get_YUV_V_Cr_Rec601_BRG_frame( brgFRAME ):
    """Return the Cr ("V") plane of a frame, for the Rec. 601 primaries
    used in gamma-corrected sRGB.

    Fast, vectorised MUL/ADD code: the /255 is folded into each weight
    so 8-bit channel data is normalised to <0.0 - 1.0> on the fly
    (measured ~2.2 [msec] instead of ~5.4 [msec] on a 480x640 [px] frame).
    """
    cr_plane = numpy.zeros( brgFRAME.shape[0:2] )
    cr_plane += 0.615 / 255 * brgFRAME[:,:,1]    # // Red plane, weighted
    cr_plane -= 0.515 / 255 * brgFRAME[:,:,2]    # // Green plane, weighted
    cr_plane -= 0.100 / 255 * brgFRAME[:,:,0]    # // Blue plane, weighted
    return cr_plane
def get_YUV_V_Cr_Rec601_BRG_frame(brgFRAME):  # 对于伽马校正sRGB中使用的Rec.601原色，快速、矢量化MUL/ADD代码
    out = numpy.zeros(brgFRAME.shape[0:2])
    out += 0.615 / 255 * brgFRAME[:,:,1]  # // 红色  # 在矢量化MUL/ADD之前标准化为 <0.0 - 1.0>，可节省 [usec] …… 在 480x640 [px] 上约需 2.2 [msec]，而不是 5.4 [msec]
    out -= 0.515 / 255 * brgFRAME[:,:,2]  # // 绿色
    out -= 0.100 / 255 * brgFRAME[:,:,0]  # // 蓝色  # 在矢量化MUL/ADD之前标准化为 <0.0 - 1.0>
    return out

谢谢你的回答。请原谅我的无知，但是我提供给brgFRAME的值是我的image numpy数组吗？我得到了 ValueError：缓冲区不够大，
brgFRAME
是通过
.read()
方法在
cv2.VideoCapture(0)
的实例上获取的,但是它会生成一个
numpy.array()
实例。你的
.shape
.size
是什么,
.dtype
关于?到目前为止,我所拥有的是:模板是png24,具有一定的透明度。您是否跟踪了
ValueError:
是在何处被
raise
出来的？我在第51行使用
get_YUV_V_Cr_Rec601_BRG_frame
方法而不是 cv2.cvtColor 时出错。现在，我得到的错误也出现在第51行 cv2.cvtColor 上，它说的是
OpenCV错误:断言失败(depth==CV_8U | | depth==CV_16U | | depth==CV_32F),在cvtColor,file/build/buildd/OpenCV-2.4.8+dfsg1/modules/imgproc/src/color.cpp,第3642行
欢迎使用堆栈溢出!请不要只回答源代码。试着提供一个关于你的解决方案如何工作的很好的描述。请参阅:。谢谢
>>> import cv2
>>> for key in dir( cv2 ):                              # show all ready conversions
...     if key[:7] == 'COLOR_Y':
...         print key

COLOR_YCR_CB2BGR
COLOR_YCR_CB2RGB
COLOR_YUV2BGR
COLOR_YUV2BGRA_I420
COLOR_YUV2BGRA_IYUV
COLOR_YUV2BGRA_NV12
COLOR_YUV2BGRA_NV21
COLOR_YUV2BGRA_UYNV
COLOR_YUV2BGRA_UYVY
COLOR_YUV2BGRA_Y422
COLOR_YUV2BGRA_YUNV
COLOR_YUV2BGRA_YUY2
COLOR_YUV2BGRA_YUYV
COLOR_YUV2BGRA_YV12
COLOR_YUV2BGRA_YVYU
COLOR_YUV2BGR_I420
COLOR_YUV2BGR_IYUV
COLOR_YUV2BGR_NV12
COLOR_YUV2BGR_NV21
COLOR_YUV2BGR_UYNV
COLOR_YUV2BGR_UYVY
COLOR_YUV2BGR_Y422
COLOR_YUV2BGR_YUNV
COLOR_YUV2BGR_YUY2
COLOR_YUV2BGR_YUYV
COLOR_YUV2BGR_YV12
COLOR_YUV2BGR_YVYU
COLOR_YUV2GRAY_420
COLOR_YUV2GRAY_I420
COLOR_YUV2GRAY_IYUV
COLOR_YUV2GRAY_NV12
COLOR_YUV2GRAY_NV21
COLOR_YUV2GRAY_UYNV
COLOR_YUV2GRAY_UYVY
COLOR_YUV2GRAY_Y422
COLOR_YUV2GRAY_YUNV
COLOR_YUV2GRAY_YUY2
COLOR_YUV2GRAY_YUYV
COLOR_YUV2GRAY_YV12
COLOR_YUV2GRAY_YVYU
COLOR_YUV2RGB
COLOR_YUV2RGBA_I420
COLOR_YUV2RGBA_IYUV
COLOR_YUV2RGBA_NV12
COLOR_YUV2RGBA_NV21
COLOR_YUV2RGBA_UYNV
COLOR_YUV2RGBA_UYVY
COLOR_YUV2RGBA_Y422
COLOR_YUV2RGBA_YUNV
COLOR_YUV2RGBA_YUY2
COLOR_YUV2RGBA_YUYV
COLOR_YUV2RGBA_YV12
COLOR_YUV2RGBA_YVYU
COLOR_YUV2RGB_I420
COLOR_YUV2RGB_IYUV
COLOR_YUV2RGB_NV12
COLOR_YUV2RGB_NV21
COLOR_YUV2RGB_UYNV
COLOR_YUV2RGB_UYVY
COLOR_YUV2RGB_Y422
COLOR_YUV2RGB_YUNV
COLOR_YUV2RGB_YUY2
COLOR_YUV2RGB_YUYV
COLOR_YUV2RGB_YV12
COLOR_YUV2RGB_YVYU
COLOR_YUV420P2BGR
COLOR_YUV420P2BGRA
COLOR_YUV420P2GRAY
COLOR_YUV420P2RGB
COLOR_YUV420P2RGBA
COLOR_YUV420SP2BGR
COLOR_YUV420SP2BGRA
COLOR_YUV420SP2GRAY
COLOR_YUV420SP2RGB
COLOR_YUV420SP2RGBA
def        get_YUV_V_Cr_Rec601_BRG_frame( brgFRAME ):                   # For the Rec. 601 primaries used in gamma-corrected sRGB, fast, VECTORISED MUL/ADD CODE
    # Returns the Cr ("V") plane of the frame as a 2-D float array of
    # shape brgFRAME.shape[0:2].
    # NOTE(review): the /255 folded into each weight assumes 8-bit
    # channel data -- confirm against the capture source.
    out =  numpy.zeros( brgFRAME.shape[0:2] )
    out += 0.615 / 255 * brgFRAME[:,:,1]    # // Red                    # normalise to <0.0 - 1.0> before vectorised MUL/ADD, saves [usec] ... on 480x640 [px] faster goes about 2.2 [msec] instead of 5.4 [msec]
    out -= 0.515 / 255 * brgFRAME[:,:,2]    # // Green
    out -= 0.100 / 255 * brgFRAME[:,:,0]    # // Blue                   # normalise to <0.0 - 1.0> before vectorised MUL/ADD
    return out
# -*- coding: utf-8 -*-
# @File    : rgb2hls.py
# @Info    : @ TSMC
# @Desc    :


import colorsys

import numpy as np
import scipy.misc
import tensorflow as tf
from PIL import Image


def rgb2hls(img):
    """Per-pixel RGB -> HLS via colorsys (slow reference implementation).

    note: elements in img are float numbers in [0, 1].
    :param img: a numpy ndarray with shape HWC (H, W, 3)
    :return: tuple (hue, luminance, saturation) of (H, W) float arrays
    """
    assert len(img.shape) == 3
    # BUG FIX: height/width were read from module-level globals defined in
    # __main__; derive them from the image so the function is self-contained.
    height, width = img.shape[0], img.shape[1]
    hue = np.zeros_like(img[:, :, 0])
    luminance = np.zeros_like(img[:, :, 0])
    saturation = np.zeros_like(img[:, :, 0])
    for x in range(height):
        for y in range(width):
            r, g, b = img[x, y]
            h, l, s = colorsys.rgb_to_hls(r, g, b)
            hue[x, y] = h
            luminance[x, y] = l
            saturation[x, y] = s
    return hue, luminance, saturation


def np_rgb2hls(img):
    """Vectorised RGB -> HLS, matching colorsys.rgb_to_hls per pixel.

    Fixes over the original version:
    - no 0/0 for pure-black pixels (the old s = delta / (maxc + minc)
      divided by zero unless the WHOLE image was flat);
    - exact math instead of the 0.001 / 2.001 epsilon hacks;
    - hue branch priority matches colorsys: when r == g == maxc the old
      rmask and gmask both fired and their hue terms were summed.

    note: elements in img are float numbers in [0, 1].
    :param img: a numpy ndarray with shape HWC (H, W, 3)
    :return: tuple (h, l, s) of (H, W) float arrays
    """
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]

    maxc = np.max(img, -1)
    minc = np.min(img, -1)
    l = (minc + maxc) / 2.0

    delta = maxc - minc
    achromatic = delta == 0  # grey pixels: h and s are defined as 0
    # Safe denominator: 1.0 where the pixel is achromatic so the
    # divisions never hit 0/0; those entries are masked out at the end.
    safe_delta = np.where(achromatic, 1.0, delta)

    # colorsys: s = delta/(maxc+minc) for l <= 0.5, else delta/(2-maxc-minc).
    smask = np.greater(l, 0.5).astype(img.dtype)
    s_den = (1.0 - smask) * (maxc + minc) + smask * (2.0 - maxc - minc)
    s_den = np.where(s_den == 0, 1.0, s_den)  # black/white corners
    s = delta / s_den

    rc = (maxc - r) / safe_delta
    gc = (maxc - g) / safe_delta
    bc = (maxc - b) / safe_delta

    # Mutually exclusive branch masks, same priority as colorsys
    # (r first, then g, else b).
    rmask = np.equal(r, maxc).astype(img.dtype)
    gmask = (1.0 - rmask) * np.equal(g, maxc).astype(img.dtype)
    bmask = 1.0 - rmask - gmask

    h = rmask * (bc - gc) + gmask * (2.0 + rc - bc) + bmask * (4.0 + gc - rc)
    h = np.remainder(h / 6.0, 1.0)
    h = np.where(achromatic, 0.0, h)
    s = np.where(achromatic, 0.0, s)
    return h, l, s


def tf_rgb2hls(img):
    """ note: elements in img all in [0,1]
    :param img: a tensor with shape NHWC
    :return:
    """
    # NOTE(review): the img[:, :, c] indexing below treats img as a single
    # HWC image, despite the NHWC claim in the docstring -- the __main__
    # block does feed it one HWC image; confirm against other callers.
    assert img.get_shape()[-1] == 3
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    maxc = tf.reduce_max(img, -1)
    minc = tf.reduce_min(img, -1)

    # Lightness is the midpoint of the channel extremes.
    l = (minc + maxc) / 2.0

    # if tf.reduce_all(tf.equal(minc, maxc)):
    #     return tf.zeros_like(l), l, tf.zeros_like(l)
    # smask selects the l > 0.5 branch of the HLS saturation formula.
    smask = tf.cast(tf.greater(l, 0.5), tf.float32)

    # The 2.001 constant and the +0.001 terms below are ad-hoc epsilons
    # that avoid division by zero on grey/white pixels, at the cost of a
    # small numeric bias versus colorsys.rgb_to_hls.
    s = (1.0 - smask) * ((maxc - minc) / (maxc + minc)) + smask * ((maxc - minc) / (2.001 - maxc - minc))
    rc = (maxc - r) / (maxc - minc + 0.001)
    gc = (maxc - g) / (maxc - minc + 0.001)
    bc = (maxc - b) / (maxc - minc + 0.001)

    rmask = tf.equal(r, maxc)
    gmask = tf.equal(g, maxc)
    rgmask = tf.cast(tf.logical_or(rmask, gmask), tf.float32)
    rmask = tf.cast(rmask, tf.float32)
    gmask = tf.cast(gmask, tf.float32)

    # NOTE(review): when r == g == maxc both rmask and gmask are 1, so the
    # first two hue terms are summed -- this diverges from colorsys, which
    # picks only the r branch.  Confirm whether that is acceptable here.
    h = rmask * (bc - gc) + gmask * (2.0 + rc - bc) + (1.0 - rgmask) * (4.0 + gc - rc)
    h = tf.mod(h / 6.0, 1.0)

    h = tf.expand_dims(h, -1)
    l = tf.expand_dims(l, -1)
    s = tf.expand_dims(s, -1)

    # Graph mode cannot early-return, so the all-grey fallback (h = s = 0)
    # is expressed as a tf.where between two fully-built results.
    x = tf.concat([tf.zeros_like(l), l, tf.zeros_like(l)], -1)
    y = tf.concat([h, l, s], -1)

    return tf.where(condition=tf.reduce_all(tf.equal(minc, maxc)), x=x, y=y)


if __name__ == '__main__':
    """
    HLS: Hue, Luminance, Saturation
    H: position in the spectrum
    L: color lightness
    S: color saturation
    """
    # Load the test image; raises FileNotFoundError if hue.jpg is absent.
    avatar = Image.open("hue.jpg")
    # NOTE(review): rgb2hls() reads width/height from these module-level
    # globals -- keep them assigned before calling it.
    width, height = avatar.size
    print("width: {}, height: {}".format(width, height))
    img = np.array(avatar)
    # Scale 8-bit channel values into [0, 1], as all converters expect.
    img = img / 255.0
    print(img.shape)

    # # hue, luminance, saturation = rgb2hls(img)
    # hue, luminance, saturation = np_rgb2hls(img)

    img_tensor = tf.convert_to_tensor(img, tf.float32)
    hls = tf_rgb2hls(img_tensor)
    h, l, s = hls[:, :, 0], hls[:, :, 1], hls[:, :, 2]

    # TF1-style graph execution: tf.Session / global_variables_initializer
    # require TensorFlow 1.x (or tf.compat.v1 on TF2).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        hue, luminance, saturation = sess.run([h, l, s])
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; on
        # modern environments use imageio.imwrite or PIL instead.
        scipy.misc.imsave("hls_h_.jpg", hue)
        scipy.misc.imsave("hls_l_.jpg", luminance)
        scipy.misc.imsave("hls_s_.jpg", saturation)