C++ 将每个通道像素与给定向量相乘

C++ 将每个通道像素与给定向量相乘（标签：c++、algorithm、opencv、math）。基本上我想把图像的每个通道值乘以一个向量的对应分量，例如 Vec4f(1,2,3,4)：1 乘以红色通道、2 乘以绿色通道，以此类推。下面是我的代码（不是完整的代码，还有一些错误），但我的老板说 OpenCV 一定有更好、更简单的方法，可我找不到。先谢谢大家。

基本上我想用向量乘以图像的每个channel值。例如Vec4f(1,2,3,4)1*图像的红色通道2*绿色通道等等,这是我的代码(不是完整的代码,有一些错误),但我的老板说opencv一定有更好更简单的方法,但我找不到。先谢谢你

      // Scales each channel of the image by the matching component of `scale`
      // (e.g. Vec4f(1,2,3,4): channel 0 * 1, channel 1 * 2, ...).
      //
      // Fixes over the original draft:
      //  * `bgr[i].at[x, y]` is not valid C++ — cv::Mat::at is a template
      //    function taking (row, col), i.e. at<float>(y, x), row first.
      //  * cv::Vec4f has no .x()/.y()/.z()/.w() accessors; components are
      //    read with operator[], i.e. scale[0] .. scale[3].
      //  * static_cast<uint8_t> truncated the float channel values.
      //  * the `channels() == 2` test was duplicated; the second branch was
      //    evidently meant to be the single-channel case.
      //  * the function's closing brace was missing.
      //
      // All of this collapses into one vectorized OpenCV call: the arithmetic
      // functions accept a cv::Scalar operand that is applied per channel, so
      // no manual pixel loop (and no split) is needed. cv::Scalar holds four
      // doubles, which covers 1- to 4-channel images uniformly; surplus
      // components are ignored for images with fewer channels.
      //
      // NOTE(review): like the original, this scales only levels[0].images[0]
      // — confirm other mip levels / faces are not expected to be scaled.
      void scaleImage(TextureData& dst, TextureData const& src, cv::Vec4f
      const& scale)
      {
          dst = src;  // copy metadata and pixels, then scale in place
          cv::Mat& img = dst.levels[0].images[0];
          cv::multiply(img, cv::Scalar(scale[0], scale[1], scale[2], scale[3]), img);
      }
void scaleImage(TextureData& dst, TextureData const& src, cv::Vec4f
const& scale)
{
auto size = src.info.size;
dst = src;
cv::Mat bgr[4];
cv::split(src.levels[0].images[0], bgr);
for (int y = 0; y < size.height; ++y) ...
我不太确定你具体想做什么，不过你考虑过 cv::multiply 吗？

  • 你使用哪种版本的opencv
  • 它是常数因子还是常数矩阵

好的，这是一个有点奇怪的解决方案。我想应该不会再有人需要这个功能了，但以防万一还是贴出来——它花了我 2-3 天时间（虽然只有 25 行）。

void scaleImage(TextureData& dst, TextureData const& src, Vec4f const&
scale)
{
std::vector<cv::Mat> bgr(3);
std::vector<cv::Mat> resultMask(3);
src.info;
cv::split(src.levels[0].images[0], bgr);
//dst = src;
//auto size = src.info.size;
cv::Point anchor = cv::Point(-1, -1);
float a = scale.x();
float b = scale.y();
float c = scale.z();
float d = scale.w();

cv::Mat kern = (cv::Mat_<float>(3, 3) << ...);

（说明：Vec4f 只是一个嵌套类；const 的源 Mat 在 TextureData 类里，基本上 source 就是 Mat。我用的是最新版本的 OpenCV。非常感谢 Antonio——但是 Mat * scalar 会报错。）
  // NOTE(review): this answer scales each colour plane by convolving it with a
  // 3x3 kernel whose only non-zero element is the centre weight — which is
  // mathematically just a per-pixel scalar multiply of that plane.
  // cv::multiply(img, cv::Scalar(a, b, c, d), img) would do the same in one
  // call without the filter machinery.
  void scaleImage(TextureData& dst, TextureData const& src, Vec4f const& 
  scale)
  {    
   std::vector<cv::Mat> bgr(3);         // source split into single-channel planes
   std::vector<cv::Mat> resultMask(3);  // scaled output planes
   src.info;  // NOTE(review): no effect — the value is discarded

  cv::split(src.levels[0].images[0], bgr);
  //dst = src;
  //auto size = src.info.size;

   cv::Point anchor = cv::Point(-1, -1);  // (-1,-1): anchor at the kernel centre

        // Per-channel scale factors. NOTE(review): assumes Vec4f is a project
        // type with x()/y()/z()/w() accessors — cv::Vec4f only has operator[];
        // confirm which Vec4f is in scope here.
        float a = scale.x();
        float b = scale.y();
        float c = scale.z();
        float d = scale.w();
        // Each kernel multiplies the centre pixel by one factor and ignores
        // all neighbours (every other weight is zero).
        cv::Mat kern = (cv::Mat_<float>(3, 3) << 0, 0, 0,
                                                 0, a, 0,
                                                 0, 0, 0);
        cv::Mat kern1 = (cv::Mat_<float>(3, 3) << 0, 0, 0,
                                                 0, b, 0,
                                                 0, 0, 0);
        cv::Mat kern2 = (cv::Mat_<float>(3, 3) << 0, 0, 0,
                                                 0, c, 0,
                                                 0, 0, 0);
        cv::Mat kern3 = (cv::Mat_<float>(3, 3) << 0, 0, 0,
                                                 0, d, 0,
                                                 0, 0, 0);
        cv::filter2D(bgr[0], resultMask[0], bgr[0].depth(), kern,  anchor, 0, 1);
        cv::filter2D(bgr[1], resultMask[1], bgr[1].depth(), kern1, anchor, 0, 1);
        cv::filter2D(bgr[2], resultMask[2], bgr[2].depth(), kern2, anchor, 0, 1);
        // NOTE(review): the 4th (alpha) channel is never scaled — bgr/resultMask
        // are sized 3 and kern3 is unused; confirm dropping alpha is intended.
        //cv::filter2D(bgr[3], resultMask[3], bgr[3].depth(), kern3, anchor, 0, 1);

        cv::merge(resultMask, dst.levels[0].images[0]);