How to resize without interpolation (zero fill) in OpenCV?

Tags: c++, opencv, image-resizing

Is there an efficient way to resize an image in OpenCV without using any interpolation? Instead of a traditional resize, I would like my image's pixels to be remapped into a larger image, with everything else filled with 0.

e.g. upscaling img1 by 2x into img2 below:

img1 = [ 1, 2, 3,
         4, 5, 6,
         7, 8, 9 ]

cv::resize(img1, img2, cv::Size(6, 6));

img2 = [ 1, 0, 2, 0, 3, 0,
         0, 0, 0, 0, 0, 0,
         4, 0, 5, 0, 6, 0,
         0, 0, 0, 0, 0, 0,
         7, 0, 8, 0, 9, 0,
         0, 0, 0, 0, 0, 0 ]

I know the obvious way would be to just use a for loop, but I'm wondering whether there is a more efficient way using OpenCV calls.
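
For reference, a minimal sketch of the loop-based approach mentioned above (my own illustration, not from the original post; it assumes a single-channel 8-bit image and integer scale factors, and the helper name is illustrative):

#include <opencv2/opencv.hpp>
#include <cstdint>

// Naive zero-fill upscale: copy each source pixel to (y * sy, x * sx) in a
// zero-initialized destination; every other destination pixel stays 0.
cv::Mat upscale_zero_fill(const cv::Mat& src, int sx, int sy)
{
    cv::Mat dst = cv::Mat::zeros(src.rows * sy, src.cols * sx, src.type());
    for (int y = 0; y < src.rows; ++y)
        for (int x = 0; x < src.cols; ++x)
            dst.at<uint8_t>(y * sy, x * sx) = src.at<uint8_t>(y, x);
    return dst;
}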

One option that comes to mind would be to use cv::resize with INTER_NEAREST and then mask out the unwanted pixels.

Example:

#include <opencv2/opencv.hpp>

#include <cstdint>
#include <iostream>

int main()
{
    cv::Mat m1((cv::Mat_<uint8_t>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9));
    std::cout << "Input:\n" << m1 << "\n\n";

    cv::Mat mask((cv::Mat_<uint8_t>(2, 2) << 255, 0, 0, 0));
    mask = cv::repeat(mask, m1.rows, m1.cols);
    std::cout << "Mask:\n" << mask << "\n\n";

    cv::Mat m2;
    cv::resize(m1, m2, cv::Size(), 2, 2, cv::INTER_NEAREST);
    std::cout << "Resized:\n" << m2 << "\n\n";

    cv::bitwise_and(m2, mask, m2);
    std::cout << "Masked:\n" << m2 << "\n\n";
}
Input:
[  1,   2,   3;
   4,   5,   6;
   7,   8,   9]

Mask:
[255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0;
 255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0;
 255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0]

Resized:
[  1,   1,   2,   2,   3,   3;
   1,   1,   2,   2,   3,   3;
   4,   4,   5,   5,   6,   6;
   4,   4,   5,   5,   6,   6;
   7,   7,   8,   8,   9,   9;
   7,   7,   8,   8,   9,   9]

Masked:
[  1,   0,   2,   0,   3,   0;
   0,   0,   0,   0,   0,   0;
   4,   0,   5,   0,   6,   0;
   0,   0,   0,   0,   0,   0;
   7,   0,   8,   0,   9,   0;
   0,   0,   0,   0,   0,   0]
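
The same idea should extend to other integer scale factors by changing both the pattern size and the resize factors together. A hedged sketch for 3x upscaling (untested, reusing m1 and m2 from the example above):

// Hypothetical 3x variant: 3x3 pattern with only the top-left element set,
// tiled to the output size, combined with a 3x nearest-neighbor resize.
cv::Mat mask((cv::Mat_<uint8_t>(3, 3) << 255, 0, 0,
                                           0, 0, 0,
                                           0, 0, 0));
mask = cv::repeat(mask, m1.rows, m1.cols);
cv::resize(m1, m2, cv::Size(), 3, 3, cv::INTER_NEAREST);
cv::bitwise_and(m2, mask, m2);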

Update

If we eliminate the parts of Miki's code that aren't needed for this particular scenario, we can pretty much reduce it to a simple loop.

Doing some quick comparisons, this turns out to be somewhat faster.

#include <opencv2/opencv.hpp>

#include <chrono>
#include <cstdint>
#include <iostream>

// Simple loop: copy each source pixel to the even (row, col) positions
// of a zero-initialized output.
cv::Mat resize_1(cv::Mat image)
{
    cv::Mat result(cv::Mat::zeros(image.rows * 2, image.cols * 2, CV_8UC1));

    for (int ra(0); ra < image.rows; ++ra) {
        for (int ca = 0; ca < image.cols; ++ca) {
            result.at<uint8_t>(ra * 2, ca * 2) = image.at<uint8_t>(ra, ca);
        }
    }

    return result;
}

// INTER_NEAREST resize followed by masking out the unwanted pixels.
cv::Mat resize_2(cv::Mat image)
{
    cv::Mat mask((cv::Mat_<uint8_t>(2, 2) << 255, 0, 0, 0));
    mask = cv::repeat(mask, image.rows, image.cols);

    cv::Mat result;
    cv::resize(image, result, cv::Size(), 2, 2, cv::INTER_NEAREST);
    cv::bitwise_and(result, mask, result);

    return result;
}

template<typename T>
void timeit(T f)
{
    using std::chrono::high_resolution_clock;
    using std::chrono::duration_cast;
    using std::chrono::microseconds;

    cv::Mat m1((cv::Mat_<uint8_t>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9));
    m1 = cv::repeat(m1, 1024, 1024);

    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    for (uint32_t i(0); i < 256; ++i) {
        cv::Mat result = f(m1);
    }
    high_resolution_clock::time_point t2 = high_resolution_clock::now();

    auto duration = duration_cast<microseconds>(t2 - t1).count();
    double t_ms(static_cast<double>(duration) / 1000.0);
    std::cout
        << "Total = " << t_ms << " ms\n"
        << "Iteration = " << (t_ms / 256) << " ms\n"
        << "FPS = " << (256 / t_ms * 1000.0) << "\n";
}

int main()
{
    timeit(&resize_1);
    timeit(&resize_2);
}
resize_2:

Total = 7271.31 ms
Iteration = 28.4036 ms
FPS = 35.2068
Update 2

A parallelized version:

class ResizeInvoker : public cv::ParallelLoopBody
{
public:
    ResizeInvoker(cv::Mat const& src, cv::Mat& dst)
        : image(src)
        , result(dst)
    {
    }

    void operator()(const cv::Range& range) const
    {
        for (int y(range.start); y < (range.end); ++y) {
            for (int x(0); x < image.cols; ++x) {
                result.at<uint8_t>(y * 2, x * 2) = image.at<uint8_t>(y, x);
            }
        }
    }

    cv::Mat const& image;
    cv::Mat& result;
};

cv::Mat resize_3(cv::Mat image)
{
    cv::Mat result(cv::Mat::zeros(image.rows * 2, image.cols * 2, CV_8UC1));

    ResizeInvoker loop_body(image, result);
    cv::parallel_for_(cv::Range(0, image.rows)
        , loop_body
        , result.total() / (double)(1 << 16));

    return result;
}
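
To plug the parallel version into the same benchmark and sanity-check it against the simple loop, something like the following should work (a sketch reusing the timeit, resize_1 and resize_3 definitions above; the equality check is my addition, not from the original post):

int main()
{
    timeit(&resize_3);

    // Sanity check: the parallel version should produce exactly the same
    // output as the simple loop in resize_1.
    cv::Mat m1((cv::Mat_<uint8_t>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9));
    m1 = cv::repeat(m1, 1024, 1024);
    CV_Assert(cv::countNonZero(resize_1(m1) != resize_3(m1)) == 0);
}
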
Update 3

We can do a little better if we use raw pointers in the invoker:

void operator()(const cv::Range& range) const
{
    for (int y(range.start); y < range.end; ++y) {
        // Row pointers: destination row is 2 * y, source row is y.
        uint8_t* D = result.data + result.step * y * 2;
        uint8_t const* S = image.data + image.step * y;
        for (int x(0); x < image.cols; ++x) {
            D[x * 2] = S[x];
        }
    }
}
You can use the Kronecker product of your image and a pattern like:

1, 0
0, 0
The result will be:

Input:
[1, 2, 3;
 4, 5, 6;
 7, 8, 9]

Output:
[1, 0, 2, 0, 3, 0;
 0, 0, 0, 0, 0, 0;
 4, 0, 5, 0, 6, 0;
 0, 0, 0, 0, 0, 0;
 7, 0, 8, 0, 9, 0;
 0, 0, 0, 0, 0, 0]
Code:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

Mat1b kron(const Mat1b& A, const Mat1b& B)
{
    Mat1b K(A.rows * B.rows, A.cols * B.cols, uchar(0));
    for (int ra = 0; ra < A.rows; ++ra)
    {
        for (int ca = 0; ca < A.cols; ++ca)
        {
            K(Range(ra*B.rows, (ra + 1)*B.rows), Range(ca*B.cols, (ca + 1)*B.cols)) = B.mul(A(ra, ca));
        }
    }
    return K;
}

int main()
{
    Mat1b img = (Mat1b(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    std::cout << "Input:\n" << img << "\n\n";

    // Define the pattern
    Mat1b pattern = (Mat1b(2, 2) << 1, 0,
                                    0, 0);

    Mat1b out = kron(img, pattern);
    std::cout << "Output:\n" << out << "\n\n";

    return 0;
}
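
Since the kron helper is generic, other integer scale factors should just be a matter of using a larger pattern. A hedged sketch for 3x upscaling (my own illustration; the pattern3/out3 names are not from the original answer):

// Hypothetical 3x upscaling: a 3x3 pattern with only the top-left
// element set, reusing the kron() helper defined above.
Mat1b pattern3 = (Mat1b(3, 3) << 1, 0, 0,
                                 0, 0, 0,
                                 0, 0, 0);
Mat1b out3 = kron(img, pattern3);   // 9x9 result for the 3x3 input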

Thought of sharing the following approach, as it is a bit different. I don't know how efficient it is compared to the other methods, but at least you can use OpenCV calls without any loops, and you can easily use arbitrary scale factors for x and y.

First convert your image to floating point type, then scale it using warpAffine (with linear interpolation). Resize the same image using nearest-neighbor interpolation. Compare the two resulting images element-wise to obtain a mask, and use this mask to copy the relevant elements from either of the resized images.

Here is the code and some results I got:

uchar data[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
Mat im(3, 3, CV_8U, data);
im.convertTo(im, CV_32F);
// x and y scale
int xscale = 2, yscale = 2;
Size size(im.cols * xscale, im.rows * yscale);
float tr[] = { (float)xscale, 0, 0, 0, (float)yscale, 0 };  // cast to avoid narrowing int -> float
Mat m(2, 3, CV_32F, tr);    // transformation matrix

Mat resized1, resized2;
warpAffine(im, resized1, m, size);  // affine scaling with linear interpolation
resize(im, resized2, size, 0, 0, INTER_NEAREST);    // resize with nearest neighbor
// get the mask
Mat resized = resized1 == resized2;
// copy the pixels
resized1.copyTo(resized, resized);

cout << "image:\n" << im << endl;
cout << "M:\n" << m << endl;
cout << "affine(scaled):\n" << resized1 << endl;
cout << "resized:\n" << resized2 << endl;
cout << "mask:\n" << resized << endl;
cout << "output:\n" << resized << endl;
With xscale = 2 and yscale = 2 (as in the code above), this prints:

image:
[1, 2, 3;
  4, 5, 6;
  7, 8, 9]
M:
[2, 0, 0;
  0, 2, 0]
affine(scaled):
[1, 1.5, 2, 2.5, 3, 1.5;
  2.5, 3, 3.5, 4, 4.5, 2.25;
  4, 4.5, 5, 5.5, 6, 3;
  5.5, 6, 6.5, 7, 7.5, 3.75;
  7, 7.5, 8, 8.5, 9, 4.5;
  3.5, 3.75, 4, 4.25, 4.5, 2.25]
resized:
[1, 1, 2, 2, 3, 3;
  1, 1, 2, 2, 3, 3;
  4, 4, 5, 5, 6, 6;
  4, 4, 5, 5, 6, 6;
  7, 7, 8, 8, 9, 9;
  7, 7, 8, 8, 9, 9]
mask:
[1, 0, 2, 0, 3, 0;
  0, 0, 0, 0, 0, 0;
  4, 0, 5, 0, 6, 0;
  0, 0, 0, 0, 0, 0;
  7, 0, 8, 0, 9, 0;
  0, 0, 0, 0, 0, 0]
output:
[1, 0, 2, 0, 3, 0;
  0, 0, 0, 0, 0, 0;
  4, 0, 5, 0, 6, 0;
  0, 0, 0, 0, 0, 0;
  7, 0, 8, 0, 9, 0;
  0, 0, 0, 0, 0, 0]

For xscale = 4 and yscale = 3:

output:
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  7, 0, 0, 0, 8, 0, 0, 0, 9, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

Comments:

After a bit of evaluation, I found that your approach is noticeably faster on larger images, at least on my PC ;D. That's because my kron function is not optimized.

@Miki Just ran some small benchmarks and came to the same conclusion. However, if we trim the code down to just what the resize needs, we basically end up with a simple loop, which is slightly faster than my approach.

@Miki Thanks :) I was also thinking that the final version might get auto-vectorized by some compilers if we add D[x * 2 + 1] = 0; in there. Just a gut feeling, no environment to test it on.
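
For what it's worth, the modification suggested in the last comment would look something like this in the Update 3 invoker (untested, as the comment says; the explicit zero store is redundant for correctness since the destination is zero-initialized, but it may make the inner loop easier for a compiler to vectorize):

void operator()(const cv::Range& range) const
{
    for (int y(range.start); y < range.end; ++y) {
        uint8_t* D = result.data + result.step * y * 2;
        uint8_t const* S = image.data + image.step * y;
        for (int x(0); x < image.cols; ++x) {
            D[x * 2] = S[x];
            D[x * 2 + 1] = 0;   // explicit zero write, as suggested in the comments
        }
    }
}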