Java 如何修复';仅黑色帧接收';在Android中使用OpenCV

Java 如何修复';仅黑色帧接收';在Android中使用OpenCV,java,android,c++,opencv,java-native-interface,Java,Android,C++,Opencv,Java Native Interface,我正在使用python和openCV为移动应用程序开发类似的增强现实功能。这段代码工作得和我预期的一样好,尽管它有一些杀伤力。我需要做一个Android应用程序,我知道我需要把Python代码转换成C++,并在Android中运行NDK,因为它有一个实时过程。我能够将openCV库加载到我的android项目中,并在本机类和MainActivity之间传递数据。然后我将python代码转换为C++(我不太熟悉),然后运行该项目。但它只给我黑色的框架。程序没有显示错误,但我没有得到预期的输出 我正

我正在使用python和openCV为移动应用程序开发类似的增强现实功能。这段代码工作得和我预期的一样好,尽管它有一些杀伤力。我需要做一个Android应用程序,我知道我需要把Python代码转换成C++,并在Android中运行NDK,因为它有一个实时过程。我能够将openCV库加载到我的android项目中,并在本机类和MainActivity之间传递数据。然后我将python代码转换为C++(我不太熟悉),然后运行该项目。但它只给我黑色的框架。程序没有显示错误,但我没有得到预期的输出

我正在尝试使用
Android Studio 3.3.2
OpenCV4Android 4.1.0
我使用templateMatching方法从捕获的帧中检测输入模板,然后使用alpha混合在检测到的区域上粘贴png,最后使用单应性将该区域添加到帧中

这是我的密码

MainActivity.java

public class MainActivity extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2 {



    private static String TAG = "MainActivity";
    private JavaCameraView javaCameraView;

    // Used to load the 'native-lib' library on application startup.
    static {
        System.loadLibrary("native-lib");
        System.loadLibrary("opencv_java4");
    }

    private Mat mRgba;



    BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch(status){
                case BaseLoaderCallback.SUCCESS:{
                    javaCameraView.enableView();
                    break;
                }
                default:{
                    super.onManagerConnected(status);
                    break;
                }
            }
        }
    };

    private Mat temp, tattoo;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
        javaCameraView.setVisibility(SurfaceView.VISIBLE);
        javaCameraView.setCvCameraViewListener(this);

        AssetManager assetManager = getAssets();

        try {
            InputStream is = assetManager.open("temp.jpg");
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            Bitmap bmp32 = bitmap.copy(Bitmap.Config.ARGB_8888, true);
            temp = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32, temp);

        } catch (IOException e) {
            e.printStackTrace();
        }

        try {
            InputStream isTattoo = assetManager.open("tattoo2.png");
            Bitmap bitmapTattoo = BitmapFactory.decodeStream(isTattoo);
            Bitmap bmp32Tattoo = bitmapTattoo.copy(Bitmap.Config.ARGB_8888, true);
            tattoo = new Mat(bitmapTattoo.getHeight(), bitmapTattoo.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32Tattoo, tattoo);

        } catch (IOException e) {
            e.printStackTrace();
        }




    }

    @Override
    protected void onPause(){
        super.onPause();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onDestroy(){
        super.onDestroy();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onResume(){
        super.onResume();
        if(OpenCVLoader.initDebug()){
            Log.i(TAG, "OpenCV Loaded successfully ! ");
            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
        }else{
            Log.i(TAG, "OpenCV not loaded ! ");
            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback);
        }
    }

    @Override
    public void onCameraViewStarted(int width, int height) {
        mRgba = new Mat(height, width, CvType.CV_8UC4);

    }

    @Override
    public void onCameraViewStopped() {
        mRgba.release();

    }

    @Override
    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        mRgba = inputFrame.rgba();

        augmentation(mRgba.getNativeObjAddr(), temp.getNativeObjAddr(), tattoo.getNativeObjAddr());

        return mRgba;
    }


    public native void augmentation(long matAddrRgba, long tempC, long tattooDesign);
}
本机lib.cpp

#include <jni.h>
#include <string>
#include <opencv2/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

extern "C" {



// Alpha Blending using direct pointer access
Mat& alphaBlendDirectAccess(Mat& alpha, Mat& foreground, Mat& background, Mat& outImage)
{

    int numberOfPixels = foreground.rows * foreground.cols * foreground.channels();

    float* fptr = reinterpret_cast<float*>(foreground.data);
    float* bptr = reinterpret_cast<float*>(background.data);
    float* aptr = reinterpret_cast<float*>(alpha.data);
    float* outImagePtr = reinterpret_cast<float*>(outImage.data);

    int i,j;
    for ( j = 0; j < numberOfPixels; ++j, outImagePtr++, fptr++, aptr++, bptr++)
    {
        *outImagePtr = (*fptr)*(*aptr) + (*bptr)*(1 - *aptr);
    }

    return outImage;
}


Mat& alphaBlend(Mat& foreg, Mat& backgg)
{

    // Read background image
    Mat background = backgg;// cropped frame
    Size sizeBackground = background.size();

    // Read in the png foreground asset file that contains both rgb and alpha information
    // Mat foreGroundImage = imread("foreGroundAssetLarge.png", -1); //resized tattoo
    Mat foreGroundImage = foreg;
    // resize the foreGroundImage to background image size
    resize(foreGroundImage, foreGroundImage, Size(sizeBackground.width,sizeBackground.height));
    Mat bgra[4];
    split(foreGroundImage, bgra);//split png foreground

    // Save the foregroung RGB content into a single Mat
    vector<Mat> foregroundChannels;
    foregroundChannels.push_back(bgra[0]);
    foregroundChannels.push_back(bgra[1]);
    foregroundChannels.push_back(bgra[2]);
    Mat foreground = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(foregroundChannels, foreground);

    // Save the alpha information into a single Mat
    vector<Mat> alphaChannels;
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    Mat alpha = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(alphaChannels, alpha);



    // Convert Mat to float data type
    foreground.convertTo(foreground, CV_32FC3);
    background.convertTo(background, CV_32FC3);
    alpha.convertTo(alpha, CV_32FC3, 1.0/255); // keeps the alpha values betwen 0 and 1

    // Number of iterations to average the performane over
    int numOfIterations = 1; //1000;



    // Alpha blending using direct Mat access with for loop
    Mat outImage = Mat::zeros(foreground.size(), foreground.type());

    for (int i=0; i<numOfIterations; i++) {
        outImage = alphaBlendDirectAccess(alpha, foreground, background, outImage);
    }

    imshow("alpha blended image", outImage/255);
    outImage = outImage/255;
    outImage.convertTo(outImage, CV_8U); // Convert float to Mat data type

    return outImage;
}




Mat& applyHomography(Mat& convertedOutImage, Mat& initialFrame, int startX, int startY, int endX, int endY)
{

    struct userdata{
        Mat im;
        vector<Point2f> points;
    };

    // Read in the image.
    Mat im_src = convertedOutImage;
    Size size = im_src.size();

    // Create a vector of points.
    vector<Point2f> pts_src;
    pts_src.push_back(Point2f(0,0));
    pts_src.push_back(Point2f(size.width - 1, 0));
    pts_src.push_back(Point2f(size.width - 1, size.height -1));
    pts_src.push_back(Point2f(0, size.height - 1 ));



    // Destination image
    Mat im_dst = initialFrame;
    vector<Point2f> pts_dst;
    pts_dst.push_back(Point2f(startX, startY));
    pts_dst.push_back(Point2f(endX, startY));
    pts_dst.push_back(Point2f(endX, endY));
    pts_dst.push_back(Point2f(startX, endY));


    Mat im_temp = im_dst.clone();


    // Calculate Homography between source and destination points
    Mat h = findHomography(pts_src, pts_dst);

    // Warp source image
    warpPerspective(im_src, im_temp, h, im_dst.size());


    // Black out polygonal area in destination image.
    fillConvexPoly(im_dst, pts_dst, Scalar(0), LINE_AA);

    // Add warped source image to destination image.
    im_dst = im_dst + im_temp;



    return im_dst;
}


JNIEXPORT void JNICALL
Java_com_example_inkmastertest_MainActivity_augmentation(JNIEnv *env, jobject, jlong addrRgba, jlong tempC, jlong tattooDesign);

JNIEXPORT void JNICALL
Java_com_example_inkmastertest_MainActivity_augmentation(JNIEnv *env, jobject, jlong addrRgba, jlong tempC, jlong tattooDesign) {

    Mat& img = *(Mat*)addrRgba;
    Mat target_img = img.clone();

    Mat& template1 = *(Mat*)tempC;
    Mat& tattooDes = *(Mat*)tattooDesign;


    // Contains the description of the match
    typedef struct Match_desc{
        bool init;
        double maxVal;
        Point maxLoc;
        double scale;
        Match_desc(): init(0){}
    } Match_desc;

    Mat template_mat;
    template_mat = template1; // Read image
    cvtColor(template_mat, template_mat, COLOR_BGR2GRAY); // Convert to Gray
    Canny(template_mat, template_mat, 50, 50*4); // Find edges


    // Find size
    int tW, tH;
    tW = template_mat.cols;
    tH = template_mat.rows;



    Mat target_gray, target_resized, target_edged;

    cvtColor(target_img, target_gray, COLOR_BGR2GRAY); // Convert to Gray

    const float SCALE_START = 1;
    const float SCALE_END = 0.2;
    const int SCALE_POINTS = 20;

    Match_desc found;
    for(float scale = SCALE_START; scale >= SCALE_END; scale -= (SCALE_START - SCALE_END)/SCALE_POINTS){
        resize(target_gray, target_resized, Size(0,0), scale, scale);// Resize

        // Break if target image becomes smaller than template
        if(tW > target_resized.cols || tH > target_resized.rows) break;


        Canny(target_resized, target_edged, 50, 50*4); // Find edges

        // Match template
        Mat result;
        matchTemplate(target_edged, template_mat, result, TM_CCOEFF);

        double maxVal; Point maxLoc;
        minMaxLoc(result, NULL, &maxVal, NULL, &maxLoc);

        // If better match found
        if( found.init == false || maxVal > found.maxVal ){
            found.init = true;
            found.maxVal = maxVal;
            found.maxLoc = maxLoc;
            found.scale = scale;
        }


    }

    int startX, startY, endX, endY;
    startX = found.maxLoc.x / found.scale;
    startY = found.maxLoc.y / found.scale;

    endX= (found.maxLoc.x + tW) / found.scale;
    endY= (found.maxLoc.y + tH) / found.scale;

    // draw a bounding box around the detected result and display the image
    rectangle(target_img, Point(startX, startY), Point(endX, endY), Scalar(0, 0, 255), 3);


    Rect myROI(startX, startY, endX, endY);
    Mat cropped = target_img(myROI);

    Mat alphaBlended = alphaBlend(tattooDes , cropped);
    Mat homographyApplied = applyHomography(alphaBlended, target_img, startX, startY, endX, endY);

    img = homographyApplied;


}





}
#包括
#包括
#包括
#包括
#包括
#包括
#包括
使用名称空间cv;
使用名称空间std;
外部“C”{
//使用直接指针访问的Alpha混合
Mat和alphaBlendDirectAccess(Mat和alpha、Mat和前景、Mat和背景、Mat和outImage)
{
int numberOfPixels=foreground.rows*foreground.cols*foreground.channels();
float*fptr=reinterpret_cast(前台数据);
float*bptr=重新解释(background.data);
float*aptr=重新解释投射(α数据);
float*outImagePtr=重新解释(outImage.data);
int i,j;
对于(j=0;jtarget_resized.cols | tH>target_resized.rows)中断;
Canny(目标大小调整,目标边缘,50,50*4);//查找边缘
//匹配模板
Mat结果;
匹配模板(目标、模板、结果、TM\U CCOEFF);
双maxVal;点maxLoc;
minMaxLoc(结果、NULL和maxVal、NULL和maxLoc);
//如果找到更好的匹配项
if(find.init==false | | maxVal>find.maxVal){
found.init=true;
found.maxVal=maxVal;
found.maxLoc=maxLoc;
发现。比例=比例;
}
}
int startX、startY、endX、endY;
startX=found.maxLoc.x/found.scale;
startY=found.maxLoc.y/found.scale;
endX=(found.maxLoc.x+tW)/found.scale;
endY=(found.maxLoc.y+tH)/found.scale;
//在检测到的结果周围绘制边界框并显示图像
矩形(目标图像、点(起点x、起点y)、点(终点x、终点y)、标量(0、0、255)、3);
直视myROI(startX、startY、endX、endY);
Mat-crapped=目标值(myROI);
Mat alphaBlend=alphaBlend(纹身、裁剪);
Mat homography Applied=应用单体(字母混合、目标图像、startX、startY、endX、endY);
img=应用同形文字;
}
}
如果我可以跳过单应性会更好,但我不知道如何将两种不同大小的图像进行alpha混合。我的预期输出是在检测到的模板区域上显示输入png(纹身2.png)。如果你能在这方面帮助我,我将不胜感激。如果我还需要提什么,请告诉我。多谢各位