使用 Java 和 YOLOv3 进行对象检测 --> (Parsing error) Unknown layer type: detection,出现在函数 cv::dnn::darknet::ReadDarknetFromCfgStream 中
我想用基于openCV的yolov3进行目标检测。目前,我正在使用windows,使用JAVA中的eclipse 我的代码保存在: 当我执行程序时,我得到一个错误:使用java和yolov3进行Yolo对象检测-->;解析错误)未知层类型:函数中检测到';cv::dnn::darknet::ReadDarknetFromCfgStream,java,opencv,yolo,Java,Opencv,Yolo,我想用基于openCV的yolov3进行目标检测。目前,我正在使用windows,使用JAVA中的eclipse 我的代码保存在: 当我执行程序时,我得到一个错误: Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: OpenCV(4.5.0) C:\build\master_winpack-bindings-win64-vc14-static\opencv\
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: OpenCV(4.5.0) C:\build\master_winpack-bindings-win64-vc14-static\opencv\modules\dnn\src\darknet\darknet_importer.cpp:207: error: (-212:Parsing error) Failed to parse NetParameter file: C:\Projects\detection\opencv\yolov3.cfg in function 'cv::dnn::dnn4_v20200908::readNetFromDarknet'
]
at org.opencv.dnn.Dnn.readNetFromDarknet_0(Native Method)
at org.opencv.dnn.Dnn.readNetFromDarknet(Dnn.java:543)
at clement.yolo.main(yolo.java:67)
我的程序代码如下所示:
package clement;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import javax.imageio.ImageIO;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.dnn.Dnn;
import org.opencv.dnn.Net;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.utils.Converters;
import org.opencv.videoio.VideoCapture;
/**
 * Runs YOLOv3 object detection (via the OpenCV DNN module) on a video file and
 * displays each frame, with detected objects boxed in red, in a Swing window.
 *
 * NOTE(review): the "Unknown layer type: detection" parse error reported for
 * readNetFromDarknet means the .cfg file is NOT a YOLOv3 config — the
 * 'detection' layer only exists in YOLOv1/v2 configs. Download the matching
 * pair yolov3.cfg + yolov3.weights from the official YOLO site and the error
 * goes away. The cfg and weights files must belong to the same model version.
 */
public class yolo {

    /**
     * Returns the names of the network's unconnected output layers (the three
     * "yolo" detection heads for YOLOv3). These names are passed to
     * {@code Net.forward} so that all detection outputs are collected at once.
     *
     * @param net the loaded Darknet model
     * @return output layer names, in network order
     */
    private static List<String> getOutputNames(Net net) {
        List<String> names = new ArrayList<>();
        List<Integer> outLayers = net.getUnconnectedOutLayers().toList();
        List<String> layersNames = net.getLayerNames();
        // getUnconnectedOutLayers() yields 1-based indices into getLayerNames().
        for (Integer layerIndex : outLayers) {
            names.add(layersNames.get(layerIndex - 1));
        }
        return names;
    }

    public static void main(String[] args) throws InterruptedException {
        // The native DLL version must match the Java bindings on the classpath
        // (here: OpenCV 4.5.0, 64-bit Windows build).
        System.load("C:\\Projects\\detection\\opencv_new\\opencv\\build\\java\\x64\\opencv_java450.dll");

        // Weights and config must be the matching YOLOv3 pair from the official site;
        // a v1/v2 cfg here triggers "Unknown layer type: detection".
        String modelWeights = "C:\\Projects\\detection\\opencv\\yolov3.weights";
        String modelConfiguration = "C:\\Projects\\detection\\opencv\\yolov3.cfg";
        String filePath = "c:\\clement\\uwe.mp4"; // video file to be analysed

        VideoCapture cap = new VideoCapture(filePath);

        // Swing window that displays the annotated frames.
        Mat frame = new Mat();
        JFrame jframe = new JFrame("Video");
        JLabel vidpanel = new JLabel();
        jframe.setContentPane(vidpanel);
        jframe.setSize(600, 600);
        jframe.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        jframe.setVisible(true);

        // OpenCV DNN loads Darknet-format models directly.
        Net net = Dnn.readNetFromDarknet(modelConfiguration, modelWeights);

        Size sz = new Size(288, 288); // network input size (must be a multiple of 32)
        List<Mat> result = new ArrayList<>();
        List<String> outBlobNames = getOutputNames(net);

        float confThreshold = 0.6f; // minimum class confidence to keep a detection
        float nmsThresh = 0.5f;     // IoU threshold for non-maximum suppression

        // BUG FIX: was `while (true)` with the read inside — that busy-looped
        // forever once the video ended. Now the loop exits on end-of-stream.
        while (cap.read(frame)) {
            // Convert the frame to a blob: scale pixels to [0,1] (1/255 ~= 0.00392),
            // resize to the network input size, swap BGR->RGB, no crop.
            Mat blob = Dnn.blobFromImage(frame, 0.00392, sz, new Scalar(0), true, false);
            net.setInput(blob);
            net.forward(result, outBlobNames); // feed forward to get the detection heads

            List<Float> confs = new ArrayList<>();
            List<Rect> rects = new ArrayList<>();
            for (Mat level : result) {
                // Each row is a candidate detection: the first 4 numbers are
                // [center_x, center_y, width, height] (relative to the input),
                // element 5 onwards are the per-class probabilities.
                for (int r = 0; r < level.rows(); ++r) {
                    Mat row = level.row(r);
                    Mat scores = row.colRange(5, level.cols());
                    Core.MinMaxLocResult mm = Core.minMaxLoc(scores);
                    float confidence = (float) mm.maxVal;
                    if (confidence > confThreshold) {
                        // Scale the relative box coordinates back to frame pixels.
                        int centerX = (int) (row.get(0, 0)[0] * frame.cols());
                        int centerY = (int) (row.get(0, 1)[0] * frame.rows());
                        int width = (int) (row.get(0, 2)[0] * frame.cols());
                        int height = (int) (row.get(0, 3)[0] * frame.rows());
                        int left = centerX - width / 2;
                        int top = centerY - height / 2;
                        confs.add(confidence);
                        rects.add(new Rect(left, top, width, height));
                    }
                }
            }

            if (!rects.isEmpty()) {
                MatOfFloat confidences = new MatOfFloat(Converters.vector_float_to_Mat(confs));
                Rect[] boxesArray = rects.toArray(new Rect[0]);
                MatOfRect boxes = new MatOfRect(boxesArray);
                MatOfInt indices = new MatOfInt();
                // BUG FIX: this call was commented out, so `indices` stayed empty
                // and no bounding box was ever drawn. NMS removes overlapping
                // duplicates of the same object.
                Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThresh, indices);
                for (int idx : indices.toArray()) {
                    Rect box = boxesArray[idx];
                    Imgproc.rectangle(frame, box.tl(), box.br(), new Scalar(0, 0, 255), 2);
                }
                confidences.release();
                boxes.release();
                indices.release();
            }

            // Push the annotated frame into the Swing label.
            vidpanel.setIcon(new ImageIcon(Mat2bufferedImage(frame)));
            vidpanel.repaint();

            // OpenCV Mats hold native memory; release per-frame temporaries
            // explicitly or the process grows without bound.
            blob.release();
            for (Mat m : result) {
                m.release();
            }
            result.clear();
        }
        cap.release();
    }

    /**
     * Converts an OpenCV {@link Mat} to a {@link BufferedImage} by encoding it
     * as JPEG in memory and decoding with ImageIO.
     *
     * @param image the BGR frame to convert
     * @return the decoded image, or {@code null} if decoding fails
     */
    private static BufferedImage Mat2bufferedImage(Mat image) {
        MatOfByte bytemat = new MatOfByte();
        Imgcodecs.imencode(".jpg", image, bytemat);
        byte[] bytes = bytemat.toArray();
        bytemat.release(); // free the native encode buffer
        BufferedImage img = null;
        try (InputStream in = new ByteArrayInputStream(bytes)) {
            img = ImageIO.read(in);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return img;
    }
}
package元素;
导入java.awt.image.buffereImage;
导入java.io.ByteArrayInputStream;
导入java.io.IOException;
导入java.io.InputStream;
导入java.util.ArrayList;
导入java.util.List;
导入javax.imageio.imageio;
导入javax.swing.ImageIcon;
导入javax.swing.JFrame;
导入javax.swing.JLabel;
导入org.opencv.core.core;
导入org.opencv.core.Mat;
导入org.opencv.core.MatOfByte;
导入org.opencv.core.MatOfFloat;
导入org.opencv.core.MatOfInt;
导入org.opencv.core.MatOfRect;
导入org.opencv.core.Point;
导入org.opencv.core.Rect;
导入org.opencv.core.Scalar;
导入org.opencv.core.Size;
导入org.opencv.dnn.dnn;
导入org.opencv.dnn.Net;
导入org.opencv.imgcodecs.imgcodecs;
导入org.opencv.imgproc.imgproc;
导入org.opencv.utils.Converters;
导入org.opencv.videoio.VideoCapture;
公务舱{
私有静态列表getOutputNames(Net){
列表名称=新的ArrayList();
List outLayers=net.getUnconnectedOutLayers().toList();
List LayerNames=net.getLayerNames();
exploreyers.forEach((项目)->names.add(layersNames.get(项目-1));//从加载的YOLO模型展开并创建R-CNN层//
返回姓名;
}
公共静态void main(字符串[]args)引发InterruptedException{
加载(“C:\\Projects\\detection\\opencv\u new\\opencv\\build\\java\\x64\\opencv\u java450.dll”);//加载opencv 4.0 dll//
//System.load(“C:\\Projects\\detection\\opencv\\build\\java\\x64\\opencv\u java3412.dll”);//加载opencv 4.0 dll//
String modelWeights=“C:\\Projects\\detection\\opencv\\yolov3.weights”;//仅下载和加载YOLO的wights,这可从YOLO官方网站获得//
String modelConfiguration=“C:\\Projects\\detection\\opencv\\yolov3.cfg”//下载并加载YOLO的cfg文件,可从官方网站获得//
String filePath=“c:\\clement\\uwe.mp4”//我要分析的视频文件//
VideoCapture cap=新的VideoCapture(文件路径);//使用VideoCapture方法加载视频//
Mat frame=new Mat();//定义一个矩阵来提取和存储视频中的像素信息//
Mat dst=新Mat();
//阅读(框架);
JFrame JFrame=new JFrame(“Video”);//下面的几行创建一个帧,以显示带有对象检测和定位的结果视频//
JLabel vidpanel=新的JLabel();
jframe.setContentPane(vidpanel);
jframe.setSize(600600);
setVisible(true);//我们在这里实例化框架//
Net Net=Dnn.readNetFromDarknet(modelConfiguration,modelWeights);//OpenCV Dnn支持从各种框架(如Caffe和TensorFlow)训练的模型。它还支持基于YOLO的各种网络体系结构//
//睡眠(5000);
//Mat image=Imgcodecs.imread(“D:\\yolo object detection\\yolo object detection\\images\\soccer.jpg”);
尺寸sz=新尺寸(288);
列表结果=新建ArrayList();
List outBlobNames=getOutputNames(net);
while(true){
如果(第二章(框架)){
Mat blob=Dnn.blobFromImage(frame,0.00392,sz,new Scalar(0),true,false);//我们一次将一帧视频送入网络,我们必须将图像转换为blob。blob是作为输入的预处理图像//
net.setInput(blob);
net.forward(result,outBlobNames);//前馈模型以获得输出//
//outBlobNames.forEach(System.out::println);
//result.forEach(System.out::println);
float confThreshold=0.6f;//插入阈值,超过阈值模型将检测对象//
List clsIds=new ArrayList();
List confs=new ArrayList();
List rects=new ArrayList();
对于(int i=0;i阈值)
{
int centerX=(int)(row.get(0,0)[0]*frame.cols());//绘制边界框的缩放//
int centerY=(int)(row.get(0,1)[0]*frame.rows());
int width=(int)(row.get(0,2)[0]*frame.cols());
int height=(int)(row.get(0,3)[0]*frame.rows());
int left=中心x-宽度/2;
int top=中心-高度/2;
添加((int)classIdPoint.x);
增加((浮动)信心);
添加(新的矩形(左、上、宽、高));
}
}
}
浮动nmsThresh=0.5f;
MatOfFloat机密=新的MatOfFloat(转换器。向量\u浮点\u到\u矩阵(confs));
Rect[]boxesArray=rects.toArray(新的Rect[0]);
MatOfRect盒子=新的MatOfRect(盒子阵列);
马托芬特指数=
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: OpenCV(4.5.0) C:\build\master_winpack-bindings-win64-vc14-static\opencv\modules\dnn\src\darknet\darknet_io.cpp:865: error: (-212:Parsing error) Unknown layer type: detection in function 'cv::dnn::darknet::ReadDarknetFromCfgStream'
]
at org.opencv.dnn.Dnn.readNetFromDarknet_0(Native Method)
at org.opencv.dnn.Dnn.readNetFromDarknet(Dnn.java:543)
at clement.yolo.main(yolo.java:67)