python、C++ 中通过OpenCV的DNN模块使用YoloV4
生活随笔
收集整理的這篇文章主要介紹了
python、C++ 中通过OpenCV的DNN模块使用YoloV4
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
?
目錄
?
1 Python環境下調用
2 C++環境下調用(編寫CMakeLists.txt文件)
2.1 OpenCV安裝
2.2 程序編寫
2.2.1 main.cpp
2.2.2 Detection.h
2.2.3 Detection.cpp
2.2.4 CMakeLists.txt
2.3 編譯和測試
1 Python環境下調用
這個較為簡單,唯一注意要點是安裝的opencv-python版本要對,以下代碼僅支持4.4.0.XX系列OpenCV版本,4.5.0版本OpenCV沒有getUnconnectedOutLayersNames()這個屬性。
完整代碼如下:
import numpy as np
import time
import cv2

# This script relies on the 4.4.0.x opencv-python API surface
# (getUnconnectedOutLayersNames() usage below).
if cv2.__version__ != '4.4.0':
    print("opencv版本不支持! 本程序語法僅支持4.4.0系列OpenCV")

# Class labels, one per line; order must match the model's training order.
# `with` ensures the file handle is closed (the original leaked it).
with open("coco.names") as f:
    LABELS = f.read().strip().split("\n")

# Fixed seed -> stable per-class colors across runs.
np.random.seed(666)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

# Load the YOLO network from the Darknet config and weights.
net = cv2.dnn_DetectionModel('/home/ym/ym2021/yolov4.cfg', '/home/ym/ym2021/yolov4.weights')
# Names of the unconnected (output) layers used for the forward pass.
layer = net.getUnconnectedOutLayersNames()

image = cv2.imread('1.jpg')
# Source image size, used to scale relative YOLO boxes back to pixels.
(H, W) = image.shape[:2]

# Build a 4D blob (scale to [0,1], resize to 416x416, BGR->RGB, no crop)
# and run the forward pass of the YOLO object detector.
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)

start = time.time()
layerOutputs = net.forward(layer)
end = time.time()
print("YOLO took {:.6f} seconds".format(end - start))

boxes = []
confidences = []
classIDs = []

# Each detection row: [cx, cy, w, h, objectness, class scores...].
for output in layerOutputs:
    for detection in output:
        # Best class id and its confidence for this candidate.
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]
        # Drop weak predictions below the minimum probability.
        if confidence > 0.5:
            # YOLO returns the box center plus width/height, all relative
            # to the network input; scale back to source-image pixels.
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype("int")
            # Convert center-based box to its top-left corner.
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)

# Non-maximum suppression keeps one box per object.
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)

# Draw every surviving box with its label and confidence.
if len(idxs) > 0:
    for i in idxs.flatten():
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 1, lineType=cv2.LINE_AA)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, color, 1, lineType=cv2.LINE_AA)

cv2.imshow("Tag", image)
cv2.waitKey(0)
?
2 C++環境下調用(編寫CMakeLists.txt文件)
2.1 OpenCV安裝
C++環境下編譯安裝OpenCV 4.5.1和OpenCV_Contrib庫,參考我《ubuntu + oepncv + PCL + realsenseSDK + ROS + OpenVino開發環境搭建》https://blog.csdn.net/weixin_42118657/article/details/114527831
2.2 程序編寫
程序結構如下:
- main.cpp
- Detection.cpp
- Detection.h
- CMakeLists.txt
- build文件夾
2.2.1 main.cpp
#include "Detection.h"

#include <iostream>

using namespace std;
using namespace cv;
using namespace dnn;

// Run detection on a single image and save the annotated result.
void TestDetection()
{
    string image_path = "/data/1.jpg";
    string save_path = "result.jpg";

    Mat img = imread(image_path);
    // Guard against a missing/unreadable image: the original would feed an
    // empty Mat into the network and crash inside blobFromImage.
    if (img.empty())
    {
        cout << "failed to read image: " << image_path << endl;
        return;
    }
    cout << "width: " << img.cols << endl;
    cout << "height: " << img.rows << endl;

    Detection detection = Detection();
    detection.Initialize(img.cols, img.rows);
    detection.Detecting(img);

    imwrite(save_path, detection.GetFrame());
    return;
}

int main()
{
    TestDetection();
    return 0;
}
2.2.2 Detection.h
#pragma once
#ifndef __DETECTION_H__
#define __DETECTION_H__

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <string.h>
#include <vector>
#include <fstream>

// NOTE(review): `using namespace` in a header leaks into every includer;
// kept here because the existing .cpp files depend on it.
using namespace std;
using namespace cv;
using namespace dnn;

// YOLOv4 pedestrian/vehicle detector backed by OpenCV's DNN module.
class Detection
{
public:
    // Constructor / destructor.
    Detection();
    ~Detection();
    // Record the source image size (used to scale network outputs back).
    void Initialize(int width, int height);
    // Load class names and the Darknet model from disk.
    void ReadModel();
    // Run detection on one frame; annotates an internal copy of it.
    bool Detecting(Mat frame);
    // Names of the network's unconnected (output) layers.
    vector<String> GetOutputsNames();
    // Filter raw outputs by confidence, then apply NMS.
    void PostProcess();
    // Draw every box kept by NMS.
    void Drawer();
    // Draw one detection box with its class label and confidence.
    void DrawBoxes(int classId, float conf, int left, int top, int right, int bottom);
    // Annotated frame of the last Detecting() call.
    Mat GetFrame();
    // Source image width.
    int GetResWidth();
    // Source image height.
    int GetResHeight();

private:
    // Image properties.
    int m_width;              // source image width
    int m_height;             // source image height

    // Network processing state.
    Net m_model;              // loaded network model
    Mat m_frame;              // current (annotated) frame
    Mat m_blob;               // 4D input blob built from m_frame
    vector<Mat> m_outs;       // raw network outputs
    vector<float> m_confs;    // confidence per candidate box
    vector<Rect> m_boxes;     // candidate boxes: top-left corner, width, height
    vector<int> m_classIds;   // class id per candidate box
    vector<int> m_perfIndx;   // indices of boxes surviving NMS

    // Detection hyper-parameters.
    int m_inpWidth;           // network input width
    int m_inpHeight;          // network input height
    float m_confThro;         // confidence threshold
    float m_NMSThro;          // NMS IoU threshold
    vector<string> m_classes; // class names

private:
    // Clear the per-frame result containers.
    void Dump();
};

#endif
2.2.3 Detection.cpp
#include "Detection.h"using namespace cv;
using namespace dnn;//構造函數,成員變量初始化
// Constructor: initialize sizes/thresholds, then load the network.
Detection::Detection()
{
    // Source image size is unknown until Initialize() is called.
    m_width = 0;
    m_height = 0;

    // Network input resolution (YOLOv4 default).
    m_inpWidth = 416;
    m_inpHeight = 416;

    // Detection thresholds.
    m_confThro = 0.25;
    m_NMSThro = 0.4;

    // Load class names and model weights.
    ReadModel();
}
// Destructor: release the per-frame result containers.
Detection::~Detection()
{
    Dump();
}
void Detection::Dump()
{//網絡輸出相關清零m_outs.clear();m_boxes.clear();m_confs.clear();m_classIds.clear();m_perfIndx.clear();
}//初始化函數
void Detection::Initialize(int width, int height)
{//圖像屬性m_width = width;m_height = height;
}//讀取網絡模型和類別
void Detection::ReadModel()
{string classesFile = "/data/coco.names";String modelConfig = "/data/yolov4.cfg";String modelWeights = "/data/yolov4.weights";//加載類別名ifstream ifs(classesFile.c_str());string line;while (getline(ifs, line))m_classes.push_back(line);//加載網絡模型m_model = readNetFromDarknet(modelConfig, modelWeights);m_model.setPreferableBackend(DNN_BACKEND_OPENCV);m_model.setPreferableTarget(DNN_TARGET_OPENCL);
}//行人與車輛檢測
// Run the network on one frame and draw detections on an internal copy.
// Returns true on completion.
bool Detection::Detecting(Mat frame)
{
    // Reset results from any previous frame. Without this, repeated calls
    // kept appending to m_boxes/m_confs/m_classIds in PostProcess(), so
    // stale boxes from earlier frames accumulated.
    Dump();

    m_frame = frame.clone();

    // 4D blob: scale pixels to [0,1], resize to the network input size,
    // swap BGR->RGB, no cropping.
    blobFromImage(m_frame, m_blob, 1 / 255.0, Size(m_inpWidth, m_inpHeight), Scalar(0, 0, 0), true, false);

    // Set network input.
    m_model.setInput(m_blob);

    // Forward pass; forward() needs the output layer names.
    m_model.forward(m_outs, GetOutputsNames());

    // Confidence filtering + non-maximum suppression.
    PostProcess();

    // Draw the surviving detection boxes.
    Drawer();

    return true;
}
// Return the names of the network's output (unconnected) layers.
vector<String> Detection::GetOutputsNames()
{
    // The original cached the result in a function-local static, which is
    // shared by ALL Detection instances — wrong once two instances load
    // different models. Recomputing per call is cheap and always correct.
    vector<String> names;

    // Indices of the output layers (1-based in OpenCV).
    vector<int> outLayers = m_model.getUnconnectedOutLayers();
    // Names of every layer in the network.
    vector<String> layersNames = m_model.getLayerNames();

    // Map each output-layer index to its name.
    names.resize(outLayers.size());
    for (size_t i = 0; i < outLayers.size(); ++i)
        names[i] = layersNames[outLayers[i] - 1];

    return names;
}
void Detection::PostProcess()
{for (int num = 0; num < m_outs.size(); num++){Point Position;double confidence;//得到每個輸出的數據float *data = (float *)m_outs[num].data;for (int j = 0; j < m_outs[num].rows; j++, data += m_outs[num].cols){//得到該輸出的所有類別的Mat scores = m_outs[num].row(j).colRange(5, m_outs[num].cols);//獲取最大置信度對應的值和位置minMaxLoc(scores, 0, &confidence, 0, &Position);//對置信度大于閾值的邊界框進行相關計算和保存if (confidence > m_confThro){//data[0],data[1],data[2],data[3]都是相對于原圖像的比例int centerX = (int)(data[0] * m_width);int centerY = (int)(data[1] * m_height);int width = (int)(data[2] * m_width);int height = (int)(data[3] * m_height);int left = centerX - width / 2;int top = centerY - height / 2;//保存信息m_classIds.push_back(Position.x);m_confs.push_back((float)confidence);m_boxes.push_back(Rect(left, top, width, height));}}}//非極大值抑制,以消除具有較低置信度的冗余重疊框NMSBoxes(m_boxes, m_confs, m_confThro, m_NMSThro, m_perfIndx);
}//畫出檢測結果
void Detection::Drawer()
{//獲取所有最佳檢測框信息for (int i = 0; i < m_perfIndx.size(); i++){int idx = m_perfIndx[i];Rect box = m_boxes[idx];DrawBoxes(m_classIds[idx], m_confs[idx], box.x, box.y,box.x + box.width, box.y + box.height);}
}//畫出檢測框和相關信息
void Detection::DrawBoxes(int classId, float conf, int left, int top, int right, int bottom)
{//畫檢測框rectangle(m_frame, Point(left, top), Point(right, bottom), Scalar(255, 178, 50), 3);//該檢測框對應的類別和置信度string label = format("%.2f", conf);if (!m_classes.empty()){CV_Assert(classId < (int)m_classes.size());label = m_classes[classId] + ":" + label;}//將標簽顯示在檢測框頂部int baseLine;Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);top = max(top, labelSize.height);rectangle(m_frame, Point(left, top - round(1.5 * labelSize.height)), Point(left + round(1.5 * labelSize.width), top + baseLine), Scalar(255, 255, 255), FILLED);putText(m_frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);
}//獲取Mat對象
// Annotated frame from the last Detecting() call.
Mat Detection::GetFrame()
{
    return m_frame;
}
// Width of the source image, as set by Initialize().
int Detection::GetResWidth()
{
    return m_width;
}
// Height of the source image, as set by Initialize().
int Detection::GetResHeight()
{
    return m_height;
}
2.2.4 CMakeLists.txt
cmake_minimum_required(VERSION 3.10)

project(yolo4_test)

set(CMAKE_CXX_STANDARD 11)

# Locate OpenCV before declaring the target so its variables are available.
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

# Detection module sources.
set(DETECTION Detection.cpp)
add_executable(yolo4_test main.cpp ${DETECTION})

# Link against OpenCV.
target_link_libraries(yolo4_test ${OpenCV_LIBS})
2.3 編譯和測試
- cd 到源碼目錄
- mkdir build
- cd build
- cmake ..
- make
- ./yolo4_test(運行在build文件夾生成的可執行文件yolo4_test)
結果如下:
?
總結
以上是生活随笔為你收集整理的python、C++ 中通过OpenCV的DNN模块使用YoloV4的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: (OCR论文)Mask TextSpot
- 下一篇: 原创:尖叫之夜男明星红毯造型在韩国爆红,