
Deploying YOLOv5 with OpenCV 4.7.0

I won't go over the YOLOv5 architecture or the deployment theory here; if you want the background, see the article on deployment principles.

#include <fstream>
#include <sstream>
#include <iostream>
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

//using namespace cv;
//using namespace dnn;
//using namespace std;

int index = 0;

struct Net_config
{
    float confThreshold;   // Confidence threshold
    float nmsThreshold;    // Non-maximum suppression threshold
    float objThreshold;    // Object confidence threshold
    std::string modelpath;
};

int endsWith(std::string s, std::string sub)
{
    return s.rfind(sub) == (s.length() - sub.length()) ? 1 : 0;
}

class YOLO
{
public:
    YOLO(Net_config config);
    std::tuple<std::vector<cv::Rect>, std::vector<int>> detect(cv::Mat& frame);

private:
    float* anchors;
    int num_stride;
    int inpWidth;
    int inpHeight;
    std::vector<std::string> class_names;
    int num_class;
    float confThreshold;
    float nmsThreshold;
    float objThreshold;
    const bool keep_ratio = true;
    cv::dnn::Net net;
    void drawPred(float conf, int left, int top, int right, int bottom, cv::Mat& frame, int classid);
    cv::Mat resize_image(cv::Mat srcimg, int* newh, int* neww, int* top, int* left);
};

YOLO::YOLO(Net_config config)
{
    this->confThreshold = config.confThreshold;
    this->nmsThreshold = config.nmsThreshold;
    this->objThreshold = config.objThreshold;
    this->net = cv::dnn::readNet(config.modelpath);

    // Load the class names, one per line
    std::ifstream ifs("D:\\project_prj\\deeplearn\\yolov5\\class.names");
    std::string line;
    while (std::getline(ifs, line))
        this->class_names.push_back(line);
    this->num_class = class_names.size();

    this->num_stride = 3;
    this->inpHeight = 640;
    this->inpWidth = 640;
}

// Letterbox-style resize: keep the aspect ratio and pad the short side with 114
cv::Mat YOLO::resize_image(cv::Mat srcimg, int* newh, int* neww, int* top, int* left)
{
    int srch = srcimg.rows, srcw = srcimg.cols;
    *newh = this->inpHeight;
    *neww = this->inpWidth;
    cv::Mat dstimg;
    if (this->keep_ratio && srch != srcw)
    {
        float hw_scale = (float)srch / srcw;
        if (hw_scale > 1)
        {
            // Taller than wide: shrink the width and pad left/right
            *newh = this->inpHeight;
            *neww = int(this->inpWidth / hw_scale);
            cv::resize(srcimg, dstimg, cv::Size(*neww, *newh), 0, 0, cv::INTER_AREA);
            *left = int((this->inpWidth - *neww) * 0.5);
            cv::copyMakeBorder(dstimg, dstimg, 0, 0, *left, this->inpWidth - *neww - *left, cv::BORDER_CONSTANT, 114);
        }
        else
        {
            // Wider than tall: shrink the height and pad top/bottom
            *newh = (int)(this->inpHeight * hw_scale);
            *neww = this->inpWidth;
            cv::resize(srcimg, dstimg, cv::Size(*neww, *newh), 0, 0, cv::INTER_AREA);
            *top = (int)((this->inpHeight - *newh) * 0.5);
            cv::copyMakeBorder(dstimg, dstimg, *top, this->inpHeight - *newh - *top, 0, 0, cv::BORDER_CONSTANT, 114);
        }
    }
    else
    {
        cv::resize(srcimg, dstimg, cv::Size(*neww, *newh), 0, 0, cv::INTER_AREA);
    }
    return dstimg;
}

// Draw the predicted bounding box
void YOLO::drawPred(float conf, int left, int top, int right, int bottom, cv::Mat& frame, int classid)
{
    // Draw a rectangle displaying the bounding box (red for class 0, green otherwise)
    if (classid == 0)
        cv::rectangle(frame, cv::Point(left, top), cv::Point(right, bottom), cv::Scalar(0, 0, 255), 2);
    else
        cv::rectangle(frame, cv::Point(left, top), cv::Point(right, bottom), cv::Scalar(0, 255, 0), 2);

    // Get the label for the class name and its confidence
    std::string label = cv::format("%.2f", conf);
    label = this->class_names[classid] + ":" + label;

    // Display the label at the top of the bounding box
    int baseLine;
    cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
    top = std::max(top, labelSize.height);
    //rectangle(frame, Point(left, top - int(1.5 * labelSize.height)), Point(left + int(1.5 * labelSize.width), top + baseLine), Scalar(0, 255, 0), FILLED);
    if (classid == 0)
        cv::putText(frame, label, cv::Point(left, top), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255), 1);
    else
        cv::putText(frame, label, cv::Point(left, top), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 0), 1);
}

std::tuple<std::vector<cv::Rect>, std::vector<int>> YOLO::detect(cv::Mat& frame)
{
    int newh = 0, neww = 0, padh = 0, padw = 0;
    cv::Mat dstimg = this->resize_image(frame, &newh, &neww, &padh, &padw);
    cv::Mat blob = cv::dnn::blobFromImage(dstimg, 1 / 255.0, cv::Size(this->inpWidth, this->inpHeight), cv::Scalar(0, 0, 0), true, false);
    this->net.setInput(blob);
    std::vector<cv::Mat> outs;
    this->net.forward(outs, this->net.getUnconnectedOutLayersNames());

    int num_proposal = outs[0].size[1];
    int nout = outs[0].size[2];
    if (outs[0].dims > 2)
    {
        outs[0] = outs[0].reshape(0, num_proposal);
    }

    // Generate proposals
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;
    std::vector<int> classIds;
    float ratioh = (float)frame.rows / newh, ratiow = (float)frame.cols / neww;
    // Each output row: cx, cy, w, h, box_score, class scores
    float* pdata = (float*)outs[0].data;
    // This loop assumes a two-class model, i.e. nout == 7
    for (int i = 0; i < num_proposal; i++)
    {
        float cx = pdata[i * nout + 0];
        float cy = pdata[i * nout + 1];
        float w = pdata[i * nout + 2];
        float h = pdata[i * nout + 3];
        float score = pdata[i * nout + 4];
        if (score < this->objThreshold)
            continue;
        float class_num1 = pdata[i * nout + 5];
        float class_num2 = pdata[i * nout + 6];
        // Map the box back to the original image: remove padding, undo scaling
        int left = int((cx - padw - 0.5 * w) * ratiow);
        int top = int((cy - padh - 0.5 * h) * ratioh);
        float max_class_score;
        if (class_num1 > class_num2)
        {
            max_class_score = class_num1;
            classIds.push_back(0);
        }
        else
        {
            max_class_score = class_num2;
            classIds.push_back(1);
        }
        confidences.push_back(max_class_score);
        boxes.push_back(cv::Rect(left, top, (int)(w * ratiow), (int)(h * ratioh)));
    }

    // Perform non-maximum suppression to eliminate redundant overlapping boxes
    // with lower confidences
    std::vector<cv::Rect> result_;
    std::vector<int> class_;
    std::vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confidences, this->confThreshold, this->nmsThreshold, indices);
    for (size_t i = 0; i < indices.size(); ++i)
    {
        int idx = indices[i];
        cv::Rect box = boxes[idx];
        result_.emplace_back(box);
        class_.emplace_back(classIds[idx]);
        this->drawPred(confidences[idx], box.x, box.y, box.x + box.width, box.y + box.height, frame, classIds[idx]);
    }
    cv::imwrite("D:\\project_prj\\deeplearn\\yolov5\\result\\" + std::to_string(index++) + ".jpg", frame);
    //std::cout << "done" << std::endl;
    return std::make_tuple(result_, class_);
}

int main()
{
    Net_config yolo_nets = { 0.60, 0.5, 0.60, "D:\\project_prj\\run\\best_detectcircle_1.onnx" };
    YOLO yolo_model(yolo_nets);
    //std::string imgpath = "D:\\20230817-144309.jpg";
    std::string path = "C:\\datas_samll";
    std::vector<cv::String> result;
    cv::glob(path, result);
    for (auto x : result)
    {
        std::cout << x << std::endl;
        cv::Mat srcimg = cv::imread(x);
        auto detections = yolo_model.detect(srcimg);
    }
}
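The proposal loop in YOLO::detect above hard-codes two classes (class_num1 / class_num2). As a rough sketch, and assuming the usual YOLOv5 ONNX layout where each output row is [cx, cy, w, h, obj_conf, cls_0, ..., cls_{N-1}], the class-score selection could be generalized to any number of classes with cv::minMaxLoc. The variable names (pdata, num_proposal, nout, padw, padh, ratiow, ratioh, boxes, confidences, classIds) are the ones already defined in YOLO::detect; this is only an illustrative replacement for the two-class loop, not code from the original post.

// Hedged sketch: N-class proposal loop, drop-in replacement for the two-class version
for (int i = 0; i < num_proposal; i++)
{
    float* row = pdata + i * nout;
    if (row[4] < this->objThreshold)          // objectness check
        continue;
    // Wrap the class scores (columns 5 .. nout-1) and find the best one
    cv::Mat scores(1, nout - 5, CV_32FC1, row + 5);
    cv::Point classIdPoint;
    double max_class_score;
    cv::minMaxLoc(scores, nullptr, &max_class_score, nullptr, &classIdPoint);
    classIds.push_back(classIdPoint.x);        // column index == class id
    confidences.push_back((float)max_class_score);
    int left = int((row[0] - padw - 0.5f * row[2]) * ratiow);
    int top  = int((row[1] - padh - 0.5f * row[3]) * ratioh);
    boxes.push_back(cv::Rect(left, top, (int)(row[2] * ratiow), (int)(row[3] * ratioh)));
}

Since cv::minMaxLoc reports the position of the largest score, classIdPoint.x is directly the class index, and everything downstream (NMSBoxes, drawPred) stays unchanged.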
