Author: RayChiu_Labloy
Copyright notice: All rights reserved by the author. For commercial reuse, please contact the author for authorization; for non-commercial reuse, please credit the source.
Contents
Installing OpenVINO on Windows 10
Converting the pretrained YOLO model to IR format with OpenVINO
Setting permanent environment variables for OpenVINO
Variables to add to the Windows system environment
Entries to add to PATH
Configuring OpenVINO and OpenCV in VS2017
Include directories
Library directories
Additional dependencies
Inference
The main.cpp script
Result:
Problems encountered at runtime
Installing OpenVINO on Windows 10
Reference: "Installing OpenVINO on Win10 and testing the official demo in a TensorFlow environment" (RayChiu757374816's blog, CSDN)
Converting the pretrained YOLO model to IR format with OpenVINO
Reference: "Converting the yolov5s pretrained model to ONNX on Win10, generating an accelerated inference model with OpenVINO, and testing inference" (RayChiu757374816's blog, CSDN)
Setting permanent environment variables for OpenVINO
The official .bat script for setting the environment variables did not seem to take effect for me. If you do not set permanent environment variables, you will run into errors about the various dynamic libraries not being found.
Variables to add to the Windows system environment
Variable name | Variable value |
INTEL_OPENVINO_DIR | C:\Program Files (x86)\IntelSWTools\openvino_2020.4.287 |
INTEL_CVSDK_DIR | %INTEL_OPENVINO_DIR% |
HDDL_INSTALL_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl |
InferenceEngine_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share |
OpenCV_DIR | %INTEL_OPENVINO_DIR%\opencv\cmake |
NGRAPH_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\ngraph\cmake |
PYTHONPATH | %INTEL_OPENVINO_DIR%\python\python3.8 |
Note: adjust these to match your own OpenVINO installation path and Python version.
Entries to add to PATH
%HDDL_INSTALL_DIR%\bin
%INTEL_OPENVINO_DIR%\opencv\bin
%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug
%INTEL_OPENVINO_DIR%\python\python3.8\openvino\inference_engine
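After adding these, it is worth confirming that a freshly started process actually sees the variables (rather than only the shell you edited them in). The following standalone sketch is purely an illustrative check using std::getenv and is not part of the deployment project:

#include <cstdlib>
#include <iostream>

int main() {
    // The variables this post adds to the Windows system environment
    const char* keys[] = { "INTEL_OPENVINO_DIR", "INTEL_CVSDK_DIR", "HDDL_INSTALL_DIR",
                           "InferenceEngine_DIR", "OpenCV_DIR", "NGRAPH_DIR", "PYTHONPATH" };
    for (const char* key : keys) {
        const char* value = std::getenv(key);  // returns nullptr when the variable is unset
        std::cout << key << " = " << (value ? value : "<not set>") << std::endl;
    }
    return 0;
}

If any entry prints "<not set>" in a newly opened terminal, the system-level variable was not saved correctly.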
Configuring OpenVINO and OpenCV in VS2017
Include directories
Library directories
Additional dependencies
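These three settings point Visual Studio at the headers and libraries shipped with OpenVINO. As a reference, for OpenVINO 2020.4 they usually amount to the following. The exact paths and library names here are an assumption based on the default install layout, so verify them against your own installation, and match the OpenCV .lib version suffix to the OpenCV build bundled with your release:

Include directories (C/C++ -> General -> Additional Include Directories):
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\include
%INTEL_OPENVINO_DIR%\opencv\include

Library directories (Linker -> General -> Additional Library Directories):
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\lib\intel64\Release
%INTEL_OPENVINO_DIR%\opencv\lib

Additional dependencies (Linker -> Input -> Additional Dependencies):
inference_engine.lib plus the OpenCV libraries the code uses, e.g. opencv_core440.lib, opencv_imgproc440.lib, opencv_imgcodecs440.lib, opencv_highgui440.lib, opencv_dnn440.lib (names vary with the bundled OpenCV version)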
Inference
The main.cpp script
You only need to adapt the three lines near the top of main() that set FLAGS_d, FLAGS_m, and FLAGS_i (inference device, model path, and image path) to your own setup.
#include <algorithm>
#include <cmath>
#include <chrono>
#include <memory>
#include <map>
#include <string>
#include <vector>
#include <utility>
#include <fstream>
#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
using namespace std;
using namespace InferenceEngine;
using namespace std::chrono;

cv::Mat jpg;

// COCO class names (80 classes)
string names[] = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush" };static void loadjpg(const char * jpgname, int width, int height)
{//loadimage(&jpg, jpgname);////cv::Mat jpg_2x;jpg = cv::imread(jpgname);cout << "load image: " << jpgname << " resize: w=" << width << " h=" << height << endl;cv::resize(jpg, jpg, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
}#define IMG_640//以下为工具函数
double sigmoid(double x) {
    return (1 / (1 + exp(-x)));
}

// Return the anchor set matching a given output grid size
vector<int> get_anchors(int net_grid) {
    vector<int> anchors(6);
    int a80[6] = { 10,13, 16,30, 33,23 };
    int a40[6] = { 30,61, 62,45, 59,119 };
    int a20[6] = { 116,90, 156,198, 373,326 };
#ifdef IMG_640
    if (net_grid == 80) { anchors.insert(anchors.begin(), a80, a80 + 6); }
    else if (net_grid == 40) { anchors.insert(anchors.begin(), a40, a40 + 6); }
    else if (net_grid == 20) { anchors.insert(anchors.begin(), a20, a20 + 6); }
#endif
#ifdef IMG_416
    if (net_grid == 52) { anchors.insert(anchors.begin(), a80, a80 + 6); }
    else if (net_grid == 26) { anchors.insert(anchors.begin(), a40, a40 + 6); }
    else if (net_grid == 13) { anchors.insert(anchors.begin(), a20, a20 + 6); }
#endif
    return anchors;
}

// Note: the threshold here applies to the product of box confidence and class probability
bool parse_yolov5(const Blob::Ptr &blob, int net_grid, float cof_threshold,
    vector<cv::Rect>& o_rect, vector<float>& o_rect_cof, vector<int>& o_label) {
    vector<int> anchors = get_anchors(net_grid);
    LockedMemory<const void> blobMapped = as<MemoryBlob>(blob)->rmap();
    const float *output_blob = blobMapped.as<float *>();
    // 85 values per box for 80 classes; 6 for one class; n + 5 for n classes
    int item_size = 85;
    int anchor_n = 3;
    for (int n = 0; n < anchor_n; ++n)
        for (int i = 0; i < net_grid; ++i)
            for (int j = 0; j < net_grid; ++j) {
                double box_prob = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + 4 * net_grid*net_grid];
                box_prob = sigmoid(box_prob);
                // If the box confidence fails the threshold, the combined confidence fails as well
                if (box_prob < cof_threshold)
                    continue;
                cout << "n= " << n << " " << i << " " << j << " conf=" << box_prob << endl;
                // Note: the network outputs center coordinates; they are converted to corner coordinates below
                double x = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + 0 * net_grid*net_grid];
                double y = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + 1 * net_grid*net_grid];
                double w = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + 2 * net_grid*net_grid];
                double h = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + 3 * net_grid*net_grid];
                // Find the class with the highest probability
                double max_prob = 0;
                int idx = 0;
                for (int t = 5; t < 85; ++t) {
                    double tp = output_blob[n*net_grid*net_grid*item_size + i * net_grid + j + t * net_grid*net_grid];
                    tp = sigmoid(tp);
                    if (tp > max_prob) {
                        max_prob = tp;
                        idx = t;
                    }
                }
                float cof = box_prob * max_prob;
                // For boxes below the combined threshold, skip the remaining math to save computation
                if (cof < cof_threshold)
                    continue;
#ifdef IMG_640
                x = (sigmoid(x) * 2 - 0.5 + j)*640.0f / net_grid;
                y = (sigmoid(y) * 2 - 0.5 + i)*640.0f / net_grid;
#endif
#ifdef IMG_416
                x = (sigmoid(x) * 2 - 0.5 + j) * 416 / net_grid;
                y = (sigmoid(y) * 2 - 0.5 + i) * 416 / net_grid;
#endif
                w = pow(sigmoid(w) * 2, 2) * anchors[n * 2];
                h = pow(sigmoid(h) * 2, 2) * anchors[n * 2 + 1];
                double r_x = x - w / 2;
                double r_y = y - h / 2;
                cv::Rect rect = cv::Rect(round(r_x), round(r_y), round(w), round(h));
                o_rect.push_back(rect);
                o_rect_cof.push_back(cof);
                o_label.push_back(idx - 5);
            }
    return !o_rect.empty();
}
int main(int argc, char *argv[]) {
    try {
        string FLAGS_d = "GPU"; // inference device, e.g. "CPU" or "GPU"
        string FLAGS_m = "E:/projects/pyHome/about_yolo/yolov5-master/changeModle2/yolov5s_sim.xml";
        string FLAGS_i = "E:/projects/pyHome/about_yolo/yolov5-master/data/images/bus.jpg";
        int FLAGS_nt = 10; // (unused in this sample)
        cout << "Model name = " << FLAGS_m << endl;
        cout << "Image name = " << FLAGS_i << endl;
        cout << "starting" << endl;

        const Version *IEversion;
        IEversion = GetInferenceEngineVersion();
        cout << "InferenceEngine: API version " << IEversion->apiVersion.major << "." << IEversion->apiVersion.minor << endl;
        cout << "InferenceEngine: Build : " << IEversion->buildNumber << endl << endl;

        // --------------------------- 1. Load inference engine -------------------------------------
        cout << "Creating Inference Engine" << endl;
        Core ie;

        // --------------------------- 2. Read the IR generated by the Model Optimizer (.xml and .bin files) ------------
        cout << "Loading network files" << endl;
        /** Read network model **/
        CNNNetwork network = ie.ReadNetwork(FLAGS_m);
        cout << "network layer count: " << network.layerCount() << endl;

        // --------------------------- 3. Configure input & output ---------------------------------------------
        // --------------------------- Prepare input blobs -----------------------------------------------------
        cout << "Preparing input blobs" << endl;
        /** Taking information about all topology inputs **/
        InputsDataMap inputInfo(network.getInputsInfo());
        if (inputInfo.size() != 1) throw std::logic_error("Sample supports topologies with 1 input only");
        auto inputInfoItem = *inputInfo.begin();
        /** Specifying the precision and layout of input data provided by the user.
         * This should be called before loading the network to the device **/
        inputInfoItem.second->setPrecision(Precision::U8);
        inputInfoItem.second->setLayout(Layout::NCHW);
        loadjpg(FLAGS_i.c_str(), inputInfoItem.second->getTensorDesc().getDims()[3],
            inputInfoItem.second->getTensorDesc().getDims()[2]);
        if (jpg.data == NULL) {
            cout << "Valid input images were not found!" << endl;
        }
        /** Setting batch size to 1 **/
        network.setBatchSize(1);
        size_t batchSize = network.getBatchSize();
        cout << "Batch size is " << std::to_string(batchSize) << endl;

        // --------------------------- 4. Load the model to the device ------------------------------------------
        cout << "Loading model to the device: " << FLAGS_d << endl;
        ExecutableNetwork executable_network = ie.LoadNetwork(network, FLAGS_d);

        // --------------------------- 5. Create infer request -------------------------------------------------
        cout << "Create infer request" << endl;
        InferRequest inferRequest_regular = executable_network.CreateInferRequest();

        // --------------------------- 6. Prepare input --------------------------------------------------------
        for (auto & item : inputInfo) {
            Blob::Ptr inputBlob = inferRequest_regular.GetBlob(item.first);
            SizeVector dims = inputBlob->getTensorDesc().getDims();
            /** Fill the input tensor with image data: first the b channel, then g and r **/
            size_t num_channels = dims[1];
            std::cout << "num_channels = " << num_channels << std::endl;
            size_t image_size = dims[3] * dims[2];
            MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
            if (!minput) {
                cout << "We expect MemoryBlob from inferRequest_regular, but in fact we were not able to cast inputBlob to MemoryBlob" << endl;
                return 1;
            }
            // The locked memory holder must stay alive for as long as its buffer is accessed
            auto minputHolder = minput->wmap();
            auto data = minputHolder.as<PrecisionTrait<Precision::U8>::value_type *>();
            unsigned char* pixels = (unsigned char*)(jpg.data);
            cout << "image_size = " << image_size << endl;
            /** Iterate over all pixels (b, g, r), converting interleaved HWC into planar CHW **/
            for (size_t pid = 0; pid < image_size; pid++) {
                /** Iterate over all channels **/
                for (size_t ch = 0; ch < num_channels; ++ch) {
                    /** [image stride + channel stride + pixel id], all in bytes **/
                    data[ch * image_size + pid] = pixels[pid*num_channels + ch];
                }
            }
        }

        milliseconds start_ms = duration_cast<milliseconds>(system_clock::now().time_since_epoch());

        // --------------------------- 7. Do inference ---------------------------------------------------------
        /* Start sync request */
        cout << "Start inference " << endl;
        inferRequest_regular.Infer();

        milliseconds end_ms = duration_cast<milliseconds>(system_clock::now().time_since_epoch());
        std::cout << "total cost time: " << (end_ms - start_ms).count() << " ms" << std::endl;
        float total_time = (end_ms - start_ms).count() / 1000.0;
        std::cout << "FPS: " << (float)1.0 / total_time << std::endl;

        // --------------------------- 8. Process output -------------------------------------------------------
        cout << "Processing output blobs" << endl;
        OutputsDataMap outputInfo(network.getOutputsInfo());
        // Collect the raw detections from each output layer
        vector<cv::Rect> origin_rect;
        vector<float> origin_rect_cof;
        vector<int> label;
        double _cof_threshold = 0.5;      // confidence threshold: box confidence multiplied by class probability
        double _nms_area_threshold = 0.5; // minimum overlap threshold for NMS
#ifdef IMG_416
        int s[3] = { 26,13,52 };
#endif
#ifdef IMG_640
        int s[3] = { 80,40,20 };
#endif
        int i = 0;
        for (auto &output : outputInfo) {
            auto output_name = output.first;
            cout << " ------ output_name = " << output_name << endl;
            Blob::Ptr blob = inferRequest_regular.GetBlob(output_name);
            parse_yolov5(blob, s[i], _cof_threshold, origin_rect, origin_rect_cof, label);
            cout << "label.size() = " << label.size() << endl;
            ++i;
        }
        // Post-processing: non-maximum suppression picks the final detections
        vector<int> final_id;
        cv::dnn::NMSBoxes(origin_rect, origin_rect_cof, _cof_threshold, _nms_area_threshold, final_id);
        cout << "final_id.size() = " << final_id.size() << endl;
        // Draw the final results selected by final_id
        for (size_t k = 0; k < final_id.size(); ++k) {
            cv::Rect resize_rect = origin_rect[final_id[k]];
            int xmin = resize_rect.x;
            int ymin = resize_rect.y;
            int width = resize_rect.width;
            int height = resize_rect.height;
            cv::Rect rect(xmin, ymin, width, height); // top-left corner (x, y) plus width and height
            cv::putText(jpg, names[label[final_id[k]]], cv::Point2f(xmin, ymin), cv::FONT_HERSHEY_TRIPLEX, 0.7, cv::Scalar{ 0, 0, 255 });
            cv::rectangle(jpg, rect, cv::Scalar(0, 0, 255), 1, cv::LINE_8, 0);
        }
        imshow("result", jpg);
        while (1) {
            if (cv::waitKey(30) == 27 /*ESC*/) {
                break;
            }
        }
    }
    catch (const std::exception& error) {
        cout << error.what() << endl;
        return 1;
    }
    catch (...) {
        cout << "Unknown/internal exception happened." << endl;
        return 1;
    }
    cout << "Execution successful" << endl;
    cout << endl << "This sample is an API example; for any performance measurements "
        "please use the dedicated benchmark_app tool" << endl;
    return 0;
}
Result:
Problems encountered at runtime
The build reports: error C4996 'wcstombs': This function or variable may be unsafe. Consider using wcstombs_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details. yolov5OpenvinoSelfTest c:/program files (x86)/intelswtools/openvino_2020.4.287/deployment_tools/inference_engine/include/ie_unicode.hpp 38
This happens because a macro definition is missing; adding the required macro fixes it.
Go to Project -> Properties -> Configuration Properties -> C/C++ -> Preprocessor -> Preprocessor Definitions and add _CRT_SECURE_NO_DEPRECATE.
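Alternatively, instead of changing the project settings, the macro can be defined at the top of the source file before any header that pulls in ie_unicode.hpp. A minimal sketch (either macro spelling works; the error text itself suggests _CRT_SECURE_NO_WARNINGS):

// Must come before the first #include, otherwise the CRT headers have
// already marked wcstombs as deprecated and C4996 still fires.
#define _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_DEPRECATE

#include <inference_engine.hpp>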
Reference: GitHub - fb029ed/yolov5_cpp_openvino: a C++ implementation of YOLOv5 deployment with OpenVINO
[If this post helped you, please like, bookmark, and share — your support is what keeps this blog well maintained!]