visual studio 2019配置OnnxRuntime+推理+vgg16
生活随笔
收集整理的這篇文章主要介紹了
visual studio 2019配置OnnxRuntime+推理+vgg16
小編覺得挺不錯的，現在分享給大家，幫大家做個參考。
配置onnxruntime
onnxruntime 動態(tài)庫下載
你可以下載vgg16模型加代碼運(yùn)行這個(gè)項(xiàng)目
代碼在git
vgg16模型鏈接
鏈接: https://pan.baidu.com/s/19FE_eOgx8dbAwFitbzY1bg?pwd=6sub 提取碼: 6sub 復(fù)制這段內(nèi)容后
也可以把代碼+模型+依賴庫全部下載，注意需要配置你的模型和相關文件路徑+你的opencv依賴
鏈接: https://pan.baidu.com/s/1RZSoeR5hYMNXspeJ6ywXiA?pwd=d6p5
提取碼: d6p5
我建議你自己配置onnxruntime 環(huán)境然后復(fù)制代碼+配置模型和數(shù)據(jù)路徑
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/dnn.hpp>
#include <iostream>
#include <onnxruntime_cxx_api.h>
#include <assert.h>
#include <vector>
#include <fstream>
#include <ctime>   // clock(), CLOCKS_PER_SEC — previously pulled in only transitively

using namespace cv;       // lets cv::imread be written as imread, etc.
using namespace std;
using namespace Ort;
using namespace cv::dnn;

// Path to the ImageNet label file (one class name per line, 1000 lines).
//String labels_txt_file = "F:\\Pycharm\\PyCharm_Study\\Others\\c++_learning\\C++_Master\\Onnx\\classification\\classification_classes_ILSVRC2012.txt";
//String labels_txt_file = "/home/oem/lg/project/onnx_infer/onnx_cpp/onnxruntime_yolo_cpn-master/weights/classification_classes_ILSVRC2012.txt";
String labels_txt_file = "C://Users//DELL//source//repos//ConsoleApplication7//weights//classification_classes_ILSVRC2012.txt";

vector<String> readClassNames();

// Image preprocessing: per-channel ImageNet normalisation (x/255 - mean) / std,
// and reorders the OpenCV BGR channels into RGB for the network input.
void PreProcess(const Mat& image, Mat& image_blob)
{
    Mat input;
    image.copyTo(input);

    std::vector<Mat> channels, channel_p;
    split(input, channels);
    Mat R, G, B;
    B = channels.at(0);
    G = channels.at(1);
    R = channels.at(2);
    // ImageNet statistics: mean (R,G,B) = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225)
    B = (B / 255. - 0.406) / 0.225;
    G = (G / 255. - 0.456) / 0.224;
    R = (R / 255. - 0.485) / 0.229;
    // push R,G,B in that order: output blob is RGB
    channel_p.push_back(R);
    channel_p.push_back(G);
    channel_p.push_back(B);

    Mat outt;
    merge(channel_p, outt);
    image_blob = outt;
}

// Read the label file into a vector of class names.
// Exits the process if the file cannot be opened.
std::vector<String> readClassNames()
{
    std::vector<String> classNames;
    std::ifstream fp(labels_txt_file);
    if (!fp.is_open())
    {
        printf("could not open file...\n");
        exit(-1);
    }
    std::string name;
    // BUGFIX: loop on getline() itself instead of !fp.eof(), which re-processes
    // the last line / reads a stale buffer when the final read fails.
    while (std::getline(fp, name))
    {
        if (name.length())
            classNames.push_back(name);
    }
    fp.close();
    return classNames;
}

int main()
{
    // Environment. Use ORT_LOGGING_LEVEL_VERBOSE to see on the console whether
    // execution runs on CPU or GPU.
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "OnnxModel");
    Ort::SessionOptions session_options;
    // Run each op with 1 thread; increase for more speed.
    session_options.SetIntraOpNumThreads(1);
    // CUDA acceleration (newer onnxruntime dropped cuda_provider_factory.h;
    // this call works with onnxruntime V1.8):
    //OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
    // ORT_ENABLE_ALL: enable every available graph optimisation.
    session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);

    // Load model and create session.
#ifdef _WIN32
    //const wchar_t* model_path = L"F:\\Pycharm\\PyCharm_Study\\Others\\c++_learning\\C++_Master\\Onnx\\classification\\vgg16.onnx";
    const wchar_t* model_path = L"C://Users//DELL//source//repos//ConsoleApplication7//weights//vgg16.onnx";
#else
    //const char* model_path = "F:\\Pycharm\\PyCharm_Study\\Others\\c++_learning\\C++_Master\\Onnx\\classification\\vgg16.onnx";
    const char* model_path = "../../weights/vgg16.onnx";
#endif

    printf("Using Onnxruntime C++ API\n");
    Ort::Session session(env, model_path, session_options);

    // Print model input layer info (node names, types, shape etc.).
    Ort::AllocatorWithDefaultOptions allocator;

    // Number of model inputs/outputs; a single-image classifier has 1 of each.
    size_t num_input_nodes = session.GetInputCount();
    size_t num_output_nodes = session.GetOutputCount();
    printf("Number of inputs = %zu\n", num_input_nodes);
    printf("Number of output = %zu\n", num_output_nodes);

    // Query the input/output node names from the model.
    const char* input_name = session.GetInputName(0, allocator);
    std::cout << "input_name:" << input_name << std::endl;
    const char* output_name = session.GetOutputName(0, allocator);
    std::cout << "output_name: " << output_name << std::endl;

    // Query the tensor shapes.
    auto input_dims = session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    auto output_dims = session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    std::cout << "input_dims:" << input_dims[0] << std::endl;
    std::cout << "output_dims:" << output_dims[0] << std::endl;
    // Models exported with a dynamic batch axis report -1 here; pin it to 1 so
    // CreateTensor's element count matches the blob below.
    if (input_dims[0] < 0)
        input_dims[0] = 1;

    // BUGFIX: use the names queried from the model instead of the hard-coded
    // "input.1" / "70", so the code survives models exported with other names.
    std::vector<const char*> input_node_names = { input_name };
    std::vector<const char*> output_node_names = { output_name };

    // Load the test image.
    Mat img = imread("C://Users//DELL//source//repos//ConsoleApplication7//weights//dog.jpg");
    //Mat img = imread("F:\\Pycharm\\PyCharm_Study\\Others\\c++_learning\\C++_Master\\Onnx\\classification\\dog.jpg");
    // BUGFIX: imread returns an empty Mat on failure; bail out instead of crashing later.
    if (img.empty())
    {
        printf("could not read image...\n");
        return -1;
    }

    Mat det1, det2;
    // BUGFIX: interpolation is resize()'s 6th parameter; the original passed
    // INTER_AREA in the fx (scale) slot, so it was silently ignored.
    resize(img, det1, Size(256, 256), 0, 0, INTER_AREA);
    det1.convertTo(det1, CV_32FC3);
    PreProcess(det1, det2);   // normalise + BGR->RGB
    // NCHW blob, centre-cropped/resized to the 224x224 VGG16 input.
    Mat blob = dnn::blobFromImage(det2, 1., Size(224, 224), Scalar(0, 0, 0), false, true);
    printf("Load success!\n");

    clock_t startTime, endTime;

    // Create the input tensor (CPU memory, borrowing the blob's buffer).
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
    std::vector<Ort::Value> input_tensors;
    input_tensors.emplace_back(Ort::Value::CreateTensor<float>(
        memory_info, blob.ptr<float>(), blob.total(), input_dims.data(), input_dims.size()));

    startTime = clock();
    // Run inference: (options, input names, input tensors, #inputs, output names, #outputs).
    // Every name must match the model graph or Run() throws.
    auto output_tensors = session.Run(Ort::RunOptions{ nullptr },
                                      input_node_names.data(), input_tensors.data(), input_node_names.size(),
                                      output_node_names.data(), output_node_names.size());
    endTime = clock();
    assert(output_tensors.size() == 1 && output_tensors.front().IsTensor());

    // Pointer to the raw output scores. NOTE(review): these are the network's raw
    // outputs — if the exported model has no softmax layer they are logits, not
    // probabilities; confirm against the export script.
    float* floatarr = output_tensors[0].GetTensorMutableData<float>();

    // Copy the 1000 class scores into a 1x1000 matrix for minMaxLoc.
    Mat newarr = Mat_<double>(1, 1000);
    for (int i = 0; i < newarr.rows; i++)
    {
        for (int j = 0; j < newarr.cols; j++)
        {
            newarr.at<double>(i, j) = floatarr[j];
        }
    }

    vector<String> labels = readClassNames();
    // Find the best-scoring class for each row (only one row here).
    for (int n = 0; n < newarr.rows; n++)
    {
        Point classNumber;
        double classProb;
        Mat probMat = newarr(Rect(0, n, 1000, 1)).clone();
        Mat result = probMat.reshape(1, 1);
        minMaxLoc(result, NULL, &classProb, NULL, &classNumber);
        int classidx = classNumber.x;
        printf("\n current image classification : %s, possible : %.2f\n",
               labels.at(classidx).c_str(), classProb);

        // Draw the predicted label on the image and show it.
        putText(img, labels.at(classidx), Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 255), 1, 1);
        imshow("Image Classification", img);
        waitKey(0);
    }

    // Report inference wall time (the duplicate in-loop print was removed).
    std::cout << "The run time is:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
    printf("Done!\n");
    //system("pause");
    return 0;
}
以上是生活隨筆為你收集整理的 visual studio 2019配置OnnxRuntime+推理+vgg16 的全部內容，希望文章能夠幫你解決所遇到的問題。
- 上一篇: 如何用visual studio 201
- 下一篇: GraalVM Internship P