Windows 10 + PaddleSeg + C++ Deployment


Deployment environment: VS2019 + CUDA 10.2 + CMake 3.17.0

For v2.0.0, refer to the article 兼容並包的PaddleX-Inference部署方式 (an all-inclusive PaddleX-Inference deployment guide).

Following the official guide 基於PaddleInference的推理-Windows環境編譯 (PaddleInference-based inference: building on Windows), download PaddleX and the CUDA 10.2 build of paddle_inference.zip, copy the cpp folder out of PaddleX, and create a new build_out folder inside it.

 

Modify CMakeLists.txt in the cpp folder as shown in the figure below; only the few lines in the figure change, everything else stays the same:

(figure: the modified lines of CMakeLists.txt)

Configure CMake with exactly the options selected in the figure below, no more and no fewer, or you will run into trouble:

(figure: the CMake options to select)

Click Open Project to open the generated solution in Visual Studio; you will see several projects. Modify model_infer.cpp as follows:

// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <iostream>
#include "model_deploy/common/include/paddle_deploy.h"

using namespace cv;
using namespace std;

//DEFINE_string(model_filename, "", "Path of det inference model");
//DEFINE_string(params_filename, "", "Path of det inference params");
//DEFINE_string(cfg_file, "", "Path of yaml file");
//DEFINE_string(model_type, "", "model type");
//DEFINE_string(image, "", "Path of test image file");
//DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
//DEFINE_int32(gpu_id, 0, "GPU card id");
// This struct receives the inference results in our own project, because the
// result type vector<PaddleDeploy::Result> does not exist there.
struct result_local
{
    std::vector<uint8_t> data;
    // per-pixel confidence scores of the mask
    std::vector<float> score;
};
// Export as a DLL.
#define DLLEXPORT extern "C" __declspec(dllexport)

string model_type = "seg";
PaddleDeploy::Model* model;
// Exported interface
DLLEXPORT void Init(string model_filename, string cfg_file, string params_filename, int gpu_id)
{
    // create model
    model = PaddleDeploy::CreateModel(model_type);
    // model init
    model->Init(cfg_file);
    // inference engine init
    PaddleDeploy::PaddleEngineConfig engine_config;
    engine_config.model_filename = model_filename;
    engine_config.params_filename = params_filename;
    engine_config.use_gpu = true;
    engine_config.gpu_id = gpu_id;
    model->PaddleEngineInit(engine_config);
}

DLLEXPORT void Predict(vector<Mat> imgs, vector<result_local>& results_back)
{
    cout << "enter Predict       imgs.size():   " << imgs.size() << endl;
    vector<cv::Mat> imgs_local;
    if (imgs.size() == 0)
    {
        cout << "no input images" << endl;
        return;
    }
    for (int i = 0; i < imgs.size(); i++)
    {
        imgs_local.push_back(std::move(imgs[i]));
    }
    // predict
    std::vector<PaddleDeploy::Result> results;
    model->Predict(imgs_local, &results, 1);
    for (int i = 0; i < results.size(); i++)
    {
        result_local temp;
        vector<uint8_t> data = results[i].seg_result->label_map.data;
        vector<float> score = results[i].seg_result->score_map.data;
        temp.data = data;
        temp.score = score;
        results_back.push_back(temp);
    }
}

//int main(int argc, char** argv) {
//  // Parsing command-line
//  google::ParseCommandLineFlags(&argc, &argv, true);
//
//  // create model
//  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
//
//  // model init
//  model->Init(FLAGS_cfg_file);
//
//  // inference engine init
//  PaddleDeploy::PaddleEngineConfig engine_config;
//  engine_config.model_filename = FLAGS_model_filename;
//  engine_config.params_filename = FLAGS_params_filename;
//  engine_config.use_gpu = FLAGS_use_gpu;
//  engine_config.gpu_id = FLAGS_gpu_id;
//  model->PaddleEngineInit(engine_config);
//
//  // prepare data
//  std::vector<cv::Mat> imgs;
//  imgs.push_back(std::move(cv::imread(FLAGS_image)));
//
//  // predict
//  std::vector<PaddleDeploy::Result> results;
//  model->Predict(imgs, &results, 1);
//
//  std::cout << results[0] << std::endl;
//  delete model;
//  return 0;
//}

Then right-click the model_infer project, set it as the startup project, and click Build.

Next, create a new project in VS:

#include <iostream>
#include <opencv2/opencv.hpp>
#include <Windows.h>
using namespace std;
using namespace cv;

struct result_now
{
    std::vector<uint8_t> data;
    // per-pixel confidence scores of the mask
    std::vector<float> score;
};
typedef void(*ptrSub_Init)(string model_filename, string cfg_file, string params_filename, int gpu_id);
typedef void(*ptrSub_predict)(vector<cv::Mat> imgs, vector<result_now>& results_back);
HMODULE hMod = LoadLibraryA("model_infer.dll");   // LoadLibraryA takes a narrow string, so this also builds in Unicode projects
typedef void(*ptrSub_batch_Init)(string model_filename, string cfg_file, string params_filename, int gpu_id, bool use_trt);
typedef void(*ptrSub_batch_predict)(std::vector<std::string> image_paths, vector<result_now>& results_back, int batch_size);
HMODULE hMod_batch = LoadLibraryA("batch_infer.dll");
// This part receives the results from model_infer
void normal_infer()
{
    string imgs_path = "D:/VS_projects/test/";
    string cfg_file = "D:/PaddleSeg_projects/PaddleX_DLL/test/deploy.yaml";
    string params_filename = "D:/PaddleSeg_projects/PaddleX_DLL/test/model.pdiparams";
    string model_filename = "D:/PaddleSeg_projects/PaddleX_DLL/test/model.pdmodel";
    vector<string> img_names;
    glob(imgs_path, img_names);
    vector<Mat> imgs;
    for (int i = 0; i < img_names.size(); i++)
    {
        Mat img = imread(img_names[i]);
        imgs.push_back(img);
    }
    vector<result_now> results;
    if (hMod != NULL)
    {
        ptrSub_Init Init = (ptrSub_Init)GetProcAddress(hMod, "Init");
        ptrSub_predict Predict = (ptrSub_predict)GetProcAddress(hMod, "Predict");
        if (Init != NULL)
        {
            Init(model_filename, cfg_file, params_filename, 0);
        }
        if (Predict != NULL)
        {
            Predict(imgs, results);
        }
    }
    std::cout << results.size() << endl;
    // Note: the demo below assumes at least two images were found in imgs_path.
    std::vector<uint8_t> data = results[0].data;
    for (int i = 0; i < imgs[0].rows; i++)
    {
        for (int j = 0; j < imgs[0].cols; j++)
        {
            if (data[j + imgs[0].cols * i] == 0)
            {
                continue;
            }
            imgs[0].at<Vec3b>(i, j)[0] = data[j + imgs[0].cols * i] * 100;
            imgs[0].at<Vec3b>(i, j)[1] = data[j + imgs[0].cols * i] * 100;
            imgs[0].at<Vec3b>(i, j)[2] = data[j + imgs[0].cols * i] * 100;
        }
    }
    std::vector<uint8_t> data1 = results[1].data;
    for (int i = 0; i < imgs[1].rows; i++)
    {
        for (int j = 0; j < imgs[1].cols; j++)
        {
            if (data1[j + imgs[1].cols * i] == 0)
            {
                continue;
            }
            imgs[1].at<Vec3b>(i, j)[0] = data1[j + imgs[1].cols * i] * 100;
            imgs[1].at<Vec3b>(i, j)[1] = data1[j + imgs[1].cols * i] * 100;
            imgs[1].at<Vec3b>(i, j)[2] = data1[j + imgs[1].cols * i] * 100;
        }
    }
    namedWindow("re", WINDOW_AUTOSIZE);
    imshow("re", imgs[0]);
    namedWindow("re2", WINDOW_AUTOSIZE);
    imshow("re2", imgs[1]);
    waitKey(0);
}
// This part receives the results from batch_infer
void batch_infer()
{
    string imgs_path = "D:/VS_projects/test/";
    string cfg_file = "D:/PaddleSeg_projects/PaddleX_DLL/test/deploy.yaml";
    string params_filename = "D:/PaddleSeg_projects/PaddleX_DLL/test/model.pdiparams";
    string model_filename = "D:/PaddleSeg_projects/PaddleX_DLL/test/model.pdmodel";
    vector<std::string> img_names;
    glob(imgs_path, img_names);
    vector<Mat> imgs;
    for (int i = 0; i < img_names.size(); i++)
    {
        Mat img = imread(img_names[i]);
        imgs.push_back(img);
    }
    vector<result_now> results;
    if (hMod_batch != NULL)
    {
        ptrSub_batch_Init Init = (ptrSub_batch_Init)GetProcAddress(hMod_batch, "Init");
        ptrSub_batch_predict Predict = (ptrSub_batch_predict)GetProcAddress(hMod_batch, "Predict");
        if (Init != NULL)
        {
            Init(model_filename, cfg_file, params_filename, 0, false);
        }
        if (Predict != NULL)
        {
            Predict(img_names, results, 1);
        }
    }
    std::cout << "local result size " << results.size() << endl;
    std::vector<uint8_t> data = results[0].data;
    for (int i = 0; i < imgs[0].rows; i++)
    {
        for (int j = 0; j < imgs[0].cols; j++)
        {
            if (data[j + imgs[0].cols * i] == 0)
            {
                continue;
            }
            imgs[0].at<Vec3b>(i, j)[0] = data[j + imgs[0].cols * i] * 100;
            imgs[0].at<Vec3b>(i, j)[1] = data[j + imgs[0].cols * i] * 100;
            imgs[0].at<Vec3b>(i, j)[2] = data[j + imgs[0].cols * i] * 100;
        }
    }
    std::vector<uint8_t> data1 = results[1].data;
    for (int i = 0; i < imgs[1].rows; i++)
    {
        for (int j = 0; j < imgs[1].cols; j++)
        {
            if (data1[j + imgs[1].cols * i] == 0)
            {
                continue;
            }
            imgs[1].at<Vec3b>(i, j)[0] = data1[j + imgs[1].cols * i] * 100;
            imgs[1].at<Vec3b>(i, j)[1] = data1[j + imgs[1].cols * i] * 100;
            imgs[1].at<Vec3b>(i, j)[2] = data1[j + imgs[1].cols * i] * 100;
        }
    }
    namedWindow("re", WINDOW_AUTOSIZE);
    imshow("re", imgs[0]);
    namedWindow("re2", WINDOW_AUTOSIZE);
    imshow("re2", imgs[1]);
    waitKey(0);
}

int main()
{
    //normal_infer();
    batch_infer();
}

Copy the DLLs generated above, together with the DLLs under build_out\paddle_deploy, into the new project's output directory, and the library can then be called.

 

Addendum:

If you want to call batch_infer, modify the code of batch_infer.cpp as follows:

// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <omp.h>
#include <memory>
#include <string>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <iostream>
#include "model_deploy/common/include/paddle_deploy.h"
using namespace cv;
using namespace std;

//DEFINE_string(model_filename, "D:/PaddleSeg_projects/PaddleX_DLL4/weights_info/model.pdmodel", "Path of det inference model");
//DEFINE_string(params_filename, "D:/PaddleSeg_projects/PaddleX_DLL4/weights_info/model.pdiparams", "Path of det inference params");
//DEFINE_string(cfg_file, "D:/PaddleSeg_projects/PaddleX_DLL4/weights_info/deploy.yaml", "Path of yaml file");
//DEFINE_string(model_type, "seg", "model type");
//DEFINE_string(image_list, "D:/VS_projects/test/in.txt", "Path of test image file");
//DEFINE_int32(batch_size, 1, "Batch size of infering");
//DEFINE_bool(use_gpu, true, "Infering with GPU or CPU");
//DEFINE_int32(gpu_id, 0, "GPU card id");
//DEFINE_bool(use_trt, false, "Infering with TensorRT");

struct result_local
{
    std::vector<uint8_t> data;
    // per-pixel confidence scores of the mask
    std::vector<float> score;
};

#define DLLEXPORT extern "C" __declspec(dllexport)

string model_type = "seg";
PaddleDeploy::Model* model;

DLLEXPORT void Init(string model_filename, string cfg_file, string params_filename, int gpu_id, bool use_trt)
{
    // create model
    model = PaddleDeploy::CreateModel(model_type);
    // model init
    model->Init(cfg_file);
    // inference engine init
    PaddleDeploy::PaddleEngineConfig engine_config;
    engine_config.model_filename = model_filename;
    engine_config.params_filename = params_filename;
    engine_config.use_gpu = true;
    engine_config.gpu_id = gpu_id;
    engine_config.use_trt = use_trt;
    if (use_trt) {
        engine_config.precision = 0;  // precision 0 selects FP32 for the TensorRT engine
    }
    model->PaddleEngineInit(engine_config);
    std::cout << "Paddle  Init  Sucess" << std::endl;
}

DLLEXPORT  void Predict(std::vector<std::string> image_paths, vector<result_local>& results_back,int batch_size)
{
    // infer
    std::vector<PaddleDeploy::Result> results;
    for (int i = 0; i < image_paths.size(); i += batch_size) {
        // Read image
        results.clear();
        int im_vec_size = std::min(static_cast<int>(image_paths.size()), i + batch_size);
        std::vector<cv::Mat> im_vec(im_vec_size - i);
        #pragma omp parallel for num_threads(im_vec_size - i)
        for (int j = i; j < im_vec_size; ++j) {
            im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
        }
        std::cout << "im_vec size       " << im_vec.size() << endl;
        model->Predict(im_vec, &results);
        // Debug visualization: show the first image of each batch with its mask
        // overlaid (blocks until a key is pressed).
        std::vector<uint8_t> data = results[0].seg_result->label_map.data;
        cv::Mat img = cv::imread(image_paths[i]);
        for (int i = 0; i < img.rows; i++)
        {
            for (int j = 0; j < img.cols; j++)
            {
                if (data[j + img.cols * i] == 0)
                {
                    continue;
                }
                img.at<Vec3b>(i, j)[0] = data[j + img.cols * i] * 100;
                img.at<Vec3b>(i, j)[1] = data[j + img.cols * i] * 100;
                img.at<Vec3b>(i, j)[2] = data[j + img.cols * i] * 100;
            }
        }
        namedWindow("re", WINDOW_AUTOSIZE);
        imshow("re", img);
        waitKey(0);
        for (int i = 0; i < results.size(); i++)
        {
            result_local temp;
            vector<uint8_t> data = results[i].seg_result->label_map.data;
            vector<float> score = results[i].seg_result->score_map.data;
            temp.data = data;
            temp.score = score;
            results_back.push_back(temp);
        }
    }
}

//int main(int argc, char** argv) {
//  // Parsing command-line
//  google::ParseCommandLineFlags(&argc, &argv, true);
//
//  // create model
//  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
//
//  // model init
//  model->Init(FLAGS_cfg_file);
//
//  // inference engine init
//  PaddleDeploy::PaddleEngineConfig engine_config;
//  engine_config.model_filename = FLAGS_model_filename;
//  engine_config.params_filename = FLAGS_params_filename;
//  engine_config.use_gpu = FLAGS_use_gpu;
//  engine_config.gpu_id = FLAGS_gpu_id;
//  engine_config.use_trt = FLAGS_use_trt;
//  if (FLAGS_use_trt) {
//    engine_config.precision = 0;
//  }
//  model->PaddleEngineInit(engine_config);
//
//  // Mini-batch
//  std::vector<std::string> image_paths;
//  if (FLAGS_image_list != "") {
//    std::ifstream inf(FLAGS_image_list);
//    if (!inf) {
//      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
//      return -1;
//    }
//    std::string image_path;
//    while (getline(inf, image_path)) {
//      image_paths.push_back(image_path);
//    }
//  }
//  // infer
//  std::vector<PaddleDeploy::Result> results;
//  for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
//    // Read image
//    results.clear();
//    int im_vec_size =
//        std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
//    std::vector<cv::Mat> im_vec(im_vec_size - i);
//    #pragma omp parallel for num_threads(im_vec_size - i)
//    for (int j = i; j < im_vec_size; ++j) {
//      im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
//    }
//    std::cout << im_vec.size() << "      :::::::::" << endl;
//    model->Predict(im_vec, &results);
//
//    std::cout << i / FLAGS_batch_size << " group -----" << std::endl;
//    for (auto j = 0; j < results.size(); ++j) {
//      std::cout << "Result for sample " << j << std::endl;
//      std::cout << results[j] << std::endl;
//    }
//    std::cout << results.size() << std::endl;
//    std::vector<uint8_t> data = results[0].seg_result->label_map.data;
//    cv::Mat img = cv::imread(image_paths[i]);
//    for (int i = 0; i < img.rows; i++)
//    {
//        for (int j = 0; j < img.cols; j++)
//        {
//            if (data[j + img.cols * i] == 0)
//            {
//                continue;
//            }
//            img.at<Vec3b>(i, j)[0] = data[j + img.cols * i] * 100;
//            img.at<Vec3b>(i, j)[1] = data[j + img.cols * i] * 100;
//            img.at<Vec3b>(i, j)[2] = data[j + img.cols * i] * 100;
//        }
//    }
//    namedWindow("re", WINDOW_AUTOSIZE);
//    imshow("re", img);
//    waitKey(0);
//  }
//  delete model;
//  return 0;
//}

About CUDA 11.0 + VS2019

The following changes are needed:

(figure: the changes for CUDA 11.0 + VS2019)

References:

The article series 工業黨福利:使用PaddleX高效實現指針型儀表讀取 (reading pointer-type meters efficiently with PaddleX)

How to call a DLL file from a C++ program

Two ways to call a DLL from C++

 

