Optimize code

main
熊继淙 1 year ago
parent 9dc17da6ba
commit 243dfa5a72

@@ -4,7 +4,7 @@
 namespace fs = std::filesystem;
 using namespace samplesCommon;
-MF_Resnet34Infer::MF_Resnet34Infer(const utils::InitParameter& param) :MF_ImageClassificationBase(param)
+MF_Resnet34Infer::MF_Resnet34Infer(const trtUtils::InitParameter& param) :MF_ImageClassificationBase(param)
 {
     checkRuntime(cudaStreamCreate(&mStream));
@@ -102,7 +102,7 @@ bool MF_Resnet34Infer::initEngine(const std::string& _onnxFileName)
     return true;
 }
-bool MF_Resnet34Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<utils::MR_Result>* _detectRes, int* _user)
+bool MF_Resnet34Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user)
 {
     m_mutex.lock();
     std::vector<cv::Mat> matImgs;
@@ -152,7 +152,7 @@ bool MF_Resnet34Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImagePara
     return true;
 }
-bool MF_Resnet34Infer::doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<utils::MR_Result>* _detectRes, int* _user)
+bool MF_Resnet34Infer::doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user)
 {
     m_mutex.lock();
     int iRet = 0;
@@ -390,20 +390,20 @@ int MF_Resnet34Infer::postProcess(const std::vector<cv::Mat>& _imgsBatch)
     confidence = prob[predict_label];    // confidence of the predicted label
     if (confidence < 0.5)
     {
-        detectRes = utils::ME_DetectRes::E_DETECT_NG;
+        detectRes = trtUtils::ME_DetectRes::E_DETECT_NG;
     }
     else
     {
-        detectRes = utils::ME_DetectRes::E_DETECT_OK;
+        detectRes = trtUtils::ME_DetectRes::E_DETECT_OK;
     }
     printf("Predict: %s, confidence: %.3f, label: %d. \n", predictName.c_str(), confidence, predict_label);
     return 0;
 }
-int MF_Resnet34Infer::getDetectResult(std::vector<utils::MR_Result>& _result)
+int MF_Resnet34Infer::getDetectResult(std::vector<trtUtils::MR_Result>& _result)
 {
-    utils::MR_Result res;
+    trtUtils::MR_Result res;
     for (size_t i = 0; i < m_param.batch_size; i++)
     {
         res.mClassifyDecRes.mDetectRes = detectRes;

@@ -9,15 +9,15 @@
 class MF_Resnet34Infer : public MF_ImageClassificationBase
 {
 public:
-    MF_Resnet34Infer(const utils::InitParameter& param);
+    MF_Resnet34Infer(const trtUtils::InitParameter& param);
     ~MF_Resnet34Infer();
     // initialize the engine
     bool initEngine(const std::string& _onnxFileName);
     // inference
-    bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<utils::MR_Result>* _detectRes, int* _user);
+    bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user);
     // inference
-    bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<utils::MR_Result>* _detectRes, int* _user);
+    bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user);
     // get error message
     static std::string getError();
     // clean up data / memory
@@ -34,7 +34,7 @@ protected:
     // post-processing
     int postProcess(const std::vector<cv::Mat>& _imgsBatch);
     // get the inference result
-    int getDetectResult(std::vector<utils::MR_Result>& _result);
+    int getDetectResult(std::vector<trtUtils::MR_Result>& _result);
 private:
@@ -46,7 +46,7 @@ private:
 private:
     int input_numel;
     nvinfer1::Dims input_dims;          // input tensor dimensions
-    utils::ME_DetectRes detectRes;      // detection result
+    trtUtils::ME_DetectRes detectRes;   // detection result
     float confidence;                   // confidence of the prediction
     std::string predictName;            // predicted class name
     std::mutex m_mutex;

@@ -3,7 +3,7 @@
 namespace fs = std::filesystem;
-MF_Yolov8Infer::MF_Yolov8Infer(const utils::InitParameter& param) : MF_ObjectDetectBase(param)
+MF_Yolov8Infer::MF_Yolov8Infer(const trtUtils::InitParameter& param) : MF_ObjectDetectBase(param)
 {
 }
@@ -98,7 +98,7 @@ bool MF_Yolov8Infer::initEngine(const std::string& _onnxFileName)
     return false;
 }
-bool MF_Yolov8Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<utils::MR_Result>* _detectRes, int* _user)
+bool MF_Yolov8Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user)
 {
     std::vector<cv::Mat> matImgs;
     for (auto _var : _bufImgs)
@@ -159,7 +159,7 @@ bool MF_Yolov8Infer::doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>
     return false;
 }
-bool MF_Yolov8Infer::doTRTInfer(const std::vector<cv::Mat>& _bufImgs, std::vector<utils::MR_Result>* _detectRes, int* _user)
+bool MF_Yolov8Infer::doTRTInfer(const std::vector<cv::Mat>& _bufImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user)
 {
     return false;
 }
@@ -258,7 +258,7 @@ int MF_Yolov8Infer::postProcess(const std::vector<cv::Mat>& _imgsBatch)
     return 0;
 }
-int MF_Yolov8Infer::getDetectResult(std::vector<utils::MR_Result>& _result)
+int MF_Yolov8Infer::getDetectResult(std::vector<trtUtils::MR_Result>& _result)
 {
     if (_result.size() <= 0)
     {

@@ -5,15 +5,15 @@
 class MF_Yolov8Infer : public MF_ObjectDetectBase
 {
 public:
-    MF_Yolov8Infer(const utils::InitParameter& param);
+    MF_Yolov8Infer(const trtUtils::InitParameter& param);
     ~MF_Yolov8Infer();
     // initialize the engine
     bool initEngine(const std::string& _onnxFileName);
     // inference
-    bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<utils::MR_Result>* _detectRes, int* _user);
+    bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user);
     // inference
-    bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<utils::MR_Result>* _detectRes, int* _user);
+    bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user);
     // get error message
     static std::string getError();
     // clean up data / memory
@@ -32,7 +32,7 @@ protected:
     // post-processing
     int postProcess(const std::vector<cv::Mat>& _imgsBatch);
     // get the final detection result
-    int getDetectResult(std::vector<utils::MR_Result>& _result);
+    int getDetectResult(std::vector<trtUtils::MR_Result>& _result);
 private:

@@ -3,15 +3,15 @@
 #include "MF_Yolov8Infer.h"
-MI_VisionInterface* getInterfacePtr(const utils::InitParameter& _params)
+MI_VisionInterface* getInterfacePtr(const trtUtils::InitParameter& _params)
 {
     switch (_params.m_modelType)
     {
-    case utils::ME_ModelType::E_RESNET34:
+    case trtUtils::ME_ModelType::E_RESNET34:
         return new MF_Resnet34Infer(_params);
-    case utils::ME_ModelType::E_RESNET50:
+    case trtUtils::ME_ModelType::E_RESNET50:
         return new MF_Resnet34Infer(_params);
-    case utils::ME_ModelType::E_YOLOV8:
+    case trtUtils::ME_ModelType::E_YOLOV8:
         return new MF_Yolov8Infer(_params);
     default:
         return nullptr;
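Since this commit only changes the namespace prefix, callers of the exported factory change in the same mechanical way. A minimal usage sketch against the declarations visible in this diff; the header names, the ONNX file path, and the assumption that initEngine() is reachable through MI_VisionInterface are mine, not the commit's:

#include <vector>
#include <opencv2/core.hpp>
#include "MI_VisionInterface.h"     // assumed header for the exported interface/factory

int main()
{
    trtUtils::InitParameter param;                              // other fields of InitParameter are not shown in this diff
    param.m_modelType = trtUtils::ME_ModelType::E_RESNET34;     // selects the MF_Resnet34Infer branch of the switch

    MI_VisionInterface* infer = getInterfacePtr(param);         // returns nullptr for unknown model types
    if (!infer || !infer->initEngine("resnet34.onnx"))          // initEngine() assumed to be exposed via the interface
        return -1;

    std::vector<cv::Mat> imgs;                                  // filled by the caller
    std::vector<trtUtils::MR_Result> results;
    int user = 0;
    infer->doTRTInfer(imgs, &results, &user);

    infer->freeMemeory();                                       // spelling follows the interface declaration
    delete infer;                                               // assumes a virtual destructor on the interface
    return 0;
}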

@@ -14,19 +14,19 @@ public:
     // check the model inputs/outputs
     virtual bool check() = 0;
     // inference
-    virtual bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImg, std::vector<utils::MR_Result>* _detectRes, int* _user) = 0;
+    virtual bool doTRTInfer(const std::vector<MN_VisionImage::MS_ImageParam>& _bufImg, std::vector<trtUtils::MR_Result>* _detectRes, int* _user) = 0;
     // inference
-    virtual bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<utils::MR_Result>* _detectRes, int* _user) = 0;
+    virtual bool doTRTInfer(const std::vector<cv::Mat>& _matImgs, std::vector<trtUtils::MR_Result>* _detectRes, int* _user) = 0;
     // get error message
     virtual std::string getError() = 0;
     // free data / memory
     virtual void freeMemeory() = 0;
     // shaft-diameter measurement interface
     virtual bool measureAxis(std::vector<double>& measureRes, const MN_VisionImage::MS_ImageParam& _bufImg) = 0;
 };
 // exported interface factory
-MI_ALGORITHM_EXPORT MI_VisionInterface* getInterfacePtr(const utils::InitParameter& _params);
+MI_ALGORITHM_EXPORT MI_VisionInterface* getInterfacePtr(const trtUtils::InitParameter& _params);

@@ -11,7 +11,7 @@ bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, in
     return true;
 }
-__device__ void affine_project_device_kernel(utils::AffineMat* matrix, int x, int y, float* proj_x, float* proj_y)
+__device__ void affine_project_device_kernel(trtUtils::AffineMat* matrix, int x, int y, float* proj_x, float* proj_y)
 {
     *proj_x = matrix->v0 * x + matrix->v1 * y + matrix->v2;
     *proj_y = matrix->v3 * x + matrix->v4 * y + matrix->v5;
@@ -19,7 +19,7 @@ __device__ void affine_project_device_kernel(utils::AffineMat* matrix, int x, in
 __global__ void resize_rgb_padding_device_kernel(float* src, int src_width, int src_height, int src_area, int src_volume,
     float* dst, int dst_width, int dst_height, int dst_area, int dst_volume,
-    int batch_size, float padding_value, utils::AffineMat matrix)
+    int batch_size, float padding_value, trtUtils::AffineMat matrix)
 {
     int dx = blockDim.x * blockIdx.x + threadIdx.x;
     int dy = blockDim.y * blockIdx.y + threadIdx.y;
@@ -81,7 +81,7 @@ __global__ void resize_rgb_padding_device_kernel(float* src, int src_width, int
 __global__ void resize_rgb_padding_device_kernel(unsigned char* src, int src_width, int src_height, int src_area, int src_volume,
     float* dst, int dst_width, int dst_height, int dst_area, int dst_volume,
-    int batch_size, float padding_value, utils::AffineMat matrix)
+    int batch_size, float padding_value, trtUtils::AffineMat matrix)
 {
     int dx = blockDim.x * blockIdx.x + threadIdx.x;
     int dy = blockDim.y * blockIdx.y + threadIdx.y;
@@ -143,7 +143,7 @@ __global__ void resize_rgb_padding_device_kernel(unsigned char* src, int src_wid
 }
 __global__ void resize_rgb_without_padding_device_kernel(float* src, int src_width, int src_height, int src_area, int src_volume,
     float* dst, int dst_width, int dst_height, int dst_area, int dst_volume,
-    int batch_size, utils::AffineMat matrix)
+    int batch_size, trtUtils::AffineMat matrix)
 {
     int dx = blockDim.x * blockIdx.x + threadIdx.x;
     int dy = blockDim.y * blockIdx.y + threadIdx.y;
@@ -207,7 +207,7 @@ __global__ void resize_rgb_without_padding_device_kernel(float* src, int src_wid
 __global__ void resize_gray_without_padding_device_kernel(
     float* src, int src_width, int src_height, int src_area,
     float* dst, int dst_width, int dst_height, int dst_area,
-    int batch_size, utils::AffineMat matrix)
+    int batch_size, trtUtils::AffineMat matrix)
 {
     int dx = blockDim.x * blockIdx.x + threadIdx.x;
     int dy = blockDim.y * blockIdx.y + threadIdx.y;
@@ -344,7 +344,7 @@ __global__ void hwc2chw_device_kernel(float* src, float* dst,
 }
 void resizeDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
-    float* dst, int dstWidth, int dstHeight, float paddingValue, utils::AffineMat matrix)
+    float* dst, int dstWidth, int dstHeight, float paddingValue, trtUtils::AffineMat matrix)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((dstWidth * dstHeight + BLOCK_SIZE - 1) / BLOCK_SIZE, (batchSize + BLOCK_SIZE - 1) / BLOCK_SIZE);
@@ -363,7 +363,7 @@ void resizeDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
 }
 void resizeDevice(const int& batchSize, unsigned char* src, int srcWidth, int srcHeight,
-    float* dst, int dstWidth, int dstHeight, float paddingValue, utils::AffineMat matrix)
+    float* dst, int dstWidth, int dstHeight, float paddingValue, trtUtils::AffineMat matrix)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((dstWidth * dstHeight + BLOCK_SIZE - 1) / BLOCK_SIZE,
@@ -380,7 +380,7 @@ void resizeDevice(const int& batchSize, unsigned char* src, int srcWidth, int sr
 }
 void resizeDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
-    float* dst, int dstWidth, int dstHeight, utils::ColorMode mode, utils::AffineMat matrix)
+    float* dst, int dstWidth, int dstHeight, trtUtils::ColorMode mode, trtUtils::AffineMat matrix)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((dstWidth * dstHeight + BLOCK_SIZE - 1) / BLOCK_SIZE, (batchSize + BLOCK_SIZE - 1) / BLOCK_SIZE);
@@ -392,13 +392,13 @@ void resizeDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
     switch (mode)
     {
-    case utils::ColorMode::RGB:
+    case trtUtils::ColorMode::RGB:
         resize_rgb_without_padding_device_kernel << < grid_size, block_size, 0, nullptr >> > (
             src, srcWidth, srcHeight, src_area, src_volume,
             dst, dstWidth, dstHeight, dst_area, dst_volume,
             batchSize, matrix);
         return;
-    case utils::ColorMode::GRAY:
+    case trtUtils::ColorMode::GRAY:
         resize_gray_without_padding_device_kernel << < grid_size, block_size, 0, nullptr >> > (
             src, srcWidth, srcHeight, src_area,
             dst, dstWidth, dstHeight, dst_area, batchSize, matrix);
@@ -423,7 +423,7 @@ void bgr2rgbDevice(const int& batchSize, float* src,
 }
 void normDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
-    float* dst, int dstWidth, int dstHeight, utils::InitParameter param)
+    float* dst, int dstWidth, int dstHeight, trtUtils::InitParameter param)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((dstWidth * dstHeight * 3 + BLOCK_SIZE - 1) / BLOCK_SIZE,
@@ -622,7 +622,7 @@ __global__ void nms_sort_kernel(int topK, int batch_size, float iou_thresh,
     }
 }
-void decodeDevice(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea, float* dst, int dstWidth, int dstHeight)
+void decodeDevice(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea, float* dst, int dstWidth, int dstHeight)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((srcHeight + BLOCK_SIZE - 1) / BLOCK_SIZE,
@@ -636,7 +636,7 @@ void decodeDevice(utils::InitParameter param, float* src, int srcWidth, int srcH
         dst, dstWidth, dstHeight, dstArea);
 }
-void nmsDeviceV1(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea)
+void nmsDeviceV1(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
     dim3 grid_size((param.topK + BLOCK_SIZE - 1) / BLOCK_SIZE,
@@ -647,7 +647,7 @@ void nmsDeviceV1(utils::InitParameter param, float* src, int srcWidth, int srcHe
         src, srcWidth, srcHeight, srcArea);
 }
-void nmsDeviceV2(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea,
+void nmsDeviceV2(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea,
     int* idx, float* conf)
 {
     dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
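Every launch wrapper in this file sizes its grid with the same ceil-division idiom, so the rename leaves the launch math untouched. A worked instance of that arithmetic (BLOCK_SIZE = 32 and the image/batch sizes are assumptions of this sketch; the real value is defined elsewhere in the project):

const int BLOCK_SIZE = 32;                  // assumed value, not shown in this diff
int dstWidth = 224, dstHeight = 224, batchSize = 4;

dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);    // 32 x 32 = 1024 threads per block
dim3 grid_size((dstWidth * dstHeight + BLOCK_SIZE - 1) / BLOCK_SIZE,    // (50176 + 31) / 32 = 1568 blocks along x
               (batchSize + BLOCK_SIZE - 1) / BLOCK_SIZE);              // (4 + 31) / 32 = 1 block along y
// one thread per destination pixel along x, one thread per image of the batch along y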

@@ -11,33 +11,33 @@ bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, in
 // note: resize rgb with padding
 void resizeDevice(const int& batch_size, float* src, int src_width, int src_height,
     float* dst, int dstWidth, int dstHeight,
-    float paddingValue, utils::AffineMat matrix);
+    float paddingValue, trtUtils::AffineMat matrix);
 // overload: resize rgb with padding, but src's type is uint8
 void resizeDevice(const int& batch_size, unsigned char* src, int src_width, int src_height,
     float* dst, int dstWidth, int dstHeight,
-    float paddingValue, utils::AffineMat matrix);
+    float paddingValue, trtUtils::AffineMat matrix);
 // overload: resize rgb/gray without padding
 void resizeDevice(const int& batchSize, float* src, int srcWidth, int srcHeight,
     float* dst, int dstWidth, int dstHeight,
-    utils::ColorMode mode, utils::AffineMat matrix);
+    trtUtils::ColorMode mode, trtUtils::AffineMat matrix);
 void bgr2rgbDevice(const int& batch_size, float* src, int srcWidth, int srcHeight,
     float* dst, int dstWidth, int dstHeight);
 void normDevice(const int& batch_size, float* src, int srcWidth, int srcHeight,
     float* dst, int dstWidth, int dstHeight,
-    utils::InitParameter norm_param);
+    trtUtils::InitParameter norm_param);
 void hwc2chwDevice(const int& batch_size, float* src, int srcWidth, int srcHeight,
     float* dst, int dstWidth, int dstHeight);
-void decodeDevice(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcLength, float* dst, int dstWidth, int dstHeight);
+void decodeDevice(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcLength, float* dst, int dstWidth, int dstHeight);
 // nms fast
-void nmsDeviceV1(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea);
+void nmsDeviceV1(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea);
 // nms sort
-void nmsDeviceV2(utils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea,
+void nmsDeviceV2(trtUtils::InitParameter param, float* src, int srcWidth, int srcHeight, int srcArea,
     int* idx, float* conf);
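Taken together, the declarations above form a device-side preprocessing toolbox. A sketch of one way to chain them for a letterboxed detector input; the header names, buffer sizes, padding value of 114, and the stage order are assumptions of this sketch rather than anything this commit prescribes:

#include <cuda_runtime.h>
#include "kernelFunction.cuh"   // assumed name of the header excerpted above (brings in trtUtils types)

void preprocess(unsigned char* d_srcBGR, int srcW, int srcH, int batch,
                float* d_networkInput, int dstW, int dstH,
                const trtUtils::AffineMat& src2dst, const trtUtils::InitParameter& param)
{
    size_t planeBytes = static_cast<size_t>(batch) * dstW * dstH * 3 * sizeof(float);
    float *d_resized = nullptr, *d_rgb = nullptr, *d_norm = nullptr;
    cudaMalloc(&d_resized, planeBytes);
    cudaMalloc(&d_rgb, planeBytes);
    cudaMalloc(&d_norm, planeBytes);

    resizeDevice(batch, d_srcBGR, srcW, srcH, d_resized, dstW, dstH, 114.0f, src2dst); // uint8 overload, padded resize
    bgr2rgbDevice(batch, d_resized, dstW, dstH, d_rgb, dstW, dstH);                    // BGR -> RGB
    normDevice(batch, d_rgb, dstW, dstH, d_norm, dstW, dstH, param);                   // mean/scale normalization
    hwc2chwDevice(batch, d_norm, dstW, dstH, d_networkInput, dstW, dstH);              // HWC -> CHW for the TensorRT input

    cudaFree(d_resized);
    cudaFree(d_rgb);
    cudaFree(d_norm);
}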