Fix model optimisation

This commit is contained in:
2026-04-09 08:09:02 +10:00
parent 34854d87f4
commit eeb205779a
10 changed files with 688 additions and 160 deletions

View File

@@ -16,19 +16,30 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
int autoDetectEngine,
int modelType,
int detectionType, int loadEngineOnCreation=1);
extern "C" ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase * *Handle, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId);
extern "C" ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId);
extern "C" ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId);
extern "C" ANSODENGINE_API int CreateANSODHandleEx(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
const char* modelFilePath,
const char* modelFileZipPassword,
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
int autoDetectEngine,
int modelType,
int detectionType, std::string& labelMap, int loadEngineOnCreation=1);
ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength);
ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId);
ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId);
ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId);
ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes);
ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon);
ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image);
ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase** Handle, const char* imageFilePath);
ANSODENGINE_API int OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16, std::string& modelFolder);
extern "C" ANSODENGINE_API void RunTiledInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image, int tiledWidth, int titledHeight, double overlap, std::vector<ANSCENTER::Object>& results, const char* cameraId);
extern "C" ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase * *Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes);
extern "C" ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon);
extern "C" ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase * *Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
extern "C" ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase * *Handle,const char* imageFilePath);
extern "C" ANSODENGINE_API std::string OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16);
extern "C" ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image);
extern "C" ANSODENGINE_API void RunDetectMovement(ANSCENTER::ANSODBase** Handle, cv::Mat image, const char* cameraId, std::vector<ANSCENTER::Object>& results);
extern "C" ANSODENGINE_API int ReleaseANSODHandle(ANSCENTER::ANSODBase **Handle);

View File

@@ -1899,6 +1899,42 @@ namespace ANSCENTER {
return results;
}
catch (const std::exception& e) {
const std::string msg = e.what();
// ── DML device-removal detection ──────────────────────────
// HRESULT 887A0005 = DXGI_ERROR_DEVICE_REMOVED ("The GPU
// device instance has been suspended"). Once the D3D12
// device is gone the ORT session is permanently broken.
// Log once, attempt CPU fallback, suppress further flood.
if (msg.find("887A0005") != std::string::npos) {
if (!_dmlDeviceLost) {
_dmlDeviceLost = true;
_logger.LogFatal("ANSONNXYOLO::DetectObjects",
"DirectML GPU device lost (887A0005) — attempting CPU fallback",
__FILE__, __LINE__);
ANS_DBG("ONNXYOLO", "DML device lost — recreating session on CPU");
try {
m_ortEngine.reset();
if (InitOrtEngine(ANSCENTER::EngineType::CPU)) {
_logger.LogInfo("ANSONNXYOLO::DetectObjects",
"CPU fallback session created successfully",
__FILE__, __LINE__);
ANS_DBG("ONNXYOLO", "CPU fallback OK");
} else {
_logger.LogFatal("ANSONNXYOLO::DetectObjects",
"CPU fallback session creation failed",
__FILE__, __LINE__);
}
} catch (const std::exception& re) {
_logger.LogFatal("ANSONNXYOLO::DetectObjects",
std::string("CPU fallback exception: ") + re.what(),
__FILE__, __LINE__);
}
}
// Suppress flood — already logged above
return {};
}
ANS_DBG("ONNXYOLO", "DetectObjects EXCEPTION: %s cam=%s", e.what(), camera_id.c_str());
_logger.LogFatal("ANSONNXYOLO::DetectObjects", e.what(), __FILE__, __LINE__);
return {};
@@ -1939,6 +1975,22 @@ namespace ANSCENTER {
return DetectObjectsBatch(inputs, camera_id);
}
catch (const std::exception& e) {
const std::string msg = e.what();
if (msg.find("887A0005") != std::string::npos) {
if (!_dmlDeviceLost) {
_dmlDeviceLost = true;
_logger.LogFatal("ANSONNXYOLO::RunInferencesBatch",
"DirectML GPU device lost (887A0005) — attempting CPU fallback",
__FILE__, __LINE__);
try {
m_ortEngine.reset();
if (!InitOrtEngine(ANSCENTER::EngineType::CPU))
_logger.LogFatal("ANSONNXYOLO::RunInferencesBatch",
"CPU fallback session creation failed", __FILE__, __LINE__);
} catch (...) {}
}
return {};
}
_logger.LogFatal("ANSONNXYOLO::RunInferencesBatch",
e.what(), __FILE__, __LINE__);
return {};
@@ -1976,6 +2028,22 @@ namespace ANSCENTER {
return batchResults;
}
catch (const std::exception& e) {
const std::string msg = e.what();
if (msg.find("887A0005") != std::string::npos) {
if (!_dmlDeviceLost) {
_dmlDeviceLost = true;
_logger.LogFatal("ANSONNXYOLO::DetectObjectsBatch",
"DirectML GPU device lost (887A0005) — attempting CPU fallback",
__FILE__, __LINE__);
try {
m_ortEngine.reset();
if (!InitOrtEngine(ANSCENTER::EngineType::CPU))
_logger.LogFatal("ANSONNXYOLO::DetectObjectsBatch",
"CPU fallback session creation failed", __FILE__, __LINE__);
} catch (...) {}
}
return {};
}
_logger.LogFatal("ANSONNXYOLO::DetectObjectsBatch",
e.what(), __FILE__, __LINE__);
return {};

View File

@@ -213,6 +213,12 @@ namespace ANSCENTER {
// ONNX Runtime inference engine
std::unique_ptr<ONNXYOLO> m_ortEngine;
// DML device-lost recovery: when DirectML's GPU device is removed
// (HRESULT 887A0005), the session is permanently broken. We detect
// this once, attempt a CPU-fallback recreation, and suppress further
// error-log flooding.
bool _dmlDeviceLost{ false };
// Internal detection pipeline
std::vector<Object> DetectObjects(const cv::Mat& inputImage,
const std::string& camera_id);

View File

@@ -30,6 +30,7 @@ namespace ANSCENTER {
_isInitialized = false; // Reset initialization flag
}
std::string onnxModel = CreateFilePath(_modelFolder, "scrfd.onnx");
_scrfdModelPath = onnxModel;
this->_face_detector = std::make_unique<SCRFD>(onnxModel);
_isInitialized = true;
_movementObjects.clear();
@@ -80,6 +81,7 @@ namespace ANSCENTER {
_face_detector.reset(); // Releases previously allocated memory for face detection
_isInitialized = false; // Reset initialization flag
}
_scrfdModelPath = modelFullName;
this->_face_detector = std::make_unique<SCRFD>(modelFullName);
_isInitialized = true;
return _isInitialized;
@@ -98,6 +100,30 @@ namespace ANSCENTER {
return result;
}
std::vector<Object> ANSOVFD::RunInference(const cv::Mat& input, const std::string& camera_id, bool useDynamicImage, bool validateFace, bool facelivenessCheck) {
// ── DML device-lost recovery (outside mutex) ──────────────
if (_dmlDeviceLost && _face_detector) {
// The DML session is broken — recreate on CPU
try {
auto cpuDetector = std::make_unique<ANSCENTER::SCRFD>(
_scrfdModelPath, ANSCENTER::EngineType::CPU);
{
std::lock_guard<std::mutex> guard(_mtx);
_face_detector = std::move(cpuDetector);
}
_logger.LogInfo("ANSOVFD::RunInference",
"CPU fallback session created successfully",
__FILE__, __LINE__);
} catch (const std::exception& re) {
_logger.LogFatal("ANSOVFD::RunInference",
std::string("CPU fallback exception: ") + re.what(),
__FILE__, __LINE__);
std::lock_guard<std::mutex> guard(_mtx);
_face_detector.reset();
_isInitialized = false;
return {};
}
_dmlDeviceLost = false; // Recovery complete
}
if (facelivenessCheck) {
std::vector<Object> rawFaceResults = Inference(input, camera_id, useDynamicImage, validateFace);
std::vector<Object> facesWithLivenessResults = ValidateLivenessFaces(input, rawFaceResults, camera_id);
@@ -108,6 +134,29 @@ namespace ANSCENTER {
}
}
std::vector<Object> ANSOVFD::RunInference(const cv::Mat& input, bool useDynamicImage, bool validateFace, bool facelivenessCheck) {
// ── DML device-lost recovery (outside mutex) ──────────────
if (_dmlDeviceLost && _face_detector) {
try {
auto cpuDetector = std::make_unique<ANSCENTER::SCRFD>(
_scrfdModelPath, ANSCENTER::EngineType::CPU);
{
std::lock_guard<std::mutex> guard(_mtx);
_face_detector = std::move(cpuDetector);
}
_logger.LogInfo("ANSOVFD::RunInference",
"CPU fallback session created successfully",
__FILE__, __LINE__);
} catch (const std::exception& re) {
_logger.LogFatal("ANSOVFD::RunInference",
std::string("CPU fallback exception: ") + re.what(),
__FILE__, __LINE__);
std::lock_guard<std::mutex> guard(_mtx);
_face_detector.reset();
_isInitialized = false;
return {};
}
_dmlDeviceLost = false;
}
if (facelivenessCheck) {
std::vector<Object> rawFaceResults = Inference(input, "CustomCam", useDynamicImage, validateFace);
std::vector<Object> facesWithLivenessResults = ValidateLivenessFaces(input, rawFaceResults, "CustomCam");
@@ -331,8 +380,21 @@ namespace ANSCENTER {
}
catch (const std::exception& e) {
const std::string msg = e.what();
// DML device-removal detection (see ANSONNXYOLO.cpp for details)
if (msg.find("887A0005") != std::string::npos) {
if (!_dmlDeviceLost) {
_dmlDeviceLost = true;
_logger.LogFatal("ANSOVFD::RunInference",
"DirectML GPU device lost (887A0005) — will attempt CPU fallback on next call",
__FILE__, __LINE__);
}
return {};
}
_logger.LogFatal("ANSOVFD::RunInference",
"Exception: " + std::string(e.what()),
"Exception: " + msg,
__FILE__, __LINE__);
return {};
}

View File

@@ -25,6 +25,10 @@ namespace ANSCENTER {
std::mutex _mtx;
std::string _modelFilePath;
std::unique_ptr<SCRFD> _face_detector = nullptr;
// DML device-lost recovery (see ANSONNXYOLO.h for rationale)
bool _dmlDeviceLost{ false };
std::string _scrfdModelPath; // cached for CPU fallback recreation
std::vector<Object> _movementObjects;
int _retainDetectedFaces{ 0 };
std::vector<Object> Inference(const cv::Mat& input, const std::string& camera_id, bool useDynamicImage = true, bool validateFace=false);

View File

@@ -298,6 +298,13 @@ BOOL APIENTRY DllMain( HMODULE hModule,
return TRUE;
}
// CLASSIFICATION = 0,
// DETECTION = 1,
// SEGMENTATION = 2,
// FACEDETECTOR = 3,
// FACERECOGNIZER = 4,
// LICENSEPLATE = 5,
// TEXTSCENSE = 6
// External APIs
extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
@@ -306,9 +313,10 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
int autoDetectEngine,
int autoDetectEngine,//-1: CPU, 0: GPU; 1 auto detection
int modelType,
int detectionType, int loadEngineOnCreation)
int detectionType,
int loadEngineOnCreation)
{
if (Handle == nullptr) return "";
@@ -345,14 +353,7 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
else modelConfig.autoGPUDetection = false;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
// CLASSIFICATION = 0,
// DETECTION = 1,
// SEGMENTATION = 2,
// FACEDETECTOR = 3,
// FACERECOGNIZER = 4,
// LICENSEPLATE = 5,
// TEXTSCENSE = 6
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
@@ -361,7 +362,8 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
(modelType == 22)|| // TensorRT Pose
(modelType == 24)) // TensorRT Segmentation
{
modelType = 31; // RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
else modelType=30;// ONNXYOLO
}
else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
(modelType == 17)|| // YOLO V12
@@ -376,7 +378,6 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
// do nothing, use the modelType specified by user
}
switch (detectionType) {
case 0:
modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
@@ -615,18 +616,382 @@ extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase**
return labelMap;
}
else {
// Assign GPU via round-robin and check VRAM before initialization
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
// CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
// On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
// return bogus 0-byte VRAM and pollute the log with false warnings.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
RegisterODHandle(*Handle);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
return labelMap;
}
}
// ─────────────────────────────────────────────────────────────────────────────
// CreateANSODHandleEx
//
// Extended handle-creation API. Takes the same configuration inputs as
// CreateANSODHandle, but returns the label map through the std::string&
// out-parameter and reports, via the int return value, the model type that
// was actually instantiated (the requested modelType may be remapped below,
// e.g. a TensorRT code becomes ONNXYOLO on non-NVIDIA hardware).
// Returns -1 only when Handle itself is nullptr.
//
// Parameters:
//   Handle               — out: receives the newly created engine object.
//   licenseKey           — forwarded to (*Handle)->Initialize().
//   modelFilePath        — forwarded to (*Handle)->Initialize().
//   modelFileZipPassword — forwarded to (*Handle)->Initialize().
//   detectionScoreThreshold / modelConfThreshold / modelMNSThreshold
//                        — <= 0 selects built-in defaults (0.5 / 0.5 / 0.45).
//   autoDetectEngine     — -1: force CPU; 1: enable auto GPU detection;
//                          other values: no auto detection.
//   modelType            — public model-type code (see the switch below).
//   detectionType        — 0..7 mapped onto ANSCENTER::DetectionType.
//   labelMap             — out: label map produced by Initialize().
//   loadEngineOnCreation — 1: load the engine immediately on creation.
extern "C" ANSODENGINE_API int CreateANSODHandleEx(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
const char* modelFilePath,
const char* modelFileZipPassword,
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
int autoDetectEngine,
int modelType,
int detectionType,
std::string& labelMap,
int loadEngineOnCreation)
{
if (Handle == nullptr) return -1; // invalid modelType return
// Translate the C-API int flag into the bool the handle interface expects.
bool _loadEngineOnCreation = false;
if (loadEngineOnCreation == 1) {
_loadEngineOnCreation = true;
}
else {
_loadEngineOnCreation = false;
}
labelMap.clear();
ANSCENTER::ModelConfig modelConfig;
// Non-positive thresholds fall back to the built-in defaults.
if (detectionScoreThreshold <= 0)modelConfig.detectionScoreThreshold = 0.5;
else modelConfig.detectionScoreThreshold = detectionScoreThreshold;
if (modelConfThreshold <= 0)modelConfig.modelConfThreshold = 0.5;
else modelConfig.modelConfThreshold = modelConfThreshold;
if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
else modelConfig.modelMNSThreshold = modelMNSThreshold;
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
else modelConfig.autoGPUDetection = false;
// Probe the hardware first; -1 overrides the probe and forces CPU.
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
// TensorRT-family codes only make sense on NVIDIA GPUs; elsewhere they are
// downgraded to the ONNX runtime equivalent (30).
if ((modelType == 4) || // TensorRT
(modelType == 14)|| // TensorRT Yolov10
(modelType == 22)|| // TensorRT Pose
(modelType == 24)) // TensorRT Segmentation
{
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
else modelType=30;// ONNXYOLO
}
else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
(modelType == 17)|| // YOLO V12
(modelType == 20) || // ONNX Classification
(modelType == 21) || // ONNX Pose
(modelType == 23) || // ONNX Segmentation
(modelType == 25)) // OBB Segmentation
{
modelType = 30; // ONNXYOLO
}
else {
// do nothing, use the modelType specified by user
}
// returnModelType will be set after the switch to reflect the actual
// model class that was instantiated (e.g. RTYOLO→ONNXYOLO on AMD).
int returnModelType = modelType;
// Map the public detectionType code (0..7) onto the internal enum;
// anything out of range falls back to DETECTION.
switch (detectionType) {
case 0:
modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
break;
case 1:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
case 2:
modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
break;
case 3:
modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
break;
case 4:
modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
break;
case 5:
modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
break;
case 6:
modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
break;
case 7:
modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
break;
default:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
}
// Instantiate the concrete engine class for the (possibly remapped)
// modelType and record the matching ModelType in the config.
switch (modelType) {
case 0: //TENSORFLOW =0
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
break;
case 1: //YOLOV4 = 1
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
break;
case 2://YOLOV5 = 2
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
break;
case 3: //YOLOV8 = 3,
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
break;
// NOTE(review): codes 4/14/22/24 were already remapped to 31 or 30 above,
// so the TensorRT cases below appear unreachable — kept as defensive code;
// confirm before removing.
case 4: //TENSORRT = 4,
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTCL();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
}
else {// default is detection
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
case 5: //OPENVINO = 5
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
(*Handle) = new ANSCENTER::OPENVINOCL();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
(*Handle) = new ANSCENTER::ANSOVSEG();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
case 6: //FACEDETECT = 6
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
case 10: //ANOMALIB=10
(*Handle) = new ANSCENTER::ANSANOMALIB();
modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
break;
case 11: //OPENPOSE=11
(*Handle) = new ANSCENTER::ANSPOSE();
modelConfig.modelType = ANSCENTER::ModelType::POSE;
break;
case 12: //SAM=12
(*Handle) = new ANSCENTER::ANSSAM();
modelConfig.modelType = ANSCENTER::ModelType::SAM;
break;
case 13: //ODHUBMODEL=13
(*Handle) = new ANSCENTER::ODHUBAPI();
modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
break;
case 14: //TensorRT for Object Detection Yolov10
(*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
break;
case 15: //OpenVino for Object Detection Yolov10
(*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
break;
case 16: //Custom detector
(*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
break;
case 17: //Yolo V12
(*Handle) = new ANSCENTER::YOLO12OD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
break;
case 18: //Custom script model
(*Handle) = new ANSCENTER::ANSCUSTOMPY();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
break;
case 19: //Motion Detector
(*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
break;
case 20: //ONNXCL
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
break;
case 21: //ONNXPOSE
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
break;
case 22: //TENSORRTPOSE
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
}
else {
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
}
break;
case 23: //ONNXSEG
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
break;
case 24: //RTSEG
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
case 25: //ONNXOBB
(*Handle) = new ANSCENTER::ANSONNXOBB();
modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
break;
//case 26: //RTOBB
// (*Handle) = new ANSCENTER::ANSTENSORRTOBB();
// modelConfig.modelType = ANSCENTER::ModelType::RTOBB;
// break;
case 27: //MOVIENET
(*Handle) = new ANSCENTER::ANSMOVIENET();
modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
break;
case 28: //ONNXSAM3
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
break;
case 29: //RTSAM3
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSSAM3();
modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
}
break;
case 30: //ONNXYOLO
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
break;
case 31: //RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSRTYOLO();
modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
}
else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
default:
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
}
// Update returnModelType to reflect the actual class that was created.
// The switch may have fallen back (e.g. RTYOLO→ONNXYOLO on non-NVIDIA).
// NOTE(review): this assumes the ANSCENTER::ModelType enum's underlying
// values match the public modelType codes used above (e.g. ONNXYOLO == 30)
// — confirm against the enum declaration.
returnModelType = static_cast<int>(modelConfig.modelType);
if (*Handle == nullptr) {
labelMap ="";
return returnModelType;
}
else {
// CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
// On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
// return bogus 0-byte VRAM and pollute the log with false warnings.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
RegisterODHandle(*Handle);
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
// NOTE(review): loadResult is ignored — callers only see returnModelType
// and the labelMap out-parameter, so a failed Initialize is not reported.
bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
return returnModelType;
}
}
//// For LabVIEW API
//// For LabVIEW API
// Thin LabVIEW wrapper around CreateANSODHandleEx: creates the engine handle
// and copies the resulting label map into a LabVIEW string handle.
// Returns the instantiated model type, or -1 on any failure.
extern "C" ANSODENGINE_API int CreateANSODHandle_LV(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectorType, int loadEngineOnCreation, LStrHandle labelMap) {
try {
std::string labels;
const int actualModelType = CreateANSODHandleEx(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectorType, labels, loadEngineOnCreation);
// Failure is signalled by a negative model type or a missing handle.
// An empty label map alone is NOT an error — it is legitimate when
// loadEngineOnCreation == 0 or the model ships no class file.
if (actualModelType < 0 || Handle == nullptr || *Handle == nullptr) return -1;
const int byteCount = static_cast<int>(labels.length());
if (byteCount == 0) {
// Empty label map — shrink the LabVIEW handle to an empty string.
MgErr error = DSSetHandleSize(labelMap, sizeof(int32));
if (error == noErr) (*labelMap)->cnt = 0;
return actualModelType;
}
// Resize the LabVIEW string handle and copy the label bytes into it.
MgErr error = DSSetHandleSize(labelMap, sizeof(int32) + byteCount * sizeof(uChar));
if (error != noErr) return -1;
(*labelMap)->cnt = byteCount;
memcpy((*labelMap)->str, labels.c_str(), byteCount);
return actualModelType;
}
catch (...) {
return -1;
}
}
extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase** Handle, const char* licenseKey,
const char* modelName,
@@ -641,7 +1006,8 @@ extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase**
const char* modelFolder,
std::string& labelMap)
{
try {
try
{
if (Handle == nullptr) return 0;
labelMap.clear();
ANSCENTER::ModelConfig modelConfig;
@@ -661,18 +1027,23 @@ extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase**
if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
else modelConfig.modelMNSThreshold = modelMNSThreshold;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
else modelConfig.autoGPUDetection = false;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
if ((modelType == 4) || // TensorRT
(modelType == 14) || // TensorRT Yolov10
(modelType == 22) || // TensorRT Pose
(modelType == 24)) // TensorRT Segmentation
{
modelType = 31; // RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU)modelType = 31; // RTYOLO
else modelType = 30;// ONNXYOLO
}
else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
(modelType == 17) || // YOLO V12
@@ -922,13 +1293,15 @@ extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase**
return -1;
}
else {
// Assign GPU via round-robin and check VRAM before initialization
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
// CUDA round-robin + VRAM check — NVIDIA only (see CreateANSODHandle).
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
RegisterODHandle(*Handle);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
bool result = (*Handle)->LoadModelFromFolder(licenseKey, modelConfig, modelName, className, modelFolder, labelMap);
if (result) return 1;
@@ -940,9 +1313,8 @@ extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase**
}
}
extern "C" ANSODENGINE_API std::string OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16) {
ANSODENGINE_API int OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16, std::string& modelFolder) {
try {
std::string st;
bool optimizedResult = false;
// NOTE: odMutex was removed here. OptimizeModelStr creates its own
// temporary Engine<float> on the stack — no shared state with running
@@ -950,19 +1322,41 @@ extern "C" ANSODENGINE_API std::string OptimizeModelStr(const char* modelFilePat
// result delivery for the entire duration of TRT engine building.
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
if ((modelType == 4) || // TensorRT
(modelType == 14) || // TensorRT Yolov10
(modelType == 22) || // TensorRT Pose
(modelType == 24)) // TensorRT Segmentation
{
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU)modelType = 31; // RTYOLO
else modelType = 30;// ONNXYOLO
}
else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
(modelType == 17) || // YOLO V12
(modelType == 20) || // ONNX Classification
(modelType == 21) || // ONNX Pose
(modelType == 23) || // ONNX Segmentation
(modelType == 25)) // OBB Segmentation
{
modelType = 30; // ONNXYOLO
}
else {
// do nothing, use the modelType specified by user
}
//if ((modelType == 4)||// If modelType is TensorRT (4), handle separately.
// (modelType == 31)) // If modelType is RTYOLO (31), handle separately.
//{
// if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
// if (modelDetectionType == 0) {
// return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, st, 224, 244) ? st : ""; // this is for classification models
// }
// else {
// return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, st, 640, 640) ? st : ""; // standard size for detection models, segmetation and others
// }
// }
//}
if (modelType == 31) // If modelType is RTYOLO (31), handle separately.
{
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
if (modelDetectionType == 0) {
return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 224, 244) ? 1 : 0; // this is for classification models
}
else {
return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 640, 640) ? 1 : 0; // standard size for detection models, segmetation and others
}
}
}
// Create model handle dynamically
std::unique_ptr<ANSCENTER::ANSODBase> Handle;
ANSCENTER::ModelConfig modelConfig;
@@ -971,53 +1365,36 @@ extern "C" ANSODENGINE_API std::string OptimizeModelStr(const char* modelFilePat
case 0: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW; break;
case 1: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV4; break;
case 2: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV5; break;
case 3: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV8; break;
case 5: Handle = std::make_unique<ANSCENTER::OPENVINOOD>(); modelConfig.modelType = ANSCENTER::ModelType::OPENVINO; break;
case 6: Handle = std::make_unique<ANSCENTER::ANSFD>(); modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT; break;
case 10: Handle = std::make_unique<ANSCENTER::ANSANOMALIB>(); modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB; break;
case 11: Handle = std::make_unique<ANSCENTER::ANSPOSE>(); modelConfig.modelType = ANSCENTER::ModelType::POSE; break;
case 12: Handle = std::make_unique<ANSCENTER::ANSSAM>(); modelConfig.modelType = ANSCENTER::ModelType::SAM; break;
case 13: Handle = std::make_unique<ANSCENTER::ODHUBAPI>(); modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL; break;
case 14: Handle = std::make_unique<ANSCENTER::ANSYOLOV10RTOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD; break;
case 15: Handle = std::make_unique<ANSCENTER::ANSOYOLOV10OVOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD; break;
case 16: Handle = std::make_unique<ANSCENTER::ANSCUSTOMDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR; break;
case 17: Handle = std::make_unique<ANSCENTER::YOLO12OD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV12; break;
case 18: Handle = std::make_unique<ANSCENTER::ANSCUSTOMPY>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY; break;
case 19: Handle = std::make_unique<ANSCENTER::ANSMOTIONDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR; break;
case 20: Handle = std::make_unique<ANSCENTER::ANSONNXCL>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXCL; break;
case 21: Handle = std::make_unique<ANSCENTER::ANSONNXPOSE>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE; break;
case 22: Handle = std::make_unique<ANSCENTER::ANSTENSORRTPOSE>(); modelConfig.modelType = ANSCENTER::ModelType::RTPOSE; break;
case 23: Handle = std::make_unique<ANSCENTER::ANSONNXSEG>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG; break;
case 24: {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
Handle = std::make_unique<ANSCENTER::TENSORRTSEG>();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
Handle = std::make_unique<ANSCENTER::ANSONNXSEG>();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
}
case 25: Handle = std::make_unique<ANSCENTER::ANSONNXOBB>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB; break;
case 27: Handle = std::make_unique<ANSCENTER::ANSMOVIENET>(); modelConfig.modelType = ANSCENTER::ModelType::MOVIENET; break;
case 28: Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3; break;
case 29: {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
Handle = std::make_unique<ANSCENTER::ANSSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::RTSAM3; break;
Handle = std::make_unique<ANSCENTER::ANSSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
}
else {
Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3; break;
Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
}
break;
}
case 30: Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO; break;
case 31: {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
Handle = std::make_unique<ANSCENTER::ANSRTYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::RTYOLO; break;
Handle = std::make_unique<ANSCENTER::ANSRTYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
}
else {
Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO; break;
Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
}
default: {
@@ -1076,46 +1453,51 @@ extern "C" ANSODENGINE_API std::string OptimizeModelStr(const char* modelFilePat
break;
}
}
// Bypass pool: OptimizeModelStr only needs a lightweight engine to build/verify
// the engine file. A full pool here wastes ~30s and ~2GB GPU for a temporary
// handle that is destroyed as soon as this function returns.
// maxSlotsPerGpu=0 triggers the EnginePoolManager bypass (no pool, no sharing).
if (Handle) Handle->SetMaxSlotsPerGpu(0);
if (Handle) Handle->SetSkipEngineCache(true); // Don't cache — optimizer creates temporary engines
if (Handle) Handle->SetForceNoPool(true); // No multi-GPU pool (no idle timer threads)
// TensorRT-specific: bypass pool and cache for temporary optimizer engines
if (Handle && engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
Handle->SetMaxSlotsPerGpu(0);
Handle->SetSkipEngineCache(true);
Handle->SetForceNoPool(true);
}
// RAII guard: restore process-wide flags on ALL exit paths (normal return,
// exception, early return). Without this, an exception in LoadModel or
// OptimizeModel permanently leaves the flags set, breaking all subsequent
// engine creation (no pool, no cache) for the lifetime of the process.
// RAII guard for TensorRT process-wide flags.
// Without this, an exception in LoadModel or OptimizeModel permanently
// leaves the flags set, breaking all subsequent engine creation.
struct GlobalFlagGuard {
GlobalFlagGuard() {
g_forceNoPool = true;
TRTEngineCache::globalBypass() = true;
bool active;
GlobalFlagGuard(bool isNvidia) : active(isNvidia) {
if (active) {
g_forceNoPool = true;
TRTEngineCache::globalBypass() = true;
}
}
~GlobalFlagGuard() {
g_forceNoPool = false;
TRTEngineCache::globalBypass() = false;
if (active) {
g_forceNoPool = false;
TRTEngineCache::globalBypass() = false;
}
}
} flagGuard;
} flagGuard(engineType == ANSCENTER::EngineType::NVIDIA_GPU);
// Load and optimize model
if (Handle && Handle->LoadModel(modelFilePath, modelFileZipPassword)) {
optimizedResult = Handle->OptimizeModel(_fp16, st);
optimizedResult = Handle->OptimizeModel(_fp16, modelFolder);
}
Handle.reset(); // Destroy all engines BEFORE guard restores cache
Handle.reset(); // Destroy engines BEFORE guard restores cache
return (optimizedResult && !st.empty()) ? st : "";
if (optimizedResult && !modelFolder.empty()) return 1;
else return 0;
}
catch (const std::exception& e) {
// GlobalFlagGuard destructor runs here — flags are always restored
std::cerr << "OptimizeModelStr Exception: " << e.what() << std::endl;
return "";
return -1;
}
catch (...) {
// GlobalFlagGuard destructor runs here — flags are always restored
std::cerr << "OptimizeModelStr: Unknown exception occurred." << std::endl;
return "";
return -1;
}
}
static int ReleaseANSODHandle_Impl(ANSCENTER::ANSODBase** Handle) {
@@ -1148,7 +1530,7 @@ extern "C" ANSODENGINE_API int ReleaseANSODHandle(ANSCENTER::ANSODBase** Handle)
return 1;
}
}
extern "C" ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
if (jpeg_string == nullptr || bufferLength == 0) return "";
@@ -1170,7 +1552,7 @@ extern "C" ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Hand
}
}
extern "C" ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
if (jpeg_string == nullptr || bufferLength == 0) return "";
@@ -1191,7 +1573,7 @@ extern "C" ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase**
}
}
extern "C" ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId) {
ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
std::vector<ANSCENTER::Object> outputs = (*Handle)->RunInferenceFromJpegString(jpeg_string, jpeg_size, cameraId);
@@ -1204,7 +1586,7 @@ extern "C" ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::AN
}
}
extern "C" ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
std::vector<ANSCENTER::Object> outputs = (*Handle)->RunTiledInferenceFromJpegString(jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
@@ -1218,7 +1600,7 @@ extern "C" ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTE
}
}
extern "C" ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image)
ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image)
{
try {
if (Handle == nullptr || *Handle == nullptr) return "";
@@ -1250,7 +1632,7 @@ extern "C" ANSODENGINE_API void RunTiledInferenceFromCV(ANSCENTER::ANSODBase** H
results.clear();
}
}
extern "C" ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
if (jpeg_string == nullptr || bufferLength == 0) return "";
@@ -1272,7 +1654,7 @@ extern "C" ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTE
}
}
extern "C" ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
if (jpeg_string == nullptr || bufferLength == 0) return "";
@@ -1293,7 +1675,7 @@ extern "C" ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCE
return result;
}
}
extern "C" ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
if (jpeg_bytes == nullptr || width == 0 || height == 0) return "";
@@ -1315,7 +1697,7 @@ extern "C" ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase*
}
}
extern "C" ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
try {
if (Handle == nullptr || *Handle == nullptr) return "";
std::string stImageFileName(imageFilePath);
@@ -1337,27 +1719,7 @@ extern "C" ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBa
}
}
//// For LabVIEW API
extern "C" ANSODENGINE_API int CreateANSODHandle_LV(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectorType, int loadEngineOnCreation, LStrHandle labelMap) {
	// LabVIEW wrapper around CreateANSODHandle: creates the detector handle and
	// copies the returned label-map string into the caller-supplied LabVIEW
	// string handle (LStrHandle: int32 length prefix + raw, non-NUL-terminated bytes).
	// Returns 1 on success; 0 on failure (null output handle, handle creation
	// returned an empty label map, handle resize error, or any exception).
	try {
		// Guard: DSSetHandleSize / (*labelMap)-> below require a valid handle.
		if (labelMap == nullptr) return 0;
		std::string lbMap = CreateANSODHandle(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectorType, loadEngineOnCreation);
		// An empty label map is treated as a creation failure — TODO confirm no
		// valid model legitimately produces an empty label map.
		if (lbMap.empty()) return 0;
		const int size = static_cast<int>(lbMap.length());
		// Grow the LabVIEW handle to hold the length prefix plus the characters.
		const MgErr error = DSSetHandleSize(labelMap, sizeof(int32) + size * sizeof(uChar));
		if (error != noErr) return 0;
		(*labelMap)->cnt = size;
		memcpy((*labelMap)->str, lbMap.c_str(), size); // LStr payload carries no terminating NUL
		return 1;
	}
	catch (...) {
		// Never let a C++ exception cross the LabVIEW boundary.
		return 0;
	}
}
extern "C" ANSODENGINE_API int RunInference_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
try {
std::string st = RunInference(Handle, jpeg_string, bufferLength);
@@ -1517,8 +1879,9 @@ extern "C" ANSODENGINE_API int RunInferenceImagePath_LV(ANSCENTER::ANSODBase**
extern "C" ANSODENGINE_API int OptimizeModel(const char* modelFilePath, const char* modelFileZipPassword,int modelType, int modelDetectionType, int fp16, LStrHandle optimizedModelFolder) {
try {
std::string st=OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16);
if (st.empty()) {
std::string st;
int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, st);
if (ret <= 0 || st.empty()) {
return 0;
}
int size = static_cast<int>(st.length());
@@ -1608,8 +1971,9 @@ extern "C" __declspec(dllexport) const char* OptimizeModelStr_CS(const char* mod
{
try {
static std::string result;
result = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType,fp16);
return result.c_str();
result.clear();
int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, result);
return (ret > 0 && !result.empty()) ? result.c_str() : "";
}
catch (...) {
return "";