// Files
// ANSCORE/modules/ANSODEngine/dllmain.cpp
//
// 2949 lines
// 102 KiB
// C++
//
// dllmain.cpp : Defines the entry point for the DLL application.
#include "pch.h"
#include "ANSODEngine.h"
#include "NV12PreprocessHelper.h" // tl_currentGpuFrame()
#include "ANSGpuFrameRegistry.h" // gpu_frame_lookup(cv::Mat*)
#include "engine/TRTEngineCache.h" // clearAll() on DLL_PROCESS_DETACH
#include "engine/EnginePoolManager.h" // clearAll() on DLL_PROCESS_DETACH
#include <climits> // INT_MIN
#include "ANSLicense.h" // ANS_DBG macro for DebugView
#include "ANSODVendorGate.h" // ansod_vendor_gate::IsNvidiaGpuAvailable()
// Process-wide flag: when true, all engines force single-GPU path (no pool, no idle timers).
// Defined here, declared extern in EngineBuildLoadNetwork.inl.
ANSODENGINE_API std::atomic<bool> g_forceNoPool{false};
// Canonical thread-local GpuFrameData* for NV12 zero-copy fast path.
// Exported so that ALL DLLs (ANSLPR, ANSFR, ANSOCR) share the SAME
// thread_local slot. Without this, each DLL gets its own inline copy
// of tl_currentGpuFrame()'s thread_local — ANSLPR sets its copy, but
// ANSODEngine's tryNV12() reads ANSODEngine's copy (nullptr), silently
// disabling NV12 zero-copy for ALPR, FR, and OCR inference.
// Returns the address of the calling thread's GpuFrameData* slot.
// Exported with C linkage so every consumer DLL resolves this one function —
// and therefore shares this one thread_local — instead of instantiating its
// own inline copy (see the rationale comment above).
extern "C" ANSODENGINE_API GpuFrameData** ANSODEngine_GetTlsGpuFrame() {
    thread_local GpuFrameData* ptr = nullptr; // one slot per thread, initially empty
    return &ptr;
}
#include "ANSYOLOOD.h"
#include "ANSODHUB.h"
#include "ANSCUSTOMPY.h"
#include "ANSTENSORRTOD.h"
#include "ANSTENSORRTCL.h"
#include "ANSOPENVINOCL.h"
#include "ANSOPENVINOOD.h"
#include "ANSYOLOV10RTOD.h"
#include "ANSYOLOV10OVOD.h"
#include "ANSYOLO12OD.h"
#include "ANSONNXCL.h"
#include "ANSONNXPOSE.h"
#include "ANSTENSORRTPOSE.h"
#include "ANSMotionDetector.h"
#include "ANSCUSTOMDetector.h"
#include "ANSONNXSEG.h"
#include "ANSTENSORRTSEG.h"
#include "ANSONNXOBB.h"
#include "ANSOVSEG.h"
#include "ANSFD.h"
#include "ANSANOMALIB.h"
#include "ANSPOSE.h"
#include "ANSSAM.h"
#include "Movienet.h"
#include "ANSSAM3.h"
#include "ANSONNXSAM3.h"
#include "ANSRTYOLO.h"
#include "ANSONNXYOLO.h"
#include <pipelines/metadata.h>
#include <models/input_data.h>
#include "utils/visualizer.hpp"
#include <turbojpeg.h>
#include <vector>
#include <map>
#include <string>
#include <unordered_set>
#include <mutex>
// Handle registry with refcount — prevents use-after-free when
// ReleaseANSODHandle is called while inference is still running.
// refcount: starts at 1 on Register. AcquireODHandle increments,
// ReleaseODHandle decrements. Object is destroyed when refcount hits 0.
#include <unordered_map>
#include <condition_variable>
#include <atomic>
#include <cuda_runtime.h>
// ============================================================================
// Round-Robin GPU Assigner
//
// Each call to CreateANSODHandle assigns the next GPU in round-robin order.
// This distributes tasks evenly across all available GPUs:
// Task 1 → GPU 0, Task 2 → GPU 1, Task 3 → GPU 0, Task 4 → GPU 1, ...
//
// The GPU count is queried once (lazily) and cached. The atomic counter
// ensures thread-safe assignment even when multiple tasks are created
// concurrently.
// ============================================================================
static std::atomic<int> g_gpuRoundRobinCounter{0};
static int g_numGPUs = -1; // -1 = not yet queried
static std::mutex g_gpuCountMutex;
// Lazily query the CUDA device count and cache it in g_numGPUs.
// Thread-safe: the whole query runs under g_gpuCountMutex.
static int GetNumGPUs() {
    std::lock_guard<std::mutex> lk(g_gpuCountMutex);
    if (g_numGPUs >= 0) return g_numGPUs; // already cached

    // Defense-in-depth: all callers (AssignNextGPU, GetPoolMaxSlotsPerGpu,
    // CheckGPUVRAM) are invoked inside factory-level NVIDIA_GPU guards,
    // but skip the CUDA runtime entirely on AMD/Intel/CPU hardware so a
    // future refactor cannot accidentally wake up cudart on non-NVIDIA.
    // See ANSODVendorGate.h.
    if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) {
        g_numGPUs = 1; // single "virtual" slot — round-robin becomes a no-op
        std::cout << "Info [GPU]: non-NVIDIA hardware — CUDA probe skipped, pool slots=1"
            << std::endl;
        return g_numGPUs;
    }

    // Use yield mode before any CUDA call to avoid busy-wait spinning
    // that falsely reports 100% GPU utilization in nvidia-smi.
    cudaSetDeviceFlags(cudaDeviceScheduleYield);
    cudaGetDeviceCount(&g_numGPUs);
    if (g_numGPUs <= 0) g_numGPUs = 1; // fallback to GPU 0
    std::cout << "Info [GPU]: Detected " << g_numGPUs << " CUDA GPU(s) for round-robin assignment" << std::endl;
    return g_numGPUs;
}
// Determine maxSlotsPerGpu based on GPU topology:
// 1 GPU → 1 (single slot, no round-robin needed)
// >1 GPU, VRAM<24GB → 1 (round-robin: 1 slot per GPU)
// >1 GPU, VRAM≥24GB → -1 (elastic: on-demand slot growth)
// Result is cached after the first query.
static int GetPoolMaxSlotsPerGpu() {
static int s_result = INT_MIN;
static std::mutex s_mutex;
std::lock_guard<std::mutex> lk(s_mutex);
if (s_result != INT_MIN) return s_result;
// Short-circuit on non-NVIDIA: no TRT engines will be built, no pool to
// size, and cudaSetDevice/cudaMemGetInfo below should not be reached.
// Safety net — callers today are already inside NVIDIA_GPU guards.
if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) {
s_result = 1;
return s_result;
}
const int n = GetNumGPUs();
if (n <= 1) {
s_result = 1;
std::cout << "Info [GPU]: Single GPU — pool mode: 1 slot, no round-robin" << std::endl;
return s_result;
}
// Multiple GPUs — check VRAM (GPUs are assumed same spec)
constexpr size_t kLargeVramBytes = 24ULL * 1024 * 1024 * 1024; // 24 GB
size_t totalMem = 0, freeMem = 0;
cudaSetDevice(0);
cudaMemGetInfo(&freeMem, &totalMem);
if (totalMem >= kLargeVramBytes) {
s_result = -1;
std::cout << "Info [GPU]: " << n << " GPUs, VRAM >= 24 GB — pool mode: elastic" << std::endl;
} else {
s_result = 1;
std::cout << "Info [GPU]: " << n << " GPUs, VRAM < 24 GB — pool mode: round-robin" << std::endl;
}
return s_result;
}
// Returns the next GPU index in round-robin order.
// Thread-safe: uses atomic fetch_add.
// Returns the next GPU index in round-robin order.
// Thread-safe: uses atomic fetch_add.
static int AssignNextGPU() {
    // Non-NVIDIA short-circuit: no CUDA devices, return 0 and skip the
    // "assigning task" log to avoid polluting AMD/Intel/CPU logs.
    if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) return 0;
    const int numGPUs = GetNumGPUs();
    // BUGFIX: read the ticket as unsigned. std::atomic<int>::fetch_add wraps
    // modulo 2^32, so after ~2^31 assignments the signed value goes negative
    // and `idx % numGPUs` would yield a negative GPU index (later fed to
    // cudaSetDevice). Unsigned modulo can never be negative.
    const unsigned int idx =
        static_cast<unsigned int>(g_gpuRoundRobinCounter.fetch_add(1));
    const int gpuIndex = static_cast<int>(idx % static_cast<unsigned int>(numGPUs));
    std::cout << "Info [GPU]: Assigning task to GPU " << gpuIndex
        << " (task #" << (idx + 1) << ", " << numGPUs << " GPU(s) available)" << std::endl;
    return gpuIndex;
}
// Check if a GPU has enough free VRAM for a new engine.
// Returns true if sufficient, false if not.
// minFreeBytes: minimum free VRAM required (default 512 MiB safety margin).
// Check whether a GPU has enough free VRAM for a new engine.
// Returns true if sufficient, false otherwise.
// minFreeBytes: minimum free VRAM required (default 512 MiB safety margin).
static bool CheckGPUVRAM(int gpuIndex, size_t minFreeBytes = 512ULL * 1024 * 1024) {
    // Non-NVIDIA short-circuit: no CUDA devices present — report "OK"
    // silently so the TRT pool path is a no-op on AMD/Intel/CPU and the
    // log isn't polluted with spurious 0-byte VRAM warnings.
    if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) return true;

    int prevDevice = 0;
    cudaGetDevice(&prevDevice);
    cudaSetDevice(gpuIndex);
    size_t freeBytes = 0, totalBytes = 0;
    cudaMemGetInfo(&freeBytes, &totalBytes);
    // Restore previous device to avoid side-effects on caller's thread
    cudaSetDevice(prevDevice);

    const size_t freeMiB  = freeBytes / (1024 * 1024);
    const size_t totalMiB = totalBytes / (1024 * 1024);
    const bool sufficient = freeBytes >= minFreeBytes;
    if (!sufficient) {
        std::cout << "Warning [GPU]: GPU " << gpuIndex << " has only "
            << freeMiB << " MiB free (need "
            << (minFreeBytes / (1024 * 1024)) << " MiB). Task creation may fail." << std::endl;
    } else {
        std::cout << "Info [GPU]: GPU " << gpuIndex << " has "
            << freeMiB << " MiB free / "
            << totalMiB << " MiB total" << std::endl;
    }
    return sufficient;
}
// Meyers-singleton handle→refcount map. A function-local static avoids
// static-initialization-order issues during DLL attach.
static std::unordered_map<ANSCENTER::ANSODBase*, int>& ODHandleRegistry() {
    static std::unordered_map<ANSCENTER::ANSODBase*, int> s;
    return s;
}
// Mutex guarding every access to ODHandleRegistry() (same singleton pattern).
static std::mutex& ODHandleRegistryMutex() {
    static std::mutex m;
    return m;
}
// Condition variable used by UnregisterODHandle to wait for in-flight
// references to drain; notified by ReleaseODHandleRef when a refcount hits 0.
static std::condition_variable& ODHandleRegistryCV() {
    static std::condition_variable cv;
    return cv;
}
// Insert a freshly created handle into the registry with the initial
// "creation" reference.
static void RegisterODHandle(ANSCENTER::ANSODBase* h) {
    std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    ODHandleRegistry()[h] = 1; // refcount = 1
}
// Acquire a handle for use (increment refcount). Returns the handle
// if valid, nullptr if already released.
// Acquire a handle for use (increment refcount). Returns the handle when it
// is still registered, nullptr when it has already been released.
static ANSCENTER::ANSODBase* AcquireODHandle(ANSCENTER::ANSODBase* h) {
    std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    auto& registry = ODHandleRegistry();
    const auto entry = registry.find(h);
    if (entry == registry.end()) return nullptr; // already unregistered
    ++entry->second;
    return h;
}
// Release a use of the handle (decrement refcount).
// Returns true if this was the last reference (caller should destroy).
// Release one use of the handle (decrement refcount).
// Returns true exactly when this drops the count to zero — the caller is
// then responsible for destroying the object.
static bool ReleaseODHandleRef(ANSCENTER::ANSODBase* h) {
    std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    auto& registry = ODHandleRegistry();
    auto entry = registry.find(h);
    if (entry == registry.end()) return false; // unknown/already gone
    if (--entry->second > 0) return false;     // other users still active
    registry.erase(entry);
    ODHandleRegistryCV().notify_all(); // wake any UnregisterODHandle waiter
    return true; // last reference — caller should destroy
}
// Unregister and wait for all in-flight uses to finish.
// Decrements the creation refcount and blocks until refcount hits 0.
// Returns true if caller should destroy the object.
// Unregister and wait for all in-flight uses to finish.
// Decrements the creation refcount and blocks until refcount hits 0.
// Returns true if caller should destroy the object.
//
// NOTE(review): on timeout the handle is erased and true is returned anyway,
// so the caller will destroy the object even though an inference may still
// hold it — a deliberate "safety net" trade-off per the comment below, but
// a potential use-after-free under a >30s stall. Confirm this is intended.
static bool UnregisterODHandle(ANSCENTER::ANSODBase* h) {
    std::unique_lock<std::mutex> lk(ODHandleRegistryMutex());
    auto it = ODHandleRegistry().find(h);
    if (it == ODHandleRegistry().end()) return false;
    it->second--; // Remove creation ref
    // Wait for in-flight inferences to finish (30s timeout as safety net).
    // The predicate also passes if ReleaseODHandleRef already erased the
    // entry while we were waiting.
    bool ok = ODHandleRegistryCV().wait_for(lk, std::chrono::seconds(30), [&]() {
        auto it2 = ODHandleRegistry().find(h);
        return it2 == ODHandleRegistry().end() || it2->second <= 0;
    });
    if (!ok) {
        OutputDebugStringA("WARNING: UnregisterODHandle timed out waiting for in-flight inference\n");
    }
    // erase(h) is a harmless no-op if the entry was already removed.
    ODHandleRegistry().erase(h);
    return true; // Safe to destroy now
}
// RAII guard — ensures ReleaseODHandleRef is always called, preventing
// refcount leaks that would cause UnregisterODHandle to deadlock.
// RAII guard — guarantees ReleaseODHandleRef runs on every exit path, so a
// thrown exception cannot leak a refcount and deadlock UnregisterODHandle.
class ODHandleGuard {
public:
    explicit ODHandleGuard(ANSCENTER::ANSODBase* e) : engine(e) {}
    // Non-copyable: exactly one release per acquire.
    ODHandleGuard(const ODHandleGuard&) = delete;
    ODHandleGuard& operator=(const ODHandleGuard&) = delete;
    ~ODHandleGuard() {
        if (engine != nullptr) ReleaseODHandleRef(engine);
    }
    ANSCENTER::ANSODBase* get() const { return engine; }
    explicit operator bool() const { return engine != nullptr; }
private:
    ANSCENTER::ANSODBase* engine;
};
// DLL entry point. Keeps work to an absolute minimum because the OS loader
// lock is held for the attach/detach notifications (see inline comments).
BOOL APIENTRY DllMain( HMODULE hModule,
    DWORD ul_reason_for_call,
    LPVOID lpReserved
)
{
    switch (ul_reason_for_call)
    {
    case DLL_PROCESS_ATTACH:
        // Pin the DLL so it is never unmapped while idle-timer or CUDA threads
        // are still running. During LabVIEW shutdown the CLR/COM teardown can
        // unload DLLs before all threads exit → crash at unmapped code.
        //
        // CRITICAL: do NOT call CheckHardwareInformation() or
        // ansod_vendor_gate::IsNvidiaGpuAvailable() from here. DllMain holds
        // the OS loader lock (LdrpLoaderLock). CheckHardwareInformation
        // touches hwinfo → DXGI / WMI / COM, which internally call
        // LoadLibrary; doing that while holding the loader lock causes a
        // classic loader-lock deadlock (observed as a full hang of the
        // ANSLPR-UnitTest stress test). The vendor gate will lazy-
        // initialise on the first real call from worker code, which runs
        // with the loader lock released.
        {
            HMODULE hSelf = nullptr;
            // GET_MODULE_HANDLE_EX_FLAG_PIN keeps this module mapped for the
            // lifetime of the process; the address of DllMain identifies it.
            GetModuleHandleExW(
                GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
                GET_MODULE_HANDLE_EX_FLAG_PIN,
                reinterpret_cast<LPCWSTR>(&DllMain),
                &hSelf);
        }
        break;
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
        break;
    case DLL_PROCESS_DETACH:
        // When lpReserved != NULL, the process is terminating via ExitProcess.
        // The OS has already killed all worker threads (idle timers, CUDA
        // threads, etc.). Calling ~Engine() → stopIdleTimer() → thread::join()
        // on a dead thread causes undefined behavior → std::terminate → abort.
        // Set the global flag so atexit destructors (which still run after
        // DllMain returns) skip thread joins and CUDA/TRT cleanup.
        if (lpReserved != nullptr) {
            g_processExiting().store(true, std::memory_order_relaxed);
            break;
        }
        // Dynamic FreeLibrary — threads are still alive, safe to clean up.
        // Without this, idle-timer threads keep the process alive indefinitely.
        try {
            // Snapshot leaked handles under the registry lock, then destroy
            // them after the lock is released so Destroy() cannot re-enter
            // the registry mutex.
            std::vector<ANSCENTER::ANSODBase*> leakedHandles;
            {
                std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
                for (auto& [h, _] : ODHandleRegistry())
                    leakedHandles.push_back(h);
                ODHandleRegistry().clear();
            }
            for (auto* h : leakedHandles) {
                try { h->Destroy(); delete h; } catch (...) {}
            }
            // Tear down shared engine pools/caches last; each step is
            // individually best-effort.
            try { EnginePoolManager<float>::instance().clearAll(); } catch (...) {}
            try { TRTEngineCache::instance().clearAll(); } catch (...) {}
        } catch (...) {}
        break;
    }
    return TRUE;
}
// CLASSIFICATION = 0,
// DETECTION = 1,
// SEGMENTATION = 2,
// FACEDETECTOR = 3,
// FACERECOGNIZER = 4,
// LICENSEPLATE = 5,
// TEXTSCENSE = 6
// External APIs
// Factory: creates the OD engine matching (modelType, detectionType) and the
// detected hardware, registers it, initializes it, and returns the model's
// label map (empty string on failure or invalid arguments).
//
// Parameters:
//   Handle                 — out: receives the newly created engine. Any
//                            existing *Handle is intentionally NOT destroyed
//                            (see note in the body).
//   licenseKey             — license string forwarded to Initialize().
//   modelFilePath          — path to the (zipped) model package.
//   modelFileZipPassword   — password for the model package.
//   detectionScoreThreshold/modelConfThreshold/modelMNSThreshold
//                          — thresholds; non-positive values fall back to
//                            defaults 0.5 / 0.5 / 0.45.
//   autoDetectEngine       — -1: force CPU, 1: auto GPU detection, else manual.
//   modelType              — numeric model selector (see switch); the TensorRT
//                            and common ONNX families are rewritten to the
//                            unified RTYOLO (31) / ONNXYOLO (30) classes.
//   detectionType          — 0..7 mapped to DetectionType; unknown → DETECTION.
//   loadEngineOnCreation   — 1: build/load the engine immediately.
extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase** Handle,
    const char* licenseKey,
    const char* modelFilePath,
    const char* modelFileZipPassword,
    float detectionScoreThreshold,
    float modelConfThreshold,
    float modelMNSThreshold,
    int autoDetectEngine,//-1: CPU, 0: GPU; 1 auto detection
    int modelType,
    int detectionType,
    int loadEngineOnCreation)
{
    if (Handle == nullptr) return "";
    // NOTE: We intentionally do NOT destroy any existing *Handle here.
    // LabVIEW reuses DLL parameter buffer addresses, so *Handle may point
    // to ANOTHER task's live engine — not this task's old engine.
    // The caller must use ReleaseANSODHandle() explicitly to clean up.
    // (See ansod_debug.log analysis: Handle** addresses are recycled by
    // LabVIEW across different tasks, causing the old cleanup code to
    // destroy active engines belonging to other tasks.)
    std::string labelMap;
    const bool _loadEngineOnCreation = (loadEngineOnCreation == 1);
    // Thresholds: non-positive inputs fall back to sane defaults.
    ANSCENTER::ModelConfig modelConfig;
    modelConfig.detectionScoreThreshold = (detectionScoreThreshold <= 0) ? 0.5 : detectionScoreThreshold;
    modelConfig.modelConfThreshold = (modelConfThreshold <= 0) ? 0.5 : modelConfThreshold;
    modelConfig.modelMNSThreshold = (modelMNSThreshold <= 0) ? 0.45 : modelMNSThreshold;
    modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
    modelConfig.autoGPUDetection = (autoDetectEngine == 1);
    ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
    if (autoDetectEngine == -1) engineType = ANSCENTER::EngineType::CPU;// We force to use CPU
    //Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
    if ((modelType == 4) || // TensorRT
        (modelType == 14) || // TensorRT Yolov10
        (modelType == 22) || // TensorRT Pose
        (modelType == 24)) // TensorRT Segmentation
    {
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
        else modelType = 30;// ONNXYOLO
    }
    else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
        (modelType == 17) || // YOLO V12
        (modelType == 20) || // ONNX Classification
        (modelType == 21) || // ONNX Pose
        (modelType == 23) || // ONNX Segmentation
        (modelType == 25)) // OBB Segmentation
    {
        modelType = 30; // ONNXYOLO
    }
    else {
        // do nothing, use the modelType specified by user
    }
    // Map the numeric detectionType onto the DetectionType enum.
    switch (detectionType) {
    case 0:
        modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
        break;
    case 1:
        modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
        break;
    case 2:
        modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
        break;
    case 3:
        modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
        break;
    case 4:
        modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
        break;
    case 5:
        modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
        break;
    case 6:
        modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
        break;
    case 7:
        modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
        break;
    default:
        modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
        break;
    }
    // Instantiate the concrete engine class. TensorRT-only classes are
    // guarded by the NVIDIA_GPU check and fall back to ONNX equivalents.
    switch (modelType) {
    case 0: //TENSORFLOW =0
        (*Handle) = new ANSCENTER::YOLOOD();
        modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
        break;
    case 1: //YOLOV4 = 1
        (*Handle) = new ANSCENTER::YOLOOD();
        modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
        break;
    case 2://YOLOV5 = 2
        (*Handle) = new ANSCENTER::YOLOOD();
        modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
        break;
    case 3: //YOLOV8 = 3,
        (*Handle) = new ANSCENTER::YOLOOD();
        modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
        break;
    case 4: //TENSORRT = 4 — unreachable in practice (rewritten to 30/31 above);
            // kept as defense-in-depth.
        if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::TENSORRTCL();
                modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXCL();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
            }
            break;
        }
        else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::TENSORRTOD();
                modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
            }
            else {
                (*Handle) = new ANSCENTER::YOLOOD();
                modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
            }
            break;
        }
        else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::TENSORRTSEG();
                modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXSEG();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
            }
            break;
        }
        else {// default is detection
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::TENSORRTOD();
                modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
            }
            else {
                (*Handle) = new ANSCENTER::YOLOOD();
                modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
            }
            break;
        }
    case 5: //OPENVINO = 5
        if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
            (*Handle) = new ANSCENTER::OPENVINOCL();
            modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
            break;
        }
        else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
            (*Handle) = new ANSCENTER::OPENVINOOD();
            modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
            break;
        }
        else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
            (*Handle) = new ANSCENTER::ANSOVSEG();
            modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
            break;
        }
        else {
            (*Handle) = new ANSCENTER::OPENVINOOD();
            modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
            break;
        }
    case 6: //FACEDETECT = 6
        (*Handle) = new ANSCENTER::ANSFD();
        modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
        break;
    case 10: //ANOMALIB=10
        (*Handle) = new ANSCENTER::ANSANOMALIB();
        modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
        break;
    case 11: //OPENPOSE=11
        (*Handle) = new ANSCENTER::ANSPOSE();
        modelConfig.modelType = ANSCENTER::ModelType::POSE;
        break;
    case 12: //SAM=12
        (*Handle) = new ANSCENTER::ANSSAM();
        modelConfig.modelType = ANSCENTER::ModelType::SAM;
        break;
    case 13: //ODHUBMODEL=13
        (*Handle) = new ANSCENTER::ODHUBAPI();
        modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
        break;
    case 14: //TensorRT for Object Detection Yolov10
        // Upstream modelType rewrite (see top of each factory) already
        // redirects 14 → 31 (RTYOLO) on NVIDIA or 14 → 30 (ONNXYOLO) on
        // non-NVIDIA, so this branch is unreachable in practice. Keep
        // an explicit vendor gate as defense-in-depth against future
        // refactors — ANSYOLOV10RTOD is a TensorRT class and must never
        // be constructed on AMD/Intel/CPU hardware.
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            (*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
        } else {
            (*Handle) = new ANSCENTER::ANSONNXYOLO();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
        }
        break;
    case 15: //OpenVino for Object Detection Yolov10
        (*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
        modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
        break;
    case 16: //Custom detector
        (*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
        modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
        break;
    case 17: //Yolo V12
        (*Handle) = new ANSCENTER::YOLO12OD();
        modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
        break;
    case 18: //Custom script model
        (*Handle) = new ANSCENTER::ANSCUSTOMPY();
        modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
        break;
    case 19: //Motion Detector
        (*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
        modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
        break;
    case 20: //ONNXCL
        (*Handle) = new ANSCENTER::ANSONNXCL();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
        break;
    case 21: //ONNXPOSE
        (*Handle) = new ANSCENTER::ANSONNXPOSE();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
        break;
    case 22: //TENSORRTPOSE
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            (*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
            modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
        }
        else {
            (*Handle) = new ANSCENTER::ANSONNXPOSE();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
        }
        break;
    case 23: //ONNXSEG
        (*Handle) = new ANSCENTER::ANSONNXSEG();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
        break;
    case 24: //RTSEG
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            (*Handle) = new ANSCENTER::TENSORRTSEG();
            modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
        }
        else {
            (*Handle) = new ANSCENTER::ANSONNXSEG();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
        }
        break;
    case 25: //ONNXOBB
        (*Handle) = new ANSCENTER::ANSONNXOBB();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
        break;
    //case 26: //RTOBB
    //    (*Handle) = new ANSCENTER::ANSTENSORRTOBB();
    //    modelConfig.modelType = ANSCENTER::ModelType::RTOBB;
    //    break;
    case 27: //MOVIENET
        (*Handle) = new ANSCENTER::ANSMOVIENET();
        modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
        break;
    case 28: //ONNXSAM3
        (*Handle) = new ANSCENTER::ANSONNXSAM3();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
        break;
    case 29: //RTSAM3
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            (*Handle) = new ANSCENTER::ANSSAM3();
            modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
        }
        else {
            (*Handle) = new ANSCENTER::ANSONNXSAM3();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
        }
        break;
    case 30: //ONNXYOLO
        (*Handle) = new ANSCENTER::ANSONNXYOLO();
        modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
        break;
    case 31: //RTYOLO
        if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            (*Handle) = new ANSCENTER::ANSRTYOLO();
            modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
        }
        else {
            (*Handle) = new ANSCENTER::ANSONNXYOLO();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
        }
        break;
    default:
        (*Handle) = new ANSCENTER::ANSFD();
        modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
        break;
    }
    if (*Handle == nullptr) {
        return labelMap; // defensive — new throws rather than returning null
    }
    // CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
    // On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
    // return bogus 0-byte VRAM and pollute the log with false warnings.
    if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
        const int assignedGPU = AssignNextGPU();
        modelConfig.gpuDeviceIndex = assignedGPU;
        CheckGPUVRAM(assignedGPU);
        (*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
    }
    RegisterODHandle(*Handle);
    (*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
    const bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
    if (!loadResult) {
        // BUGFIX: Initialize()'s result was previously computed and silently
        // discarded, making bad model paths/passwords undiagnosable. Surface
        // the failure; the (possibly empty) labelMap is still returned and
        // the caller still owns the handle via ReleaseANSODHandle().
        std::cout << "Warning [CreateANSODHandle]: engine Initialize() failed — labelMap may be empty" << std::endl;
    }
    return labelMap;
}
extern "C" ANSODENGINE_API int CreateANSODHandleEx(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
const char* modelFilePath,
const char* modelFileZipPassword,
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
int autoDetectEngine,
int modelType,
int detectionType,
std::string& labelMap,
int loadEngineOnCreation)
{
if (Handle == nullptr) return -1; // invalid modelType return
bool _loadEngineOnCreation = false;
if (loadEngineOnCreation == 1) {
_loadEngineOnCreation = true;
}
else {
_loadEngineOnCreation = false;
}
labelMap.clear();
ANSCENTER::ModelConfig modelConfig;
if (detectionScoreThreshold <= 0)modelConfig.detectionScoreThreshold = 0.5;
else modelConfig.detectionScoreThreshold = detectionScoreThreshold;
if (modelConfThreshold <= 0)modelConfig.modelConfThreshold = 0.5;
else modelConfig.modelConfThreshold = modelConfThreshold;
if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
else modelConfig.modelMNSThreshold = modelMNSThreshold;
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
else modelConfig.autoGPUDetection = false;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
if ((modelType == 4) || // TensorRT
(modelType == 14)|| // TensorRT Yolov10
(modelType == 22)|| // TensorRT Pose
(modelType == 24)) // TensorRT Segmentation
{
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
else modelType=30;// ONNXYOLO
}
else if ((modelType == 3) || // YoloV8/YoloV11 (Object Detection)
(modelType == 17)|| // YOLO V12
(modelType == 20) || // ONNX Classification
(modelType == 21) || // ONNX Pose
(modelType == 23) || // ONNX Segmentation
(modelType == 25)) // OBB Segmentation
{
modelType = 30; // ONNXYOLO
}
else {
// do nothing, use the modelType specified by user
}
// returnModelType will be set after the switch to reflect the actual
// model class that was instantiated (e.g. RTYOLO→ONNXYOLO on AMD).
int returnModelType = modelType;
switch (detectionType) {
case 0:
modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
break;
case 1:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
case 2:
modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
break;
case 3:
modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
break;
case 4:
modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
break;
case 5:
modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
break;
case 6:
modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
break;
case 7:
modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
break;
default:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
}
switch (modelType) {
case 0: //TENSORFLOW =0
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
break;
case 1: //YOLOV4 = 1
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
break;
case 2://YOLOV5 = 2
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
break;
case 3: //YOLOV8 = 3,
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
break;
case 4: //TENSORRT = 4,
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTCL();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
}
else {// default is detection
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
case 5: //OPENVINO = 5
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
(*Handle) = new ANSCENTER::OPENVINOCL();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
(*Handle) = new ANSCENTER::ANSOVSEG();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
case 6: //FACEDETECT = 6
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
case 10: //ANOMALIB=10
(*Handle) = new ANSCENTER::ANSANOMALIB();
modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
break;
case 11: //OPENPOSE=11
(*Handle) = new ANSCENTER::ANSPOSE();
modelConfig.modelType = ANSCENTER::ModelType::POSE;
break;
case 12: //SAM=12
(*Handle) = new ANSCENTER::ANSSAM();
modelConfig.modelType = ANSCENTER::ModelType::SAM;
break;
case 13: //ODHUBMODEL=13
(*Handle) = new ANSCENTER::ODHUBAPI();
modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
break;
case 14: //TensorRT for Object Detection Yolov10
// Upstream modelType rewrite (see top of each factory) already
// redirects 14 → 31 (RTYOLO) on NVIDIA or 14 → 30 (ONNXYOLO) on
// non-NVIDIA, so this branch is unreachable in practice. Keep
// an explicit vendor gate as defense-in-depth against future
// refactors — ANSYOLOV10RTOD is a TensorRT class and must never
// be constructed on AMD/Intel/CPU hardware.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
} else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
case 15: //OpenVino for Object Detection Yolov10
(*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
break;
case 16: //Custom detector
(*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
break;
case 17: //Yolo V12
(*Handle) = new ANSCENTER::YOLO12OD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
break;
case 18: //Custom script model
(*Handle) = new ANSCENTER::ANSCUSTOMPY();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
break;
case 19: //Motion Detector
(*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
break;
case 20: //ONNXCL
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
break;
case 21: //ONNXPOSE
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
break;
case 22: //TENSORRTPOSE
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
}
else {
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
}
break;
case 23: //ONNXSEG
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
break;
case 24: //RTSEG
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
case 25: //ONNXOBB
(*Handle) = new ANSCENTER::ANSONNXOBB();
modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
break;
//case 26: //RTOBB
// (*Handle) = new ANSCENTER::ANSTENSORRTOBB();
// modelConfig.modelType = ANSCENTER::ModelType::RTOBB;
// break;
case 27: //MOVIENET
(*Handle) = new ANSCENTER::ANSMOVIENET();
modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
break;
case 28: //ONNXSAM3
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
break;
case 29: //RTSAM3
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSSAM3();
modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
}
break;
case 30: //ONNXYOLO
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
break;
case 31: //RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSRTYOLO();
modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
}
else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
default:
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
}
// Update returnModelType to reflect the actual class that was created.
// The switch may have fallen back (e.g. RTYOLO→ONNXYOLO on non-NVIDIA).
returnModelType = static_cast<int>(modelConfig.modelType);
if (*Handle == nullptr) {
labelMap ="";
return returnModelType;
}
else {
// CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
// On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
// return bogus 0-byte VRAM and pollute the log with false warnings.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
RegisterODHandle(*Handle);
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
return returnModelType;
}
}
//// For LabVIEW API
//// For LabVIEW API
// Creates an engine handle and marshals the label map into a LabVIEW string.
// Returns the created model type (>= 0) on success, -1 on any failure.
extern "C" ANSODENGINE_API int CreateANSODHandle_LV(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectorType, int loadEngineOnCreation, LStrHandle labelMap) {
	try {
		std::string lbMap;
		int returnModelType = CreateANSODHandleEx(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectorType, lbMap, loadEngineOnCreation);
		// CreateANSODHandleEx returns -1 only when Handle is nullptr.
		// Check that instead of lbMap.empty() — labelMap can be legitimately
		// empty when loadEngineOnCreation==0 or the model has no class file.
		if (returnModelType < 0 || Handle == nullptr || *Handle == nullptr) return -1;
		const int size = static_cast<int>(lbMap.length());
		if (size > 0) {
			MgErr error = DSSetHandleSize(labelMap, sizeof(int32) + size * sizeof(uChar));
			if (error != noErr) return -1;
			(*labelMap)->cnt = size;
			memcpy((*labelMap)->str, lbMap.c_str(), size);
		}
		else {
			// Empty label map — set LabVIEW string to empty.
			MgErr error = DSSetHandleSize(labelMap, sizeof(int32));
			// FIX: a resize failure was previously swallowed here, leaving the
			// LabVIEW handle in an undefined state. Report it like the
			// non-empty path does.
			if (error != noErr) return -1;
			(*labelMap)->cnt = 0;
		}
		return returnModelType;
	}
	catch (...) {
		return -1;
	}
}
// Factory + loader: constructs the detector class matching (modelType,
// detectionType, hardware vendor) and loads its weights from modelFolder.
// Returns 1 on successful load, 0 on load failure or bad arguments, -1 if
// the factory could not create a handle.
//
// FIXES vs previous revision:
//  * case 4 / SEGMENTATION on non-NVIDIA hardware left *Handle unassigned —
//    the indeterminate pointer then flowed into RegisterODHandle() and the
//    virtual LoadModelFromFolder() call. It now falls back to ANSONNXSEG,
//    matching case 24.
//  * case 29 on NVIDIA constructed ANSSAM3 but tagged it ONNXSAM3; it is now
//    tagged RTSAM3, consistent with the other factories.
extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase** Handle, const char* licenseKey,
	const char* modelName,
	const char* className,
	float detectionScoreThreshold,
	float modelConfThreshold,
	float modelMNSThreshold,
	int autoDetectEngine,
	int modelType,
	int detectionType,
	int loadEngineOnCreation,
	const char* modelFolder,
	std::string& labelMap)
{
	try
	{
		if (Handle == nullptr) return 0;
		labelMap.clear();
		ANSCENTER::ModelConfig modelConfig;
		const bool _loadEngineOnCreation = (loadEngineOnCreation == 1);
		// Clamp thresholds to sane defaults when the caller passes <= 0.
		modelConfig.detectionScoreThreshold = (detectionScoreThreshold <= 0) ? 0.5f : detectionScoreThreshold;
		modelConfig.modelConfThreshold = (modelConfThreshold <= 0) ? 0.5f : modelConfThreshold;
		modelConfig.modelMNSThreshold = (modelMNSThreshold <= 0) ? 0.45f : modelMNSThreshold;
		ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
		modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
		modelConfig.autoGPUDetection = (autoDetectEngine == 1);
		if (autoDetectEngine == -1) engineType = ANSCENTER::EngineType::CPU; // We force to use CPU
		//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
		if ((modelType == 4) ||  // TensorRT
			(modelType == 14) || // TensorRT Yolov10
			(modelType == 22) || // TensorRT Pose
			(modelType == 24))   // TensorRT Segmentation
		{
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
			else modelType = 30; // ONNXYOLO
		}
		else if ((modelType == 3) ||  // YoloV8/YoloV11 (Object Detection)
			(modelType == 17) || // YOLO V12
			(modelType == 20) || // ONNX Classification
			(modelType == 21) || // ONNX Pose
			(modelType == 23) || // ONNX Segmentation
			(modelType == 25))   // OBB Segmentation
		{
			modelType = 30; // ONNXYOLO
		}
		else {
			// do nothing, use the modelType specified by user
		}
		// NOTE: We intentionally do NOT destroy any existing *Handle here.
		// LabVIEW reuses DLL parameter buffer addresses, so *Handle may point
		// to ANOTHER task's live engine — not this task's old engine.
		// The caller must use ReleaseANSODHandle() explicitly to clean up.
		switch (detectionType) {
		case 0:
			modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
			break;
		case 1:
			modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
			break;
		case 2:
			modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
			break;
		case 3:
			modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
			break;
		case 4:
			modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
			break;
		case 5:
			modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
			break;
		case 6:
			modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
			break;
		case 7:
			modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
			break;
		default:
			modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
			break;
		}
		switch (modelType) {
		case 0: //TENSORFLOW =0
			(*Handle) = new ANSCENTER::YOLOOD();
			modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
			break;
		case 1: //YOLOV4 = 1
			(*Handle) = new ANSCENTER::YOLOOD();
			modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
			break;
		case 2: //YOLOV5 = 2
			(*Handle) = new ANSCENTER::YOLOOD();
			modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
			break;
		case 3: //YOLOV8 = 3
			(*Handle) = new ANSCENTER::YOLOOD();
			modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
			break;
		case 4: //TENSORRT = 4 (unreachable in practice — rewritten to 30/31 above)
			if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					(*Handle) = new ANSCENTER::TENSORRTCL();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					(*Handle) = new ANSCENTER::ANSONNXCL();
					modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
				}
			}
			else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					(*Handle) = new ANSCENTER::TENSORRTOD();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					(*Handle) = new ANSCENTER::YOLOOD();
					modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
				}
			}
			else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					(*Handle) = new ANSCENTER::TENSORRTSEG();
					modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
				}
				else {
					// FIX: this branch previously assigned nothing, leaving
					// *Handle indeterminate. Fall back to ONNX segmentation,
					// same as case 24.
					(*Handle) = new ANSCENTER::ANSONNXSEG();
					modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
				}
			}
			else { // default is detection
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					(*Handle) = new ANSCENTER::TENSORRTOD();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					(*Handle) = new ANSCENTER::YOLOOD();
					modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
				}
			}
			break;
		case 5: //OPENVINO = 5
			if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
				(*Handle) = new ANSCENTER::OPENVINOCL();
			}
			else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
				(*Handle) = new ANSCENTER::OPENVINOOD();
			}
			else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {
				(*Handle) = new ANSCENTER::ANSOVSEG();
			}
			else {
				(*Handle) = new ANSCENTER::OPENVINOOD();
			}
			modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
			break;
		case 6: //FACEDETECT = 6
			(*Handle) = new ANSCENTER::ANSFD();
			modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
			break;
		case 10: //ANOMALIB=10
			(*Handle) = new ANSCENTER::ANSANOMALIB();
			modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
			break;
		case 11: //OPENPOSE=11
			(*Handle) = new ANSCENTER::ANSPOSE();
			modelConfig.modelType = ANSCENTER::ModelType::POSE;
			break;
		case 12: //SAM=12
			(*Handle) = new ANSCENTER::ANSSAM();
			modelConfig.modelType = ANSCENTER::ModelType::SAM;
			break;
		case 13: //ODHUBMODEL=13
			(*Handle) = new ANSCENTER::ODHUBAPI();
			modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
			break;
		case 14: //TensorRT for Object Detection Yolov10
			// Upstream modelType rewrite already redirects 14 → 31 (RTYOLO)
			// on NVIDIA or 14 → 30 (ONNXYOLO) on non-NVIDIA, so this branch
			// is unreachable in practice. Keep an explicit vendor gate as
			// defense-in-depth — ANSYOLOV10RTOD is a TensorRT class and must
			// never be constructed on AMD/Intel/CPU hardware.
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				(*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
				modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
			}
			else {
				(*Handle) = new ANSCENTER::ANSONNXYOLO();
				modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
			}
			break;
		case 15: //OpenVino for Object Detection Yolov10
			(*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
			modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
			break;
		case 16: //Custom detector
			(*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
			modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
			break;
		case 17: //Yolo V12
			(*Handle) = new ANSCENTER::YOLO12OD();
			modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
			break;
		case 18: //Custom Python
			(*Handle) = new ANSCENTER::ANSCUSTOMPY();
			modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
			break;
		case 19: //Motion Detector
			(*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
			modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
			break;
		case 20: //ANSONNXCL
			(*Handle) = new ANSCENTER::ANSONNXCL();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
			break;
		case 21: //ANSONNXPOSE
			(*Handle) = new ANSCENTER::ANSONNXPOSE();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
			break;
		case 22: //ANSTENSORRTPOSE (falls back to ONNX pose off-NVIDIA)
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				(*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
				modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
			}
			else {
				(*Handle) = new ANSCENTER::ANSONNXPOSE();
				modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
			}
			break;
		case 23: //ONNXSEG
			(*Handle) = new ANSCENTER::ANSONNXSEG();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
			break;
		case 24: //RTSEG (falls back to ONNX segmentation off-NVIDIA)
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				(*Handle) = new ANSCENTER::TENSORRTSEG();
				modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
			}
			else {
				(*Handle) = new ANSCENTER::ANSONNXSEG();
				modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
			}
			break;
		case 25: //ONNXOBB
			(*Handle) = new ANSCENTER::ANSONNXOBB();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
			break;
		case 27: //MOVIENET
			(*Handle) = new ANSCENTER::ANSMOVIENET();
			modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
			break;
		case 28: //ANSONNXSAM3
			(*Handle) = new ANSCENTER::ANSONNXSAM3();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
			break;
		case 29: //ANSSAM3 (falls back to ONNX SAM3 off-NVIDIA)
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				(*Handle) = new ANSCENTER::ANSSAM3();
				// FIX: was mis-tagged ONNXSAM3; the TensorRT class is RTSAM3
				// (consistent with the other handle factories).
				modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
			}
			else {
				(*Handle) = new ANSCENTER::ANSONNXSAM3();
				modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
			}
			break;
		case 30: //ONNXYOLO
			(*Handle) = new ANSCENTER::ANSONNXYOLO();
			modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
			break;
		case 31: //RTYOLO (falls back to ONNX YOLO off-NVIDIA)
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				(*Handle) = new ANSCENTER::ANSRTYOLO();
				modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
			}
			else {
				(*Handle) = new ANSCENTER::ANSONNXYOLO();
				modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
			}
			break;
		default:
			(*Handle) = new ANSCENTER::ANSFD();
			modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
			break;
		}
		if (*Handle == nullptr) {
			return -1;
		}
		else {
			// CUDA round-robin + VRAM check — NVIDIA only (see CreateANSODHandle).
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				const int assignedGPU = AssignNextGPU();
				modelConfig.gpuDeviceIndex = assignedGPU;
				CheckGPUVRAM(assignedGPU);
				(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
			}
			RegisterODHandle(*Handle);
			(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
			bool result = (*Handle)->LoadModelFromFolder(licenseKey, modelConfig, modelName, className, modelFolder, labelMap);
			if (result) return 1;
			else return 0;
		}
	}
	catch (...) {
		return 0;
	}
}
// Optimizes (builds/converts) a model file into an engine folder.
// Returns 1 on success (modelFolder receives the output path), 0 on a normal
// failure, -1 on an exception.
// FIX vs previous revision: the classification ModelOptimizer call passed
// (224, 244) — an evident transposition typo; classification models use a
// symmetric 224x224 input (the detection path is symmetric 640x640).
ANSODENGINE_API int OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16, std::string& modelFolder) {
	try {
		bool optimizedResult = false;
		// NOTE: odMutex was removed here. OptimizeModelStr creates its own
		// temporary Engine<float> on the stack — no shared state with running
		// inference tasks. The global mutex was blocking ALL running tasks'
		// result delivery for the entire duration of TRT engine building.
		ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
		//Force modelType to ANSONNXYOLO and ANSRTYOLO if detectionType is detection and modelType is TENSORRT or ONNX
		if ((modelType == 4) ||  // TensorRT
			(modelType == 14) || // TensorRT Yolov10
			(modelType == 22) || // TensorRT Pose
			(modelType == 24))   // TensorRT Segmentation
		{
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) modelType = 31; // RTYOLO
			else modelType = 30; // ONNXYOLO
		}
		else if ((modelType == 3) ||  // YoloV8/YoloV11 (Object Detection)
			(modelType == 17) || // YOLO V12
			(modelType == 20) || // ONNX Classification
			(modelType == 21) || // ONNX Pose
			(modelType == 23) || // ONNX Segmentation
			(modelType == 25))   // OBB Segmentation
		{
			modelType = 30; // ONNXYOLO
		}
		else {
			// do nothing, use the modelType specified by user
		}
		if (modelType == 31) // If modelType is RTYOLO (31), handle separately.
		{
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				if (modelDetectionType == 0) {
					// Classification models: 224x224 input (was 224x244 — typo).
					return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 224, 224) ? 1 : 0;
				}
				else {
					// Standard 640x640 for detection, segmentation and others.
					return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 640, 640) ? 1 : 0;
				}
			}
		}
		// Create model handle dynamically (owned — destroyed before the flag
		// guard restores the process-wide cache state).
		std::unique_ptr<ANSCENTER::ANSODBase> Handle;
		ANSCENTER::ModelConfig modelConfig;
		bool _fp16 = (fp16 == 1);
		switch (modelType) {
		case 0: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW; break;
		case 1: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV4; break;
		case 2: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV5; break;
		case 5: Handle = std::make_unique<ANSCENTER::OPENVINOOD>(); modelConfig.modelType = ANSCENTER::ModelType::OPENVINO; break;
		case 6: Handle = std::make_unique<ANSCENTER::ANSFD>(); modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT; break;
		case 10: Handle = std::make_unique<ANSCENTER::ANSANOMALIB>(); modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB; break;
		case 11: Handle = std::make_unique<ANSCENTER::ANSPOSE>(); modelConfig.modelType = ANSCENTER::ModelType::POSE; break;
		case 12: Handle = std::make_unique<ANSCENTER::ANSSAM>(); modelConfig.modelType = ANSCENTER::ModelType::SAM; break;
		case 13: Handle = std::make_unique<ANSCENTER::ODHUBAPI>(); modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL; break;
		case 15: Handle = std::make_unique<ANSCENTER::ANSOYOLOV10OVOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD; break;
		case 16: Handle = std::make_unique<ANSCENTER::ANSCUSTOMDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR; break;
		case 18: Handle = std::make_unique<ANSCENTER::ANSCUSTOMPY>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY; break;
		case 19: Handle = std::make_unique<ANSCENTER::ANSMOTIONDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR; break;
		case 27: Handle = std::make_unique<ANSCENTER::ANSMOVIENET>(); modelConfig.modelType = ANSCENTER::ModelType::MOVIENET; break;
		case 28: Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3; break;
		case 29: {
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				Handle = std::make_unique<ANSCENTER::ANSSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
			}
			else {
				Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
			}
			break;
		}
		case 30: Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO; break;
		case 31: {
			if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
				Handle = std::make_unique<ANSCENTER::ANSRTYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
			}
			else {
				Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
			}
			break;
		}
		default: {
			if (modelDetectionType == 0) // classification
			{
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					Handle = std::make_unique<ANSCENTER::TENSORRTCL>();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					Handle = std::make_unique<ANSCENTER::ANSONNXCL>();
					modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
				}
				modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
			}
			else if (modelDetectionType == 1) // detection
			{
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					Handle = std::make_unique<ANSCENTER::TENSORRTOD>();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					Handle = std::make_unique<ANSCENTER::YOLOOD>();
					modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
				}
				modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
			}
			else if (modelDetectionType == 2) // segmentation
			{
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU)
				{
					Handle = std::make_unique<ANSCENTER::TENSORRTSEG>();
					modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
				}
				else {
					Handle = std::make_unique<ANSCENTER::ANSONNXSEG>();
					modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
				}
				modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
			}
			else // default is detection
			{
				if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
					Handle = std::make_unique<ANSCENTER::TENSORRTOD>();
					modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
				}
				else {
					Handle = std::make_unique<ANSCENTER::YOLOOD>();
					modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
				}
				modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
			}
			break;
		}
		}
		// TensorRT-specific: bypass pool and cache for temporary optimizer engines
		if (Handle && engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
			Handle->SetMaxSlotsPerGpu(0);
			Handle->SetSkipEngineCache(true);
			Handle->SetForceNoPool(true);
		}
		// RAII guard for TensorRT process-wide flags.
		// Without this, an exception in LoadModel or OptimizeModel permanently
		// leaves the flags set, breaking all subsequent engine creation.
		struct GlobalFlagGuard {
			bool active;
			GlobalFlagGuard(bool isNvidia) : active(isNvidia) {
				if (active) {
					g_forceNoPool = true;
					TRTEngineCache::globalBypass() = true;
				}
			}
			~GlobalFlagGuard() {
				if (active) {
					g_forceNoPool = false;
					TRTEngineCache::globalBypass() = false;
				}
			}
		} flagGuard(engineType == ANSCENTER::EngineType::NVIDIA_GPU);
		// Load and optimize model
		if (Handle && Handle->LoadModel(modelFilePath, modelFileZipPassword)) {
			optimizedResult = Handle->OptimizeModel(_fp16, modelFolder);
		}
		Handle.reset(); // Destroy engines BEFORE guard restores cache
		if (optimizedResult && !modelFolder.empty()) return 1;
		else return 0;
	}
	catch (const std::exception& e) {
		// GlobalFlagGuard destructor runs here — flags are always restored
		std::cerr << "OptimizeModelStr Exception: " << e.what() << std::endl;
		return -1;
	}
	catch (...) {
		// GlobalFlagGuard destructor runs here — flags are always restored
		std::cerr << "OptimizeModelStr: Unknown exception occurred." << std::endl;
		return -1;
	}
}
// Destroys and frees an engine handle, but only when it is still tracked by
// our registry — this guards against double-free and against pointers that
// were never created by this DLL. Returns 0 on success or no-op, 1 if
// cleanup threw (the slot is still nulled so callers cannot reuse it).
static int ReleaseANSODHandle_Impl(ANSCENTER::ANSODBase** Handle) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return 0;
		ANSCENTER::ANSODBase* engine = *Handle;
		if (!UnregisterODHandle(engine)) {
			// Unknown to the registry — already freed, or not ours to delete.
			*Handle = nullptr;
			return 0;
		}
		engine->Destroy();
		delete engine;
		*Handle = nullptr;
		return 0;
	}
	catch (...) {
		// Handle is known non-null here: the null checks above cannot throw.
		*Handle = nullptr;
		return 1;
	}
}
// SEH wrapper around ReleaseANSODHandle_Impl: __except also catches
// structured exceptions (e.g. an access violation from a stale or corrupt
// handle) that C++ try/catch cannot. The real work lives in a separate
// function because MSVC forbids __try in a function that needs C++ object
// unwinding (error C2712). Returns 0 on success, 1 on any failure.
extern "C" ANSODENGINE_API int ReleaseANSODHandle(ANSCENTER::ANSODBase** Handle) {
	__try {
		return ReleaseANSODHandle_Impl(Handle);
	}
	__except (EXCEPTION_EXECUTE_HANDLER) {
		return 1;
	}
}
// Decodes a JPEG byte buffer and runs single-frame inference under the fixed
// camera id "Cam". Returns the detection result string, or an empty string on
// bad arguments, decode failure, or exception.
ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (jpeg_string == nullptr || bufferLength == 0) return "";
		// Wrap the caller's buffer without copying, then decode it.
		cv::Mat encoded(1, bufferLength, CV_8UC1, jpeg_string);
		cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
		if (decoded.empty()) return std::string();
		std::string detectionResult;
		(*Handle)->RunInference(decoded, "Cam", detectionResult);
		return detectionResult;
	}
	catch (...) {
		return std::string();
	}
}
// Decodes a JPEG byte buffer and runs tiled inference (tile size and overlap
// supplied by the caller). Returns the detections serialized as JSON, or an
// empty string on bad arguments, decode failure, or exception.
ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (jpeg_string == nullptr || bufferLength == 0) return "";
		cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
		if (decoded.empty()) return std::string();
		std::vector<ANSCENTER::Object> detections =
			(*Handle)->RunInferences(decoded, tiledWidth, titledHeight, overlap, cameraId);
		return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
	}
	catch (...) {
		return std::string();
	}
}
// Forwards an in-memory JPEG string straight to the engine (decoding is done
// inside the engine class) and returns the detections as JSON. Empty string
// on a null handle or exception.
ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		std::vector<ANSCENTER::Object> detections =
			(*Handle)->RunInferenceFromJpegString(jpeg_string, jpeg_size, cameraId);
		return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
	}
	catch (...) {
		return std::string();
	}
}
// Forwards an in-memory JPEG string to the engine's tiled-inference path and
// returns the detections as JSON. Empty string on a null handle or exception.
ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		std::vector<ANSCENTER::Object> detections =
			(*Handle)->RunTiledInferenceFromJpegString(jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
		return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
	}
	catch (...) {
		return std::string();
	}
}
// Runs single-frame inference directly on an already-decoded cv::Mat under
// the fixed camera id "Cam". Empty string on a null handle or exception.
ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image)
{
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		std::string detectionResult;
		(*Handle)->RunInference(image, "Cam", detectionResult);
		return detectionResult;
	}
	catch (...) {
		return std::string();
	}
}
// Motion-detector entry point: fills `results` with the detections from
// DetectMovement. On a null handle or any exception, `results` is cleared.
extern "C" ANSODENGINE_API void RunDetectMovement(ANSCENTER::ANSODBase** Handle, cv::Mat image, const char* cameraId, std::vector<ANSCENTER::Object>& results) {
	try {
		if (Handle == nullptr || *Handle == nullptr) {
			results.clear();
			return;
		}
		results = (*Handle)->DetectMovement(image, cameraId);
	}
	catch (...) {
		results.clear();
	}
}
// Tiled inference on an already-decoded cv::Mat: fills `results` with the
// detections. On a null handle or any exception, `results` is cleared.
extern "C" ANSODENGINE_API void RunTiledInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image, int tiledWidth, int titledHeight, double overlap, std::vector<ANSCENTER::Object>& results, const char* cameraId) {
	try {
		if (Handle == nullptr || *Handle == nullptr) {
			results.clear();
			return;
		}
		results = (*Handle)->RunInferences(image, tiledWidth, titledHeight, overlap, cameraId);
	}
	catch (...) {
		results.clear();
	}
}
// Decodes the JPEG buffer, parses the bounding-box list from strBboxes, runs
// inference restricted to those regions, and returns the detections as JSON.
// Empty string on bad arguments, decode failure, or exception.
ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (jpeg_string == nullptr || bufferLength == 0) return "";
		cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
		if (decoded.empty()) return std::string();
		std::vector<cv::Rect> regions = ANSCENTER::ANSUtilityHelper::GetBoundingBoxesFromString(strBboxes);
		std::vector<ANSCENTER::Object> detections = (*Handle)->RunInference(decoded, regions, cameraId);
		return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
	}
	catch (...) {
		return std::string();
	}
}
// Decodes the JPEG buffer, parses a polygon from strPolygon, runs inference
// restricted to that polygon region, and returns the detections as JSON.
// Empty string on bad arguments, decode failure, or exception.
ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (jpeg_string == nullptr || bufferLength == 0) return "";
		cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
		if (decoded.empty()) return std::string();
		std::vector<cv::Point> region = ANSCENTER::ANSUtilityHelper::StringToPolygon(strPolygon);
		std::vector<ANSCENTER::Object> detections = (*Handle)->RunInference(decoded, region, cameraId);
		return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
	}
	catch (...) {
		return std::string();
	}
}
// Runs inference on a raw packed 8-bit 3-channel pixel buffer of the given
// dimensions (despite the parameter name, the data is NOT JPEG-encoded —
// presumably BGR, matching cv::Mat convention; TODO confirm with callers).
// The buffer is cloned so the caller's memory is never retained.
// Empty string on bad arguments or exception.
ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (jpeg_bytes == nullptr || width == 0 || height == 0) return "";
		// Deep copy — the engine must not alias the caller-owned buffer.
		cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
		if (frame.empty()) return std::string();
		std::string detectionResult;
		(*Handle)->RunInference(frame, "Cam", detectionResult);
		return detectionResult;
	}
	catch (...) {
		return std::string();
	}
}
// Loads an image from disk and runs single-frame inference under the fixed
// camera id "Cam". Returns the detection result string, or an empty string
// on bad arguments, read failure, or exception.
// FIX: constructing std::string from a null const char* is undefined
// behavior — guard imageFilePath before use.
ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
	try {
		if (Handle == nullptr || *Handle == nullptr) return "";
		if (imageFilePath == nullptr) return "";
		const std::string stImageFileName(imageFilePath);
		cv::Mat frame = cv::imread(stImageFileName, cv::ImreadModes::IMREAD_COLOR);
		if (frame.empty()) return std::string();
		std::string detectionResult;
		(*Handle)->RunInference(frame, "Cam", detectionResult);
		return detectionResult;
	}
	catch (...) {
		return std::string();
	}
}
// LabVIEW wrapper: runs RunInference and copies the result into the LabVIEW
// string handle. Returns 1 on success, 0 on empty result or resize failure.
extern "C" ANSODENGINE_API int RunInference_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
	try {
		const std::string json = RunInference(Handle, jpeg_string, bufferLength);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: runs RunTiledInference and copies the JSON result into
// the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunTiledInference_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
	try {
		const std::string json = RunTiledInference(Handle, jpeg_string, bufferLength, tiledWidth, titledHeight, overlap, cameraId);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: runs RunInferenceFromJpegString and copies the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceFromJpegString_LV(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId, LStrHandle detectionResult) {
	try {
		const std::string json = RunInferenceFromJpegString(Handle, jpeg_string, jpeg_size, cameraId);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: runs RunTiledInferenceFromJpegString and copies the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunTiledInferenceFromJpegString_LV(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
	try {
		const std::string json = RunTiledInferenceFromJpegString(Handle, jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: runs RunInferenceInCroppedBBoxImages and copies the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxImages_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strBboxes, LStrHandle detectionResult) {
	try {
		const std::string json = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: runs RunInferenceInCroppedPolygonImages and copies the
// JSON result into the LabVIEW string handle. Returns 1 on success, 0
// otherwise.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxPolygonImages_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strPolygon, LStrHandle detectionResult) {
	try {
		const std::string json = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
		if (json.empty()) return 0;
		const int len = static_cast<int>(json.length());
		const MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + len * sizeof(uChar));
		if (err != noErr) return 0;
		(*detectionResult)->cnt = len;
		memcpy((*detectionResult)->str, json.c_str(), len);
		return 1;
	}
	catch (...) {
		return 0;
	}
}
// LabVIEW wrapper: run inference on a raw (already decoded) pixel buffer of
// the given width/height; detection JSON is written into the string handle.
// Returns 1 on success, 0 on any failure.
extern "C" ANSODENGINE_API int RunInferenceBinary_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height, LStrHandle detectionResult) {
    try {
        const std::string json = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0; // keep exceptions inside the DLL boundary
    }
}
// LabVIEW wrapper: run inference on an image loaded from disk; detection JSON
// is written into the LabVIEW string handle.
// Returns 1 on success, 0 on any failure.
extern "C" ANSODENGINE_API int RunInferenceImagePath_LV(ANSCENTER::ANSODBase** Handle, const char* imageFilePath, LStrHandle detectionResult) {
    try {
        const std::string json = RunInferenceImagePath(Handle, imageFilePath);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0; // keep exceptions inside the DLL boundary
    }
}
// Optimize a model file and write the optimized-model folder path into the
// LabVIEW string handle. Returns 1 on success, 0 on any failure.
// FIX: the previous version had two catch handlers returning identical
// results, with an unused `const std::exception& e` binding (compiler
// warning, dead code) — collapsed into a single catch(...).
extern "C" ANSODENGINE_API int OptimizeModel(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16, LStrHandle optimizedModelFolder) {
    try {
        // Validate the output handle up front instead of after the (possibly
        // expensive) optimization has already run its course.
        std::string folder;
        const int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, folder);
        if (ret <= 0 || folder.empty() || optimizedModelFolder == nullptr) {
            return 0;
        }
        const int byteCount = static_cast<int>(folder.size());
        if (DSSetHandleSize(optimizedModelFolder, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*optimizedModelFolder)->cnt = byteCount;
        memcpy((*optimizedModelFolder)->str, folder.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0; // keep exceptions inside the DLL boundary
    }
}
// C# wrapper: create an engine handle and return its status string.
// The returned pointer refers to a thread_local buffer and stays valid until
// the next *_CS call on the SAME thread.
// FIX: the buffer was a plain function-local `static std::string`, so
// concurrent calls from different threads/tasks raced on one shared object
// (data race + dangling pointer for the loser). thread_local gives each
// thread its own buffer without changing the interface.
extern "C" __declspec(dllexport) const char* CreateANSODHandle_CS(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectionType, int loadEngineOnCreation) {
    try {
        thread_local std::string result;
        result = CreateANSODHandle(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectionType, loadEngineOnCreation);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: run inference on an image file; returns a pointer into a
// thread_local buffer, valid until the next *_CS call on the same thread.
// FIX: was a shared function-local `static` — a data race when multiple
// threads call concurrently; thread_local removes the race.
extern "C" __declspec(dllexport) const char* RunInferenceImagePath_CS(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
    try {
        thread_local std::string result;
        result = RunInferenceImagePath(Handle, imageFilePath);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: run inference on an in-memory JPEG; returns a pointer into a
// thread_local buffer, valid until the next *_CS call on the same thread.
// FIX: was a shared function-local `static` — a data race when multiple
// threads call concurrently; thread_local removes the race.
extern "C" __declspec(dllexport) const char* RunInference_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    try {
        thread_local std::string result;
        result = RunInference(Handle, jpeg_string, bufferLength);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: inference inside caller-supplied bounding boxes; returns a
// pointer into a thread_local buffer, valid until the next *_CS call on the
// same thread.
// FIX: was a shared function-local `static` — a data race under concurrent
// callers; thread_local removes the race without changing the interface.
extern "C" __declspec(dllexport) const char* RunInferenceInCroppedBBoxImages_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
    try {
        thread_local std::string result;
        result = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: inference inside caller-supplied polygon regions; returns a
// pointer into a thread_local buffer, valid until the next *_CS call on the
// same thread.
// FIX: was a shared function-local `static` — a data race under concurrent
// callers; thread_local removes the race without changing the interface.
extern "C" __declspec(dllexport) const char* RunInferenceInCroppedPolygonImages_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
    try {
        thread_local std::string result;
        result = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: inference on a raw pixel buffer; returns a pointer into a
// thread_local buffer, valid until the next *_CS call on the same thread.
// FIX: was a shared function-local `static` — a data race under concurrent
// callers; thread_local removes the race without changing the interface.
extern "C" __declspec(dllexport) const char* RunInferenceBinary_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    try {
        thread_local std::string result;
        result = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# wrapper: optimize a model and return the optimized-model folder path.
// Returns "" on failure; the success pointer refers to a thread_local buffer
// valid until the next *_CS call on the same thread.
// FIX: was a shared function-local `static` — a data race under concurrent
// callers; thread_local removes the race without changing the interface.
extern "C" __declspec(dllexport) const char* OptimizeModelStr_CS(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16)
{
    try {
        thread_local std::string result;
        result.clear();
        const int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, result);
        return (ret > 0 && !result.empty()) ? result.c_str() : "";
    }
    catch (...) {
        return "";
    }
}
// with camera id
// LabVIEW wrapper: decode a JPEG buffer, run motion detection for the given
// camera, and copy the resulting JSON into the LabVIEW string handle.
// Returns 1 on success, 0 on any failure.
extern "C" ANSODENGINE_API int RunDetectMovement_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    try {
        // FIX: validate inputs before dereferencing. A null/released handle or
        // null buffer previously hit an access violation, which C++ catch(...)
        // may not intercept (depends on /EHa vs /EHsc). Mirrors the checks the
        // V2 variant (RunDetectMovement_LV_V2) already performs.
        if (Handle == nullptr || *Handle == nullptr) return 0;
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::vector<ANSCENTER::Object> outputs = (*Handle)->DetectMovement(frame, cameraId);
        frame.release();
        std::string st = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
        if (st.empty()) return 0;
        int size = static_cast<int>(st.length());
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW wrapper: decode a JPEG buffer, run inference tagged with a camera
// id, and copy the resulting JSON into the LabVIEW string handle.
// Returns 1 on success, 0 on any failure.
extern "C" ANSODENGINE_API int RunInferenceFromJpegStringWithCameraId_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    try {
        // FIX: validate inputs before dereferencing. A null/released handle or
        // null buffer previously hit an access violation, which C++ catch(...)
        // may not intercept (depends on /EHa vs /EHsc). Mirrors the checks the
        // V2 variant already performs.
        if (Handle == nullptr || *Handle == nullptr) return 0;
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::string st;
        (*Handle)->RunInference(frame, cameraId, st);
        frame.release();
        if (st.empty()) return 0;
        int size = static_cast<int>(st.length());
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// Core implementation shared by RunInferenceComplete_LV and *_LV_V2.
// Runs inference on **cvImage, writes detection JSON into detectionResult and
// (when getJpegString == 1) a JPEG-encoded copy of the frame into imageStr.
// Returns: 1 = success, 0 = soft failure, -1 = null handle, -2 = bad image,
// -3 = handle already released.
static int RunInferenceComplete_LV_Impl(
    ANSCENTER::ANSODBase** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        return -2;
    }
    // RAII guard — prevents ReleaseANSODHandle from destroying the engine
    // while we are still using it. Destructor auto-releases refcount.
    ANSCENTER::ANSODBase* handleSnapshot = *Handle; // snapshot the pointer value
    ODHandleGuard guard(AcquireODHandle(handleSnapshot));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    // Save/restore thread-local to support nested calls (custom model DLLs
    // calling back into ANSODEngine via ANSLIB.dll).
    // FIX: the restore is now RAII-based so it runs on EVERY exit path,
    // including exceptions thrown by RunInferenceWithOption. Previously the
    // manual restore was skipped when an exception unwound into the catch
    // blocks, leaving a stale GpuFrameData* in the thread-local slot for the
    // next call on this thread.
    GpuFrameData* savedFrame = tl_currentGpuFrame();
    struct TlsFrameRestore {
        GpuFrameData* saved;
        ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
    } tlsRestore{ savedFrame };
    try {
        auto _t0 = std::chrono::steady_clock::now();
        // Lookup NV12 frame data BEFORE cloning (clone creates new cv::Mat*)
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        ANS_DBG("LV_Inference", "START cam=%s %dx%d gpuFrame=%p nv12=%s",
            cameraId ? cameraId : "?", originalWidth, originalHeight,
            (void*)gpuFrame, gpuFrame ? "YES" : "NO");
        if (originalWidth == 0 || originalHeight == 0) {
            return -2; // tlsRestore puts the saved frame back
        }
        // Set thread-local so engines can access NV12 data without registry lookup.
        // Safe: *cvImage holds a refcount, keeping gpuFrame alive during inference.
        // Only use OWN gpuFrame — never inherit outer caller's frame (dimension mismatch on crops).
        tl_currentGpuFrame() = gpuFrame;
        auto _t1 = std::chrono::steady_clock::now();
        std::vector<ANSCENTER::Object> outputs = engine->RunInferenceWithOption(localImage, cameraId, activeROIMode);
        auto _t2 = std::chrono::steady_clock::now();
        // Eager restore on the normal path; tlsRestore covers the throwing path.
        tl_currentGpuFrame() = savedFrame;
        double prepMs = std::chrono::duration<double, std::milli>(_t1 - _t0).count();
        double infMs = std::chrono::duration<double, std::milli>(_t2 - _t1).count();
        if (infMs > 500.0) {
            ANS_DBG("LV_Inference", "SLOW cam=%s prep=%.1fms inf=%.1fms results=%zu",
                cameraId ? cameraId : "?", prepMs, infMs, outputs.size());
        }
        bool getJpeg = (getJpegString == 1);
        std::string stImage;
        // NOTE: odMutex was removed here. All variables in this scope are local
        // (outputs, localImage, stImage, stDetectionResult) and the LStrHandle
        // parameters are per-call. The global mutex was blocking running tasks
        // for minutes when another task was optimizing a TRT engine.
        int maxImageSize = originalWidth;// std::max(originalWidth, originalHeight);
        bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < maxImageSize);
        float ratio = 1.0f;
        int newWidth = originalWidth;
        int newHeight = originalHeight;
        if (resizeNeeded) {
            // Scale boxes to the reduced JPEG geometry and clamp into bounds.
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            ratio = static_cast<float>(newWidth) / originalWidth;
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // No resize: just clamp boxes into the original frame bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }
        // Convert to JPEG if needed
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }
            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
            else {
                std::cerr << "Error: JPEG encoding failed!" << std::endl;
            }
        }
        std::string stDetectionResult = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
        if (stDetectionResult.empty()) {
            return 0;
        }
        int size = static_cast<int>(stDetectionResult.length());
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, stDetectionResult.c_str(), size);
        if (getJpeg) {
            if (stImage.empty()) {
                return 0;
            }
            size = static_cast<int>(stImage.length());
            error = DSSetHandleSize(imageStr, sizeof(int32) + size * sizeof(uChar));
            if (error != noErr) {
                return 0;
            }
            (*imageStr)->cnt = size;
            memcpy((*imageStr)->str, stImage.c_str(), size);
        }
        return 1;
    }
    catch (const std::exception& ex) {
        return 0; // tlsRestore has already reinstated the saved frame pointer
    }
    catch (...) {
        return 0;
    }
}
// SEH wrapper — no C++ objects allowed in this function
// SEH wrapper — no C++ objects allowed in this function
// MSVC rejects objects with destructors inside a __try frame (C2712), so all
// real work is delegated to RunInferenceComplete_LV_Impl; this frame only
// catches structured exceptions (e.g. access violations) that ordinary C++
// try/catch cannot see, and maps them to the sentinel return value -4.
static int RunInferenceComplete_LV_SEH(
    ANSCENTER::ANSODBase** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    __try {
        return RunInferenceComplete_LV_Impl(Handle, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        return -4; // a structured exception was swallowed
    }
}
// Exported LabVIEW entry point: complete inference with optional JPEG
// snapshot. Simply forwards through the SEH shim so structured exceptions
// never escape the DLL (-4 is returned instead).
extern "C" ANSODENGINE_API int RunInferenceComplete_LV(
    ANSCENTER::ANSODBase** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    const int status = RunInferenceComplete_LV_SEH(Handle, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
    return status;
}
// V2: Accepts handle as uint64_t by value — eliminates pointer-to-pointer
// instability when LabVIEW calls concurrently from multiple tasks.
// LabVIEW CLFN: set Handle parameter to Numeric / Unsigned Pointer-sized Integer / Pass: Value
// V2: Accepts handle as uint64_t by value — eliminates pointer-to-pointer
// instability when LabVIEW calls concurrently from multiple tasks.
// LabVIEW CLFN: set Handle parameter to Numeric / Unsigned Pointer-sized Integer / Pass: Value
// NOTE: this function hosts a __try frame itself, so its locals are limited
// to raw pointers/arrays (no destructors) — MSVC C2712 restriction.
extern "C" ANSODENGINE_API int RunInferenceComplete_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    // Cast the by-value integer back to the engine pointer — no double dereference
    ANSCENTER::ANSODBase* directHandle = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
    if (directHandle == nullptr) {
        return -1;
    }
    // Reuse a temporary pointer so we can call the existing Impl function
    // (the array keeps the pointed-to slot alive for the Handle** parameter)
    ANSCENTER::ANSODBase* tempArr[1] = { directHandle };
    ANSCENTER::ANSODBase** pHandle = &tempArr[0];
    __try {
        return RunInferenceComplete_LV_Impl(pHandle, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        return -4; // structured exception (e.g. access violation) swallowed
    }
}
// ============================================================================
// V2 LabVIEW API — Accept handle as uint64_t by value.
// Eliminates Handle** pointer-to-pointer instability when LabVIEW calls
// concurrently from multiple tasks.
// LabVIEW CLFN: set Handle parameter to Numeric / Unsigned Pointer-sized Integer / Pass: Value
// ============================================================================
// Helper: cast uint64_t handle to ANSODBase** for delegation to existing functions
// Helper: cast uint64_t handle to ANSODBase** for delegation to existing functions
// NOTE: expands inside the calling function — it declares a local `Handle`
// variable in the caller's scope and executes `return 0` from the enclosing
// function when the handle value is null. The one-element array keeps the
// pointed-to slot alive for the ANSODBase** parameter of the delegates.
#define V2_HANDLE_SETUP(handleVal) \
    ANSCENTER::ANSODBase* _v2Direct = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal); \
    if (_v2Direct == nullptr) return 0; \
    ANSCENTER::ANSODBase* _v2Arr[1] = { _v2Direct }; \
    ANSCENTER::ANSODBase** Handle = &_v2Arr[0];
// V2 LabVIEW wrapper (handle by value): inference on an in-memory JPEG,
// JSON result copied into the LabVIEW string handle. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInference_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInference(Handle, jpeg_string, bufferLength);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): tiled inference on a JPEG buffer,
// JSON result copied into the LabVIEW string handle. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunTiledInference_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunTiledInference(Handle, jpeg_string, bufferLength, tiledWidth, titledHeight, overlap, cameraId);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): tiled inference on a JPEG string,
// JSON result copied into the LabVIEW string handle. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunTiledInferenceFromJpegString_LV_V2(uint64_t handleVal, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunTiledInferenceFromJpegString(Handle, jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): inference on a JPEG string tagged
// with a camera id. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceFromJpegString_LV_V2(uint64_t handleVal, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInferenceFromJpegString(Handle, jpeg_string, jpeg_size, cameraId);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): inference on a raw pixel buffer of
// the given width/height. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceBinary_LV_V2(uint64_t handleVal, unsigned char* jpeg_bytes, unsigned int width, unsigned int height, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): inference on an image loaded from
// disk. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceImagePath_LV_V2(uint64_t handleVal, const char* imageFilePath, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInferenceImagePath(Handle, imageFilePath);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): inference inside caller-supplied
// bounding boxes. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxImages_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strBboxes, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): inference inside caller-supplied
// polygon regions. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxPolygonImages_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strPolygon, LStrHandle detectionResult) {
    try {
        V2_HANDLE_SETUP(handleVal);
        const std::string json = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): decode a JPEG buffer and run motion
// detection for the given camera. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunDetectMovement_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    try {
        auto* engine = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
        if (!engine) {
            return 0;
        }
        // Decode the incoming JPEG buffer into a BGR frame.
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::vector<ANSCENTER::Object> detections = engine->DetectMovement(frame, cameraId);
        frame.release();
        const std::string json = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// V2 LabVIEW wrapper (handle by value): decode a JPEG buffer and run
// inference tagged with a camera id. 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceFromJpegStringWithCameraId_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    try {
        auto* engine = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
        if (!engine) {
            return 0;
        }
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::string json;
        engine->RunInference(frame, cameraId, json);
        frame.release();
        if (json.empty()) {
            return 0;
        }
        const int byteCount = static_cast<int>(json.size());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.data(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// C++ entry point: run inference with an explicit ROI mode option and return
// the detections through the out-parameter vector.
// Returns 1 = detections found, 0 = none/failure, -1 = null handle,
// -2 = bad image, -3 = handle already released.
extern "C" ANSODENGINE_API int RunInferenceComplete_CPP(ANSCENTER::ANSODBase** Handle, cv::Mat** cvImage, const char* cameraId, const char* activeROIMode, std::vector<ANSCENTER::Object> &detectionResult) {
    if (Handle == nullptr || *Handle == nullptr) {
        std::cerr << "Error: Handle is null in RunInferenceComplete_CPP!" << std::endl;
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        std::cerr << "Error: Invalid or empty input image in RunInferenceComplete_CPP!" << std::endl;
        return -2;
    }
    ODHandleGuard guard(AcquireODHandle(*Handle));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    // Save/restore thread-local to support nested calls (e.g., custom model DLLs
    // calling back into ANSODEngine via ANSLIB.dll). Without this, the inner call
    // would overwrite the outer caller's valid GpuFrameData* with nullptr.
    // FIX: the restore is now RAII-based so it also runs when inference throws;
    // the previous manual restore was skipped on the exception path, leaving a
    // stale GpuFrameData* in the thread-local slot.
    GpuFrameData* savedFrame = tl_currentGpuFrame();
    struct TlsFrameRestore {
        GpuFrameData* saved;
        ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
    } tlsRestore{ savedFrame };
    try {
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) {
            return -2; // tlsRestore puts the saved frame back
        }
        // Only use gpuFrame if this cv::Mat* has NV12 data.
        // Do NOT propagate savedFrame to inner engines — the inner call's cv::Mat*
        // is a local copy (from ANSLIB.dll) that doesn't correspond to the outer
        // frame's NV12 geometry. Using savedFrame here would cause the inner engine
        // to read NV12 data that doesn't match its input image.
        tl_currentGpuFrame() = gpuFrame;
        detectionResult = engine->RunInferenceWithOption(localImage, cameraId, activeROIMode);
        tl_currentGpuFrame() = savedFrame; // eager restore on the normal path
        if (detectionResult.empty()) return 0;
        else return 1;
    }
    catch (const std::exception& ex) {
        std::cerr << "Exception in RunInferenceComplete_CPP: " << ex.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in RunInferenceComplete_CPP!" << std::endl;
        return 0;
    }
}
// C++ entry point: run plain inference and return the detections through the
// out-parameter vector.
// Returns 1 = detections found, 0 = none/failure, -1 = null handle,
// -2 = bad image, -3 = handle already released.
extern "C" ANSODENGINE_API int RunInference_CPP(ANSCENTER::ANSODBase** Handle, cv::Mat** cvImage, const char* cameraId, std::vector<ANSCENTER::Object>& detectionResult) {
    if (Handle == nullptr || *Handle == nullptr) {
        std::cerr << "Error: Handle is null in RunInference_CPP!" << std::endl;
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        std::cerr << "Error: Invalid or empty input image in RunInference_CPP!" << std::endl;
        return -2;
    }
    ODHandleGuard guard(AcquireODHandle(*Handle));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    // Save/restore thread-local (same nested-call protection as RunInferenceComplete_CPP).
    // FIX: the restore is now RAII-based so it also runs when inference throws;
    // the previous manual restore was skipped on the exception path.
    GpuFrameData* savedFrame = tl_currentGpuFrame();
    struct TlsFrameRestore {
        GpuFrameData* saved;
        ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
    } tlsRestore{ savedFrame };
    try {
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) {
            return -2; // tlsRestore puts the saved frame back
        }
        // Only use gpuFrame if this cv::Mat* has NV12 data (see RunInferenceComplete_CPP comment)
        tl_currentGpuFrame() = gpuFrame;
        detectionResult = engine->RunInference(localImage, cameraId);
        tl_currentGpuFrame() = savedFrame; // eager restore on the normal path
        if (detectionResult.empty()) return 0;
        else return 1;
    }
    catch (const std::exception& ex) {
        std::cerr << "Exception in RunInference_CPP: " << ex.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in RunInference_CPP!" << std::endl;
        return 0;
    }
}
// Utility function to convert a string to a vector of cv::Point
// Map the detected hardware engine onto an integer code for interop callers:
// 0 = CPU, 1 = NVIDIA GPU, 2 = OpenVINO GPU, 3 = AMD GPU, 99 = unknown.
extern "C" __declspec(dllexport) int GetEngineType() {
    const ANSCENTER::EngineType detected = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
    if (detected == ANSCENTER::EngineType::CPU) return 0;
    if (detected == ANSCENTER::EngineType::NVIDIA_GPU) return 1;
    if (detected == ANSCENTER::EngineType::OPENVINO_GPU) return 2;
    if (detected == ANSCENTER::EngineType::AMD_GPU) return 3;
    return 99; // Unknown
}
// Compute the active window rectangle for a frame.
// Returns 1 on success, 0 when the computed window is empty, -1 on invalid
// input or exception.
extern "C" __declspec(dllexport) int GetActiveRect(ANSCENTER::ANSODBase** Handle, cv::Mat cvImage, cv::Rect& activeWindow) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (cvImage.empty()) {
            std::cerr << "Error: Input image is empty!" << std::endl;
            return -1;
        }
        activeWindow = (*Handle)->GetActiveWindow(cvImage);
        if (!activeWindow.empty()) {
            return 1; // Success
        }
        std::cerr << "Error: Active window is empty!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in GetActiveWindow!" << std::endl;
        return -1;
    }
}
// Run motion detection on a frame and return the detections through the
// out-parameter vector. 1 = detections found, 0 = none, -1 = error.
extern "C" __declspec(dllexport) int DetectMovement(ANSCENTER::ANSODBase** Handle, cv::Mat image, const char* cameraId, std::vector<ANSCENTER::Object>& results) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (image.empty()) {
            std::cerr << "Error: Input image is empty!" << std::endl;
            return -1;
        }
        results = (*Handle)->DetectMovement(image, cameraId);
        if (!results.empty()) {
            return 1; // Success
        }
        std::cerr << "Error: No detection results!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in DetectMovement!" << std::endl;
        return -1;
    }
}
// Optimize the engine's model (optionally FP16). The output folder reported
// by OptimizeModel is discarded — this wrapper only surfaces success/failure.
// 1 = success, 0 = optimization failed, -1 = null handle or exception.
extern "C" __declspec(dllexport) int Optimize(ANSCENTER::ANSODBase** Handle, bool fp16) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        std::string modelFolder;
        if ((*Handle)->OptimizeModel(fp16, modelFolder)) {
            return 1; // Success
        }
        std::cerr << "Error: Model optimization failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in Optimize!" << std::endl;
        return -1;
    }
}
// Parse a custom-parameters string and apply it to the engine.
// 1 = success, 0 = SetParameters rejected, -1 = invalid input or exception.
extern "C" __declspec(dllexport) int SetODParameters(ANSCENTER::ANSODBase** Handle, const char* parameters) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (parameters == nullptr) {
            std::cerr << "Error: Parameters string is null!" << std::endl;
            return -1;
        }
        const ANSCENTER::Params parsed =
            ANSCENTER::ANSUtilityHelper::ParseCustomParameters(std::string(parameters));
        if ((*Handle)->SetParameters(parsed)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting parameters failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetParameters!" << std::endl;
        return -1;
    }
}
// Read the engine's current parameters into the out-parameter.
// 1 = success, -1 = null handle or exception.
extern "C" __declspec(dllexport) int GetODParameters(ANSCENTER::ANSODBase** Handle, ANSCENTER::Params& param) {
    try {
        const bool handleMissing = (Handle == nullptr || *Handle == nullptr);
        if (handleMissing) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        param = (*Handle)->GetParameters();
        return 1; // Success
    }
    catch (...) {
        std::cerr << "Unknown exception in GetParameters!" << std::endl;
        return -1;
    }
}
// Serialize the engine's configured parameters into a LabVIEW string handle.
// Layered protection: the outer __try catches structured exceptions (returns
// -2); because MSVC forbids objects with destructors directly inside a __try
// frame, the C++ work lives in an immediately-invoked lambda, whose own
// try/catch handles C++ exceptions (returns 0 / -1 for bad arguments).
extern "C" __declspec(dllexport) int GetConfiguredParameters(ANSCENTER::ANSODBase** Handle, LStrHandle stParam) {
    __try {
        return [&]() -> int {
            try {
                if (Handle == nullptr || *Handle == nullptr) {
                    std::cerr << "Error: Handle is null!" << std::endl;
                    return -1;
                }
                if (stParam == nullptr) {
                    std::cerr << "Error: stParam LStrHandle is null!" << std::endl;
                    return -1;
                }
                ANSCENTER::Params param;
                bool result = (*Handle)->ConfigureParameters(param);
                if (!result) {
                    std::cerr << "Error: Getting parameters failed!" << std::endl;
                    return 0;
                }
                // Serialize and copy into the LabVIEW handle (resize first).
                std::string st = ANSCENTER::ANSUtilityHelper::SerializeCustomParamters(param);
                if (st.empty()) return 0;
                int size = static_cast<int>(st.length());
                MgErr error;
                error = DSSetHandleSize(stParam, sizeof(int32) + size * sizeof(uChar));
                if (error == noErr)
                {
                    (*stParam)->cnt = size;
                    memcpy((*stParam)->str, st.c_str(), size);
                    return 1;
                }
                else return 0;
            }
            catch (const std::exception& e) {
                std::cerr << "GetConfiguredParameters exception: " << e.what() << std::endl;
                return 0;
            }
            catch (...) {
                std::cerr << "GetConfiguredParameters unknown exception" << std::endl;
                return 0;
            }
        }();
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        std::cerr << "GetConfiguredParameters SEH exception (code: " << GetExceptionCode() << ")" << std::endl;
        return -2;
    }
}
// C++ variant of GetConfiguredParameters: serialize the engine's configured
// parameters into a std::string. 1 = success, 0 = failure, -1 = null handle.
extern "C" __declspec(dllexport) int GetConfiguredParameters_CPP(ANSCENTER::ANSODBase** Handle, std::string& stParam) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        ANSCENTER::Params configured;
        if (!(*Handle)->ConfigureParameters(configured)) {
            std::cerr << "Error: Getting parameters failed!" << std::endl;
            return 0;
        }
        stParam = ANSCENTER::ANSUtilityHelper::SerializeCustomParamters(configured);
        return stParam.empty() ? 0 : 1;
    }
    catch (...) {
        return 0;
    }
}
// Shut down all embedded Python engine instances (non-C++ entry point).
// The `false` argument distinguishes this path from ShutdownPythonEngine_CPP,
// which passes `true` — exact semantics live in SafeShutdownAll; confirm there.
extern "C" ANSODENGINE_API int ShutdownPythonEngine() {
    ANSCENTER::ANSCUSTOMPY::SafeShutdownAll(false);
    return 1; // always reports success
}
// Shut down all embedded Python engine instances (C++-caller entry point).
// Passes `true` where ShutdownPythonEngine passes `false` — exact semantics
// live in SafeShutdownAll; confirm there.
extern "C" ANSODENGINE_API int ShutdownPythonEngine_CPP() {
    ANSCENTER::ANSCUSTOMPY::SafeShutdownAll(true);
    return 1; // always reports success
}
// Update the engine's minimum detection confidence threshold.
// 1 = success, 0 = engine rejected the value, -1 = null handle or exception.
extern "C" __declspec(dllexport) int UpdateDetectionMinScore(ANSCENTER::ANSODBase** Handle, float detectionScore) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if ((*Handle)->UpdateDetectionThreshold(detectionScore)) {
            return 1; // Success
        }
        std::cerr << "Error: Updating detection threshold failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in UpdateDetectionMinScore!" << std::endl;
        return -1;
    }
}
// Hand a text prompt to the engine (for prompt-driven models).
// 1 = success, 0 = engine rejected it, -1 = invalid input or exception.
extern "C" __declspec(dllexport) int SetPrompt(ANSCENTER::ANSODBase** Handle, const char* textPrompt) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (textPrompt == nullptr) {
            std::cerr << "Error: Text prompt is null!" << std::endl;
            return -1;
        }
        if ((*Handle)->SetPrompt(std::string(textPrompt))) {
            return 1; // Success
        }
        std::cerr << "Error: Setting text prompt failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetPrompt!" << std::endl;
        return -1;
    }
}
// Select and enable/disable the object tracker.
// trackerType: 0 = BYTETRACK, 1 = UCMC, 2 = OCSORT; anything unrecognised
// falls back to BYTETRACK. Only the exact value 1 enables the tracker.
// 1 = success, 0 = engine rejected, -1 = null handle or exception.
extern "C" __declspec(dllexport) int SetTracker(ANSCENTER::ANSODBase** Handle, int trackerType, int enableTracker) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        ANSCENTER::TrackerType kind = ANSCENTER::TrackerType::BYTETRACK;
        if (trackerType == 1) {
            kind = ANSCENTER::TrackerType::UCMC;
        }
        else if (trackerType == 2) {
            kind = ANSCENTER::TrackerType::OCSORT;
        }
        const bool enable = (enableTracker == 1);
        if ((*Handle)->SetTracker(kind, enable)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting tracker failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetTracker!" << std::endl;
        return -1;
    }
}
// Apply a tracker-parameters string to the engine.
// 1 = success, 0 = engine rejected, -1 = invalid input or exception.
extern "C" __declspec(dllexport) int SetTrackerParameters(ANSCENTER::ANSODBase** Handle, const char* trackerParams) {
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (trackerParams == nullptr) {
            std::cerr << "Error: Tracker parameters string is null!" << std::endl;
            return -1;
        }
        if ((*Handle)->SetTrackerParameters(std::string(trackerParams))) {
            return 1; // Success
        }
        std::cerr << "Error: Setting tracker parameters failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetTrackerParameters!" << std::endl;
        return -1;
    }
}
// Set stabilization parameters (JSON string).
// All keys are optional — omit to keep current defaults.
// Example: {"ema_alpha":0.4, "class_consistency_frames":8, "hysteresis_enter":0.5, "hysteresis_keep":0.3}
// Apply stabilization parameters (JSON string) to the engine.
// 1 = applied, 0 = engine rejected, -1 = invalid input or exception.
extern "C" __declspec(dllexport) int SetStabilizationParameters(ANSCENTER::ANSODBase** Handle, const char* stabParams) {
    try {
        const bool badArgs =
            (Handle == nullptr || *Handle == nullptr || stabParams == nullptr);
        if (badArgs) {
            return -1;
        }
        return (*Handle)->SetStabilizationParameters(std::string(stabParams)) ? 1 : 0;
    }
    catch (...) {
        return -1;
    }
}