// dllmain.cpp : Defines the entry point for the DLL application.
// dllmain.cpp : Defines the entry point for the DLL application.
#include "pch.h"
#include "ANSODEngine.h"
#include "NV12PreprocessHelper.h" // tl_currentGpuFrame()
#include "ANSGpuFrameRegistry.h" // gpu_frame_lookup(cv::Mat*)
#include "engine/TRTEngineCache.h" // clearAll() on DLL_PROCESS_DETACH
#include "engine/EnginePoolManager.h" // clearAll() on DLL_PROCESS_DETACH
#include <climits> // INT_MIN
#include "ANSLicense.h" // ANS_DBG macro for DebugView
#include "ANSODVendorGate.h" // ansod_vendor_gate::IsNvidiaGpuAvailable()
// Process-wide flag: when true, all engines force single-GPU path (no pool, no idle timers).
// Defined here, declared extern in EngineBuildLoadNetwork.inl.
ANSODENGINE_API std::atomic<bool> g_forceNoPool{false}; // defaults to normal pooled behavior
// Canonical thread-local GpuFrameData* for NV12 zero-copy fast path.
// Exported so that ALL DLLs (ANSLPR, ANSFR, ANSOCR) share the SAME
// thread_local slot. Without this, each DLL gets its own inline copy
// of tl_currentGpuFrame()'s thread_local — ANSLPR sets its copy, but
// ANSODEngine's tryNV12() reads ANSODEngine's copy (nullptr), silently
// disabling NV12 zero-copy for ALPR, FR, and OCR inference.
extern "C" ANSODENGINE_API GpuFrameData** ANSODEngine_GetTlsGpuFrame() {
    // One slot per thread, initially empty. Callers read/write through the
    // returned pointer, so every DLL shares this single thread_local.
    thread_local GpuFrameData* s_slot = nullptr;
    return &s_slot;
}
#include "ANSYOLOOD.h"
#include "ANSODHUB.h"
#include "ANSCUSTOMPY.h"
#include "ANSTENSORRTOD.h"
#include "ANSTENSORRTCL.h"
#include "ANSOPENVINOCL.h"
#include "ANSOPENVINOOD.h"
#include "ANSYOLOV10RTOD.h"
#include "ANSYOLOV10OVOD.h"
#include "ANSYOLO12OD.h"
#include "ANSONNXCL.h"
#include "ANSONNXPOSE.h"
#include "ANSTENSORRTPOSE.h"
#include "ANSMotionDetector.h"
#include "ANSCUSTOMDetector.h"
#include "ANSONNXSEG.h"
#include "ANSTENSORRTSEG.h"
#include "ANSONNXOBB.h"
#include "ANSOVSEG.h"
#include "ANSFD.h"
#include "ANSANOMALIB.h"
#include "ANSPOSE.h"
#include "ANSSAM.h"
#include "Movienet.h"
#include "ANSSAM3.h"
#include "ANSONNXSAM3.h"
#include "ANSRTYOLO.h"
#include "ANSONNXYOLO.h"
#include <pipelines/metadata.h>
#include <models/input_data.h>
#include "utils/visualizer.hpp"
#include <turbojpeg.h>
#include <vector>
#include <map>
#include <string>
#include <unordered_set>
#include <mutex>
// DebugView: filter on "[ANSOD]" — gated by ANSCORE_DEBUGVIEW in ANSLicense.h.
// Handle registry with refcount — prevents use-after-free when
// ReleaseANSODHandle is called while inference is still running.
// refcount: starts at 1 on Register. AcquireODHandle increments,
// ReleaseODHandle decrements. Object is destroyed when refcount hits 0.
#include <unordered_map>
#include <condition_variable>
#include <atomic>
#include <cuda_runtime.h>
// ============================================================================
// Round-Robin GPU Assigner
//
// Each call to CreateANSODHandle assigns the next GPU in round-robin order.
// This distributes tasks evenly across all available GPUs:
// Task 1 → GPU 0, Task 2 → GPU 1, Task 3 → GPU 0, Task 4 → GPU 1, ...
//
// The GPU count is queried once (lazily) and cached. The atomic counter
// ensures thread-safe assignment even when multiple tasks are created
// concurrently.
// ============================================================================
static std::atomic<int> g_gpuRoundRobinCounter{0}; // monotonically increasing task counter; GPU index = counter % GPU count
static int g_numGPUs = -1; // -1 = not yet queried; written once by GetNumGPUs() under g_gpuCountMutex
static std::mutex g_gpuCountMutex; // guards the lazy one-time query of g_numGPUs
static int GetNumGPUs() {
std::lock_guard<std::mutex> lk(g_gpuCountMutex);
if (g_numGPUs < 0) {
// Defense-in-depth: all callers (AssignNextGPU, GetPoolMaxSlotsPerGpu,
// CheckGPUVRAM) are invoked inside factory-level NVIDIA_GPU guards,
// but skip the CUDA runtime entirely on AMD/Intel/CPU hardware so a
// future refactor cannot accidentally wake up cudart on non-NVIDIA.
// See ANSODVendorGate.h.
if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) {
g_numGPUs = 1; // report a single "virtual" slot so round-robin is a no-op
std::cout << "Info [GPU]: non-NVIDIA hardware — CUDA probe skipped, pool slots=1"
<< std::endl;
return g_numGPUs;
}
2026-03-28 16:54:11 +11:00
// Use yield mode before any CUDA call to avoid busy-wait spinning
// that falsely reports 100% GPU utilization in nvidia-smi.
cudaSetDeviceFlags(cudaDeviceScheduleYield);
cudaGetDeviceCount(&g_numGPUs);
if (g_numGPUs <= 0) g_numGPUs = 1; // fallback to GPU 0
std::cout << "Info [GPU]: Detected " << g_numGPUs << " CUDA GPU(s) for round-robin assignment" << std::endl;
}
return g_numGPUs;
}
// Determine maxSlotsPerGpu based on GPU topology:
// 1 GPU → 1 (single slot, no round-robin needed)
// >1 GPU, VRAM<24GB → 1 (round-robin: 1 slot per GPU)
// >1 GPU, VRAM≥24GB → -1 (elastic: on-demand slot growth)
// Result is cached after the first query.
static int GetPoolMaxSlotsPerGpu() {
    static std::mutex s_mutex;
    static int s_cached = INT_MIN; // INT_MIN == "not decided yet"
    std::lock_guard<std::mutex> lk(s_mutex);
    if (s_cached != INT_MIN)
        return s_cached;
    // Non-NVIDIA short-circuit: no TRT engines will be built, so there is no
    // pool to size and the cudaSetDevice/cudaMemGetInfo probes below must not
    // run. Callers today are already inside NVIDIA_GPU guards — safety net.
    if (!ansod_vendor_gate::IsNvidiaGpuAvailable())
        return s_cached = 1;
    const int gpuCount = GetNumGPUs();
    if (gpuCount <= 1) {
        std::cout << "Info [GPU]: Single GPU — pool mode: 1 slot, no round-robin" << std::endl;
        return s_cached = 1;
    }
    // Several GPUs — inspect VRAM on device 0 (GPUs are assumed same spec).
    constexpr size_t kLargeVramBytes = 24ULL * 1024 * 1024 * 1024; // 24 GB
    size_t freeMem = 0, totalMem = 0;
    cudaSetDevice(0);
    cudaMemGetInfo(&freeMem, &totalMem);
    if (totalMem >= kLargeVramBytes) {
        std::cout << "Info [GPU]: " << gpuCount << " GPUs, VRAM >= 24 GB — pool mode: elastic" << std::endl;
        return s_cached = -1; // elastic: on-demand slot growth
    }
    std::cout << "Info [GPU]: " << gpuCount << " GPUs, VRAM < 24 GB — pool mode: round-robin" << std::endl;
    return s_cached = 1; // round-robin: one slot per GPU
}
// Returns the next GPU index in round-robin order.
// Thread-safe: uses atomic fetch_add.
static int AssignNextGPU() {
// Non-NVIDIA short-circuit: no CUDA devices, return 0 and skip the
// "assigning task" log to avoid polluting AMD/Intel/CPU logs.
if (!ansod_vendor_gate::IsNvidiaGpuAvailable()) return 0;
2026-03-28 16:54:11 +11:00
const int numGPUs = GetNumGPUs();
const int idx = g_gpuRoundRobinCounter.fetch_add(1);
const int gpuIndex = idx % numGPUs;
std::cout << "Info [GPU]: Assigning task to GPU " << gpuIndex
<< " (task #" << (idx + 1) << ", " << numGPUs << " GPU(s) available)" << std::endl;
return gpuIndex;
}
// Check if a GPU has enough free VRAM for a new engine.
// Returns true if sufficient, false if not.
// minFreeBytes: minimum free VRAM required (default 512 MiB safety margin).
static bool CheckGPUVRAM(int gpuIndex, size_t minFreeBytes = 512ULL * 1024 * 1024) {
    // Non-NVIDIA short-circuit: no CUDA devices present — report "OK"
    // silently so the TRT pool path is a no-op on AMD/Intel/CPU and the log
    // isn't polluted with spurious 0-byte VRAM warnings.
    if (!ansod_vendor_gate::IsNvidiaGpuAvailable())
        return true;
    // Query the target GPU, then restore the caller's device selection so
    // this probe leaves no side-effects on the calling thread.
    int callerDevice = 0;
    cudaGetDevice(&callerDevice);
    cudaSetDevice(gpuIndex);
    size_t freeBytes = 0, totalBytes = 0;
    cudaMemGetInfo(&freeBytes, &totalBytes);
    cudaSetDevice(callerDevice);
    const bool enough = (freeBytes >= minFreeBytes);
    if (!enough) {
        std::cout << "Warning [GPU]: GPU " << gpuIndex << " has only "
                  << (freeBytes / (1024 * 1024)) << " MiB free (need "
                  << (minFreeBytes / (1024 * 1024)) << " MiB). Task creation may fail." << std::endl;
    } else {
        std::cout << "Info [GPU]: GPU " << gpuIndex << " has "
                  << (freeBytes / (1024 * 1024)) << " MiB free / "
                  << (totalBytes / (1024 * 1024)) << " MiB total" << std::endl;
    }
    return enough;
}
// destructionStarted: set by the first Unregister caller; blocks new Acquires
// and makes subsequent Unregister calls return false without deleting.
// Prevents double-free when Release is raced on the same handle.
struct ODEntry { int refcount; bool destructionStarted; }; // entry starts as {1, false} on Register
// Function-local-static map of every live engine handle to its refcount entry.
static std::unordered_map<ANSCENTER::ANSODBase*, ODEntry>& ODHandleRegistry() {
    static std::unordered_map<ANSCENTER::ANSODBase*, ODEntry> s_registry;
    return s_registry;
}
static std::mutex& ODHandleRegistryMutex() {
    static std::mutex s_mutex; // guards all access to ODHandleRegistry()
    return s_mutex;
}
static std::condition_variable& ODHandleRegistryCV() {
    static std::condition_variable s_cv; // signaled when a handle's refcount drops to zero
    return s_cv;
}
// Enter a freshly-created handle into the registry with refcount 1.
static void RegisterODHandle(ANSCENTER::ANSODBase* h) {
    const std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    auto& registry = ODHandleRegistry();
    registry[h] = ODEntry{ 1, false }; // creation reference; destruction not yet started
    ANS_DBG("ANSOD","Register: handle=%p (uint=%llu) registrySize=%zu",
            (void*)h, (unsigned long long)(uintptr_t)h, registry.size());
}
// Acquire a handle for use (increment refcount). Returns the handle
// if valid, nullptr if already released or being destroyed.
static ANSCENTER::ANSODBase* AcquireODHandle(ANSCENTER::ANSODBase* h) {
    const std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    auto& registry = ODHandleRegistry();
    const auto it = registry.find(h);
    if (it == registry.end()) {
        // Unknown pointer — dump the whole registry to DebugView to help
        // diagnose stale/recycled handle addresses passed in by the caller.
        ANS_DBG("ANSOD","Acquire FAIL: handle=%p (uint=%llu) NOT in registry. registrySize=%zu",
                (void*)h, (unsigned long long)(uintptr_t)h, registry.size());
        size_t i = 0;
        for (auto& kv : registry) {
            ANS_DBG("ANSOD"," registry[%zu] = %p (uint=%llu) refcount=%d destructionStarted=%d",
                    i++, (void*)kv.first, (unsigned long long)(uintptr_t)kv.first,
                    kv.second.refcount, kv.second.destructionStarted ? 1 : 0);
        }
        return nullptr;
    }
    if (it->second.destructionStarted) {
        // A racing Unregister already claimed this handle — refuse new uses.
        ANS_DBG("ANSOD","Acquire FAIL: handle=%p is being destroyed (destructionStarted=true)", (void*)h);
        return nullptr;
    }
    ++it->second.refcount;
    ANS_DBG("ANSOD","Acquire OK: handle=%p refcount=%d", (void*)h, it->second.refcount);
    return h;
}
// Release a use of the handle (decrement refcount). Only signals the CV;
// the object is always deleted by Unregister, never by a stray ref-drop.
// Drop one use-reference. Never deletes: when the count reaches zero it only
// wakes UnregisterODHandle(), which owns the actual destruction.
// (Return value is always false — kept for signature compatibility.)
static bool ReleaseODHandleRef(ANSCENTER::ANSODBase* h) {
    const std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
    const auto it = ODHandleRegistry().find(h);
    if (it != ODHandleRegistry().end()) {
        if (--it->second.refcount <= 0)
            ODHandleRegistryCV().notify_all(); // last in-flight use finished
    }
    return false;
}
// Unregister and wait for all in-flight uses to finish.
// First caller takes ownership of destruction; subsequent calls return false.
// Claim destruction of `h`: the first caller wins, waits (<=30 s) for every
// in-flight use to drain, erases the registry entry, and returns true
// ("caller may delete"). Later callers and unknown handles get false.
static bool UnregisterODHandle(ANSCENTER::ANSODBase* h) {
    std::unique_lock<std::mutex> lk(ODHandleRegistryMutex());
    auto it = ODHandleRegistry().find(h);
    if (it == ODHandleRegistry().end()) {
        ANS_DBG("ANSOD","Unregister: handle=%p NOT in registry (already gone)", (void*)h);
        return false;
    }
    if (it->second.destructionStarted) {
        ANS_DBG("ANSOD","Unregister: handle=%p already being destroyed by another thread, returning false", (void*)h);
        return false; // losing racer must not double-delete
    }
    ANS_DBG("ANSOD","Unregister: handle=%p starting (refcount before=%d)", (void*)h, it->second.refcount);
    it->second.destructionStarted = true; // blocks any further Acquire
    it->second.refcount--;                // drop the creation reference
    // Block until every in-flight use releases its reference; the 30-second
    // timeout is a safety net against a leaked reference deadlocking us.
    const bool drained = ODHandleRegistryCV().wait_for(lk, std::chrono::seconds(30), [&]() {
        auto it2 = ODHandleRegistry().find(h);
        return it2 == ODHandleRegistry().end() || it2->second.refcount <= 0;
    });
    if (!drained) {
        ANS_DBG("ANSOD","WARNING: Unregister timed out waiting for in-flight operations on handle=%p", (void*)h);
        OutputDebugStringA("WARNING: UnregisterODHandle timed out waiting for in-flight inference\n");
    }
    ODHandleRegistry().erase(h);
    return true; // safe to destroy now
}
// RAII guard — ensures ReleaseODHandleRef is always called, preventing
// refcount leaks that would cause UnregisterODHandle to deadlock.
class ODHandleGuard {
    ANSCENTER::ANSODBase* engine; // borrowed handle; may be nullptr when Acquire failed
public:
    // Takes the (possibly null) result of AcquireODHandle.
    explicit ODHandleGuard(ANSCENTER::ANSODBase* e) : engine(e) {}
    // Drops the use-reference taken by AcquireODHandle (no-op when null).
    ~ODHandleGuard() { if (engine) ReleaseODHandleRef(engine); }
    ANSCENTER::ANSODBase* get() const { return engine; }
    // True when a valid handle is held.
    explicit operator bool() const { return engine != nullptr; }
    // Non-copyable: exactly one release per acquire.
    ODHandleGuard(const ODHandleGuard&) = delete;
    ODHandleGuard& operator=(const ODHandleGuard&) = delete;
};
BOOL APIENTRY DllMain(HMODULE hModule,
                      DWORD ul_reason_for_call,
                      LPVOID lpReserved)
{
    switch (ul_reason_for_call)
    {
    case DLL_PROCESS_ATTACH:
    {
        // Pin this DLL so it is never unmapped while idle-timer or CUDA
        // threads still execute its code. During LabVIEW shutdown the CLR/COM
        // teardown can unload DLLs before all threads exit → crash at
        // unmapped code.
        //
        // CRITICAL: do NOT call CheckHardwareInformation() or
        // ansod_vendor_gate::IsNvidiaGpuAvailable() from here. DllMain runs
        // under the OS loader lock (LdrpLoaderLock); CheckHardwareInformation
        // touches hwinfo → DXGI / WMI / COM, which internally call
        // LoadLibrary — a classic loader-lock deadlock (observed as a full
        // hang of the ANSLPR-UnitTest stress test). The vendor gate lazily
        // initialises on the first real call from worker code, which runs
        // with the loader lock released.
        HMODULE selfModule = nullptr;
        GetModuleHandleExW(
            GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
            GET_MODULE_HANDLE_EX_FLAG_PIN,
            reinterpret_cast<LPCWSTR>(&DllMain),
            &selfModule);
        break;
    }
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
        break;
    case DLL_PROCESS_DETACH:
        if (lpReserved != nullptr) {
            // Process termination via ExitProcess: the OS already killed all
            // worker threads (idle timers, CUDA threads, ...). Calling
            // ~Engine() → stopIdleTimer() → thread::join() on a dead thread
            // is undefined behavior → std::terminate → abort. Flag atexit
            // destructors (which still run after DllMain returns) to skip
            // thread joins and CUDA/TRT cleanup.
            g_processExiting().store(true, std::memory_order_relaxed);
            break;
        }
        // Dynamic FreeLibrary — threads are still alive, safe to clean up.
        // Without this, idle-timer threads keep the process alive forever.
        try {
            std::vector<ANSCENTER::ANSODBase*> leakedHandles;
            {
                std::lock_guard<std::mutex> lk(ODHandleRegistryMutex());
                for (auto& [h, _] : ODHandleRegistry())
                    leakedHandles.push_back(h);
                ODHandleRegistry().clear();
            }
            for (auto* h : leakedHandles) {
                try { h->Destroy(); delete h; } catch (...) {}
            }
            try { EnginePoolManager<float>::instance().clearAll(); } catch (...) {}
            try { TRTEngineCache::instance().clearAll(); } catch (...) {}
        } catch (...) {}
        break;
    }
    return TRUE;
}
// DetectionType values accepted by the factory functions below:
// CLASSIFICATION = 0,
// DETECTION = 1,
// SEGMENTATION = 2,
// FACEDETECTOR = 3,
// FACERECOGNIZER = 4,
// LICENSEPLATE = 5,
// TEXTSCENSE = 6,
// KEYPOINT = 7
// External APIs
extern "C" ANSODENGINE_API std::string CreateANSODHandle(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
const char* modelFilePath,
const char* modelFileZipPassword,
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
2026-04-09 08:09:02 +10:00
int autoDetectEngine,//-1: CPU, 0: GPU; 1 auto detection
2026-03-28 16:54:11 +11:00
int modelType,
2026-04-09 08:09:02 +10:00
int detectionType,
int loadEngineOnCreation)
2026-03-28 16:54:11 +11:00
{
ANS_DBG("ANSOD","CreateANSODHandle: HandlePtr=%p, *Handle(in)=%p, modelType=%d, detectionType=%d, autoDetectEngine=%d, loadOnCreate=%d, modelPath=%s",
(void*)Handle, (void*)(Handle ? *Handle : nullptr),
modelType, detectionType, autoDetectEngine, loadEngineOnCreation,
modelFilePath ? modelFilePath : "(null)");
if (Handle == nullptr) { ANS_DBG("ANSOD","CreateANSODHandle FAIL: Handle is null"); return ""; }
2026-03-28 16:54:11 +11:00
// NOTE: We intentionally do NOT destroy any existing *Handle here.
// LabVIEW reuses DLL parameter buffer addresses, so *Handle may point
// to ANOTHER task's live engine — not this task's old engine.
// The caller must use ReleaseANSODHandle() explicitly to clean up.
// (See ansod_debug.log analysis: Handle** addresses are recycled by
// LabVIEW across different tasks, causing the old cleanup code to
// destroy active engines belonging to other tasks.)
std::string labelMap = "";
bool _loadEngineOnCreation = false;
if (loadEngineOnCreation == 1) {
_loadEngineOnCreation = true;
}
else {
_loadEngineOnCreation = false;
}
labelMap.clear();
ANSCENTER::ModelConfig modelConfig;
if (detectionScoreThreshold <= 0)modelConfig.detectionScoreThreshold = 0.5;
else modelConfig.detectionScoreThreshold = detectionScoreThreshold;
if (modelConfThreshold <= 0)modelConfig.modelConfThreshold = 0.5;
else modelConfig.modelConfThreshold = modelConfThreshold;
if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
else modelConfig.modelMNSThreshold = modelMNSThreshold;
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
else modelConfig.autoGPUDetection = false;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
2026-04-09 08:09:02 +10:00
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
2026-03-28 16:54:11 +11:00
2026-04-21 09:26:02 +10:00
// Route detection / pose / segmentation / OBB / classification to the best
// available backend: prefer TensorRT on NVIDIA, otherwise the matching ONNX
// handler. Unlisted modelType values are left untouched for the switch below.
// See CreateANSODHandleEx for the full rationale — three correctness bugs
// were fixed in that dispatcher and must be kept in sync across copies.
const bool onNvidia = (engineType == ANSCENTER::EngineType::NVIDIA_GPU);
switch (modelType) {
// ── Detection family: YOLOv8 / V11 / V12 / generic TRT / V10-RTOD ──
case 3: // YOLOV8 / YOLOV11
case 4: // generic TensorRT
case 14: // YOLOv10RTOD (TRT end-to-end NMS)
case 17: // YOLOV12
modelType = onNvidia ? 31 /* RTYOLO */ : 30 /* ONNXYOLO */;
break;
// ── Pose ─────────────────────────────────────────────────────────────
case 21: // ONNXPOSE
case 22: // RTPOSE
modelType = onNvidia ? 22 /* RTPOSE */ : 21 /* ONNXPOSE */;
break;
// ── Segmentation ─────────────────────────────────────────────────────
case 23: // ONNXSEG
case 24: // RTSEG
modelType = onNvidia ? 24 /* RTSEG */ : 23 /* ONNXSEG */;
break;
// ── OBB / Classification (ONNX-only today — leave as-is) ─────────────
case 20: // ONNXCL
case 25: // ONNXOBB
break;
default:
// Any other modelType is handled directly by the switch below.
break;
2026-03-28 16:54:11 +11:00
}
switch (detectionType) {
case 0:
modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
break;
case 1:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
case 2:
modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
break;
case 3:
modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
break;
case 4:
modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
break;
case 5:
modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
break;
case 6:
modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
break;
case 7:
modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
break;
default:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
}
switch (modelType) {
case 0: //TENSORFLOW =0
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
break;
case 1: //YOLOV4 = 1
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
break;
case 2://YOLOV5 = 2
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
break;
case 3: //YOLOV8 = 3,
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
break;
case 4: //TENSORRT = 4,
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTCL();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
}
else {// default is detection
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
case 5: //OPENVINO = 5
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
(*Handle) = new ANSCENTER::OPENVINOCL();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
(*Handle) = new ANSCENTER::ANSOVSEG();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
case 6: //FACEDETECT = 6
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
case 10: //ANOMALIB=10
(*Handle) = new ANSCENTER::ANSANOMALIB();
modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
break;
case 11: //OPENPOSE=11
(*Handle) = new ANSCENTER::ANSPOSE();
modelConfig.modelType = ANSCENTER::ModelType::POSE;
break;
case 12: //SAM=12
(*Handle) = new ANSCENTER::ANSSAM();
modelConfig.modelType = ANSCENTER::ModelType::SAM;
break;
case 13: //ODHUBMODEL=13
(*Handle) = new ANSCENTER::ODHUBAPI();
modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
break;
case 14: //TensorRT for Object Detection Yolov10
// Upstream modelType rewrite (see top of each factory) already
// redirects 14 → 31 (RTYOLO) on NVIDIA or 14 → 30 (ONNXYOLO) on
// non-NVIDIA, so this branch is unreachable in practice. Keep
// an explicit vendor gate as defense-in-depth against future
// refactors — ANSYOLOV10RTOD is a TensorRT class and must never
// be constructed on AMD/Intel/CPU hardware.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
} else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
2026-03-28 16:54:11 +11:00
break;
case 15: //OpenVino for Object Detection Yolov10
(*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
break;
case 16: //Custom detector
(*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
break;
case 17: //Yolo V12
(*Handle) = new ANSCENTER::YOLO12OD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
break;
case 18: //Custom script model
(*Handle) = new ANSCENTER::ANSCUSTOMPY();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
break;
case 19: //Motion Detector
(*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
break;
case 20: //ONNXCL
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
break;
case 21: //ONNXPOSE
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
break;
case 22: //TENSORRTPOSE
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
}
else {
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
}
break;
case 23: //ONNXSEG
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
break;
case 24: //RTSEG
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
case 25: //ONNXOBB
(*Handle) = new ANSCENTER::ANSONNXOBB();
modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
break;
//case 26: //RTOBB
// (*Handle) = new ANSCENTER::ANSTENSORRTOBB();
// modelConfig.modelType = ANSCENTER::ModelType::RTOBB;
// break;
case 27: //MOVIENET
(*Handle) = new ANSCENTER::ANSMOVIENET();
modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
break;
case 28: //ONNXSAM3
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
break;
case 29: //RTSAM3
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSSAM3();
modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
}
break;
case 30: //ONNXYOLO
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
break;
case 31: //RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSRTYOLO();
modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
}
else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
default:
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
}
if (*Handle == nullptr) {
ANS_DBG("ANSOD","CreateANSODHandle FAIL: allocation produced null handle (modelType=%d)", modelType);
2026-03-28 16:54:11 +11:00
return labelMap;
}
else {
ANS_DBG("ANSOD","CreateANSODHandle: allocated handle=%p (uint=%llu) modelType=%d, calling Initialize...",
(void*)*Handle, (unsigned long long)(uintptr_t)*Handle, (int)modelConfig.modelType);
2026-04-09 08:09:02 +10:00
// CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
// On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
// return bogus 0-byte VRAM and pollute the log with false warnings.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
2026-03-28 16:54:11 +11:00
RegisterODHandle(*Handle);
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
ANS_DBG("ANSOD","CreateANSODHandle OK: handle=%p initResult=%d labelMapLen=%zu",
(void*)*Handle, loadResult ? 1 : 0, labelMap.size());
2026-03-28 16:54:11 +11:00
return labelMap;
}
}
extern "C" ANSODENGINE_API int CreateANSODHandleEx(ANSCENTER::ANSODBase** Handle,
const char* licenseKey,
const char* modelFilePath,
const char* modelFileZipPassword,
float detectionScoreThreshold,
float modelConfThreshold,
float modelMNSThreshold,
int autoDetectEngine,
int modelType,
int detectionType,
std::string& labelMap,
int loadEngineOnCreation)
{
ANS_DBG("ANSOD","CreateANSODHandleEx: HandlePtr=%p, *Handle(in)=%p, modelType=%d, detectionType=%d, autoDetectEngine=%d, loadOnCreate=%d, modelPath=%s",
(void*)Handle, (void*)(Handle ? *Handle : nullptr),
modelType, detectionType, autoDetectEngine, loadEngineOnCreation,
modelFilePath ? modelFilePath : "(null)");
if (Handle == nullptr) { ANS_DBG("ANSOD","CreateANSODHandleEx FAIL: Handle is null"); return -1; } // invalid modelType return
2026-04-09 08:09:02 +10:00
bool _loadEngineOnCreation = false;
if (loadEngineOnCreation == 1) {
_loadEngineOnCreation = true;
}
else {
_loadEngineOnCreation = false;
}
labelMap.clear();
ANSCENTER::ModelConfig modelConfig;
if (detectionScoreThreshold <= 0)modelConfig.detectionScoreThreshold = 0.5;
else modelConfig.detectionScoreThreshold = detectionScoreThreshold;
if (modelConfThreshold <= 0)modelConfig.modelConfThreshold = 0.5;
else modelConfig.modelConfThreshold = modelConfThreshold;
if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
else modelConfig.modelMNSThreshold = modelMNSThreshold;
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
else modelConfig.autoGPUDetection = false;
ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU
2026-04-21 09:26:02 +10:00
// Route detection / pose / segmentation / OBB / classification to the best
// available backend: prefer TensorRT on NVIDIA, otherwise the matching ONNX
// handler. Unlisted modelType values are left untouched for the switch below.
//
// Previous revisions of this block had two correctness bugs:
// (1) modelType == 3 / 17 (YoloV8/V11/V12 detection) was hard-wired to
// ONNXYOLO even on NVIDIA — bypassing the TensorRT path entirely and
// duplicating VRAM when multiple handles loaded the same .onnx (ORT
// has no EnginePoolManager dedupe).
// (2) modelType == 20 / 21 / 23 / 25 (ONNX CLS / POSE / SEG / OBB) was
// rewritten to 30 (ONNXYOLO = detection), making the dedicated
// case 20 / 21 / 23 / 25 handlers unreachable dead code. A user
// passing modelType=20 for classification ended up with a YOLO head.
// (3) modelType == 22 / 24 (TRT pose / TRT seg) on a non-NVIDIA box fell
// back to ONNXYOLO instead of the correct ONNXPOSE / ONNXSEG handler.
const bool onNvidia = (engineType == ANSCENTER::EngineType::NVIDIA_GPU);
switch (modelType) {
// ── Detection family: YOLOv8 / V11 / V12 / generic TRT / V10-RTOD ──
case 3: // YOLOV8 / YOLOV11
case 4: // generic TensorRT
case 14: // YOLOv10RTOD (TRT end-to-end NMS)
case 17: // YOLOV12
modelType = onNvidia ? 31 /* RTYOLO */ : 30 /* ONNXYOLO */;
break;
// ── Pose ─────────────────────────────────────────────────────────────
case 21: // ONNXPOSE
case 22: // RTPOSE
modelType = onNvidia ? 22 /* RTPOSE */ : 21 /* ONNXPOSE */;
break;
// ── Segmentation ─────────────────────────────────────────────────────
case 23: // ONNXSEG
case 24: // RTSEG
modelType = onNvidia ? 24 /* RTSEG */ : 23 /* ONNXSEG */;
break;
// ── Oriented Bounding Box (ONNX-only today) ──────────────────────────
case 25: // ONNXOBB — no TRT variant; leave as-is
break;
// ── Classification (ONNX-only in this dispatcher) ────────────────────
case 20: // ONNXCL — no TRT variant; leave as-is
break;
default:
// Any other modelType is handled directly by the switch below
// (TENSORFLOW, YOLOV4, YOLOV5, FACEDETECT, FACERECOGNIZE, ALPR,
// OCR, ANOMALIB, POSE, SAM, ODHUBMODEL, CUSTOMDETECTOR, CUSTOMPY,
// MOTIONDETECTOR, MOVIENET, ONNXSAM3, RTSAM3, ONNXYOLO=30,
// RTYOLO=31). Do nothing — keep user's value.
break;
2026-04-09 08:09:02 +10:00
}
// returnModelType will be set after the switch to reflect the actual
// model class that was instantiated (e.g. RTYOLO→ONNXYOLO on AMD).
int returnModelType = modelType;
switch (detectionType) {
case 0:
modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
break;
case 1:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
case 2:
modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
break;
case 3:
modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
break;
case 4:
modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
break;
case 5:
modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
break;
case 6:
modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
break;
case 7:
modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
break;
default:
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
break;
}
switch (modelType) {
case 0: //TENSORFLOW =0
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
break;
case 1: //YOLOV4 = 1
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
break;
case 2://YOLOV5 = 2
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
break;
case 3: //YOLOV8 = 3,
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
break;
case 4: //TENSORRT = 4,
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTCL();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
}
else {// default is detection
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTOD();
modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
}
else {
(*Handle) = new ANSCENTER::YOLOOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
}
break;
}
case 5: //OPENVINO = 5
if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
(*Handle) = new ANSCENTER::OPENVINOCL();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
(*Handle) = new ANSCENTER::ANSOVSEG();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
else {
(*Handle) = new ANSCENTER::OPENVINOOD();
modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
break;
}
case 6: //FACEDETECT = 6
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
case 10: //ANOMALIB=10
(*Handle) = new ANSCENTER::ANSANOMALIB();
modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
break;
case 11: //OPENPOSE=11
(*Handle) = new ANSCENTER::ANSPOSE();
modelConfig.modelType = ANSCENTER::ModelType::POSE;
break;
case 12: //SAM=12
(*Handle) = new ANSCENTER::ANSSAM();
modelConfig.modelType = ANSCENTER::ModelType::SAM;
break;
case 13: //ODHUBMODEL=13
(*Handle) = new ANSCENTER::ODHUBAPI();
modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
break;
case 14: //TensorRT for Object Detection Yolov10
// Upstream modelType rewrite (see top of each factory) already
// redirects 14 → 31 (RTYOLO) on NVIDIA or 14 → 30 (ONNXYOLO) on
// non-NVIDIA, so this branch is unreachable in practice. Keep
// an explicit vendor gate as defense-in-depth against future
// refactors — ANSYOLOV10RTOD is a TensorRT class and must never
// be constructed on AMD/Intel/CPU hardware.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
} else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
2026-04-09 08:09:02 +10:00
break;
case 15: //OpenVino for Object Detection Yolov10
(*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
break;
case 16: //Custom detector
(*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
break;
case 17: //Yolo V12
(*Handle) = new ANSCENTER::YOLO12OD();
modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
break;
case 18: //Custom script model
(*Handle) = new ANSCENTER::ANSCUSTOMPY();
modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
break;
case 19: //Motion Detector
(*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
break;
case 20: //ONNXCL
(*Handle) = new ANSCENTER::ANSONNXCL();
modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
break;
case 21: //ONNXPOSE
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
break;
case 22: //TENSORRTPOSE
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
}
else {
(*Handle) = new ANSCENTER::ANSONNXPOSE();
modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
}
break;
case 23: //ONNXSEG
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
break;
case 24: //RTSEG
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::TENSORRTSEG();
modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSEG();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
}
break;
case 25: //ONNXOBB
(*Handle) = new ANSCENTER::ANSONNXOBB();
modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
break;
//case 26: //RTOBB
// (*Handle) = new ANSCENTER::ANSTENSORRTOBB();
// modelConfig.modelType = ANSCENTER::ModelType::RTOBB;
// break;
case 27: //MOVIENET
(*Handle) = new ANSCENTER::ANSMOVIENET();
modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
break;
case 28: //ONNXSAM3
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
break;
case 29: //RTSAM3
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSSAM3();
modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
}
else {
(*Handle) = new ANSCENTER::ANSONNXSAM3();
modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
}
break;
case 30: //ONNXYOLO
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
break;
case 31: //RTYOLO
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
(*Handle) = new ANSCENTER::ANSRTYOLO();
modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
}
else {
(*Handle) = new ANSCENTER::ANSONNXYOLO();
modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
}
break;
default:
(*Handle) = new ANSCENTER::ANSFD();
modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
break;
}
// Update returnModelType to reflect the actual class that was created.
// The switch may have fallen back (e.g. RTYOLO→ONNXYOLO on non-NVIDIA).
returnModelType = static_cast<int>(modelConfig.modelType);
if (*Handle == nullptr) {
ANS_DBG("ANSOD","CreateANSODHandleEx FAIL: allocation produced null handle (modelType=%d)", modelType);
2026-04-09 08:09:02 +10:00
labelMap ="";
return returnModelType;
}
else {
ANS_DBG("ANSOD","CreateANSODHandleEx: allocated handle=%p (uint=%llu) modelType=%d, calling Initialize...",
(void*)*Handle, (unsigned long long)(uintptr_t)*Handle, (int)modelConfig.modelType);
2026-04-09 08:09:02 +10:00
// CUDA round-robin + VRAM check — only relevant for NVIDIA GPUs.
// On AMD/DirectML and OpenVINO these calls hit stub CUDA APIs that
// return bogus 0-byte VRAM and pollute the log with false warnings.
if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
const int assignedGPU = AssignNextGPU();
modelConfig.gpuDeviceIndex = assignedGPU;
CheckGPUVRAM(assignedGPU);
(*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
}
RegisterODHandle(*Handle);
(*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
bool loadResult = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, labelMap);
ANS_DBG("ANSOD","CreateANSODHandleEx OK: handle=%p initResult=%d returnModelType=%d labelMapLen=%zu",
(void*)*Handle, loadResult ? 1 : 0, returnModelType, labelMap.size());
2026-04-09 08:09:02 +10:00
return returnModelType;
}
}
//// For LabVIEW API
extern "C" ANSODENGINE_API int CreateANSODHandle_LV(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectorType, int loadEngineOnCreation, LStrHandle labelMap) {
    // LabVIEW-facing wrapper around CreateANSODHandleEx.
    // Creates an engine handle, then copies the label map (a std::string on
    // the C++ side) into the caller-supplied LabVIEW string handle.
    // Returns the (possibly remapped) model type on success, -1 on failure.
    ANS_DBG("ANSOD","CreateANSODHandle_LV: HandlePtr=%p, *Handle(in)=%p, modelType=%d, detectorType=%d, autoDetectEngine=%d, loadOnCreate=%d, modelPath=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr),
        modelType, detectorType, autoDetectEngine, loadEngineOnCreation,
        modelFilePath ? modelFilePath : "(null)");
    try {
        std::string lbMap;
        int returnModelType = CreateANSODHandleEx(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectorType, lbMap, loadEngineOnCreation);
        // CreateANSODHandleEx returns -1 only when Handle is nullptr.
        // Check that instead of lbMap.empty() — labelMap can be legitimately
        // empty when loadEngineOnCreation==0 or the model has no class file.
        if (returnModelType < 0 || Handle == nullptr || *Handle == nullptr) return -1;
        // FIX: guard against an unwired/null LabVIEW string handle. Both
        // DSSetHandleSize(labelMap, ...) and (*labelMap)->cnt dereference it,
        // so a null handle previously crashed inside the LabVIEW memory
        // manager. The engine itself was created successfully, so report
        // success and simply skip the label-map copy.
        if (labelMap == nullptr) return returnModelType;
        int size = static_cast<int>(lbMap.length());
        if (size > 0) {
            // Resize the LabVIEW handle to <length prefix> + payload bytes.
            MgErr error = DSSetHandleSize(labelMap, sizeof(int32) + size * sizeof(uChar));
            if (error == noErr) {
                (*labelMap)->cnt = size;
                memcpy((*labelMap)->str, lbMap.c_str(), size);
            }
            else return -1;
        }
        else {
            // Empty label map — set LabVIEW string to empty
            MgErr error = DSSetHandleSize(labelMap, sizeof(int32));
            if (error == noErr) (*labelMap)->cnt = 0;
        }
        return returnModelType;
    }
    catch (...) {
        return -1;
    }
}
extern "C" __declspec(dllexport) int LoadModelFromFolder(ANSCENTER::ANSODBase** Handle, const char* licenseKey,
    const char* modelName,
    const char* className,
    float detectionScoreThreshold,
    float modelConfThreshold,
    float modelMNSThreshold,
    int autoDetectEngine,
    int modelType,
    int detectionType,
    int loadEngineOnCreation,
    const char* modelFolder,
    std::string& labelMap)
{
    // Factory + loader: allocates the backend-appropriate engine class for the
    // requested modelType/detectionType, assigns a GPU (NVIDIA only), then
    // loads the model files from an already-extracted folder.
    // Returns 1 on success, 0 on failure / null Handle pointer / exception,
    // -1 when allocation produced a null handle.
    ANS_DBG("ANSOD","LoadModelFromFolder: HandlePtr=%p, *Handle(in)=%p, modelType=%d, detectionType=%d, autoDetectEngine=%d, modelFolder=%s, modelName=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr),
        modelType, detectionType, autoDetectEngine,
        modelFolder ? modelFolder : "(null)", modelName ? modelName : "(null)");
    try
    {
        if (Handle == nullptr) { ANS_DBG("ANSOD","LoadModelFromFolder FAIL: Handle is null"); return 0; }

        labelMap.clear();
        ANSCENTER::ModelConfig modelConfig;
        const bool _loadEngineOnCreation = (loadEngineOnCreation == 1);
        // Thresholds: fall back to the project defaults when the caller
        // passes a non-positive value.
        if (detectionScoreThreshold <= 0)modelConfig.detectionScoreThreshold = 0.5;
        else modelConfig.detectionScoreThreshold = detectionScoreThreshold;
        if (modelConfThreshold <= 0)modelConfig.modelConfThreshold = 0.5;
        else modelConfig.modelConfThreshold = modelConfThreshold;
        if (modelMNSThreshold <= 0)modelConfig.modelMNSThreshold = 0.45;
        else modelConfig.modelMNSThreshold = modelMNSThreshold;

        ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();

        modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
        if (autoDetectEngine == 1)modelConfig.autoGPUDetection = true;
        else modelConfig.autoGPUDetection = false;

        if (autoDetectEngine==-1)engineType=ANSCENTER::EngineType::CPU;// We force to use CPU

        // Route detection / pose / segmentation / OBB / classification to the best
        // available backend: prefer TensorRT on NVIDIA, otherwise the matching ONNX
        // handler. Unlisted modelType values are left untouched for the switch below.
        // See CreateANSODHandleEx for the full rationale — three correctness bugs
        // were fixed in that dispatcher and must be kept in sync across copies.
        {
            const bool onNvidia = (engineType == ANSCENTER::EngineType::NVIDIA_GPU);
            switch (modelType) {
            // ── Detection family: YOLOv8 / V11 / V12 / generic TRT / V10-RTOD ──
            case 3:  // YOLOV8 / YOLOV11
            case 4:  // generic TensorRT
            case 14: // YOLOv10RTOD (TRT end-to-end NMS)
            case 17: // YOLOV12
                modelType = onNvidia ? 31 /* RTYOLO */ : 30 /* ONNXYOLO */;
                break;
            // ── Pose ─────────────────────────────────────────────────────────
            case 21: // ONNXPOSE
            case 22: // RTPOSE
                modelType = onNvidia ? 22 /* RTPOSE */ : 21 /* ONNXPOSE */;
                break;
            // ── Segmentation ─────────────────────────────────────────────────
            case 23: // ONNXSEG
            case 24: // RTSEG
                modelType = onNvidia ? 24 /* RTSEG */ : 23 /* ONNXSEG */;
                break;
            // ── OBB / Classification (ONNX-only today — leave as-is) ─────────
            case 20: // ONNXCL
            case 25: // ONNXOBB
                break;
            default:
                // Any other modelType is handled directly by the switch below.
                break;
            }
        }
        // NOTE: We intentionally do NOT destroy any existing *Handle here.
        // LabVIEW reuses DLL parameter buffer addresses, so *Handle may point
        // to ANOTHER task's live engine — not this task's old engine.
        // The caller must use ReleaseANSODHandle() explicitly to clean up.
        switch (detectionType) {
        case 0:
            modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
            break;
        case 1:
            modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
            break;
        case 2:
            modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
            break;
        case 3:
            modelConfig.detectionType = ANSCENTER::DetectionType::FACEDETECTOR;
            break;
        case 4:
            modelConfig.detectionType = ANSCENTER::DetectionType::FACERECOGNIZER;
            break;
        case 5:
            modelConfig.detectionType = ANSCENTER::DetectionType::LICENSEPLATE;
            break;
        case 6:
            modelConfig.detectionType = ANSCENTER::DetectionType::TEXTSCENSE;
            break;
        case 7:
            modelConfig.detectionType = ANSCENTER::DetectionType::KEYPOINT;
            break;
        default:
            modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
            break;
        }
        switch (modelType) {
        case 0: //TENSORFLOW =0
            (*Handle) = new ANSCENTER::YOLOOD();
            modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW;
            break;
        case 1: //YOLOV4 = 1
            (*Handle) = new ANSCENTER::YOLOOD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV4;
            break;
        case 2://YOLOV5 = 2
            (*Handle) = new ANSCENTER::YOLOOD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV5;
            break;
        case 3: //YOLOV8 = 3,
            (*Handle) = new ANSCENTER::YOLOOD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
            break;
        case 4: //TENSORRT = 4,
            if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    (*Handle) = new ANSCENTER::TENSORRTCL();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    (*Handle) = new ANSCENTER::ANSONNXCL();
                    modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
                }
                break;
            }
            else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    (*Handle) = new ANSCENTER::TENSORRTOD();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    (*Handle) = new ANSCENTER::YOLOOD();
                    modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
                }
                break;
            }
            else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    (*Handle) = new ANSCENTER::TENSORRTSEG();
                    modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
                }
                else {
                    // FIX: this branch previously had NO else — on non-NVIDIA
                    // hardware *Handle was left unassigned (stale LabVIEW
                    // buffer contents) and could pass the null-check below as
                    // a garbage pointer. Mirror CreateANSODHandleEx and fall
                    // back to the ONNX segmentation handler.
                    (*Handle) = new ANSCENTER::ANSONNXSEG();
                    modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
                }
                break;
            }
            else {// default is detection
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    (*Handle) = new ANSCENTER::TENSORRTOD();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    (*Handle) = new ANSCENTER::YOLOOD();
                    modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
                }
                break;
            }
        case 5: //OPENVINO = 5
            if (modelConfig.detectionType == ANSCENTER::DetectionType::CLASSIFICATION) {
                (*Handle) = new ANSCENTER::OPENVINOCL();
                modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
                break;
            }
            else if (modelConfig.detectionType == ANSCENTER::DetectionType::DETECTION) {
                (*Handle) = new ANSCENTER::OPENVINOOD();
                modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
                break;
            }
            else if (modelConfig.detectionType == ANSCENTER::DetectionType::SEGMENTATION) {// Segmentation
                (*Handle) = new ANSCENTER::ANSOVSEG();
                modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
                break;
            }
            else {
                (*Handle) = new ANSCENTER::OPENVINOOD();
                modelConfig.modelType = ANSCENTER::ModelType::OPENVINO;
                break;
            }
        case 6: //FACEDETECT = 6
            (*Handle) = new ANSCENTER::ANSFD();
            modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
            break;
        case 10: //ANOMALIB=10
            (*Handle) = new ANSCENTER::ANSANOMALIB();
            modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB;
            break;
        case 11: //OPENPOSE=11
            (*Handle) = new ANSCENTER::ANSPOSE();
            modelConfig.modelType = ANSCENTER::ModelType::POSE;
            break;
        case 12: //SAM=12
            (*Handle) = new ANSCENTER::ANSSAM();
            modelConfig.modelType = ANSCENTER::ModelType::SAM;
            break;
        case 13: //ODHUBMODEL=13
            (*Handle) = new ANSCENTER::ODHUBAPI();
            modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL;
            break;
        case 14: //TensorRT for Object Detection Yolov10
            // Upstream modelType rewrite (see top of each factory) already
            // redirects 14 → 31 (RTYOLO) on NVIDIA or 14 → 30 (ONNXYOLO) on
            // non-NVIDIA, so this branch is unreachable in practice. Keep
            // an explicit vendor gate as defense-in-depth against future
            // refactors — ANSYOLOV10RTOD is a TensorRT class and must never
            // be constructed on AMD/Intel/CPU hardware.
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::ANSYOLOV10RTOD();
                modelConfig.modelType = ANSCENTER::ModelType::YOLOV10RTOD;
            } else {
                (*Handle) = new ANSCENTER::ANSONNXYOLO();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
            }
            break;
        case 15: //OpenVino for Object Detection Yolov10
            (*Handle) = new ANSCENTER::ANSOYOLOV10OVOD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD;
            break;
        case 16: //Custom detector
            (*Handle) = new ANSCENTER::ANSCUSTOMDETECTOR();
            modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR;
            break;
        case 17: //Yolo V12
            (*Handle) = new ANSCENTER::YOLO12OD();
            modelConfig.modelType = ANSCENTER::ModelType::YOLOV12;
            break;
        case 18: //Custom Python
            (*Handle) = new ANSCENTER::ANSCUSTOMPY();
            modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY;
            break;
        case 19: //Motion Detector
            (*Handle) = new ANSCENTER::ANSMOTIONDETECTOR();
            modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR;
            break;
        case 20: //ANSONNXCL
            (*Handle) = new ANSCENTER::ANSONNXCL();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
            break;
        case 21: //ANSONNXPOSE
            (*Handle) = new ANSCENTER::ANSONNXPOSE();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
            break;
        case 22: //ANSTENSORRTPOSE
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::ANSTENSORRTPOSE();
                modelConfig.modelType = ANSCENTER::ModelType::RTPOSE;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXPOSE();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXPOSE;
            }
            break;
        case 23: //ONNXSEG
            (*Handle) = new ANSCENTER::ANSONNXSEG();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
            break;
        case 24: //RTSEG
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::TENSORRTSEG();
                modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXSEG();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
            }
            break;
        case 25: //ONNXOBB
            (*Handle) = new ANSCENTER::ANSONNXOBB();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXOBB;
            break;
        case 27: //MOVIENET
            (*Handle) = new ANSCENTER::ANSMOVIENET();
            modelConfig.modelType = ANSCENTER::ModelType::MOVIENET;
            break;
        case 28: //ANSONNXSAM3
            (*Handle) = new ANSCENTER::ANSONNXSAM3();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
            break;
        case 29: //ANSSAM3
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::ANSSAM3();
                // FIX: was mislabelled ONNXSAM3 — the TensorRT SAM3 class is
                // RTSAM3. Kept in sync with CreateANSODHandleEx (case 29).
                modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXSAM3();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
            }
            break;
        case 30: //ONNXYOLO
            (*Handle) = new ANSCENTER::ANSONNXYOLO();
            modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
            break;
        case 31: //RTYOLO
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                (*Handle) = new ANSCENTER::ANSRTYOLO();
                modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
            }
            else {
                (*Handle) = new ANSCENTER::ANSONNXYOLO();
                modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
            }
            break;
        default:
            (*Handle) = new ANSCENTER::ANSFD();
            modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT;
            break;
        }
        if (*Handle == nullptr) {
            ANS_DBG("ANSOD","LoadModelFromFolder FAIL: allocation produced null handle (modelType=%d)", modelType);
            return -1;
        }
        else {
            ANS_DBG("ANSOD","LoadModelFromFolder: allocated handle=%p (uint=%llu) modelType=%d, calling LoadModelFromFolder...",
                (void*)*Handle, (unsigned long long)(uintptr_t)*Handle, (int)modelConfig.modelType);

            // CUDA round-robin + VRAM check — NVIDIA only (see CreateANSODHandle).
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                const int assignedGPU = AssignNextGPU();
                modelConfig.gpuDeviceIndex = assignedGPU;
                CheckGPUVRAM(assignedGPU);
                (*Handle)->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());
            }

            RegisterODHandle(*Handle);
            (*Handle)->SetLoadEngineOnCreation(_loadEngineOnCreation); //Set force to load the engine immediately
            bool result = (*Handle)->LoadModelFromFolder(licenseKey, modelConfig, modelName, className, modelFolder, labelMap);
            ANS_DBG("ANSOD","LoadModelFromFolder OK: handle=%p loadResult=%d labelMapLen=%zu",
                (void*)*Handle, result ? 1 : 0, labelMap.size());
            if (result) return 1;
            else return 0;
        }
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSOD","LoadModelFromFolder EXCEPTION (std::exception): %s", e.what());
        return 0;
    }
    catch (...) {
        ANS_DBG("ANSOD","LoadModelFromFolder EXCEPTION (unknown)");
        return 0;
    }
}
ANSODENGINE_API int OptimizeModelStr(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16, std::string& modelFolder) {
    // Optimizes a packaged model for the local hardware (TensorRT engine build
    // on NVIDIA, ONNX/OpenVINO conversion otherwise) and reports the output
    // folder via modelFolder.
    // Returns 1 on success, 0 on failure, -1 on exception.
    ANS_DBG("ANSOD","OptimizeModelStr: modelType=%d, modelDetectionType=%d, fp16=%d, modelPath=%s",
        modelType, modelDetectionType, fp16, modelFilePath ? modelFilePath : "(null)");
    try {
        bool optimizedResult = false;
        // NOTE: odMutex was removed here. OptimizeModelStr creates its own
        // temporary Engine<float> on the stack — no shared state with running
        // inference tasks. The global mutex was blocking ALL running tasks'
        // result delivery for the entire duration of TRT engine building.
        ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();

        // Route detection / pose / segmentation / OBB / classification to the best
        // available backend: prefer TensorRT on NVIDIA, otherwise the matching ONNX
        // handler. Unlisted modelType values are left untouched for the switch below.
        // See CreateANSODHandleEx for the full rationale — three correctness bugs
        // were fixed in that dispatcher and must be kept in sync across copies.
        {
            const bool onNvidia = (engineType == ANSCENTER::EngineType::NVIDIA_GPU);
            switch (modelType) {
            // ── Detection family: YOLOv8 / V11 / V12 / generic TRT / V10-RTOD ──
            case 3:  // YOLOV8 / YOLOV11
            case 4:  // generic TensorRT
            case 14: // YOLOv10RTOD (TRT end-to-end NMS)
            case 17: // YOLOV12
                modelType = onNvidia ? 31 /* RTYOLO */ : 30 /* ONNXYOLO */;
                break;
            // ── Pose ─────────────────────────────────────────────────────────
            case 21: // ONNXPOSE
            case 22: // RTPOSE
                modelType = onNvidia ? 22 /* RTPOSE */ : 21 /* ONNXPOSE */;
                break;
            // ── Segmentation ─────────────────────────────────────────────────
            case 23: // ONNXSEG
            case 24: // RTSEG
                modelType = onNvidia ? 24 /* RTSEG */ : 23 /* ONNXSEG */;
                break;
            // ── OBB / Classification (ONNX-only today — leave as-is) ─────────
            case 20: // ONNXCL
            case 25: // ONNXOBB
                break;
            default:
                // Any other modelType is handled directly by the switch below.
                break;
            }
        }

        if (modelType == 31) // If modelType is RTYOLO (31), handle separately.
        {
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                if (modelDetectionType == 0) {
                    // FIX: was (224, 244) — transposed-digit typo. Classification
                    // models use the standard square 224x224 input.
                    return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 224, 224) ? 1 : 0; // this is for classification models
                }
                else {
                    return ANSCENTER::ANSUtilityHelper::ModelOptimizer(modelFilePath, modelFileZipPassword, fp16, modelFolder, 640, 640) ? 1 : 0; // standard size for detection models, segmentation and others
                }
            }
        }

        // Create model handle dynamically
        std::unique_ptr<ANSCENTER::ANSODBase> Handle;
        ANSCENTER::ModelConfig modelConfig;
        bool _fp16 = (fp16 == 1);
        switch (modelType) {
        case 0: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::TENSORFLOW; break;
        case 1: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV4; break;
        case 2: Handle = std::make_unique<ANSCENTER::YOLOOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV5; break;
        case 5: Handle = std::make_unique<ANSCENTER::OPENVINOOD>(); modelConfig.modelType = ANSCENTER::ModelType::OPENVINO; break;
        case 6: Handle = std::make_unique<ANSCENTER::ANSFD>(); modelConfig.modelType = ANSCENTER::ModelType::FACEDETECT; break;
        case 10: Handle = std::make_unique<ANSCENTER::ANSANOMALIB>(); modelConfig.modelType = ANSCENTER::ModelType::ANOMALIB; break;
        case 11: Handle = std::make_unique<ANSCENTER::ANSPOSE>(); modelConfig.modelType = ANSCENTER::ModelType::POSE; break;
        case 12: Handle = std::make_unique<ANSCENTER::ANSSAM>(); modelConfig.modelType = ANSCENTER::ModelType::SAM; break;
        case 13: Handle = std::make_unique<ANSCENTER::ODHUBAPI>(); modelConfig.modelType = ANSCENTER::ModelType::ODHUBMODEL; break;
        case 15: Handle = std::make_unique<ANSCENTER::ANSOYOLOV10OVOD>(); modelConfig.modelType = ANSCENTER::ModelType::YOLOV10OVOD; break;
        case 16: Handle = std::make_unique<ANSCENTER::ANSCUSTOMDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMDETECTOR; break;
        case 18: Handle = std::make_unique<ANSCENTER::ANSCUSTOMPY>(); modelConfig.modelType = ANSCENTER::ModelType::CUSTOMPY; break;
        case 19: Handle = std::make_unique<ANSCENTER::ANSMOTIONDETECTOR>(); modelConfig.modelType = ANSCENTER::ModelType::MOTIONDETECTOR; break;
        case 27: Handle = std::make_unique<ANSCENTER::ANSMOVIENET>(); modelConfig.modelType = ANSCENTER::ModelType::MOVIENET; break;
        case 28: Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3; break;
        case 29: {
            // SAM3: TensorRT variant on NVIDIA, ONNX fallback elsewhere.
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                Handle = std::make_unique<ANSCENTER::ANSSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::RTSAM3;
            }
            else {
                Handle = std::make_unique<ANSCENTER::ANSONNXSAM3>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXSAM3;
            }
            break;
        }
        case 30: Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO; break;
        case 31: {
            // RTYOLO reaches here only on non-NVIDIA (the NVIDIA path returned
            // above via ModelOptimizer), but keep the vendor gate for safety.
            if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                Handle = std::make_unique<ANSCENTER::ANSRTYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::RTYOLO;
            }
            else {
                Handle = std::make_unique<ANSCENTER::ANSONNXYOLO>(); modelConfig.modelType = ANSCENTER::ModelType::ONNXYOLO;
            }
            break;
        }
        default: {
            // Unlisted model types: pick a backend from the detection type.
            if (modelDetectionType == 0) // classification
            {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    Handle = std::make_unique<ANSCENTER::TENSORRTCL>();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    Handle = std::make_unique<ANSCENTER::ANSONNXCL>();
                    modelConfig.modelType = ANSCENTER::ModelType::ONNXCL;
                }
                modelConfig.detectionType = ANSCENTER::DetectionType::CLASSIFICATION;
            }
            else if (modelDetectionType == 1) // detection
            {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    Handle = std::make_unique<ANSCENTER::TENSORRTOD>();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    Handle = std::make_unique<ANSCENTER::YOLOOD>();
                    modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
                }
                modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
            }
            else if (modelDetectionType == 2) // segmentation
            {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU)
                {
                    Handle = std::make_unique<ANSCENTER::TENSORRTSEG>();
                    modelConfig.modelType = ANSCENTER::ModelType::RTSEG;
                }
                else {
                    Handle = std::make_unique<ANSCENTER::ANSONNXSEG>();
                    modelConfig.modelType = ANSCENTER::ModelType::ONNXSEG;
                }
                modelConfig.detectionType = ANSCENTER::DetectionType::SEGMENTATION;
            }
            else // default is detection
            {
                if (engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
                    Handle = std::make_unique<ANSCENTER::TENSORRTOD>();
                    modelConfig.modelType = ANSCENTER::ModelType::TENSORRT;
                }
                else {
                    Handle = std::make_unique<ANSCENTER::YOLOOD>();
                    modelConfig.modelType = ANSCENTER::ModelType::YOLOV8;
                }
                modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
            }
            break;
        }
        }

        // TensorRT-specific: bypass pool and cache for temporary optimizer engines
        if (Handle && engineType == ANSCENTER::EngineType::NVIDIA_GPU) {
            Handle->SetMaxSlotsPerGpu(0);
            Handle->SetSkipEngineCache(true);
            Handle->SetForceNoPool(true);
        }
        // RAII guard for TensorRT process-wide flags.
        // Without this, an exception in LoadModel or OptimizeModel permanently
        // leaves the flags set, breaking all subsequent engine creation.
        struct GlobalFlagGuard {
            bool active;
            GlobalFlagGuard(bool isNvidia) : active(isNvidia) {
                if (active) {
                    g_forceNoPool = true;
                    TRTEngineCache::globalBypass() = true;
                }
            }
            ~GlobalFlagGuard() {
                if (active) {
                    g_forceNoPool = false;
                    TRTEngineCache::globalBypass() = false;
                }
            }
        } flagGuard(engineType == ANSCENTER::EngineType::NVIDIA_GPU);

        // Load and optimize model
        if (Handle && Handle->LoadModel(modelFilePath, modelFileZipPassword)) {
            optimizedResult = Handle->OptimizeModel(_fp16, modelFolder);
        }
        Handle.reset(); // Destroy engines BEFORE guard restores cache

        if (optimizedResult && !modelFolder.empty()) return 1;
        else return 0;
    }
    catch (const std::exception& e) {
        // GlobalFlagGuard destructor runs here — flags are always restored
        std::cerr << "OptimizeModelStr Exception: " << e.what() << std::endl;
        return -1;
    }
    catch (...) {
        // GlobalFlagGuard destructor runs here — flags are always restored
        std::cerr << "OptimizeModelStr: Unknown exception occurred." << std::endl;
        return -1;
    }
}
// Internal implementation of ReleaseANSODHandle.
// Destroys and frees an engine handle previously created by this DLL, then
// nulls the caller's slot. Returns 0 on success or no-op, 1 on exception.
// Statement order is deliberate: the handle is removed from the registry
// BEFORE Destroy()/delete so a concurrent caller cannot acquire it
// mid-destruction.
static int ReleaseANSODHandle_Impl(ANSCENTER::ANSODBase** Handle) {
    try {
        // Null pointer-to-pointer: nothing to release.
        if (Handle == nullptr) {
            ANS_DBG("ANSOD","ReleaseANSODHandle: HandlePtr is null, no-op");
            return 0;
        }
        // Slot already cleared (e.g. double release): harmless no-op.
        if (*Handle == nullptr) {
            ANS_DBG("ANSOD","ReleaseANSODHandle: *Handle is null, no-op");
            return 0;
        }
        ANSCENTER::ANSODBase* h = *Handle;
        ANS_DBG("ANSOD","ReleaseANSODHandle called: handle=%p (uint=%llu)",
            (void*)h, (unsigned long long)(uintptr_t)h);
        // Only release if this handle was registered by us
        if (!UnregisterODHandle(h)) {
            // Not in registry — already freed or not ours
            ANS_DBG("ANSOD","ReleaseANSODHandle: Unregister returned false (already gone or being destroyed by another thread), handle=%p", (void*)h);
            // Still clear the caller's slot so it cannot be reused.
            *Handle = nullptr;
            return 0;
        }
        h->Destroy();
        delete h;
        *Handle = nullptr;
        // NOTE: %p below prints only the old pointer VALUE; h is not
        // dereferenced after delete.
        ANS_DBG("ANSOD","ReleaseANSODHandle OK: handle=%p deleted, registry now has %zu entries",
            (void*)h, ODHandleRegistry().size());
        return 0;
    }
    catch (...) {
        ANS_DBG("ANSOD","ReleaseANSODHandle EXCEPTION (unknown)");
        // Best-effort: null the caller's slot even on failure to block reuse.
        if (Handle) *Handle = nullptr;
        return 1;
    }
}
// Exported C entry point. Wraps the C++ implementation in SEH (__try) so a
// hardware fault (e.g. access violation from a stale pointer passed by the
// caller) is converted into an error code instead of crashing the host.
// No C++ objects with destructors may live in this function — MSVC forbids
// mixing __try with objects that require unwinding (C2712).
extern "C" ANSODENGINE_API int ReleaseANSODHandle(ANSCENTER::ANSODBase** Handle) {
    __try {
        return ReleaseANSODHandle_Impl(Handle);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        ANS_DBG("ANSOD","ReleaseANSODHandle: SEH exception caught");
        // Null the caller's slot even after a fault, to block reuse.
        if (Handle) *Handle = nullptr;
        return 1;
    }
}
2026-04-09 08:09:02 +10:00
// Decodes an in-memory JPEG buffer and runs single-frame inference.
// Returns the engine's JSON detection string, or "" on any failure
// (bad handle, empty/undecodable buffer, exception).
// Cleanup: dropped the dead `result = ""; result.clear(); return result;`
// pattern and the redundant frame.release() — cv::Mat's destructor releases.
ANSODENGINE_API std::string RunInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    ANS_DBG("ANSOD","RunInference: HandlePtr=%p, *Handle=%p, bufferLength=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength);
    try {
        if (Handle == nullptr || *Handle == nullptr) return "";
        if (jpeg_string == nullptr || bufferLength == 0) return "";
        // Wrap the caller's buffer without copying, then decode to BGR.
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return "";   // not a decodable image
        std::string detectionResult;
        (*Handle)->RunInference(frame, "Cam", detectionResult);
        return detectionResult;
    }
    catch (...) {
        return "";   // exceptions must never cross the DLL boundary
    }
}
2026-04-09 08:09:02 +10:00
// Decodes a JPEG buffer and runs tiled inference across the frame,
// returning the detections serialized to a JSON string ("" on any failure).
// Exceptions never escape the DLL boundary.
ANSODENGINE_API std::string RunTiledInference(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
    ANS_DBG("ANSOD","RunTiledInference: HandlePtr=%p, *Handle=%p, bufferLength=%u, tile=%dx%d, overlap=%.3f, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, tiledWidth, titledHeight, overlap,
        cameraId ? cameraId : "(null)");
    try {
        const bool badHandle = (Handle == nullptr) || (*Handle == nullptr);
        const bool badBuffer = (jpeg_string == nullptr) || (bufferLength == 0);
        if (badHandle || badBuffer) return "";
        // Wrap the caller's bytes without copying, then decode to BGR.
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return std::string();
        std::vector<ANSCENTER::Object> detections =
            (*Handle)->RunInferences(decoded, tiledWidth, titledHeight, overlap, cameraId);
        decoded.release();
        return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
    }
    catch (...) {
        return std::string();
    }
}
2026-04-09 08:09:02 +10:00
// Runs inference on an in-memory JPEG string and returns the detections as
// JSON ("" on failure).
// FIX: validate jpeg_string/jpeg_size before forwarding to the engine,
// consistent with the other RunInference* wrappers in this file.
ANSODENGINE_API std::string RunInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId) {
    ANS_DBG("ANSOD","RunInferenceFromJpegString: HandlePtr=%p, *Handle=%p, jpeg_size=%lu, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), jpeg_size, cameraId ? cameraId : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) return "";
        if (jpeg_string == nullptr || jpeg_size == 0) return "";   // nothing to decode
        std::vector<ANSCENTER::Object> outputs = (*Handle)->RunInferenceFromJpegString(jpeg_string, jpeg_size, cameraId);
        return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
    }
    catch (...) {
        return "";   // exceptions must never cross the DLL boundary
    }
}
2026-04-09 08:09:02 +10:00
// Runs tiled inference on an in-memory JPEG string and returns JSON
// detections ("" on failure).
// FIX: validate jpeg_string/jpeg_size before forwarding to the engine,
// consistent with the other RunInference* wrappers in this file.
ANSODENGINE_API std::string RunTiledInferenceFromJpegString(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId) {
    ANS_DBG("ANSOD","RunTiledInferenceFromJpegString: HandlePtr=%p, *Handle=%p, jpeg_size=%lu, tile=%dx%d, overlap=%.3f, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), jpeg_size, tiledWidth, titledHeight, overlap,
        cameraId ? cameraId : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) return "";
        if (jpeg_string == nullptr || jpeg_size == 0) return "";   // nothing to decode
        std::vector<ANSCENTER::Object> outputs = (*Handle)->RunTiledInferenceFromJpegString(jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
        return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
    }
    catch (...) {
        return "";   // exceptions must never cross the DLL boundary
    }
}
2026-04-09 08:09:02 +10:00
// Runs single-frame inference on a caller-supplied cv::Mat (passed by
// value; cv::Mat is a cheap ref-counted header, so no pixel copy occurs).
// Returns the engine's JSON result, or "" on a bad handle / exception.
ANSODENGINE_API std::string RunInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image)
{
    ANS_DBG("ANSOD","RunInferenceFromCV: HandlePtr=%p, *Handle=%p, image=%dx%d",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), image.cols, image.rows);
    try {
        const bool usable = (Handle != nullptr) && (*Handle != nullptr);
        if (!usable) return "";
        std::string json;
        (*Handle)->RunInference(image, "Cam", json);
        return json;
    }
    catch (...) {
        return std::string();
    }
}
// Runs motion detection on a frame, writing the detected objects into the
// caller's vector. On a bad handle or any exception the vector is cleared.
extern "C" ANSODENGINE_API void RunDetectMovement(ANSCENTER::ANSODBase** Handle, cv::Mat image, const char* cameraId, std::vector<ANSCENTER::Object>& results) {
    ANS_DBG("ANSOD","RunDetectMovement: HandlePtr=%p, *Handle=%p, image=%dx%d, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), image.cols, image.rows,
        cameraId ? cameraId : "(null)");
    try {
        if (Handle != nullptr && *Handle != nullptr) {
            results = (*Handle)->DetectMovement(image, cameraId);
            return;
        }
        results.clear();
    }
    catch (...) {
        results.clear();
    }
}
// Runs tiled inference on a cv::Mat, writing the detections into the
// caller's vector. On a bad handle or any exception the vector is cleared.
extern "C" ANSODENGINE_API void RunTiledInferenceFromCV(ANSCENTER::ANSODBase** Handle, cv::Mat image, int tiledWidth, int titledHeight, double overlap, std::vector<ANSCENTER::Object>& results, const char* cameraId) {
    ANS_DBG("ANSOD","RunTiledInferenceFromCV: HandlePtr=%p, *Handle=%p, image=%dx%d, tile=%dx%d, overlap=%.3f, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), image.cols, image.rows,
        tiledWidth, titledHeight, overlap, cameraId ? cameraId : "(null)");
    try {
        if (Handle != nullptr && *Handle != nullptr) {
            results = (*Handle)->RunInferences(image, tiledWidth, titledHeight, overlap, cameraId);
            return;
        }
        results.clear();
    }
    catch (...) {
        results.clear();
    }
}
2026-04-09 08:09:02 +10:00
// Decodes a JPEG buffer and runs inference only inside the bounding boxes
// parsed from strBboxes, returning detections as JSON ("" on failure).
ANSODENGINE_API std::string RunInferenceInCroppedBBoxImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxImages: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        const bool badHandle = (Handle == nullptr) || (*Handle == nullptr);
        const bool badBuffer = (jpeg_string == nullptr) || (bufferLength == 0);
        if (badHandle || badBuffer) return "";
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return std::string();
        // Parse the caller's region list, then restrict inference to those crops.
        const std::vector<cv::Rect> regions = ANSCENTER::ANSUtilityHelper::GetBoundingBoxesFromString(strBboxes);
        std::vector<ANSCENTER::Object> detections = (*Handle)->RunInference(decoded, regions, cameraId);
        decoded.release();
        return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
    }
    catch (...) {
        return std::string();
    }
}
2026-04-09 08:09:02 +10:00
// Decodes a JPEG buffer and runs inference only inside the polygon parsed
// from strPolygon, returning detections as JSON ("" on failure).
ANSODENGINE_API std::string RunInferenceInCroppedPolygonImages(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
    ANS_DBG("ANSOD","RunInferenceInCroppedPolygonImages: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        const bool badHandle = (Handle == nullptr) || (*Handle == nullptr);
        const bool badBuffer = (jpeg_string == nullptr) || (bufferLength == 0);
        if (badHandle || badBuffer) return "";
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return std::string();
        // Parse the caller's polygon, then restrict inference to that region.
        const std::vector<cv::Point> region = ANSCENTER::ANSUtilityHelper::StringToPolygon(strPolygon);
        std::vector<ANSCENTER::Object> detections = (*Handle)->RunInference(decoded, region, cameraId);
        decoded.release();
        return ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(detections);
    }
    catch (...) {
        return std::string();
    }
}
2026-04-09 08:09:02 +10:00
// Runs inference on a raw, already-decoded 8-bit 3-channel frame.
// NOTE(review): despite the parameter name, jpeg_bytes is treated as raw
// width x height x 3 pixel data (CV_8UC3) — no JPEG decoding happens here.
// Returns the engine's JSON result, or "" on failure.
ANSODENGINE_API std::string RunInferenceBinary(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    ANS_DBG("ANSOD","RunInferenceBinary: HandlePtr=%p, *Handle=%p, width=%u, height=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), width, height);
    try {
        const bool badHandle = (Handle == nullptr) || (*Handle == nullptr);
        const bool badInput = (jpeg_bytes == nullptr) || (width == 0) || (height == 0);
        if (badHandle || badInput) return "";
        // Deep-copy so the engine never aliases the caller's buffer.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) return std::string();
        std::string json;
        (*Handle)->RunInference(frame, "Cam", json);
        return json;
    }
    catch (...) {
        return std::string();
    }
}
2026-04-09 08:09:02 +10:00
// Loads an image from disk and runs single-frame inference, returning the
// engine's JSON result ("" on failure).
// FIX: reject a null/empty imageFilePath — constructing std::string from a
// null const char* is undefined behavior and is not reliably caught by
// catch(...).
ANSODENGINE_API std::string RunInferenceImagePath(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
    ANS_DBG("ANSOD","RunInferenceImagePath: HandlePtr=%p, *Handle=%p, imageFilePath=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), imageFilePath ? imageFilePath : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) return "";
        if (imageFilePath == nullptr || imageFilePath[0] == '\0') return "";
        std::string stImageFileName(imageFilePath);
        cv::Mat frame = cv::imread(stImageFileName, cv::ImreadModes::IMREAD_COLOR);
        if (frame.empty()) return "";   // missing, unreadable, or corrupt file
        std::string detectionResult;
        (*Handle)->RunInference(frame, "Cam", detectionResult);
        return detectionResult;
    }
    catch (...) {
        return "";   // exceptions must never cross the DLL boundary
    }
}
2026-04-09 08:09:02 +10:00
2026-03-28 16:54:11 +11:00
// LabVIEW binding: runs inference on a JPEG buffer and writes the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
// FIX: guard against a null detectionResult handle before calling
// DSSetHandleSize (a null LabVIEW handle would fault).
extern "C" ANSODENGINE_API int RunInference_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInference_LV: HandlePtr=%p, *Handle=%p, bufferLength=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength);
    try {
        if (detectionResult == nullptr) return 0;   // LabVIEW must pass a valid handle
        std::string st = RunInference(Handle, jpeg_string, bufferLength);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: tiled inference over a JPEG buffer; writes the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunTiledInference_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunTiledInference_LV: HandlePtr=%p, *Handle=%p, bufferLength=%u, tile=%dx%d, overlap=%.3f, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, tiledWidth, titledHeight, overlap,
        cameraId ? cameraId : "(null)");
    try {
        const std::string json = RunTiledInference(Handle, jpeg_string, bufferLength, tiledWidth, titledHeight, overlap, cameraId);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference on an in-memory JPEG string; writes the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceFromJpegString_LV(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceFromJpegString_LV: HandlePtr=%p, *Handle=%p, jpeg_size=%lu, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), jpeg_size, cameraId ? cameraId : "(null)");
    try {
        const std::string json = RunInferenceFromJpegString(Handle, jpeg_string, jpeg_size, cameraId);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: tiled inference on an in-memory JPEG string; writes the
// JSON result into the LabVIEW string handle. Returns 1 on success, else 0.
extern "C" ANSODENGINE_API int RunTiledInferenceFromJpegString_LV(ANSCENTER::ANSODBase** Handle, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunTiledInferenceFromJpegString_LV: HandlePtr=%p, *Handle=%p, jpeg_size=%lu, tile=%dx%d, overlap=%.3f, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), jpeg_size, tiledWidth, titledHeight, overlap,
        cameraId ? cameraId : "(null)");
    try {
        const std::string json = RunTiledInferenceFromJpegString(Handle, jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference restricted to bounding boxes parsed from
// strBboxes; writes the JSON result into the LabVIEW string handle.
// Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxImages_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strBboxes, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxImages_LV: HandlePtr=%p, *Handle=%p, bufferLength=%d, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), (int)bufferLength, cameraId ? cameraId : "(null)");
    try {
        const std::string json = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference restricted to the polygon parsed from
// strPolygon; writes the JSON result into the LabVIEW string handle.
// Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxPolygonImages_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strPolygon, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxPolygonImages_LV: HandlePtr=%p, *Handle=%p, bufferLength=%d, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), (int)bufferLength, cameraId ? cameraId : "(null)");
    try {
        const std::string json = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference on a raw 8-bit 3-channel frame; writes the
// JSON result into the LabVIEW string handle. Returns 1 on success, else 0.
extern "C" ANSODENGINE_API int RunInferenceBinary_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceBinary_LV: HandlePtr=%p, *Handle=%p, width=%u, height=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), width, height);
    try {
        const std::string json = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference on an image loaded from disk; writes the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
extern "C" ANSODENGINE_API int RunInferenceImagePath_LV(ANSCENTER::ANSODBase** Handle, const char* imageFilePath, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceImagePath_LV: HandlePtr=%p, *Handle=%p, imageFilePath=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), imageFilePath ? imageFilePath : "(null)");
    try {
        const std::string json = RunInferenceImagePath(Handle, imageFilePath);
        if (json.empty()) return 0;
        const int byteCount = static_cast<int>(json.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        if (DSSetHandleSize(detectionResult, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = byteCount;
        memcpy((*detectionResult)->str, json.c_str(), byteCount);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding around OptimizeModelStr: optimizes a model and writes the
// optimized model folder path into the LabVIEW string handle.
// Returns 1 on success, 0 on any failure.
// Fixes: unused caught exception variable `e` (compiler warning) removed;
// redundant `size > 0` re-check (st.empty() already guarantees it) folded
// into an early null-handle guard.
extern "C" ANSODENGINE_API int OptimizeModel(const char* modelFilePath, const char* modelFileZipPassword,int modelType, int modelDetectionType, int fp16, LStrHandle optimizedModelFolder) {
    ANS_DBG("ANSOD","OptimizeModel: modelType=%d, modelDetectionType=%d, fp16=%d, modelPath=%s",
        modelType, modelDetectionType, fp16, modelFilePath ? modelFilePath : "(null)");
    try {
        if (optimizedModelFolder == nullptr) return 0;   // nowhere to write the result
        std::string st;
        int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, st);
        if (ret <= 0 || st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        MgErr error = DSSetHandleSize(optimizedModelFolder, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*optimizedModelFolder)->cnt = size;
        memcpy((*optimizedModelFolder)->str, st.c_str(), size);
        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
// C# binding: creates an engine handle and returns a status string.
// FIX: `thread_local` (was `static`). A single shared static buffer is a
// data race when two threads call any _CS function concurrently, and the
// returned c_str() could be invalidated mid-read. Per-thread storage keeps
// the pointer valid until this thread's next call. (The file already uses
// thread_local elsewhere, so the toolchain supports it.)
extern "C" __declspec(dllexport) const char* CreateANSODHandle_CS(ANSCENTER::ANSODBase** Handle, const char* licenseKey, const char* modelFilePath, const char* modelFileZipPassword, float modelThreshold, float modelConfThreshold, float modelNMSThreshold, int autoDetectEngine, int modelType, int detectionType, int loadEngineOnCreation) {
    ANS_DBG("ANSOD","CreateANSODHandle_CS: HandlePtr=%p, *Handle(in)=%p, modelType=%d, detectionType=%d, autoDetectEngine=%d, loadOnCreate=%d, modelPath=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr),
        modelType, detectionType, autoDetectEngine, loadEngineOnCreation,
        modelFilePath ? modelFilePath : "(null)");
    try {
        thread_local std::string result;
        result = CreateANSODHandle(Handle, licenseKey, modelFilePath, modelFileZipPassword, modelThreshold, modelConfThreshold, modelNMSThreshold, autoDetectEngine, modelType, detectionType, loadEngineOnCreation);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding: inference on an image file path, returning a JSON C-string.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* RunInferenceImagePath_CS(ANSCENTER::ANSODBase** Handle, const char* imageFilePath) {
    ANS_DBG("ANSOD","RunInferenceImagePath_CS: HandlePtr=%p, *Handle=%p, imageFilePath=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), imageFilePath ? imageFilePath : "(null)");
    try {
        thread_local std::string result;
        result = RunInferenceImagePath(Handle, imageFilePath);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding: inference on a JPEG buffer, returning a JSON C-string.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* RunInference_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    ANS_DBG("ANSOD","RunInference_CS: HandlePtr=%p, *Handle=%p, bufferLength=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength);
    try {
        thread_local std::string result;
        result = RunInference(Handle, jpeg_string, bufferLength);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding: inference restricted to caller-supplied bounding boxes,
// returning a JSON C-string.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* RunInferenceInCroppedBBoxImages_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strBboxes) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxImages_CS: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        thread_local std::string result;
        result = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding: inference restricted to a caller-supplied polygon, returning
// a JSON C-string.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* RunInferenceInCroppedPolygonImages_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, const char* strPolygon) {
    ANS_DBG("ANSOD","RunInferenceInCroppedPolygonImages_CS: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        thread_local std::string result;
        result = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding: inference on a raw 8-bit 3-channel frame, returning a JSON
// C-string.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* RunInferenceBinary_CS(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    ANS_DBG("ANSOD","RunInferenceBinary_CS: HandlePtr=%p, *Handle=%p, width=%u, height=%u",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), width, height);
    try {
        thread_local std::string result;
        result = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        return result.c_str();
    }
    catch (...) {
        return "";
    }
}
// C# binding around OptimizeModelStr: returns the optimized model folder
// path as a C-string, or "" on failure.
// FIX: `thread_local` (was `static`) — the shared static buffer raced when
// multiple threads called concurrently; per-thread storage keeps the
// returned pointer valid until this thread's next call.
extern "C" __declspec(dllexport) const char* OptimizeModelStr_CS(const char* modelFilePath, const char* modelFileZipPassword, int modelType, int modelDetectionType, int fp16)
{
    ANS_DBG("ANSOD","OptimizeModelStr_CS: modelType=%d, modelDetectionType=%d, fp16=%d, modelPath=%s",
        modelType, modelDetectionType, fp16, modelFilePath ? modelFilePath : "(null)");
    try {
        thread_local std::string result;
        result.clear();   // fresh out-parameter for this call
        int ret = OptimizeModelStr(modelFilePath, modelFileZipPassword, modelType, modelDetectionType, fp16, result);
        return (ret > 0 && !result.empty()) ? result.c_str() : "";
    }
    catch (...) {
        return "";
    }
}
// with camera id
// LabVIEW binding: motion detection on a JPEG buffer; writes the JSON
// result into the LabVIEW string handle. Returns 1 on success, 0 otherwise.
// FIX: (*Handle) was dereferenced unchecked — a null handle faulted with an
// access violation that C++ catch(...) does not reliably intercept. Also
// validates jpeg_string/bufferLength and the output handle, consistent with
// the other *_LV wrappers.
extern "C" ANSODENGINE_API int RunDetectMovement_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunDetectMovement_LV: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) return 0;
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        if (detectionResult == nullptr) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::vector<ANSCENTER::Object> outputs = (*Handle)->DetectMovement(frame, cameraId);
        frame.release();
        std::string st = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
        if (st.empty()) return 0;
        int size = static_cast<int>(st.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW binding: inference on a JPEG buffer with an explicit camera id;
// writes the JSON result into the LabVIEW string handle.
// Returns 1 on success, 0 otherwise.
// FIX: (*Handle) was dereferenced unchecked — a null handle faulted with an
// access violation that C++ catch(...) does not reliably intercept. Also
// validates jpeg_string/bufferLength and the output handle, consistent with
// the other *_LV wrappers.
extern "C" ANSODENGINE_API int RunInferenceFromJpegStringWithCameraId_LV(ANSCENTER::ANSODBase** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceFromJpegStringWithCameraId_LV: HandlePtr=%p, *Handle=%p, bufferLength=%u, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), bufferLength, cameraId ? cameraId : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) return 0;
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        if (detectionResult == nullptr) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) {
            return 0;
        }
        std::string st;
        (*Handle)->RunInference(frame, cameraId, st);
        frame.release();
        if (st.empty()) return 0;
        int size = static_cast<int>(st.length());
        // Resize the LabVIEW handle, then copy the length-prefixed payload.
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) {
        return 0;
    }
}
// Core implementation behind RunInferenceComplete_LV / _LV_V2.
// Runs inference on *cvImage, optionally JPEG-encodes a (possibly resized)
// copy of the frame, and writes both the JSON detections and the JPEG bytes
// into LabVIEW string handles.
// Returns: 1 success, 0 soft failure (encoding/handle resize/exception),
// -1 bad engine handle, -2 bad image, -3 handle already released.
// FIX: the thread-local GpuFrameData* is now restored via RAII. Previously
// it was restored manually, so an exception thrown inside
// RunInferenceWithOption left a stale pointer in the TLS slot, poisoning the
// next inference on this thread. Also removed the unused caught `ex`.
static int RunInferenceComplete_LV_Impl(
    ANSCENTER::ANSODBase** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        return -2;
    }
    // RAII guard — prevents ReleaseANSODHandle from destroying the engine
    // while we are still using it. Destructor auto-releases refcount.
    ANSCENTER::ANSODBase* handleSnapshot = *Handle; // snapshot the pointer value
    ODHandleGuard guard(AcquireODHandle(handleSnapshot));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    try {
        auto _t0 = std::chrono::steady_clock::now();
        // Save/restore the thread-local GPU frame to support nested calls
        // (custom model DLLs calling back into ANSODEngine via ANSLIB.dll).
        // The restore runs in a destructor so it also fires during stack
        // unwinding if inference throws.
        struct TlsFrameRestore {
            GpuFrameData* saved;
            ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
        } tlsRestore{ tl_currentGpuFrame() };
        // Lookup NV12 frame data BEFORE cloning (clone creates new cv::Mat*)
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        ANS_DBG("LV_Inference", "START cam=%s %dx%d gpuFrame=%p nv12=%s",
            cameraId ? cameraId : "?", originalWidth, originalHeight,
            (void*)gpuFrame, gpuFrame ? "YES" : "NO");
        if (originalWidth == 0 || originalHeight == 0) {
            return -2; // TLS untouched so far; tlsRestore is a no-op here
        }
        // Set thread-local so engines can access NV12 data without registry lookup.
        // Safe: *cvImage holds a refcount, keeping gpuFrame alive during inference.
        // Only use OWN gpuFrame — never inherit outer caller's frame (dimension mismatch on crops).
        tl_currentGpuFrame() = gpuFrame;
        auto _t1 = std::chrono::steady_clock::now();
        std::vector<ANSCENTER::Object> outputs = engine->RunInferenceWithOption(localImage, cameraId, activeROIMode);
        auto _t2 = std::chrono::steady_clock::now();
        // Restore TLS as soon as inference is done (before JPEG work);
        // tlsRestore will redundantly (and harmlessly) restore again at exit.
        tl_currentGpuFrame() = tlsRestore.saved;
        double prepMs = std::chrono::duration<double, std::milli>(_t1 - _t0).count();
        double infMs = std::chrono::duration<double, std::milli>(_t2 - _t1).count();
        if (infMs > 500.0) {
            ANS_DBG("LV_Inference", "SLOW cam=%s prep=%.1fms inf=%.1fms results=%zu",
                cameraId ? cameraId : "?", prepMs, infMs, outputs.size());
        }
        bool getJpeg = (getJpegString == 1);
        std::string stImage;
        // NOTE: odMutex was removed here. All variables in this scope are local
        // (outputs, localImage, stImage, stDetectionResult) and the LStrHandle
        // parameters are per-call. The global mutex was blocking running tasks
        // for minutes when another task was optimizing a TRT engine.
        int maxImageSize = originalWidth;// std::max(originalWidth, originalHeight);
        bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < maxImageSize);
        float ratio = 1.0f;
        int newWidth = originalWidth;
        int newHeight = originalHeight;
        if (resizeNeeded) {
            // Scale the boxes into the resized coordinate space, clamping
            // x/y to [0, dim-1] and width/height to the remaining extent.
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            ratio = static_cast<float>(newWidth) / originalWidth;
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // No resize: just clamp the boxes to the original frame bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }
        // Convert to JPEG if needed
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }
            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
            else {
                std::cerr << "Error: JPEG encoding failed!" << std::endl;
            }
        }
        std::string stDetectionResult = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
        if (stDetectionResult.empty()) {
            return 0;
        }
        // Write the JSON detections into the LabVIEW handle.
        int size = static_cast<int>(stDetectionResult.length());
        MgErr error = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) {
            return 0;
        }
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, stDetectionResult.c_str(), size);
        // Then the JPEG bytes, if requested.
        if (getJpeg) {
            if (stImage.empty()) {
                return 0;
            }
            size = static_cast<int>(stImage.length());
            error = DSSetHandleSize(imageStr, sizeof(int32) + size * sizeof(uChar));
            if (error != noErr) {
                return 0;
            }
            (*imageStr)->cnt = size;
            memcpy((*imageStr)->str, stImage.c_str(), size);
        }
        return 1;
    }
    catch (const std::exception&) {
        return 0; // TLS restored by TlsFrameRestore during unwinding
    }
    catch (...) {
        return 0; // TLS restored by TlsFrameRestore during unwinding
    }
}
// SEH wrapper — no C++ objects allowed in this function
static int RunInferenceComplete_LV_SEH(
ANSCENTER::ANSODBase** Handle,
cv::Mat** cvImage,
const char* cameraId,
int getJpegString,
int jpegImageSize,
const char* activeROIMode,
LStrHandle detectionResult,
LStrHandle imageStr)
{
__try {
return RunInferenceComplete_LV_Impl(Handle, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
}
__except (EXCEPTION_EXECUTE_HANDLER) {
return -4;
}
}
// Public LabVIEW entry point: logs the call, then delegates to the SEH
// wrapper (which in turn invokes the C++ implementation).
extern "C" ANSODENGINE_API int RunInferenceComplete_LV(
    ANSCENTER::ANSODBase** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    ANS_DBG("ANSOD","RunInferenceComplete_LV: HandlePtr=%p, *Handle=%p, cam=%s, getJpeg=%d, jpegImageSize=%d, roi=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr),
        cameraId ? cameraId : "(null)", getJpegString, jpegImageSize,
        activeROIMode ? activeROIMode : "(null)");
    const int status = RunInferenceComplete_LV_SEH(Handle, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
    return status;
}
// V2: Accepts handle as uint64_t by value — eliminates pointer-to-pointer
// instability when LabVIEW calls concurrently from multiple tasks.
// LabVIEW CLFN: set Handle parameter to Numeric / Unsigned Pointer-sized Integer / Pass: Value
extern "C" ANSODENGINE_API int RunInferenceComplete_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    const char* activeROIMode,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    // Recover the engine pointer directly from the by-value integer — no
    // double dereference of a shared Handle** slot.
    ANSCENTER::ANSODBase* enginePtr = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
    ANS_DBG("ANSOD","RunInferenceComplete_LV_V2: handleVal=%llu handle=%p, cam=%s, getJpeg=%d, jpegImageSize=%d, roi=%s",
        (unsigned long long)handleVal, (void*)enginePtr,
        cameraId ? cameraId : "(null)", getJpegString, jpegImageSize,
        activeROIMode ? activeROIMode : "(null)");
    if (!enginePtr) {
        return -1;
    }
    // The shared Impl still expects ANSODBase**; give it a stack slot to point
    // at. Only plain pointers live in this frame — __try forbids objects that
    // need unwinding (MSVC C2712).
    ANSCENTER::ANSODBase* handleSlot[1] = { enginePtr };
    ANSCENTER::ANSODBase** indirect = handleSlot;
    __try {
        return RunInferenceComplete_LV_Impl(indirect, cvImage, cameraId, getJpegString, jpegImageSize, activeROIMode, detectionResult, imageStr);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        return -4; // structured fault trapped — report -4 to LabVIEW
    }
}
// ============================================================================
// V2 LabVIEW API — Accept handle as uint64_t by value.
// Eliminates Handle** pointer-to-pointer instability when LabVIEW calls
// concurrently from multiple tasks.
// LabVIEW CLFN: set Handle parameter to Numeric / Unsigned Pointer-sized Integer / Pass: Value
// ============================================================================
// Helper: cast uint64_t handle to ANSODBase** for delegation to existing functions
// Expansion: validates the integer handle (early `return 0` on null), then
// builds a one-element stack array and declares `Handle` (ANSODBase**) in the
// enclosing scope so legacy Handle**-style functions can be reused unchanged.
// Only usable inside functions returning int; the array lives on the caller's
// stack, so `Handle` must not escape the function.
#define V2_HANDLE_SETUP(handleVal) \
    ANSCENTER::ANSODBase* _v2Direct = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal); \
    if (_v2Direct == nullptr) return 0; \
    ANSCENTER::ANSODBase* _v2Arr[1] = { _v2Direct }; \
    ANSCENTER::ANSODBase** Handle = &_v2Arr[0];
// V2: run inference on a JPEG buffer; writes detection JSON into the LabVIEW
// string handle. Returns 1 on success, 0 on any failure.
extern "C" ANSODENGINE_API int RunInference_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInference_LV_V2: handleVal=%llu handle=%p, bufferLength=%u",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal), bufferLength);
    try {
        // Robustness fix: validate the LabVIEW output handle up front —
        // DSSetHandleSize on a null LStrHandle would fault inside the LabVIEW
        // memory manager (GetConfiguredParameters performs the same check).
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInference(Handle, jpeg_string, bufferLength);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: tiled inference over a JPEG buffer (tile size + overlap supplied by the
// caller). Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunTiledInference_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunTiledInference_LV_V2: handleVal=%llu handle=%p, bufferLength=%u, tile=%dx%d, overlap=%.3f, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        bufferLength, tiledWidth, titledHeight, overlap, cameraId ? cameraId : "(null)");
    try {
        // Robustness fix: a null output handle would crash DSSetHandleSize
        // (same validation style as GetConfiguredParameters).
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunTiledInference(Handle, jpeg_string, bufferLength, tiledWidth, titledHeight, overlap, cameraId);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: tiled inference over a JPEG byte string (char* + explicit size).
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunTiledInferenceFromJpegString_LV_V2(uint64_t handleVal, const char* jpeg_string, unsigned long jpeg_size, int tiledWidth, int titledHeight, double overlap, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunTiledInferenceFromJpegString_LV_V2: handleVal=%llu handle=%p, jpeg_size=%lu, tile=%dx%d, overlap=%.3f, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        jpeg_size, tiledWidth, titledHeight, overlap, cameraId ? cameraId : "(null)");
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunTiledInferenceFromJpegString(Handle, jpeg_string, jpeg_size, tiledWidth, titledHeight, overlap, cameraId);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: inference over a JPEG byte string (char* + explicit size) with camera id.
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceFromJpegString_LV_V2(uint64_t handleVal, const char* jpeg_string, unsigned long jpeg_size, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceFromJpegString_LV_V2: handleVal=%llu handle=%p, jpeg_size=%lu, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        jpeg_size, cameraId ? cameraId : "(null)");
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInferenceFromJpegString(Handle, jpeg_string, jpeg_size, cameraId);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: inference over a raw binary image buffer with explicit width/height.
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceBinary_LV_V2(uint64_t handleVal, unsigned char* jpeg_bytes, unsigned int width, unsigned int height, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceBinary_LV_V2: handleVal=%llu handle=%p, width=%u, height=%u",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal), width, height);
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInferenceBinary(Handle, jpeg_bytes, width, height);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: inference over an image loaded from a file path.
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceImagePath_LV_V2(uint64_t handleVal, const char* imageFilePath, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceImagePath_LV_V2: handleVal=%llu handle=%p, imageFilePath=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        imageFilePath ? imageFilePath : "(null)");
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInferenceImagePath(Handle, imageFilePath);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: inference restricted to bounding-box crops described by strBboxes.
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxImages_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strBboxes, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxImages_LV_V2: handleVal=%llu handle=%p, bufferLength=%d, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        (int)bufferLength, cameraId ? cameraId : "(null)");
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInferenceInCroppedBBoxImages(Handle, jpeg_string, bufferLength, cameraId, strBboxes);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: inference restricted to polygon crops described by strPolygon.
// (Delegates to RunInferenceInCroppedPolygonImages despite the "BBox" name.)
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceInCroppedBBoxPolygonImages_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, int32 bufferLength, const char* cameraId, const char* strPolygon, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceInCroppedBBoxPolygonImages_LV_V2: handleVal=%llu handle=%p, bufferLength=%d, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        (int)bufferLength, cameraId ? cameraId : "(null)");
    try {
        // Robustness fix: reject a null output handle before calling into the
        // engine — DSSetHandleSize on a null LStrHandle would fault.
        if (detectionResult == nullptr) return 0;
        V2_HANDLE_SETUP(handleVal); // declares Handle; returns 0 if handleVal is null
        std::string st = RunInferenceInCroppedPolygonImages(Handle, jpeg_string, bufferLength, cameraId, strPolygon);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: motion detection on a JPEG-encoded frame passed from LabVIEW.
// Writes detection JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunDetectMovement_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunDetectMovement_LV_V2: handleVal=%llu handle=%p, bufferLength=%u, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        bufferLength, cameraId ? cameraId : "(null)");
    try {
        ANSCENTER::ANSODBase* directHandle = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
        if (directHandle == nullptr) return 0;
        // Robustness fixes: validate the input buffer before wrapping it in a
        // cv::Mat header (a null/empty buffer would make imdecode read invalid
        // memory), and validate the output handle before DSSetHandleSize
        // (same check GetConfiguredParameters performs on its handle).
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        if (detectionResult == nullptr) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        std::vector<ANSCENTER::Object> outputs = directHandle->DetectMovement(frame, cameraId);
        frame.release();
        std::string st = ANSCENTER::ANSUtilityHelper::VectorDetectionToJsonString(outputs);
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// V2: decode a JPEG buffer and run inference with a camera id; the engine
// returns its result already serialized as a JSON string.
// Writes that JSON into detectionResult; 1 = success, 0 = failure.
extern "C" ANSODENGINE_API int RunInferenceFromJpegStringWithCameraId_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    ANS_DBG("ANSOD","RunInferenceFromJpegStringWithCameraId_LV_V2: handleVal=%llu handle=%p, bufferLength=%u, cam=%s",
        (unsigned long long)handleVal, (void*)reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal),
        bufferLength, cameraId ? cameraId : "(null)");
    try {
        ANSCENTER::ANSODBase* directHandle = reinterpret_cast<ANSCENTER::ANSODBase*>(handleVal);
        if (directHandle == nullptr) return 0;
        // Robustness fixes: validate the input buffer before building the
        // cv::Mat header around it, and validate the output handle before
        // DSSetHandleSize (a null LStrHandle would fault).
        if (jpeg_string == nullptr || bufferLength == 0) return 0;
        if (detectionResult == nullptr) return 0;
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        std::string st;
        directHandle->RunInference(frame, cameraId, st);
        frame.release();
        if (st.empty()) return 0;
        const int size = static_cast<int>(st.length());
        if (DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar)) != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return 0; } // never let a C++ exception cross into LabVIEW
}
// C++ entry point: run inference (with ROI option) on *cvImage and return the
// detections through detectionResult.
// Returns: 1 = detections found, 0 = no detections or exception,
//          -1 = null handle, -2 = invalid/empty image, -3 = handle released.
extern "C" ANSODENGINE_API int RunInferenceComplete_CPP(ANSCENTER::ANSODBase** Handle, cv::Mat** cvImage, const char* cameraId, const char* activeROIMode, std::vector<ANSCENTER::Object> &detectionResult) {
    ANS_DBG("ANSOD","RunInferenceComplete_CPP: HandlePtr=%p, *Handle=%p, cam=%s, roi=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr),
        cameraId ? cameraId : "(null)", activeROIMode ? activeROIMode : "(null)");
    if (Handle == nullptr || *Handle == nullptr) {
        std::cerr << "Error: Handle is null in RunInferenceComplete_CPP!" << std::endl;
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        std::cerr << "Error: Invalid or empty input image in RunInferenceComplete_CPP!" << std::endl;
        return -2;
    }
    ODHandleGuard guard(AcquireODHandle(*Handle));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    try {
        // Save/restore thread-local to support nested calls (e.g., custom model DLLs
        // calling back into ANSODEngine via ANSLIB.dll). Without this, the inner call
        // would overwrite the outer caller's valid GpuFrameData* with nullptr.
        // BUGFIX: restoration now happens in a scope-guard destructor, so the saved
        // value is put back even when RunInferenceWithOption throws. Previously the
        // catch paths returned without restoring, leaving a stale pointer in the
        // TLS slot for this thread.
        struct TlsFrameRestore {
            GpuFrameData* saved;
            ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
        } tlsRestore{ tl_currentGpuFrame() };
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        if (localImage.cols == 0 || localImage.rows == 0) {
            return -2; // degenerate frame; TLS restored by tlsRestore
        }
        // Only use gpuFrame if this cv::Mat* has NV12 data.
        // Do NOT propagate the saved frame to inner engines — the inner call's
        // cv::Mat* is a local copy (from ANSLIB.dll) that doesn't correspond to
        // the outer frame's NV12 geometry. Using the saved frame here would make
        // the inner engine read NV12 data that doesn't match its input image.
        tl_currentGpuFrame() = gpuFrame;
        detectionResult = engine->RunInferenceWithOption(localImage, cameraId, activeROIMode);
        return detectionResult.empty() ? 0 : 1;
    }
    catch (const std::exception& ex) {
        std::cerr << "Exception in RunInferenceComplete_CPP: " << ex.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in RunInferenceComplete_CPP!" << std::endl;
        return 0;
    }
}
// C++ entry point: run plain inference on *cvImage and return the detections
// through detectionResult.
// Returns: 1 = detections found, 0 = no detections or exception,
//          -1 = null handle, -2 = invalid/empty image, -3 = handle released.
extern "C" ANSODENGINE_API int RunInference_CPP(ANSCENTER::ANSODBase** Handle, cv::Mat** cvImage, const char* cameraId, std::vector<ANSCENTER::Object>& detectionResult) {
    ANS_DBG("ANSOD","RunInference_CPP: HandlePtr=%p, *Handle=%p, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), cameraId ? cameraId : "(null)");
    if (Handle == nullptr || *Handle == nullptr) {
        std::cerr << "Error: Handle is null in RunInference_CPP!" << std::endl;
        return -1;
    }
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) {
        std::cerr << "Error: Invalid or empty input image in RunInference_CPP!" << std::endl;
        return -2;
    }
    ODHandleGuard guard(AcquireODHandle(*Handle));
    if (!guard) {
        return -3; // Handle was already released
    }
    auto* engine = guard.get();
    try {
        // Save/restore thread-local (same nested-call protection as
        // RunInferenceComplete_CPP).
        // BUGFIX: restoration now happens in a scope-guard destructor, so the
        // saved value is put back even when RunInference throws. Previously the
        // catch paths returned without restoring, leaving a stale pointer in
        // the TLS slot for this thread.
        struct TlsFrameRestore {
            GpuFrameData* saved;
            ~TlsFrameRestore() { tl_currentGpuFrame() = saved; }
        } tlsRestore{ tl_currentGpuFrame() };
        // Only use gpuFrame if this cv::Mat* has NV12 data registered
        // (see RunInferenceComplete_CPP for the geometry-mismatch rationale).
        GpuFrameData* gpuFrame = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        cv::Mat localImage = (**cvImage).clone();
        if (localImage.cols == 0 || localImage.rows == 0) {
            return -2; // degenerate frame; TLS restored by tlsRestore
        }
        tl_currentGpuFrame() = gpuFrame;
        detectionResult = engine->RunInference(localImage, cameraId);
        return detectionResult.empty() ? 0 : 1;
    }
    catch (const std::exception& ex) {
        std::cerr << "Exception in RunInference_CPP: " << ex.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in RunInference_CPP!" << std::endl;
        return 0;
    }
}
// Report the inference backend selected for this machine's hardware.
// Returns: 0 = CPU, 1 = NVIDIA GPU, 2 = OpenVINO GPU (reported as NPU to
// callers), 3 = AMD GPU, 99 = unknown.
// NOTE: the previous comment here ("convert a string to a vector of
// cv::Point") described an unrelated utility and was removed as stale.
extern "C" __declspec(dllexport) int GetEngineType() {
    ANS_DBG("ANSOD","GetEngineType: called");
    ANSCENTER::EngineType engineType = ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
    switch (engineType) {
    case ANSCENTER::EngineType::CPU:
        return 0; // CPU
    case ANSCENTER::EngineType::NVIDIA_GPU:
        return 1; // GPU
    case ANSCENTER::EngineType::OPENVINO_GPU:
        return 2; // NPU
    case ANSCENTER::EngineType::AMD_GPU:
        return 3;
    default:
        return 99; // Unknown
    }
}
// Query the engine's active detection window for the given frame.
// Returns 1 on success, 0 when the window comes back empty, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int GetActiveRect(ANSCENTER::ANSODBase** Handle, cv::Mat cvImage, cv::Rect& activeWindow) {
    ANS_DBG("ANSOD","GetActiveRect: HandlePtr=%p, *Handle=%p, image=%dx%d",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), cvImage.cols, cvImage.rows);
    try {
        const bool handleOk = (Handle != nullptr) && (*Handle != nullptr);
        if (!handleOk) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (cvImage.empty()) {
            std::cerr << "Error: Input image is empty!" << std::endl;
            return -1;
        }
        activeWindow = (*Handle)->GetActiveWindow(cvImage);
        if (!activeWindow.empty()) {
            return 1; // Success
        }
        std::cerr << "Error: Active window is empty!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in GetActiveWindow!" << std::endl;
        return -1;
    }
}
// Run the engine's motion detector on a decoded frame.
// Returns 1 when movement objects were found, 0 when none were, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int DetectMovement(ANSCENTER::ANSODBase** Handle, cv::Mat image, const char* cameraId, std::vector<ANSCENTER::Object>& results) {
    ANS_DBG("ANSOD","DetectMovement: HandlePtr=%p, *Handle=%p, image=%dx%d, cam=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), image.cols, image.rows,
        cameraId ? cameraId : "(null)");
    try {
        const bool handleOk = (Handle != nullptr) && (*Handle != nullptr);
        if (!handleOk) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (image.empty()) {
            std::cerr << "Error: Input image is empty!" << std::endl;
            return -1;
        }
        results = (*Handle)->DetectMovement(image, cameraId);
        if (!results.empty()) {
            return 1; // Success
        }
        std::cerr << "Error: No detection results!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in DetectMovement!" << std::endl;
        return -1;
    }
}
// Trigger model optimization on the engine (fp16 selects half precision).
// Returns 1 on success, 0 when optimization fails, -1 on bad handle/exception.
extern "C" __declspec(dllexport) int Optimize(ANSCENTER::ANSODBase** Handle, bool fp16) {
    ANS_DBG("ANSOD","Optimize: HandlePtr=%p, *Handle=%p, fp16=%d",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), fp16 ? 1 : 0);
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        // OptimizeModel takes an out-parameter for the model folder; this
        // wrapper ignores the value it receives.
        std::string modelFolder;
        if ((*Handle)->OptimizeModel(fp16, modelFolder)) {
            return 1; // Success
        }
        std::cerr << "Error: Model optimization failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in Optimize!" << std::endl;
        return -1;
    }
}
// Parse a serialized parameter string and apply it to the engine.
// Returns 1 on success, 0 when SetParameters rejects the values, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int SetODParameters(ANSCENTER::ANSODBase** Handle, const char* parameters) {
    ANS_DBG("ANSOD","SetODParameters: HandlePtr=%p, *Handle=%p, parameters=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), parameters ? parameters : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (parameters == nullptr) {
            std::cerr << "Error: Parameters string is null!" << std::endl;
            return -1;
        }
        std::string serialized(parameters);
        ANSCENTER::Params parsed = ANSCENTER::ANSUtilityHelper::ParseCustomParameters(serialized);
        if ((*Handle)->SetParameters(parsed)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting parameters failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetParameters!" << std::endl;
        return -1;
    }
}
// Copy the engine's current parameter struct into `param`.
// Returns 1 on success, -1 on null handle or any exception.
extern "C" __declspec(dllexport) int GetODParameters(ANSCENTER::ANSODBase** Handle, ANSCENTER::Params& param) {
    ANS_DBG("ANSOD","GetODParameters: HandlePtr=%p, *Handle=%p",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr));
    try {
        const bool handleOk = (Handle != nullptr) && (*Handle != nullptr);
        if (!handleOk) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        param = (*Handle)->GetParameters();
        return 1; // Success
    }
    catch (...) {
        std::cerr << "Unknown exception in GetParameters!" << std::endl;
        return -1;
    }
}
// Serialize the engine's configured parameters into a LabVIEW string handle.
// Returns 1 on success, 0 on failure/empty result, -1 on bad arguments,
// -2 when a structured (SEH) exception was trapped.
extern "C" __declspec(dllexport) int GetConfiguredParameters(ANSCENTER::ANSODBase** Handle, LStrHandle stParam) {
    ANS_DBG("ANSOD","GetConfiguredParameters: HandlePtr=%p, *Handle=%p",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr));
    // SEH guard around a C++ lambda: the lambda owns every object that needs
    // unwinding, keeping the enclosing __try frame POD-only (avoids C2712).
    __try {
        return [&]() -> int {
            try {
                if (Handle == nullptr || *Handle == nullptr) {
                    std::cerr << "Error: Handle is null!" << std::endl;
                    return -1;
                }
                if (stParam == nullptr) {
                    std::cerr << "Error: stParam LStrHandle is null!" << std::endl;
                    return -1;
                }
                ANSCENTER::Params configured;
                if (!(*Handle)->ConfigureParameters(configured)) {
                    std::cerr << "Error: Getting parameters failed!" << std::endl;
                    return 0;
                }
                const std::string serialized = ANSCENTER::ANSUtilityHelper::SerializeCustomParamters(configured);
                if (serialized.empty()) return 0;
                const int byteCount = static_cast<int>(serialized.length());
                MgErr status = DSSetHandleSize(stParam, sizeof(int32) + byteCount * sizeof(uChar));
                if (status != noErr) return 0;
                (*stParam)->cnt = byteCount;
                memcpy((*stParam)->str, serialized.c_str(), byteCount);
                return 1;
            }
            catch (const std::exception& e) {
                std::cerr << "GetConfiguredParameters exception: " << e.what() << std::endl;
                return 0;
            }
            catch (...) {
                std::cerr << "GetConfiguredParameters unknown exception" << std::endl;
                return 0;
            }
        }();
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        std::cerr << "GetConfiguredParameters SEH exception (code: " << GetExceptionCode() << ")" << std::endl;
        return -2;
    }
}
// C++ variant of GetConfiguredParameters: serialize the engine's configured
// parameters straight into a std::string.
// Returns 1 on success, 0 on failure/empty result, -1 on null handle.
extern "C" __declspec(dllexport) int GetConfiguredParameters_CPP(ANSCENTER::ANSODBase** Handle, std::string& stParam) {
    ANS_DBG("ANSOD","GetConfiguredParameters_CPP: HandlePtr=%p, *Handle=%p",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr));
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        ANSCENTER::Params configured;
        if (!(*Handle)->ConfigureParameters(configured)) {
            std::cerr << "Error: Getting parameters failed!" << std::endl;
            return 0;
        }
        stParam = ANSCENTER::ANSUtilityHelper::SerializeCustomParamters(configured);
        return stParam.empty() ? 0 : 1;
    }
    catch (...) {
        return 0;
    }
}
// LabVIEW-facing shutdown of the embedded Python engine(s).
// NOTE(review): the `false` argument presumably selects the non-C++ caller
// variant of the shutdown path — confirm against ANSCUSTOMPY::SafeShutdownAll.
extern "C" ANSODENGINE_API int ShutdownPythonEngine() {
    ANS_DBG("ANSOD","ShutdownPythonEngine: called");
    ANSCENTER::ANSCUSTOMPY::SafeShutdownAll(false);
    return 1; // unconditional success code
}
// C++-caller variant of ShutdownPythonEngine.
// NOTE(review): the `true` argument presumably selects the C++ caller variant
// of the shutdown path — confirm against ANSCUSTOMPY::SafeShutdownAll.
extern "C" ANSODENGINE_API int ShutdownPythonEngine_CPP() {
    ANS_DBG("ANSOD","ShutdownPythonEngine_CPP: called");
    ANSCENTER::ANSCUSTOMPY::SafeShutdownAll(true);
    return 1; // unconditional success code
}
// Update the engine's minimum detection confidence threshold.
// Returns 1 on success, 0 when the engine rejects the value, -1 on null
// handle or any exception.
extern "C" __declspec(dllexport) int UpdateDetectionMinScore(ANSCENTER::ANSODBase** Handle, float detectionScore) {
    ANS_DBG("ANSOD","UpdateDetectionMinScore: HandlePtr=%p, *Handle=%p, detectionScore=%.4f",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), detectionScore);
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if ((*Handle)->UpdateDetectionThreshold(detectionScore)) {
            return 1; // Success
        }
        std::cerr << "Error: Updating detection threshold failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in UpdateDetectionMinScore!" << std::endl;
        return -1;
    }
}
// Pass a text prompt to the engine (for prompt-driven models).
// Returns 1 on success, 0 when the engine rejects the prompt, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int SetPrompt(ANSCENTER::ANSODBase** Handle, const char* textPrompt) {
    ANS_DBG("ANSOD","SetPrompt: HandlePtr=%p, *Handle=%p, textPrompt=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), textPrompt ? textPrompt : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (textPrompt == nullptr) {
            std::cerr << "Error: Text prompt is null!" << std::endl;
            return -1;
        }
        std::string prompt(textPrompt);
        if ((*Handle)->SetPrompt(prompt)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting text prompt failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetPrompt!" << std::endl;
        return -1;
    }
}
// Select the object tracker and toggle it on/off.
// trackerType: 0 = BYTETRACK, 1 = UCMC, 2 = OCSORT (anything else falls back
// to BYTETRACK). enableTracker: 1 enables, any other value disables.
// Returns 1 on success, 0 when the engine rejects the change, -1 on null
// handle or any exception.
extern "C" __declspec(dllexport) int SetTracker(ANSCENTER::ANSODBase** Handle, int trackerType, int enableTracker) {
    ANS_DBG("ANSOD","SetTracker: HandlePtr=%p, *Handle=%p, trackerType=%d, enableTracker=%d",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), trackerType, enableTracker);
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        // Map the integer selector onto the tracker enum; BYTETRACK is the
        // default for 0 and any unrecognized value.
        ANSCENTER::TrackerType selected = ANSCENTER::TrackerType::BYTETRACK;
        if (trackerType == 1) {
            selected = ANSCENTER::TrackerType::UCMC;
        }
        else if (trackerType == 2) {
            selected = ANSCENTER::TrackerType::OCSORT;
        }
        const bool enable = (enableTracker == 1);
        if ((*Handle)->SetTracker(selected, enable)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting tracker failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetTracker!" << std::endl;
        return -1;
    }
}
// Pass a serialized tracker-parameter string through to the engine.
// Returns 1 on success, 0 when the engine rejects the values, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int SetTrackerParameters(ANSCENTER::ANSODBase** Handle, const char* trackerParams) {
    ANS_DBG("ANSOD","SetTrackerParameters: HandlePtr=%p, *Handle=%p, trackerParams=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), trackerParams ? trackerParams : "(null)");
    try {
        if (Handle == nullptr || *Handle == nullptr) {
            std::cerr << "Error: Handle is null!" << std::endl;
            return -1;
        }
        if (trackerParams == nullptr) {
            std::cerr << "Error: Tracker parameters string is null!" << std::endl;
            return -1;
        }
        std::string serialized(trackerParams);
        if ((*Handle)->SetTrackerParameters(serialized)) {
            return 1; // Success
        }
        std::cerr << "Error: Setting tracker parameters failed!" << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in SetTrackerParameters!" << std::endl;
        return -1;
    }
}
// Set stabilization parameters (JSON string).
// All keys are optional — omit to keep current defaults.
// Example: {"ema_alpha":0.4, "class_consistency_frames":8, "hysteresis_enter":0.5, "hysteresis_keep":0.3}
// Returns 1 on success, 0 when the engine rejects the JSON, -1 on bad
// arguments or any exception.
extern "C" __declspec(dllexport) int SetStabilizationParameters(ANSCENTER::ANSODBase** Handle, const char* stabParams) {
    ANS_DBG("ANSOD","SetStabilizationParameters: HandlePtr=%p, *Handle=%p, stabParams=%s",
        (void*)Handle, (void*)(Handle ? *Handle : nullptr), stabParams ? stabParams : "(null)");
    try {
        // Reject a null handle or a null JSON pointer up front.
        if (Handle == nullptr || *Handle == nullptr || stabParams == nullptr) {
            return -1;
        }
        std::string json(stabParams);
        return (*Handle)->SetStabilizationParameters(json) ? 1 : 0;
    }
    catch (...) {
        return -1;
    }
}