2026-03-28 16:54:11 +11:00
|
|
|
// dllmain.cpp : Defines the entry point for the DLL application.
|
|
|
|
|
#include "pch.h"
|
|
|
|
|
#include "ANSFR.h"
|
|
|
|
|
#include "NV12PreprocessHelper.h" // tl_currentGpuFrame()
|
|
|
|
|
#include "ANSGpuFrameRegistry.h" // gpu_frame_lookup(cv::Mat*)
|
|
|
|
|
#include <opencv2/imgcodecs.hpp>
|
|
|
|
|
#include "ANSOVFaceDetector.h"
|
|
|
|
|
#include "SCRFDFaceDetector.h"
|
|
|
|
|
#include "FaceNet.h"
|
|
|
|
|
#include "ANSFaceRecognizer.h"
|
|
|
|
|
#include "ANSLibsLoader.h"
|
2026-03-30 09:59:09 +11:00
|
|
|
#include "engine/TRTEngineCache.h"
|
|
|
|
|
#include "engine/EnginePoolManager.h"
|
2026-03-28 16:54:11 +11:00
|
|
|
#include <memory>
|
2026-03-30 09:59:09 +11:00
|
|
|
#include <climits>
|
2026-03-28 16:54:11 +11:00
|
|
|
#include <unordered_map>
|
|
|
|
|
#include <condition_variable>
|
|
|
|
|
#include <cstdint>
|
|
|
|
|
#include <atomic>
|
|
|
|
|
|
|
|
|
|
// Each DLL that instantiates Engine<T> templates needs its own definition
|
|
|
|
|
// of g_forceNoPool (referenced by EngineBuildLoadNetwork.inl).
|
|
|
|
|
// ANSODEngine exports its own; ANSFR needs a local copy.
|
|
|
|
|
std::atomic<bool> g_forceNoPool{false};
|
|
|
|
|
#include <boost/uuid/uuid.hpp>
|
|
|
|
|
#include <boost/uuid/uuid_generators.hpp>
|
|
|
|
|
#include <boost/uuid/uuid_io.hpp>
|
|
|
|
|
|
|
|
|
|
// Handle registry with refcount — prevents use-after-free when
|
|
|
|
|
// ReleaseANSRFHandle is called while inference is still running.
|
|
|
|
|
static std::unordered_map<ANSCENTER::ANSFacialRecognition*, int>& FRHandleRegistry() {
    // Function-local static: thread-safe lazy init of the handle→refcount table.
    static std::unordered_map<ANSCENTER::ANSFacialRecognition*, int> registry;
    return registry;
}
|
|
|
|
|
static std::mutex& FRHandleRegistryMutex() {
    // Guards every access to FRHandleRegistry().
    static std::mutex mtx;
    return mtx;
}
|
|
|
|
|
static std::condition_variable& FRHandleRegistryCV() {
    // Signalled when a handle's refcount drops to zero (see ReleaseFRHandleRef).
    static std::condition_variable cond;
    return cond;
}
|
|
|
|
|
|
|
|
|
|
static void RegisterFRHandle(ANSCENTER::ANSFacialRecognition* h) {
    // Insert with an initial "creation" reference of 1.
    std::lock_guard<std::mutex> guard(FRHandleRegistryMutex());
    FRHandleRegistry()[h] = 1;
}
|
|
|
|
|
|
|
|
|
|
static ANSCENTER::ANSFacialRecognition* AcquireFRHandle(ANSCENTER::ANSFacialRecognition* h) {
    // Take an extra reference for an in-flight call. Returns nullptr when the
    // handle is not (or no longer) registered — caller must bail out.
    std::lock_guard<std::mutex> guard(FRHandleRegistryMutex());
    auto& registry = FRHandleRegistry();
    auto entry = registry.find(h);
    if (entry != registry.end()) {
        ++entry->second;
        return h;
    }
    return nullptr;
}
|
|
|
|
|
|
|
|
|
|
static bool ReleaseFRHandleRef(ANSCENTER::ANSFacialRecognition* h) {
    // Drop one reference. When the count reaches zero the entry is removed and
    // any UnregisterFRHandle waiter is woken. Returns true only on that final
    // release; false when refs remain or the handle is unknown.
    std::lock_guard<std::mutex> guard(FRHandleRegistryMutex());
    auto& registry = FRHandleRegistry();
    auto entry = registry.find(h);
    if (entry == registry.end()) return false;
    if (--entry->second > 0) return false;
    registry.erase(entry);
    FRHandleRegistryCV().notify_all();
    return true;
}
|
|
|
|
|
|
|
|
|
|
// Remove a handle from the registry, blocking until every in-flight
// inference call that Acquired it has released its reference (capped at
// 30 s so shutdown can never wedge forever).
// Returns true if the handle was found and removed, false if it was never
// registered (or was already unregistered).
static bool UnregisterFRHandle(ANSCENTER::ANSFacialRecognition* h) {
    std::unique_lock<std::mutex> lk(FRHandleRegistryMutex());
    auto it = FRHandleRegistry().find(h);
    if (it == FRHandleRegistry().end()) return false;
    // Drop the creation reference taken by RegisterFRHandle; any remaining
    // count belongs to concurrent AcquireFRHandle callers.
    it->second--;
    // Wait until the entry is gone (final ReleaseFRHandleRef erases it) or
    // its count reaches zero. The predicate re-looks-up by key because the
    // iterator may be invalidated while the lock is released inside wait_for.
    bool ok = FRHandleRegistryCV().wait_for(lk, std::chrono::seconds(30), [&]() {
        auto it2 = FRHandleRegistry().find(h);
        return it2 == FRHandleRegistry().end() || it2->second <= 0;
    });
    if (!ok) {
        // Timed out: proceed anyway — a potential leak beats deadlocking the
        // caller; log for field triage.
        OutputDebugStringA("WARNING: UnregisterFRHandle timed out waiting for in-flight inference\n");
    }
    // Erase by key — safe even if ReleaseFRHandleRef already removed it.
    FRHandleRegistry().erase(h);
    return true;
}
|
|
|
|
|
|
|
|
|
|
// RAII guard — ensures ReleaseFRHandleRef is always called, preventing
|
|
|
|
|
// refcount leaks that would cause UnregisterFRHandle to deadlock.
|
|
|
|
|
class FRHandleGuard {
|
|
|
|
|
ANSCENTER::ANSFacialRecognition* engine;
|
|
|
|
|
public:
|
|
|
|
|
explicit FRHandleGuard(ANSCENTER::ANSFacialRecognition* e) : engine(e) {}
|
|
|
|
|
~FRHandleGuard() { if (engine) ReleaseFRHandleRef(engine); }
|
|
|
|
|
ANSCENTER::ANSFacialRecognition* get() const { return engine; }
|
|
|
|
|
explicit operator bool() const { return engine != nullptr; }
|
|
|
|
|
FRHandleGuard(const FRHandleGuard&) = delete;
|
|
|
|
|
FRHandleGuard& operator=(const FRHandleGuard&) = delete;
|
|
|
|
|
};
|
|
|
|
|
|
2026-03-30 09:59:09 +11:00
|
|
|
// Determine maxSlotsPerGpu based on GPU topology:
|
2026-04-12 17:16:16 +10:00
|
|
|
// non-NVIDIA (AMD/Intel/CPU) → 1 (no TensorRT pool, never grows)
|
|
|
|
|
// 1 NVIDIA GPU → 1 (single slot, no round-robin needed)
|
|
|
|
|
// >1 GPU, VRAM<24GB → 1 (round-robin: 1 slot per GPU)
|
|
|
|
|
// >1 GPU, VRAM≥24GB → -1 (elastic: on-demand slot growth)
|
|
|
|
|
//
|
|
|
|
|
// IMPORTANT: Must be gated on CheckHardwareInformation() first — calling
|
|
|
|
|
// cudaGetDeviceCount/cudaSetDevice/cudaMemGetInfo on non-NVIDIA hardware
|
|
|
|
|
// wakes up the CUDA runtime unnecessarily and, combined with DirectML on
|
|
|
|
|
// AMD, has been observed to trigger amdkmdag instability. Return 1 early
|
|
|
|
|
// on anything that isn't a detected NVIDIA GPU so the TRT pool is never
|
|
|
|
|
// exercised on those machines.
|
2026-03-30 09:59:09 +11:00
|
|
|
static int GetPoolMaxSlotsPerGpu() {
|
|
|
|
|
static int s_result = INT_MIN;
|
|
|
|
|
static std::mutex s_mutex;
|
|
|
|
|
std::lock_guard<std::mutex> lk(s_mutex);
|
|
|
|
|
if (s_result != INT_MIN) return s_result;
|
2026-04-12 17:16:16 +10:00
|
|
|
|
|
|
|
|
const ANSCENTER::EngineType detected =
|
|
|
|
|
ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
|
|
|
|
|
if (detected != ANSCENTER::EngineType::NVIDIA_GPU) {
|
|
|
|
|
s_result = 1;
|
|
|
|
|
std::cout << "Info [FR GPU]: engineType=" << static_cast<int>(detected)
|
|
|
|
|
<< " — not NVIDIA, TRT pool disabled (slot=1), skipping CUDA probe"
|
|
|
|
|
<< std::endl;
|
|
|
|
|
return s_result;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-30 09:59:09 +11:00
|
|
|
int gpuCount = 0;
|
|
|
|
|
cudaGetDeviceCount(&gpuCount);
|
|
|
|
|
if (gpuCount <= 1) {
|
|
|
|
|
s_result = 1;
|
|
|
|
|
std::cout << "Info [FR GPU]: Single GPU — pool mode: 1 slot, no round-robin" << std::endl;
|
|
|
|
|
return s_result;
|
|
|
|
|
}
|
|
|
|
|
constexpr size_t kLargeVramBytes = 24ULL * 1024 * 1024 * 1024; // 24 GB
|
|
|
|
|
size_t totalMem = 0, freeMem = 0;
|
|
|
|
|
cudaSetDevice(0);
|
|
|
|
|
cudaMemGetInfo(&freeMem, &totalMem);
|
|
|
|
|
if (totalMem >= kLargeVramBytes) {
|
|
|
|
|
s_result = -1;
|
|
|
|
|
std::cout << "Info [FR GPU]: " << gpuCount << " GPUs, VRAM >= 24 GB — pool mode: elastic" << std::endl;
|
|
|
|
|
} else {
|
|
|
|
|
s_result = 1;
|
|
|
|
|
std::cout << "Info [FR GPU]: " << gpuCount << " GPUs, VRAM < 24 GB — pool mode: round-robin" << std::endl;
|
|
|
|
|
}
|
|
|
|
|
return s_result;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-28 16:54:11 +11:00
|
|
|
// DLL entry point. On attach, pins the module for process lifetime; on
// detach, distinguishes process termination (skip CUDA cleanup — the
// context is already dead) from a dynamic FreeLibrary (full cleanup).
BOOL APIENTRY DllMain( HMODULE hModule,
                       DWORD ul_reason_for_call,
                       LPVOID lpReserved
                     ) noexcept
{
    switch (ul_reason_for_call)
    {
    case DLL_PROCESS_ATTACH:
        // Pin the DLL so it is never unmapped while idle-timer threads are
        // still running. During LabVIEW shutdown the CLR/COM teardown can
        // unload DLLs before all threads exit → crash at unmapped code.
        {
            HMODULE hSelf = nullptr;
            // GET_MODULE_HANDLE_EX_FLAG_PIN keeps this module loaded until
            // process exit; hSelf itself is intentionally unused afterwards.
            GetModuleHandleExW(
                GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
                GET_MODULE_HANDLE_EX_FLAG_PIN,
                reinterpret_cast<LPCWSTR>(&DllMain),
                &hSelf);
        }
        break;
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
        break;
    case DLL_PROCESS_DETACH:
        // ExitProcess: OS killed worker threads, CUDA context is dead.
        // Set flag so Engine/Pool destructors skip CUDA cleanup.
        // (Non-null lpReserved means process termination, not FreeLibrary.)
        if (lpReserved != nullptr) {
            g_processExiting().store(true, std::memory_order_relaxed);
            break;
        }

        // Dynamic FreeLibrary — threads are still alive, safe to clean up.
        try {
            // Destroy any handles the caller forgot to release. Pointers are
            // copied out first so Destroy()/delete run without the registry
            // lock held (they may be slow or re-enter).
            std::vector<ANSCENTER::ANSFacialRecognition*> leakedHandles;
            {
                std::lock_guard<std::mutex> lk(FRHandleRegistryMutex());
                for (auto& [h, _] : FRHandleRegistry())
                    leakedHandles.push_back(h);
                FRHandleRegistry().clear();
            }
            for (auto* h : leakedHandles) {
                try { h->Destroy(); delete h; } catch (...) {}
            }
            // Tear down shared singletons (engine pool, TRT engine cache).
            try { EnginePoolManager<float>::instance().clearAll(); } catch (...) {}
            try { TRTEngineCache::instance().clearAll(); } catch (...) {}
        } catch (...) {}
        break;
    }
    return TRUE;
}
|
|
|
|
|
|
|
|
|
|
// Helper: safely copy a std::string into a LabVIEW LStrHandle.
|
|
|
|
|
// Returns 1 on success, 0 on failure (empty string or allocation error).
|
|
|
|
|
static int CopyToLStrHandle(LStrHandle handle, const std::string& str) noexcept {
    // Copy str into a LabVIEW string handle (4-byte length prefix + bytes).
    // Returns 1 on success, 0 on empty input / null handle / resize failure.
    if (handle == nullptr || str.empty()) return 0;
    const int32 byteCount = static_cast<int32>(str.length());
    // Grow the handle to hold the length prefix plus the payload.
    if (DSSetHandleSize(handle, sizeof(int32) + byteCount * sizeof(uChar)) != noErr)
        return 0;
    (*handle)->cnt = byteCount;
    memcpy((*handle)->str, str.c_str(), static_cast<size_t>(byteCount));
    return 1;
}
|
|
|
|
|
|
|
|
|
|
// Create and initialize a facial-recognition engine instance.
// Returns Initialize()'s result (>= 0 success, < 0 failure; -1 on bad
// arguments or any exception). On success *Handle owns the new instance
// and it is tracked in the refcounted handle registry.
extern "C" ANSFR_API int CreateANSRFHandle(ANSCENTER::ANSFacialRecognition** Handle,
                                           const char* licenseKey,
                                           const char* configFilePath,
                                           const char* databaseFilePath,
                                           const char* recogniserFilePath,
                                           const char* detectorFilePath,
                                           int precisionType,
                                           float knownPersonThreshold,
                                           int enableAgeGender,
                                           int enableFaceEmotions,
                                           int enableHeadPose,
                                           int minFaceSize,
                                           float faceDetectorThreshold,
                                           int enableFaceLiveness,
                                           int enableAntiSpoofing)
{
    try {
        // Ensure all shared DLLs (OpenCV, OpenVINO, TRT, ORT) are pre-loaded
        ANSCENTER::ANSLibsLoader::Initialize();

        if (!Handle || !licenseKey || !configFilePath || !databaseFilePath || !recogniserFilePath) return -1;

        // Log the detected vendor path so field triage between NVIDIA / AMD /
        // Intel / CPU machines is trivial from the debug log. Mirrors the
        // vendorTag logging already in ANSLPR_OD::LoadEngine and ANSOCR
        // CreateANSOCRHandleEx.
        {
            ANSCENTER::EngineType detected =
                ANSCENTER::ANSLicenseHelper::CheckHardwareInformation();
            const char* vendorTag =
                detected == ANSCENTER::EngineType::NVIDIA_GPU ? "NVIDIA_GPU (TensorRT + CUDA preproc, SCRFD face detector)" :
                detected == ANSCENTER::EngineType::AMD_GPU ? "AMD_GPU (ONNX Runtime / DirectML, OV face detector, NV12/CUDA DISABLED)" :
                detected == ANSCENTER::EngineType::OPENVINO_GPU ? "OPENVINO_GPU (OpenVINO, OV face detector, NV12/CUDA DISABLED)" :
                "CPU (ONNX Runtime / OpenVINO CPU, NV12/CUDA DISABLED)";
            char buf[224];
            snprintf(buf, sizeof(buf),
                     "[ANSFR] CreateANSRFHandle: detected engineType=%d [%s]\n",
                     static_cast<int>(detected), vendorTag);
            OutputDebugStringA(buf);
            std::cout << buf;
        }

        // Pure constructor: ignore *Handle(in). LabVIEW's CLF Node marshalling
        // reuses the same temp buffer per call site, so *Handle(in) often holds
        // leftover bytes from the previous Create's output even when the actual
        // LabVIEW wire is a different, freshly-allocated instance. Inspecting
        // *Handle(in) and destroying what we "see" tears down legitimate
        // parallel instances. (Same reasoning as CreateANSAWSHandle.)
        // Trade-off: a true double-Create on the same wire leaks the prior
        // handle -- caller's bug; the alternative is far worse.
        *Handle = nullptr;

        // std::unique_ptr ensures automatic cleanup if Initialize() throws
        auto ptr = std::make_unique<ANSCENTER::ANSFacialRecognition>();

        const bool _enableFaceLiveness = (enableFaceLiveness == 1);
        const bool _enableAntiSpoofing = (enableAntiSpoofing == 1);

        // Topology-derived pool sizing (cached after the first probe).
        ptr->SetMaxSlotsPerGpu(GetPoolMaxSlotsPerGpu());

        int result = ptr->Initialize(licenseKey,
                                     configFilePath,
                                     databaseFilePath,
                                     recogniserFilePath,
                                     detectorFilePath ? detectorFilePath : "",
                                     precisionType,
                                     knownPersonThreshold,
                                     enableAgeGender,
                                     enableFaceEmotions,
                                     enableHeadPose,
                                     minFaceSize,
                                     faceDetectorThreshold,
                                     _enableFaceLiveness,
                                     _enableAntiSpoofing);

        if (result < 0) {
            *Handle = nullptr;
            return result;
        }

        // Transfer ownership to caller on success
        *Handle = ptr.release();
        RegisterFRHandle(*Handle);
        return result;
    }
    catch (const std::exception&) {
        return -1;
    }
    catch (...) {
        // FIX: non-std exceptions (foreign/SEH-translated) previously escaped
        // the extern "C" / DLL boundary — undefined behavior. Swallow here.
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// Load the underlying inference engine for an already-created handle.
// Returns 1 on success, 0 on load failure, -1 on bad handle or exception.
extern "C" ANSFR_API int LoadANSRFEngine(ANSCENTER::ANSFacialRecognition** Handle) {
    try {
        if (!Handle || !*Handle) return -1;
        return (*Handle)->LoadEngine() ? 1 : 0;
    }
    catch (...) {
        // FIX: previously only std::exception was caught; any other exception
        // would cross the extern "C" / DLL boundary (undefined behavior).
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
static int ReleaseANSRFHandle_Impl(ANSCENTER::ANSFacialRecognition** Handle) {
    // Destroy a handle created by CreateANSRFHandle. UnregisterFRHandle
    // blocks until in-flight inference drains, so deletion here is safe.
    // Returns 1 on success (including already-freed), 0 on exception.
    try {
        if (Handle == nullptr || *Handle == nullptr) return 1;
        ANSCENTER::ANSFacialRecognition* engine = *Handle;
        if (!UnregisterFRHandle(engine)) {
            // Not in registry — already freed
            *Handle = nullptr;
            return 1;
        }
        engine->Destroy();
        delete engine;
        *Handle = nullptr;
        return 1;
    }
    catch (...) {
        if (Handle) *Handle = nullptr;
        return 0;
    }
}
|
|
|
|
|
|
|
|
|
|
// Exported release entry point. The SEH __try/__except wrapper ensures that
// even hardware faults (e.g. an access violation from a stale or corrupt
// handle) cannot propagate into the caller (LabVIEW). The C++ work lives in
// ReleaseANSRFHandle_Impl because MSVC forbids __try in a function that also
// needs C++ object unwinding.
extern "C" ANSFR_API int ReleaseANSRFHandle(ANSCENTER::ANSFacialRecognition** Handle) {
    __try {
        return ReleaseANSRFHandle_Impl(Handle);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        // Swallow the structured exception; report failure to the caller.
        return 0;
    }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFInference(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    // Full inference on a JPEG-encoded frame; returns JSON, "" on any failure.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0) return "";
    // Refcounted guard keeps the engine alive even if Release runs concurrently.
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return "";
        auto faces = guard.get()->Inference(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFInferenceBinary(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    // Full inference on a raw width×height 8UC3 buffer; JSON or "" on failure.
    if (Handle == nullptr || *Handle == nullptr || jpeg_bytes == nullptr || width == 0 || height == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        // clone() detaches from the caller-owned buffer before inference.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) return "";
        auto faces = guard.get()->Inference(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFRecognition(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    // Recognition-only pass on a JPEG-encoded frame; JSON or "" on failure.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return "";
        auto faces = guard.get()->Recognize(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFRecognitionBinary(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    // Recognition-only pass on a raw width×height 8UC3 buffer.
    if (Handle == nullptr || *Handle == nullptr || jpeg_bytes == nullptr || width == 0 || height == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) return "";
        auto faces = guard.get()->Recognize(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFDetectorBinary(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    // Detection-only pass on a raw width×height 8UC3 buffer.
    if (Handle == nullptr || *Handle == nullptr || jpeg_bytes == nullptr || width == 0 || height == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) return "";
        auto faces = guard.get()->Detect(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFDetector(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    // Detection-only pass on a JPEG-encoded frame.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return "";
        auto faces = guard.get()->Detect(frame);
        frame.release();
        return guard.get()->FaceObjectsToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
|
|
|
|
|
//// For LabVIEW API
|
|
|
|
|
// LabVIEW wrapper around RunANSRFInference: writes the JSON result into
// detectionResult. Returns 1 on success, 0 on empty result/copy failure,
// -1 on bad arguments or exception.
extern "C" ANSFR_API int RunInference_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    try {
        if (!Handle || !*Handle || !jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFInference(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (...) {
        // FIX: previously only std::exception was caught; any other exception
        // would escape into LabVIEW across the DLL boundary.
        return -1;
    }
}
|
|
|
|
|
extern "C" ANSFR_API int RunInferenceWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Per-camera inference for LabVIEW. Returns CopyToLStrHandle's result,
    // 0 on undecodable image, -3 on unregistered handle, -1 on bad args/exception.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        auto faces = guard.get()->Inference(frame, cameraId);
        frame.release();
        std::string json = guard.get()->FaceObjectsToJsonString(faces);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) { return -1; }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// LabVIEW wrapper around RunANSRFDetector: writes the JSON result into
// detectionResult. Returns 1 on success, 0 on empty result/copy failure,
// -1 on bad arguments or exception.
extern "C" ANSFR_API int RunDetector_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult)
{
    try {
        if (!Handle || !*Handle || !jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFDetector(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (...) {
        // FIX: previously only std::exception was caught; any other exception
        // would escape into LabVIEW across the DLL boundary.
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
extern "C" ANSFR_API int RunDetectorWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Per-camera detection for LabVIEW. Returns CopyToLStrHandle's result,
    // 0 on undecodable image, -3 on unregistered handle, -1 on bad args/exception.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        auto faces = guard.get()->Detect(frame, cameraId);
        frame.release();
        std::string json = guard.get()->FaceObjectsToJsonString(faces);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) { return -1; }
}
|
|
|
|
|
|
|
|
|
|
// LabVIEW wrapper around RunANSRFRecognition: writes the JSON result into
// detectionResult. Returns 1 on success, 0 on empty result/copy failure,
// -1 on bad arguments or exception.
extern "C" ANSFR_API int RunRecognition_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    try {
        if (!Handle || !*Handle || !jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFRecognition(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (...) {
        // FIX: previously only std::exception was caught; any other exception
        // would escape into LabVIEW across the DLL boundary.
        return -1;
    }
}
|
|
|
|
|
extern "C" ANSFR_API int RunRecognitionWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Per-camera recognition for LabVIEW. Returns CopyToLStrHandle's result,
    // 0 on undecodable image, -3 on unregistered handle, -1 on bad args/exception.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        auto faces = guard.get()->Recognize(frame, cameraId);
        frame.release();
        std::string json = guard.get()->FaceObjectsToJsonString(faces);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) { return -1; }
}
|
|
|
|
|
extern "C" ANSFR_API int RunFaceDetection_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Face-detector-only pass (Object results, not FaceResultObject).
    // Returns CopyToLStrHandle's result, 0 on undecodable image, -3 on
    // unregistered handle, -1 on bad arguments or exception.
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    try {
        cv::Mat frame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (frame.empty()) return 0;
        auto faces = guard.get()->FaceDetect(frame, cameraId);
        frame.release();
        std::string json = guard.get()->FaceToJsonString(faces);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) { return -1; }
}
|
|
|
|
|
extern "C" ANSFR_API std::string RunANSRFFaceDetector(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height)
{
    // Face-detector-only pass on a raw width×height 8UC3 buffer, using the
    // fixed camera id "0000". JSON or "" on any failure.
    if (Handle == nullptr || *Handle == nullptr || jpeg_bytes == nullptr || width == 0 || height == 0) return "";
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return "";
    try {
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) return "";
        auto faces = guard.get()->FaceDetect(frame, "0000");
        frame.release();
        return guard.get()->FaceToJsonString(faces);
    }
    catch (...) { return ""; }
}
|
|
|
|
|
|
|
|
|
|
// User management
|
|
|
|
|
// User management: register a (userCode, userName) pair with the engine's
// database. Returns the engine's result, or -1 on bad args/exception.
extern "C" ANSFR_API int InsertUser(ANSCENTER::ANSFacialRecognition** Handle, const char* userCode, const char* userName) {
    try {
        if (!Handle || !*Handle || !userCode || !userName) return -1;
        return (*Handle)->InsertUser(userCode, userName);
    }
    catch (...) {
        // FIX: previously only std::exception was caught; any other exception
        // would cross the extern "C" / DLL boundary (undefined behavior).
        return -1;
    }
}
|
2026-04-08 13:45:52 +10:00
|
|
|
|
|
|
|
|
// Helper: repair mixed-encoding LabVIEW LStrHandle to clean UTF-16LE.
|
|
|
|
|
// LabVIEW text controls may produce a mix of UTF-16LE pairs, embedded UTF-8
|
|
|
|
|
// multi-byte sequences, and lone space bytes (0x20 without 0x00 high byte).
|
|
|
|
|
// This normalizes everything to proper UTF-16LE pairs.
|
|
|
|
|
// Input: BOM-stripped raw bytes. Output: clean UTF-16LE vector.
|
|
|
|
|
static std::vector<unsigned char> RepairLabVIEWUTF16LE_Local(const unsigned char* data, int len) {
|
|
|
|
|
std::vector<unsigned char> repaired;
|
|
|
|
|
if (!data || len <= 0) return repaired;
|
|
|
|
|
repaired.reserve(len + 32);
|
|
|
|
|
|
|
|
|
|
auto emitU16 = [&](uint16_t cp) {
|
|
|
|
|
repaired.push_back(static_cast<unsigned char>(cp & 0xFF));
|
|
|
|
|
repaired.push_back(static_cast<unsigned char>((cp >> 8) & 0xFF));
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < len; ) {
|
|
|
|
|
unsigned char b = data[i];
|
|
|
|
|
|
|
|
|
|
// 1. Detect embedded UTF-8 multi-byte sequences
|
|
|
|
|
// 2-byte UTF-8: C2-DF followed by 80-BF
|
|
|
|
|
if (b >= 0xC2 && b <= 0xDF && i + 1 < len) {
|
|
|
|
|
unsigned char b1 = data[i + 1];
|
|
|
|
|
if ((b1 & 0xC0) == 0x80) {
|
|
|
|
|
uint32_t cp = ((b & 0x1F) << 6) | (b1 & 0x3F);
|
|
|
|
|
emitU16(static_cast<uint16_t>(cp));
|
|
|
|
|
i += 2; continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// 3-byte UTF-8: E0-EF followed by 80-BF 80-BF
|
|
|
|
|
if (b >= 0xE0 && b <= 0xEF && i + 2 < len) {
|
|
|
|
|
unsigned char b1 = data[i + 1], b2 = data[i + 2];
|
|
|
|
|
if ((b1 & 0xC0) == 0x80 && (b2 & 0xC0) == 0x80) {
|
|
|
|
|
uint32_t cp = ((b & 0x0F) << 12) | ((b1 & 0x3F) << 6) | (b2 & 0x3F);
|
|
|
|
|
if (cp >= 0x0800 && (cp < 0xD800 || cp > 0xDFFF)) {
|
|
|
|
|
emitU16(static_cast<uint16_t>(cp));
|
|
|
|
|
i += 3; continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// 4-byte UTF-8: F0-F4 followed by 80-BF 80-BF 80-BF
|
|
|
|
|
if (b >= 0xF0 && b <= 0xF4 && i + 3 < len) {
|
|
|
|
|
unsigned char b1 = data[i + 1], b2 = data[i + 2], b3 = data[i + 3];
|
|
|
|
|
if ((b1 & 0xC0) == 0x80 && (b2 & 0xC0) == 0x80 && (b3 & 0xC0) == 0x80) {
|
|
|
|
|
uint32_t cp = ((b & 0x07) << 18) | ((b1 & 0x3F) << 12)
|
|
|
|
|
| ((b2 & 0x3F) << 6) | (b3 & 0x3F);
|
|
|
|
|
if (cp >= 0x10000 && cp <= 0x10FFFF) {
|
|
|
|
|
cp -= 0x10000;
|
|
|
|
|
emitU16(static_cast<uint16_t>(0xD800 + (cp >> 10)));
|
|
|
|
|
emitU16(static_cast<uint16_t>(0xDC00 + (cp & 0x3FF)));
|
|
|
|
|
i += 4; continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// 2. Normal UTF-16LE pair (low byte + 0x00 high byte)
|
|
|
|
|
if (i + 1 < len && data[i + 1] == 0x00) {
|
|
|
|
|
repaired.push_back(data[i]); repaired.push_back(0x00); i += 2;
|
|
|
|
|
}
|
|
|
|
|
// 3. Lone space byte — LabVIEW dropped the 0x00 high byte
|
|
|
|
|
else if (b == 0x20 && (i + 1 >= len || data[i + 1] != 0x00)) {
|
|
|
|
|
repaired.push_back(0x20); repaired.push_back(0x00); i += 1;
|
|
|
|
|
}
|
|
|
|
|
// 4. Non-ASCII UTF-16LE pair
|
|
|
|
|
else if (i + 1 < len) {
|
|
|
|
|
repaired.push_back(data[i]); repaired.push_back(data[i + 1]); i += 2;
|
|
|
|
|
}
|
|
|
|
|
// 5. Trailing odd byte — skip
|
|
|
|
|
else { i++; }
|
|
|
|
|
}
|
|
|
|
|
return repaired;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Helper: convert LStrHandle (mixed UTF-8/UTF-16LE or system codepage) to UTF-8 string
|
|
|
|
|
// Convert a LabVIEW LStrHandle to a UTF-8 std::string.
// Heuristic: any 0x00 byte (or a FF FE BOM) marks the payload as UTF-16LE
// (possibly with embedded UTF-8 islands — repaired first); otherwise the
// bytes are accepted as UTF-8 when valid, else treated as the system
// codepage (Windows only) and transcoded. Returns "" for null/empty input.
static std::string LStrHandleToUTF8(LStrHandle handle) {
    if (!handle) return "";
    int byteLen = (*handle)->cnt;
    if (byteLen <= 0) return "";
    const unsigned char* data = reinterpret_cast<const unsigned char*>((*handle)->str);

    // Check for BOM or 0x00 bytes → UTF-16LE (possibly mixed with UTF-8)
    bool isUtf16le = false;
    if (byteLen >= 2 && data[0] == 0xFF && data[1] == 0xFE) isUtf16le = true;
    if (!isUtf16le) {
        for (int i = 0; i < byteLen; i++) {
            if (data[i] == 0x00) { isUtf16le = true; break; }
        }
    }

    if (isUtf16le) {
        // Strip the BOM before conversion, if present.
        const unsigned char* convData = data;
        int convLen = byteLen;
        if (convLen >= 2 && convData[0] == 0xFF && convData[1] == 0xFE) { convData += 2; convLen -= 2; }
        if (convLen <= 0) return "";

        // Repair mixed encoding (UTF-8 islands, lone spaces) → clean UTF-16LE
        auto repaired = RepairLabVIEWUTF16LE_Local(convData, convLen);

#ifdef _WIN32
        // UTF-16LE → UTF-8 via Win32; two-call pattern sizes the buffer first.
        int wideLen = static_cast<int>(repaired.size()) / 2;
        const wchar_t* wideStr = reinterpret_cast<const wchar_t*>(repaired.data());
        int utf8Len = WideCharToMultiByte(CP_UTF8, 0, wideStr, wideLen, nullptr, 0, nullptr, nullptr);
        if (utf8Len > 0) {
            std::string utf8(utf8Len, 0);
            WideCharToMultiByte(CP_UTF8, 0, wideStr, wideLen, &utf8[0], utf8Len, nullptr, nullptr);
            return utf8;
        }
#endif
        // Non-Windows (or conversion failure): return the raw repaired bytes.
        return std::string(reinterpret_cast<const char*>(repaired.data()), repaired.size());
    } else {
        // No 0x00 bytes — try UTF-8 first, fall back to system codepage.
        // IsValidUTF8: check if bytes form valid UTF-8 with at least one multi-byte sequence.
        auto IsValidUTF8 = [](const unsigned char* d, int l) -> bool {
            bool hasMulti = false;
            for (int j = 0; j < l; ) {
                unsigned char c = d[j];
                if (c <= 0x7F) { j++; }
                else if (c >= 0xC2 && c <= 0xDF) {
                    if (j + 1 >= l || (d[j + 1] & 0xC0) != 0x80) return false;
                    hasMulti = true; j += 2;
                } else if (c >= 0xE0 && c <= 0xEF) {
                    if (j + 2 >= l || (d[j + 1] & 0xC0) != 0x80 || (d[j + 2] & 0xC0) != 0x80) return false;
                    hasMulti = true; j += 3;
                } else if (c >= 0xF0 && c <= 0xF4) {
                    if (j + 3 >= l || (d[j + 1] & 0xC0) != 0x80 || (d[j + 2] & 0xC0) != 0x80 || (d[j + 3] & 0xC0) != 0x80) return false;
                    hasMulti = true; j += 4;
                } else { return false; }
            }
            // Pure ASCII returns false on purpose: either branch below
            // yields identical bytes, so the codepage path is harmless.
            return hasMulti;
        };

        if (IsValidUTF8(data, byteLen)) {
            return std::string(reinterpret_cast<const char*>(data), byteLen);
        }
#ifdef _WIN32
        // System codepage (ANSI) → wide → UTF-8.
        int wideLen = MultiByteToWideChar(CP_ACP, 0, reinterpret_cast<const char*>(data), byteLen, nullptr, 0);
        if (wideLen > 0) {
            std::wstring wideStr(wideLen, 0);
            MultiByteToWideChar(CP_ACP, 0, reinterpret_cast<const char*>(data), byteLen, &wideStr[0], wideLen);
            int utf8Len = WideCharToMultiByte(CP_UTF8, 0, wideStr.c_str(), wideLen, nullptr, 0, nullptr, nullptr);
            if (utf8Len > 0) {
                std::string utf8(utf8Len, 0);
                WideCharToMultiByte(CP_UTF8, 0, wideStr.c_str(), wideLen, &utf8[0], utf8Len, nullptr, nullptr);
                return utf8;
            }
        }
#endif
        // Last resort: pass the raw bytes through unmodified.
        return std::string(reinterpret_cast<const char*>(data), byteLen);
    }
}
|
|
|
|
|
|
|
|
|
|
// Inserts a new user, decoding the LabVIEW display-name handle to UTF-8 first.
// Returns the engine's result code, or -1 on invalid args / empty name / exception.
extern "C" ANSFR_API int InsertUser_LV(ANSCENTER::ANSFacialRecognition** Handle, const char* userCode, LStrHandle userName) {
    if (Handle == nullptr || *Handle == nullptr || userCode == nullptr || userName == nullptr) {
        return -1;
    }
    try {
        std::string decodedName = LStrHandleToUTF8(userName);
        if (decodedName.empty()) {
            return -1;
        }
        return (*Handle)->InsertUser(userCode, decodedName);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// LabVIEW variant of UpdateUser: the display name arrives as an LStrHandle and
// is decoded to UTF-8 before being forwarded to the engine.
extern "C" ANSFR_API int UpdateUser_LV(ANSCENTER::ANSFacialRecognition** Handle, int userId, const char* userCode, LStrHandle userName) {
    if (Handle == nullptr || *Handle == nullptr || userCode == nullptr || userName == nullptr) {
        return -1;
    }
    try {
        std::string decodedName = LStrHandleToUTF8(userName);
        if (decodedName.empty()) {
            return -1;
        }
        return (*Handle)->UpdateUser(userId, userCode, decodedName);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
2026-03-28 16:54:11 +11:00
|
|
|
// Updates an existing user's code and display name (plain C strings).
// Returns the engine's result code, or -1 on invalid arguments / exception.
extern "C" ANSFR_API int UpdateUser(ANSCENTER::ANSFacialRecognition** Handle, int userId, const char* userCode, const char* userName) {
    if (Handle == nullptr || *Handle == nullptr || userCode == nullptr || userName == nullptr) {
        return -1;
    }
    try {
        return (*Handle)->UpdateUser(userId, userCode, userName);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Deletes one user by ID. Returns the engine's result code, or -1 on
// invalid handle / exception.
extern "C" ANSFR_API int DeleteUser(ANSCENTER::ANSFacialRecognition** Handle, int userId) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        return (*Handle)->DeleteUser(userId);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Deletes a batch of users. `userIds` points at `count` IDs (count > 0).
// Returns the engine's result code, or -1 on invalid arguments / exception.
extern "C" ANSFR_API int DeleteUsers(ANSCENTER::ANSFacialRecognition** Handle, int* userIds, int count) {
    if (Handle == nullptr || *Handle == nullptr || userIds == nullptr || count <= 0) {
        return -1;
    }
    try {
        // Copy the caller's C array into the vector the engine API expects.
        std::vector<int> ids;
        ids.assign(userIds, userIds + count);
        return (*Handle)->DeleteUsers(ids);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Inserts one face for a user from a JPEG-encoded buffer.
// Returns the engine's result code, 0 when the buffer cannot be decoded,
// or -1 on invalid arguments / exception.
extern "C" ANSFR_API int InsertFace(ANSCENTER::ANSFacialRecognition** Handle, int userId, unsigned char* jpeg_string, unsigned int bufferLength) {
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0) {
        return -1;
    }
    try {
        // Wrap the buffer as a 1xN byte Mat and decode to BGR.
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) {
            return 0; // not a decodable image
        }
        return (*Handle)->InsertFace(userId, decoded);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// Inserts every face found in a JPEG image for the given user and returns the
// new face IDs to LabVIEW as a semicolon-separated list (e.g. "3;4;5").
// Returns CopyToLStrHandle's result, 0 on undecodable input, -1 on bad args.
extern "C" ANSFR_API int InsertFaces(ANSCENTER::ANSFacialRecognition** Handle, int userId, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle faceIdsStr) {
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0 || faceIdsStr == nullptr) {
        return -1;
    }
    try {
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) {
            return 0; // undecodable buffer — nothing inserted
        }
        std::vector<int> faceIds = (*Handle)->InsertMultipleFaces(userId, decoded);
        // Join the IDs as "id1;id2;...".
        std::string joined;
        bool first = true;
        for (int id : faceIds) {
            if (!first) {
                joined += ';';
            }
            joined += std::to_string(id);
            first = false;
        }
        return CopyToLStrHandle(faceIdsStr, joined);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Decodes a JPEG buffer and asks the engine whether it contains a usable face.
// Returns the engine's CheckFace result, 0 when the buffer cannot be decoded,
// or -1 on invalid arguments / exception.
extern "C" ANSFR_API int CheckFaceEmbedding(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength) {
    if (Handle == nullptr || *Handle == nullptr || jpeg_string == nullptr || bufferLength == 0) {
        return -1;
    }
    try {
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) {
            return 0;
        }
        return (*Handle)->CheckFace(decoded);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// Inserts a face from a raw, already-decoded pixel buffer of the given size.
// NOTE(review): despite its name, `jpeg_bytes` is consumed as raw CV_8UC3
// (BGR24, width*height*3 bytes) pixel data here, not JPEG — confirm with the
// LabVIEW callers. Returns engine result, 0 on empty frame, -1 on bad args.
extern "C" ANSFR_API int InsertFaceBinary(ANSCENTER::ANSFacialRecognition** Handle, int userId, unsigned char* jpeg_bytes, unsigned int width, unsigned int height) {
    if (Handle == nullptr || *Handle == nullptr || jpeg_bytes == nullptr || width == 0 || height == 0) {
        return -1;
    }
    try {
        // Deep-copy so the engine never aliases the caller's buffer.
        cv::Mat frame = cv::Mat(height, width, CV_8UC3, jpeg_bytes).clone();
        if (frame.empty()) {
            return 0;
        }
        return (*Handle)->InsertFace(userId, frame);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Deletes one face record by ID. Returns the engine's result code, or -1 on
// invalid handle / exception.
extern "C" ANSFR_API int DeleteFace(ANSCENTER::ANSFacialRecognition** Handle, int faceId) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        return (*Handle)->DeleteFace(faceId);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Asks the engine to reload its state. Returns 1 on success, 0 on failure,
// -1 on invalid handle / exception.
extern "C" ANSFR_API int Reload(ANSCENTER::ANSFacialRecognition** Handle) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        return (*Handle)->Reload() ? 1 : 0;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// New management API
|
|
|
|
|
// Fetches the serialized record for one user and writes it into a LabVIEW
// string handle. Returns CopyToLStrHandle's result, or -1 on bad args / exception.
extern "C" ANSFR_API int GetUser(ANSCENTER::ANSFacialRecognition** Handle, int userId, LStrHandle userRecord) {
    if (Handle == nullptr || *Handle == nullptr || userRecord == nullptr) {
        return -1;
    }
    try {
        std::string record;
        (*Handle)->GetUser(userId, record);
        return CopyToLStrHandle(userRecord, record);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Fetches the serialized records for all users into a LabVIEW string handle.
// The ID vector the engine also fills is not returned to the caller here.
extern "C" ANSFR_API int GetUsers(ANSCENTER::ANSFacialRecognition** Handle, LStrHandle userRecords) {
    if (Handle == nullptr || *Handle == nullptr || userRecords == nullptr) {
        return -1;
    }
    try {
        std::string records;
        std::vector<int> ids;
        (*Handle)->GetUsers(records, ids);
        return CopyToLStrHandle(userRecords, records);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Fetches the serialized record for one face into a LabVIEW string handle.
extern "C" ANSFR_API int GetFace(ANSCENTER::ANSFacialRecognition** Handle, int faceId, LStrHandle faceRecord) {
    if (Handle == nullptr || *Handle == nullptr || faceRecord == nullptr) {
        return -1;
    }
    try {
        std::string record;
        (*Handle)->GetFace(faceId, record);
        return CopyToLStrHandle(faceRecord, record);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Fetches the serialized face records for one user into a LabVIEW string handle.
extern "C" ANSFR_API int GetFaces(ANSCENTER::ANSFacialRecognition** Handle, int userId, LStrHandle faceRecords) {
    if (Handle == nullptr || *Handle == nullptr || faceRecords == nullptr) {
        return -1;
    }
    try {
        std::string records;
        (*Handle)->GetFaces(userId, records);
        return CopyToLStrHandle(faceRecords, records);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Removes every face record belonging to the given user.
// Returns the engine's result code, or -1 on invalid handle / exception.
extern "C" ANSFR_API int DeleteFacesByUser(ANSCENTER::ANSFacialRecognition** Handle, int userId) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        return (*Handle)->DeleteFacesByUser(userId);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// For testing only
|
|
|
|
|
// Test-only variant of GetUser: returns the record through a std::string
// reference instead of a LabVIEW handle. Returns 1 on success, -1 otherwise.
extern "C" ANSFR_API int GetUserString(ANSCENTER::ANSFacialRecognition** Handle, int userId, std::string& userRecord) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        (*Handle)->GetUser(userId, userRecord);
        return 1;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Test-only variant of GetUsers: fills the caller's string and ID vector
// directly. Returns 1 on success, -1 otherwise.
extern "C" ANSFR_API int GetUsersString(ANSCENTER::ANSFacialRecognition** Handle, std::string& userRecords, std::vector<int>& userIds) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        (*Handle)->GetUsers(userRecords, userIds);
        return 1;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Test-only variant of GetFace: returns the record through a std::string
// reference. Returns 1 on success, -1 otherwise.
extern "C" ANSFR_API int GetFaceString(ANSCENTER::ANSFacialRecognition** Handle, int faceId, std::string& faceRecord) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        (*Handle)->GetFace(faceId, faceRecord);
        return 1;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Test-only variant of GetFaces: returns the records through a std::string
// reference. Returns 1 on success, -1 otherwise.
extern "C" ANSFR_API int GetFacesString(ANSCENTER::ANSFacialRecognition** Handle, int userId, std::string& faceRecords) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        (*Handle)->GetFaces(userId, faceRecords);
        return 1;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Computes a blur metric for a JPEG-encoded image: the variance of the
// Laplacian over the grayscale image (higher = sharper).
//
// Returns -1 on null/empty input, on an undecodable buffer, or on any
// OpenCV failure.
extern "C" ANSFR_API double BlurCalculation(unsigned char* jpeg_string, unsigned int bufferLength) {
    try {
        if (!jpeg_string || bufferLength == 0) return -1;
        cv::Mat image = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        // FIX: the original fed a possibly-empty Mat straight into cvtColor and
        // relied on the thrown cv::Exception to produce -1. Check explicitly.
        if (image.empty()) return -1;
        cv::Mat gray;
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
        cv::Mat laplacian;
        cv::Laplacian(gray, laplacian, CV_64F);
        cv::Scalar mean, stddev;
        cv::meanStdDev(laplacian, mean, stddev);
        // Variance = stddev^2. cv::Mat storage is freed by RAII — the manual
        // release() calls in the original were redundant.
        return stddev.val[0] * stddev.val[0];
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
|
2026-03-31 14:10:21 +11:00
|
|
|
// Unicode conversion utilities for LabVIEW wrapper classes
|
2026-03-31 21:52:47 +11:00
|
|
|
// Converts a NUL-terminated UTF-8 C string into UTF-16LE bytes stored in a
// LabVIEW string handle. When includeBOM != 0 a FF FE byte-order mark is
// prepended. Inputs containing JSON-style "\uXXXX" escapes are decoded
// escape-by-escape instead of going through the Win32 converter.
//
// Returns:  1 success, 0 empty input (or non-Windows build on the plain path),
//          -1 bad arguments or exception, -2 LabVIEW handle resize failure.
extern "C" ANSFR_API int ANSFR_ConvertUTF8ToUTF16LE(const char* utf8Str, LStrHandle result, int includeBOM) {
    try {
        if (!utf8Str || !result) return -1;
        int len = (int)strlen(utf8Str);
        if (len == 0) return 0;
        // UTF-16LE byte-order mark.
        const char bom[2] = { '\xFF', '\xFE' };
        // Scan for "\u" escape sequences to pick the conversion strategy.
        bool hasUnicodeEscapes = false;
        for (int i = 0; i + 1 < len; i++) {
            if (utf8Str[i] == '\\' && utf8Str[i + 1] == 'u') { hasUnicodeEscapes = true; break; }
        }
        if (hasUnicodeEscapes) {
            // Escape path: each \uXXXX becomes one UTF-16 code unit; every
            // other byte is widened byte→code-unit directly (assumes those
            // bytes are single-byte chars — TODO confirm multi-byte UTF-8
            // never appears alongside escapes in LabVIEW input).
            std::string utf16le;
            if (includeBOM) utf16le.assign(bom, 2);
            utf16le.reserve(len * 2 + 2);
            for (int i = 0; i < len; ) {
                if (i + 5 < len && utf8Str[i] == '\\' && utf8Str[i + 1] == 'u') {
                    // Parse the four hex digits following "\u".
                    char hex[5] = { utf8Str[i + 2], utf8Str[i + 3], utf8Str[i + 4], utf8Str[i + 5], 0 };
                    uint16_t cp = (uint16_t)strtoul(hex, nullptr, 16);
                    // Emit little-endian: low byte first.
                    utf16le += static_cast<char>(cp & 0xFF);
                    utf16le += static_cast<char>((cp >> 8) & 0xFF);
                    i += 6;
                } else {
                    // Literal byte widened with a zero high byte.
                    utf16le += utf8Str[i];
                    utf16le += '\0';
                    i++;
                }
            }
            int size = (int)utf16le.size();
            // LabVIEW LStr = 4-byte count prefix + payload bytes.
            MgErr error = DSSetHandleSize(result, sizeof(int32) + size * sizeof(uChar));
            if (error != noErr) return -2;
            (*result)->cnt = size;
            memcpy((*result)->str, utf16le.data(), size);
            return 1;
        }
#ifdef _WIN32
        // Plain path: proper UTF-8 → UTF-16 via the Win32 converter (two-pass:
        // size query, then conversion).
        int wideLen = MultiByteToWideChar(CP_UTF8, 0, utf8Str, len, nullptr, 0);
        if (wideLen <= 0) return 0;
        std::wstring wideStr(wideLen, 0);
        MultiByteToWideChar(CP_UTF8, 0, utf8Str, len, &wideStr[0], wideLen);
        int dataSize = wideLen * (int)sizeof(wchar_t);
        int bomSize = includeBOM ? 2 : 0;
        int totalSize = bomSize + dataSize;
        MgErr error = DSSetHandleSize(result, sizeof(int32) + totalSize * sizeof(uChar));
        if (error != noErr) return -2;
        (*result)->cnt = totalSize;
        if (includeBOM) memcpy((*result)->str, bom, 2);
        memcpy((*result)->str + bomSize, wideStr.data(), dataSize);
        return 1;
#else
        // No converter available off-Windows on this path.
        return 0;
#endif
    }
    catch (...) { return -1; }
}
|
|
|
|
|
|
|
|
|
|
// Converts a UTF-16LE byte buffer into UTF-8 stored in a LabVIEW string handle.
// Pure-ASCII UTF-16LE (every high byte zero) is narrowed directly; everything
// else goes through the Win32 converter.
// Returns: 1 success, 0 conversion failure / non-Windows, -1 bad args, -2 resize failure.
extern "C" ANSFR_API int ANSFR_ConvertUTF16LEToUTF8(const unsigned char* utf16leBytes, int byteLen, LStrHandle result) {
    try {
        if (utf16leBytes == nullptr || byteLen <= 0 || result == nullptr) {
            return -1;
        }

        // Fast path: even-length input whose odd (high) bytes are all zero is
        // ASCII stored as UTF-16LE — strip the zero bytes directly.
        if (byteLen >= 2 && (byteLen % 2) == 0) {
            bool highBytesAllZero = true;
            for (int i = 1; i < byteLen; i += 2) {
                if (utf16leBytes[i] != 0x00) {
                    highBytesAllZero = false;
                    break;
                }
            }
            if (highBytesAllZero) {
                const int asciiLen = byteLen / 2;
                MgErr err = DSSetHandleSize(result, sizeof(int32) + asciiLen * sizeof(uChar));
                if (err != noErr) return -2;
                (*result)->cnt = asciiLen;
                for (int i = 0; i < asciiLen; ++i) {
                    (*result)->str[i] = utf16leBytes[2 * i];
                }
                return 1;
            }
        }

#ifdef _WIN32
        // General path: UTF-16 → UTF-8 via two-pass Win32 conversion.
        const int wideLen = byteLen / (int)sizeof(wchar_t);
        const wchar_t* wide = reinterpret_cast<const wchar_t*>(utf16leBytes);
        const int utf8Len = WideCharToMultiByte(CP_UTF8, 0, wide, wideLen, nullptr, 0, nullptr, nullptr);
        if (utf8Len <= 0) return 0;
        std::string utf8(utf8Len, 0);
        WideCharToMultiByte(CP_UTF8, 0, wide, wideLen, &utf8[0], utf8Len, nullptr, nullptr);
        MgErr err = DSSetHandleSize(result, sizeof(int32) + utf8Len * sizeof(uChar));
        if (err != noErr) return -2;
        (*result)->cnt = utf8Len;
        memcpy((*result)->str, utf8.data(), utf8Len);
        return 1;
#else
        return 0;
#endif
    }
    catch (...) {
        return -1;
    }
}
|
|
|
|
|
|
2026-03-28 16:54:11 +11:00
|
|
|
// Pushes recognition parameters into the engine. LabVIEW passes booleans as
// ints; only an exact value of 1 means "enabled" (preserved from the original).
// Returns 1 on success, 0 on failure, -1 on invalid handle / exception.
extern "C" ANSFR_API int UpdateParameters(ANSCENTER::ANSFacialRecognition** Handle, float knownPersonThreshold, int enableAgeGender, int enableFaceEmotions, int enableHeadPose, int minFaceSize, float faceDetectorThreshold, int enableFaceliveness, int antiSpoof, int removeFakeFaces) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        const bool ageGenderOn  = (enableAgeGender == 1);
        const bool emotionsOn   = (enableFaceEmotions == 1);
        const bool headPoseOn   = (enableHeadPose == 1);
        const bool livenessOn   = (enableFaceliveness == 1);
        const bool antiSpoofOn  = (antiSpoof == 1);
        const bool removeFakesOn = (removeFakeFaces == 1);
        const bool ok = (*Handle)->UpdateParameters(knownPersonThreshold, ageGenderOn, emotionsOn, headPoseOn, minFaceSize, faceDetectorThreshold, livenessOn, antiSpoofOn, removeFakesOn);
        return ok ? 1 : 0;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Serializes the engine's current face parameters into a semicolon-separated
// string: threshold;ageGender;emotions;headPose;minFaceSize;detectThreshold;
// liveness;antiSpoof;removeFakeFaces — booleans rendered as 0/1.
// (The exported name keeps the historical "GetParamters" spelling so existing
// LabVIEW callers keep resolving it.)
extern "C" ANSFR_API int GetParamters(ANSCENTER::ANSFacialRecognition** Handle, LStrHandle faceParams) {
    try {
        if (!Handle || !*Handle || !faceParams) return -1;

        // FIX: initialize every out-param. The original read them after the
        // by-reference fill without initialization — indeterminate-value UB if
        // GetFaceParameters ever skips one.
        float knownPersonThrehold = 0.0f;
        bool enableAgeGender = false;
        bool enableEmotions = false;
        bool enableHeadPose = false;
        bool enablefaceLiveness = false;
        bool enableantiSpoof = false;
        bool removeFakeFaces = false;
        int _minFaceSize = 0;
        float faceDetectionThreshold = 0.0f;

        (*Handle)->GetFaceParameters(knownPersonThrehold, enableAgeGender, enableEmotions, enableHeadPose, _minFaceSize, faceDetectionThreshold, enablefaceLiveness, enableantiSpoof, removeFakeFaces);

        // LabVIEW expects the boolean flags as 0/1 integers in the string.
        std::string st = std::to_string(knownPersonThrehold) + ";" +
            std::to_string(enableAgeGender ? 1 : 0) + ";" +
            std::to_string(enableEmotions ? 1 : 0) + ";" +
            std::to_string(enableHeadPose ? 1 : 0) + ";" +
            std::to_string(_minFaceSize) + ";" +
            std::to_string(faceDetectionThreshold) + ";" +
            std::to_string(enablefaceLiveness ? 1 : 0) + ";" +
            std::to_string(enableantiSpoof ? 1 : 0) + ";" +
            std::to_string(removeFakeFaces ? 1 : 0);

        return CopyToLStrHandle(faceParams, st);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Configures the face queue. LabVIEW passes the enable flag as an int; only an
// exact value of 1 enables it. Returns 1 on success, 0 on failure, -1 on
// invalid handle / exception.
extern "C" ANSFR_API int UpdateFaceQueue(ANSCENTER::ANSFacialRecognition** Handle, int queueSize, int numKnownFaceInQueue, int enableFaceQueue) {
    if (Handle == nullptr || *Handle == nullptr) {
        return -1;
    }
    try {
        const bool queueOn = (enableFaceQueue == 1);
        return (*Handle)->UpdateFaceQueue(queueSize, numKnownFaceInQueue, queueOn) ? 1 : 0;
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
// Serializes the face-queue configuration into "size;knownCount;enabled"
// (enabled rendered as 0/1) and writes it to a LabVIEW string handle.
extern "C" ANSFR_API int GetFaceQueue(ANSCENTER::ANSFacialRecognition** Handle, LStrHandle faceQueue) {
    try {
        if (!Handle || !*Handle || !faceQueue) return -1;

        // FIX: initialize the int out-params. The original left them
        // uninitialized before the by-reference fill, which is UB to read if
        // the implementation ever skips one.
        int faceQueueSize = 0;
        int numKnownFaceInQueue = 0;
        bool enableFaceQueue = false;

        (*Handle)->GetFaceQueue(faceQueueSize, numKnownFaceInQueue, enableFaceQueue);

        // (Renamed from the original's misleading local `_enableAgeGender`.)
        const int enableFlag = enableFaceQueue ? 1 : 0;
        std::string st = std::to_string(faceQueueSize) + ";" +
            std::to_string(numKnownFaceInQueue) + ";" +
            std::to_string(enableFlag);

        return CopyToLStrHandle(faceQueue, st);
    }
    catch (const std::exception&) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
// Runs full face-recognition inference on a streamed cv::Mat frame and writes
// the detections (JSON) — and optionally a resized JPEG of the frame — into
// LabVIEW string handles.
//
// Returns:  1 success, 0 serialization/handle-write failure or exception,
//          -1 bad string args, -2 bad image or engine handle, -3 handle not
//          registered (released while in use).
extern "C" ANSFR_API int RunInferenceComplete_LV(
    ANSCENTER::ANSFacialRecognition** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    // Validate inputs
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!Handle || !(*Handle)) return -2;
    if (!cameraId || !detectionResult) return -1;

    // Refcounted acquire — prevents use-after-free if ReleaseANSRFHandle runs
    // concurrently with this call.
    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    auto* engine = guard.get();

    // FIX: the original cleared tl_currentGpuFrame() only on the happy path.
    // The early `return -2` (zero-sized image) and the exception paths left the
    // thread-local pointer set, leaking stale GPU-frame state into unrelated
    // later calls on this thread. This RAII backstop clears it on every exit.
    struct GpuFrameReset {
        ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
    } gpuFrameReset;

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates a new cv::Mat*).
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);

        // Clone for thread safety — the streaming thread may swap the pointer.
        cv::Mat localImage = (**cvImage).clone();
        const int originalWidth = localImage.cols;
        const int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::FaceResultObject> outputs = engine->Inference(localImage, cameraId);
        tl_currentGpuFrame() = nullptr; // clear promptly; the RAII guard is only a backstop

        // All processing below is thread-local (no shared state).
        const bool getJpeg = (getJpegString == 1);
        std::string stImage;

        const bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < originalWidth);
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            // Width-limited resize preserving aspect ratio; scale and clamp boxes.
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            const float ratio = static_cast<float>(newWidth) / originalWidth;
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // No resize: just clamp boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Encode the (possibly resized) frame as JPEG if requested.
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }
            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Serialize detections to JSON and hand them to LabVIEW.
        std::string stDetectionResult = engine->FaceObjectsToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to the LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|
|
|
|
|
// Runs face DETECTION only (no recognition) on a streamed cv::Mat frame and
// writes the detections (JSON) — and optionally a resized JPEG — into LabVIEW
// string handles. Return codes match RunInferenceComplete_LV:
// 1 success, 0 write failure/exception, -1 bad strings, -2 bad image/engine,
// -3 handle not registered.
extern "C" ANSFR_API int RunFaceDetectionComplete_LV(
    ANSCENTER::ANSFacialRecognition** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    // Validate inputs
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!Handle || !(*Handle)) return -2;
    if (!cameraId || !detectionResult) return -1;

    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    auto* engine = guard.get();

    // FIX: guarantee the thread-local GPU frame pointer is cleared on EVERY
    // exit path (early return, exception) — the original only cleared it on
    // the happy path, leaking stale frame state to later calls on this thread.
    struct GpuFrameReset {
        ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
    } gpuFrameReset;

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates a new cv::Mat*).
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);

        // Clone for thread safety — the streaming thread may swap the pointer.
        cv::Mat localImage = (**cvImage).clone();
        const int originalWidth = localImage.cols;
        const int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::Object> outputs = engine->FaceDetect(localImage, cameraId);
        tl_currentGpuFrame() = nullptr; // clear promptly; RAII guard is the backstop

        // All processing below is thread-local (no shared state).
        const bool getJpeg = (getJpegString == 1);
        std::string stImage;

        const bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < originalWidth);
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            // Width-limited resize preserving aspect ratio; scale and clamp boxes.
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            const float ratio = static_cast<float>(newWidth) / originalWidth;
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // No resize: just clamp boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Encode the (possibly resized) frame as JPEG if requested.
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }
            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Serialize detections to JSON and hand them to LabVIEW.
        std::string stDetectionResult = engine->FaceToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to the LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|
|
|
|
|
// Runs face RECOGNITION on a streamed cv::Mat frame and writes the results
// (JSON) — and optionally a resized JPEG — into LabVIEW string handles.
// Return codes match RunInferenceComplete_LV: 1 success, 0 write failure or
// exception, -1 bad strings, -2 bad image/engine, -3 handle not registered.
extern "C" ANSFR_API int RunFaceRecogniserComplete_LV(
    ANSCENTER::ANSFacialRecognition** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    // Validate inputs
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!Handle || !(*Handle)) return -2;
    if (!cameraId || !detectionResult) return -1;

    FRHandleGuard guard(AcquireFRHandle(*Handle));
    if (!guard) return -3;
    auto* engine = guard.get();

    // FIX: guarantee the thread-local GPU frame pointer is cleared on EVERY
    // exit path (early return, exception) — the original only cleared it on
    // the happy path, leaking stale frame state to later calls on this thread.
    struct GpuFrameReset {
        ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
    } gpuFrameReset;

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates a new cv::Mat*).
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);

        // Clone for thread safety — the streaming thread may swap the pointer.
        cv::Mat localImage = (**cvImage).clone();
        const int originalWidth = localImage.cols;
        const int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::FaceResultObject> outputs = engine->Recognize(localImage, cameraId);
        tl_currentGpuFrame() = nullptr; // clear promptly; RAII guard is the backstop

        // All processing below is thread-local (no shared state).
        const bool getJpeg = (getJpegString == 1);
        std::string stImage;

        const bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < originalWidth);
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            // Width-limited resize preserving aspect ratio; scale and clamp boxes.
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            const float ratio = static_cast<float>(newWidth) / originalWidth;
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // No resize: just clamp boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Encode the (possibly resized) frame as JPEG if requested.
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }
            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Serialize results to JSON and hand them to LabVIEW.
        std::string stDetectionResult = engine->FaceObjectsToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to the LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|
|
|
|
|
|
|
|
|
|
// ============================================================================
|
|
|
|
|
// V2 API — accepts uint64_t handle by value (eliminates LabVIEW buffer reuse bug)
|
|
|
|
|
// ============================================================================
|
|
|
|
|
// Expands a LabVIEW-supplied uint64 handle value into the two locals the
// legacy RunANSRF* helpers expect:
//   _v2Direct — the engine pointer decoded from the integer handle
//   Handle    — an ANSCENTER::ANSFacialRecognition** aliasing a one-element
//               stack array, matching the old pointer-to-pointer API shape.
// Causes the ENCLOSING function to `return 0` when the handle is null.
// NOTE(review): the WithCamId variants return -1 on a null handle while this
// macro returns 0 — confirm callers rely on that asymmetry before unifying.
#define FR_V2_HANDLE_SETUP(handleVal) \
    ANSCENTER::ANSFacialRecognition* _v2Direct = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal); \
    if (_v2Direct == nullptr) return 0; \
    ANSCENTER::ANSFacialRecognition* _v2Arr[1] = { _v2Direct }; \
    ANSCENTER::ANSFacialRecognition** Handle = &_v2Arr[0];
|
|
|
|
|
|
|
|
|
|
/// V2 inference entry point for LabVIEW: decodes a JPEG byte buffer and runs
/// the full detection+recognition pipeline via RunANSRFInference.
///
/// @param handleVal        engine handle passed by value (uint64) — avoids the
///                         LabVIEW buffer-reuse bug of the pointer-based V1 API
/// @param jpeg_string      JPEG-encoded image bytes
/// @param bufferLength     length of jpeg_string in bytes
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @return CopyToLStrHandle result on success; 0 on null handle; -1 on bad
///         arguments or any exception
extern "C" ANSFR_API int RunInference_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    FR_V2_HANDLE_SETUP(handleVal);
    try {
        if (!jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFInference(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (const std::exception&) {
        return -1;
    }
    catch (...) {
        // Non-std exceptions (e.g. from OpenCV/TensorRT internals) must never
        // unwind across the extern "C" boundary into LabVIEW — that would
        // terminate the host process. Siblings (WithCamId variants) already
        // guard with catch(...); make this entry point consistent.
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 inference entry point with an explicit camera identifier: decodes the
/// JPEG buffer, runs engine->Inference for cameraId, and returns the JSON
/// result through the LabVIEW string handle.
/// Returns -1 on bad arguments or exception, -3 if the handle is being
/// released, 0 if the JPEG fails to decode.
extern "C" ANSFR_API int RunInferenceWithCamId_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Resolve the integer handle back into an engine pointer and validate inputs.
    auto* recognizer = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (recognizer == nullptr) return -1;
    if (jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(recognizer));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Decode the JPEG byte buffer into a BGR image (zero-copy wrap, then decode).
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        auto results = engine->Inference(decoded, cameraId);
        decoded.release();

        // Serialize and hand back through the LabVIEW string handle.
        std::string json = engine->FaceObjectsToJsonString(results);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 detector entry point for LabVIEW: decodes a JPEG byte buffer and runs
/// detection only (no recognition) via RunANSRFDetector.
///
/// @param handleVal        engine handle passed by value (uint64)
/// @param jpeg_string      JPEG-encoded image bytes
/// @param bufferLength     length of jpeg_string in bytes
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @return CopyToLStrHandle result on success; 0 on null handle; -1 on bad
///         arguments or any exception
extern "C" ANSFR_API int RunDetector_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    FR_V2_HANDLE_SETUP(handleVal);
    try {
        if (!jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFDetector(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (const std::exception&) {
        return -1;
    }
    catch (...) {
        // Must not let non-std exceptions unwind across the extern "C"
        // boundary into LabVIEW; matches the WithCamId variants' guard.
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 detection entry point with an explicit camera identifier: decodes the
/// JPEG buffer, runs engine->Detect for cameraId, and returns the JSON result
/// through the LabVIEW string handle.
/// Returns -1 on bad arguments or exception, -3 if the handle is being
/// released, 0 if the JPEG fails to decode.
extern "C" ANSFR_API int RunDetectorWithCamId_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Resolve the integer handle back into an engine pointer and validate inputs.
    auto* recognizer = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (recognizer == nullptr) return -1;
    if (jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(recognizer));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Decode the JPEG byte buffer into a BGR image.
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        auto results = engine->Detect(decoded, cameraId);
        decoded.release();

        // Serialize and hand back through the LabVIEW string handle.
        std::string json = engine->FaceObjectsToJsonString(results);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 recognition entry point for LabVIEW: decodes a JPEG byte buffer and runs
/// recognition via RunANSRFRecognition.
///
/// @param handleVal        engine handle passed by value (uint64)
/// @param jpeg_string      JPEG-encoded image bytes
/// @param bufferLength     length of jpeg_string in bytes
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @return CopyToLStrHandle result on success; 0 on null handle; -1 on bad
///         arguments or any exception
extern "C" ANSFR_API int RunRecognition_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult) {
    FR_V2_HANDLE_SETUP(handleVal);
    try {
        if (!jpeg_string || bufferLength == 0 || !detectionResult) return -1;
        std::string st = RunANSRFRecognition(Handle, jpeg_string, bufferLength);
        return CopyToLStrHandle(detectionResult, st);
    }
    catch (const std::exception&) {
        return -1;
    }
    catch (...) {
        // Must not let non-std exceptions unwind across the extern "C"
        // boundary into LabVIEW; matches the WithCamId variants' guard.
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 recognition entry point with an explicit camera identifier: decodes the
/// JPEG buffer, runs engine->Recognize for cameraId, and returns the JSON
/// result through the LabVIEW string handle.
/// Returns -1 on bad arguments or exception, -3 if the handle is being
/// released, 0 if the JPEG fails to decode.
extern "C" ANSFR_API int RunRecognitionWithCamId_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Resolve the integer handle back into an engine pointer and validate inputs.
    auto* recognizer = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (recognizer == nullptr) return -1;
    if (jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(recognizer));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Decode the JPEG byte buffer into a BGR image.
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        auto results = engine->Recognize(decoded, cameraId);
        decoded.release();

        // Serialize and hand back through the LabVIEW string handle.
        std::string json = engine->FaceObjectsToJsonString(results);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 face-detection entry point with an explicit camera identifier: decodes
/// the JPEG buffer, runs engine->FaceDetect for cameraId, and returns the JSON
/// result (ANSCENTER::Object list) through the LabVIEW string handle.
/// Returns -1 on bad arguments or exception, -3 if the handle is being
/// released, 0 if the JPEG fails to decode.
extern "C" ANSFR_API int RunFaceDetection_LV_V2(uint64_t handleVal, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult) {
    // Resolve the integer handle back into an engine pointer and validate inputs.
    auto* recognizer = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (recognizer == nullptr) return -1;
    if (jpeg_string == nullptr || bufferLength == 0 || cameraId == nullptr || detectionResult == nullptr) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(recognizer));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Decode the JPEG byte buffer into a BGR image.
        cv::Mat decoded = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, jpeg_string), cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        std::vector<ANSCENTER::Object> results = engine->FaceDetect(decoded, cameraId);
        decoded.release();

        // Serialize and hand back through the LabVIEW string handle.
        std::string json = engine->FaceToJsonString(results);
        return CopyToLStrHandle(detectionResult, json);
    }
    catch (...) {
        return -1;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 "complete" inference entry point: takes a cv::Mat** from LabVIEW, runs
/// detection+recognition, optionally returns a downscaled JPEG of the frame,
/// and writes box coordinates scaled to the returned image size.
///
/// @param handleVal        engine handle passed by value (uint64)
/// @param cvImage          pointer-to-pointer to the input cv::Mat
/// @param cameraId         camera identifier string
/// @param getJpegString    1 to also return a JPEG-encoded copy of the frame
/// @param jpegImageSize    target JPEG width; <=0 or >= frame width disables resize
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @param imageStr         LabVIEW string handle receiving the JPEG bytes
/// @return 1 on success; 0 on decode/copy/exception failure; -1 bad args;
///         -2 bad image; -3 handle being released
extern "C" ANSFR_API int RunInferenceComplete_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    ANSCENTER::ANSFacialRecognition* _v2Direct = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (_v2Direct == nullptr) return -1;
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!cameraId || !detectionResult) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(_v2Direct));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates new cv::Mat*)
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        // BUGFIX: the thread-local GPU-frame pointer was previously cleared only
        // on the success path; if the dimension check returned -2 or Inference()
        // threw, a dangling registry pointer leaked into the next call on this
        // thread. RAII guarantees it is cleared on every exit path.
        struct GpuFrameReset {
            ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
        } gpuFrameReset;

        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::FaceResultObject> outputs = engine->Inference(localImage, cameraId);

        // All processing below is thread-local (no shared state)
        bool getJpeg = (getJpegString == 1);
        std::string stImage;

        // Resize only when the requested JPEG width is smaller than the frame.
        int maxImageSize = originalWidth;
        bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < maxImageSize);

        float ratio = 1.0f;
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            ratio = static_cast<float>(newWidth) / originalWidth;

            // Scale bounding boxes to the resized image and clamp to bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // Clamp bounding boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Convert to JPEG if requested (quality 50 to keep payload small).
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }

            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Convert to JSON and write detection result.
        std::string stDetectionResult = engine->FaceObjectsToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 "complete" face-detection entry point: takes a cv::Mat** from LabVIEW,
/// runs face detection only, optionally returns a downscaled JPEG of the
/// frame, and writes box coordinates scaled to the returned image size.
///
/// @param handleVal        engine handle passed by value (uint64)
/// @param cvImage          pointer-to-pointer to the input cv::Mat
/// @param cameraId         camera identifier string
/// @param getJpegString    1 to also return a JPEG-encoded copy of the frame
/// @param jpegImageSize    target JPEG width; <=0 or >= frame width disables resize
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @param imageStr         LabVIEW string handle receiving the JPEG bytes
/// @return 1 on success; 0 on decode/copy/exception failure; -1 bad args;
///         -2 bad image; -3 handle being released
extern "C" ANSFR_API int RunFaceDetectionComplete_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    ANSCENTER::ANSFacialRecognition* _v2Direct = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (_v2Direct == nullptr) return -1;
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!cameraId || !detectionResult) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(_v2Direct));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates new cv::Mat*)
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        // BUGFIX: the thread-local GPU-frame pointer was previously cleared only
        // on the success path; if the dimension check returned -2 or FaceDetect()
        // threw, a dangling registry pointer leaked into the next call on this
        // thread. RAII guarantees it is cleared on every exit path.
        struct GpuFrameReset {
            ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
        } gpuFrameReset;

        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::Object> outputs = engine->FaceDetect(localImage, cameraId);

        bool getJpeg = (getJpegString == 1);
        std::string stImage;

        // Resize only when the requested JPEG width is smaller than the frame.
        int maxImageSize = originalWidth;
        bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < maxImageSize);

        float ratio = 1.0f;
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            ratio = static_cast<float>(newWidth) / originalWidth;

            // Scale bounding boxes to the resized image and clamp to bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // Clamp bounding boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Convert to JPEG if requested (quality 50 to keep payload small).
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }

            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Convert to JSON and write detection result.
        std::string stDetectionResult = engine->FaceToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|
|
|
|
|
|
|
|
|
|
/// V2 "complete" face-recogniser entry point: takes a cv::Mat** from LabVIEW,
/// runs recognition, optionally returns a downscaled JPEG of the frame, and
/// writes box coordinates scaled to the returned image size.
///
/// @param handleVal        engine handle passed by value (uint64)
/// @param cvImage          pointer-to-pointer to the input cv::Mat
/// @param cameraId         camera identifier string
/// @param getJpegString    1 to also return a JPEG-encoded copy of the frame
/// @param jpegImageSize    target JPEG width; <=0 or >= frame width disables resize
/// @param detectionResult  LabVIEW string handle receiving the JSON result
/// @param imageStr         LabVIEW string handle receiving the JPEG bytes
/// @return 1 on success; 0 on decode/copy/exception failure; -1 bad args;
///         -2 bad image; -3 handle being released
extern "C" ANSFR_API int RunFaceRecogniserComplete_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int getJpegString,
    int jpegImageSize,
    LStrHandle detectionResult,
    LStrHandle imageStr)
{
    ANSCENTER::ANSFacialRecognition* _v2Direct = reinterpret_cast<ANSCENTER::ANSFacialRecognition*>(handleVal);
    if (_v2Direct == nullptr) return -1;
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    if (!cameraId || !detectionResult) return -1;

    // Pin the handle so a concurrent ReleaseANSRFHandle cannot free it mid-call.
    FRHandleGuard guard(AcquireFRHandle(_v2Direct));
    if (!guard) return -3;
    auto* engine = guard.get();

    try {
        // Lookup NV12 frame data BEFORE cloning (clone creates new cv::Mat*)
        tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
        // BUGFIX: the thread-local GPU-frame pointer was previously cleared only
        // on the success path; if the dimension check returned -2 or Recognize()
        // threw, a dangling registry pointer leaked into the next call on this
        // thread. RAII guarantees it is cleared on every exit path.
        struct GpuFrameReset {
            ~GpuFrameReset() { tl_currentGpuFrame() = nullptr; }
        } gpuFrameReset;

        cv::Mat localImage = (**cvImage).clone();
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        if (originalWidth == 0 || originalHeight == 0) return -2;

        std::vector<ANSCENTER::FaceResultObject> outputs = engine->Recognize(localImage, cameraId);

        bool getJpeg = (getJpegString == 1);
        std::string stImage;

        // Resize only when the requested JPEG width is smaller than the frame.
        int maxImageSize = originalWidth;
        bool resizeNeeded = (jpegImageSize > 0) && (jpegImageSize < maxImageSize);

        float ratio = 1.0f;
        int newWidth = originalWidth;
        int newHeight = originalHeight;

        if (resizeNeeded) {
            newWidth = jpegImageSize;
            newHeight = static_cast<int>(std::round(newWidth * static_cast<double>(originalHeight) / originalWidth));
            ratio = static_cast<float>(newWidth) / originalWidth;

            // Scale bounding boxes to the resized image and clamp to bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x * ratio), newWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y * ratio), newHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width * ratio), newWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height * ratio), newHeight - obj.box.y));
            }
        }
        else {
            // Clamp bounding boxes to the original image bounds.
            for (auto& obj : outputs) {
                obj.box.x = std::max(0, std::min(static_cast<int>(obj.box.x), originalWidth - 1));
                obj.box.y = std::max(0, std::min(static_cast<int>(obj.box.y), originalHeight - 1));
                obj.box.width = std::max(1, std::min(static_cast<int>(obj.box.width), originalWidth - obj.box.x));
                obj.box.height = std::max(1, std::min(static_cast<int>(obj.box.height), originalHeight - obj.box.y));
            }
        }

        // Convert to JPEG if requested (quality 50 to keep payload small).
        if (getJpeg) {
            cv::Mat processedImage = localImage;
            if (resizeNeeded) {
                cv::resize(localImage, processedImage, cv::Size(newWidth, newHeight), 0, 0, cv::INTER_AREA);
            }

            std::vector<uchar> buf;
            if (cv::imencode(".jpg", processedImage, buf, { cv::IMWRITE_JPEG_QUALITY, 50 })) {
                stImage.assign(buf.begin(), buf.end());
            }
        }

        // Convert to JSON and write detection result.
        std::string stDetectionResult = engine->FaceObjectsToJsonString(outputs);
        if (!CopyToLStrHandle(detectionResult, stDetectionResult)) return 0;

        // Write JPEG to LabVIEW string handle if requested.
        if (getJpeg) {
            if (!CopyToLStrHandle(imageStr, stImage)) return 0;
        }

        return 1;
    }
    catch (const std::exception&) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
|