Files
ANSCORE/modules/ANSFR/ANSFR.h
Tuan Nghia Nguyen c1b919ec47 Fix setting GPU behaviour:
Condition	maxSlotsPerGpu	Behavior
OptimizeModelStr	0	Bypass: non-shared temporary engine
1 GPU	1	Single slot, no round-robin
>1 GPU, VRAM < 24 GB	1	Round-robin: 1 slot per GPU
>1 GPU, VRAM >= 24 GB	-1	Elastic: on-demand slot growth
2026-03-30 09:59:09 +11:00

355 lines
20 KiB
C++

#ifndef ANSFR_H
#define ANSFR_H
#define ANSFR_API __declspec(dllexport)
#pragma once
#include "ANSFRCommon.h"
#include "FaceDatabase.h"
#include <shared_mutex>
#include <atomic>
#define MAX_FACE_CHECKER_FRAMES 20
#define FACE_CONFIDENT_LEVEL 5
namespace ANSCENTER
{
// One face candidate tracked across consecutive frames by FaceChecker.
struct FaceDetection {
FaceResultObject faceObject; // Bounding box and associated detection result
int faceConfLevel; // Face confidence level: increased each frame the face is re-detected, decreased when it is missed
};
// Temporal validator for detections: buffers per-camera face candidates over a
// sliding window of frames (frameBufferMap) so that transient false positives
// can be filtered out before recognition. Method bodies live in the .cpp.
class FaceChecker {
public:
// maxFrames: number of frames kept per camera in the validation window.
// minScore: minimum detection score for a face to be considered at all.
FaceChecker(int maxFrames = MAX_FACE_CHECKER_FRAMES, float minScore = 0.7f)
: maxFrames(maxFrames), minScore(minScore) {}
// Add detected faces from a new frame and return the subset deemed valid.
std::vector<FaceResultObject> ValidateDetectedFaces(const std::vector<FaceResultObject>& faces);
// Return the input list with duplicate faces removed.
std::vector<FaceResultObject> UniqueFaces(const std::vector<FaceResultObject>& faces);
private:
int maxFrames;
float minScore;
std::recursive_mutex _mutex; // Guards frameBufferMap; recursive, presumably to allow re-entrant internal calls — confirm in .cpp
std::unordered_map<std::string, std::vector<std::vector<FaceDetection>>> frameBufferMap; // Keyed by cameraId
};
/////
// Fix #11: ScopedTimer now logs via SPDLogger instead of being a dead no-op
// RAII wall-clock timer: on destruction, logs how long the enclosing scope
// took (in milliseconds) via SPDLogger at debug level.
class ScopedTimer {
public:
// operation_name: label that prefixes the emitted "... took N ms" log line.
explicit ScopedTimer(const std::string& operation_name)
: _name(operation_name)
, _start(std::chrono::steady_clock::now())
{
}
// A copied or moved timer would report the same interval more than once
// (and a moved-from one a meaningless interval), so both are disallowed.
ScopedTimer(const ScopedTimer&) = delete;
ScopedTimer& operator=(const ScopedTimer&) = delete;
ScopedTimer(ScopedTimer&&) = delete;
ScopedTimer& operator=(ScopedTimer&&) = delete;
~ScopedTimer() noexcept {
try {
auto end = std::chrono::steady_clock::now();
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - _start).count();
ANSCENTER::SPDLogger::GetInstance("ANSFR").LogDebug(
"ScopedTimer", _name + " took " + std::to_string(ms) + " ms",
__FILE__, __LINE__);
}
catch (...) {} // Never throw from destructor
}
private:
std::string _name; // Operation label for the log message
std::chrono::steady_clock::time_point _start; // Monotonic start time (immune to wall-clock adjustments)
};
// Facial-recognition facade exported from the ANSFR DLL. Owns the OpenVINO
// core, the detector/recognizer engines, the optional attribute models
// (age/gender, emotions, head pose, anti-spoofing), the face database, and
// per-camera detection state. Return-code convention for the int APIs is not
// visible here — see the .cpp for the exact success/failure values.
class ANSFR_API ANSFacialRecognition
{
public:
// One-time setup: validates the license, loads configuration and the face
// database, and records model paths/thresholds for later LoadEngine().
[[nodiscard]] int Initialize(const std::string & licenseKey,
const std::string& configFile,
const std::string& databaseFilePath,
const std::string& recogniserFilePath,
const std::string& detectorFilePath="",
int precisionType=0,
float knownPersonThreshold=0.35,
bool enableAgeGender=true,
bool enableFaceEmotions=true,
bool enableHeadPose=true,
int minFaceSize=30,
float faceDetectorScoreThreshold=0.5,
bool faceliveness=true,
bool antiSpoofing=true);
// Loads/compiles all inference engines; must succeed before any Recognize/Detect call.
[[nodiscard]] bool LoadEngine();
// --- User / face database management ---
int InsertUser(const std::string& userCode,const std::string& userName);
int UpdateUser(int userId, const std::string& userCode, const std::string& userName);
int DeleteUser(int userId);
int DeleteUsers(const std::vector<int>& userIds);
int InsertFace(int userId, const cv::Mat& image);
[[nodiscard]] std::vector<int> InsertMultipleFaces(int userId, const cv::Mat& image);
[[nodiscard]] int CheckFace(const cv::Mat& image);// Returns 0 if the face is not valid, otherwise 1
int DeleteFace(int faceId);
int DeleteFacesByUser(int userId);
int GetUser(int userId, std::string& userRecord);
int GetUsers(std::string& userRecords, std::vector<int>&userIds);
int GetFace(int faceId, std::string& faceRecord);
int GetFaces(int userId, std::string& faceRecords);
void CheckLicense();
// Rebuilds the in-memory recognizer index from the database.
// See _reloadNeeded below: a no-op when nothing changed since the last reload.
bool Reload();
bool UpdateUserDictionary();
// Runtime re-tuning of thresholds and attribute toggles (see Initialize for parameter meanings).
bool UpdateParameters(float knownPersonThreshold = 0.35,bool enableAgeGender = true,bool enableFaceEmotions = true,bool enableHeadPose = true, int minFaceSize=112, float faceDetectorScoreThreshold=0.5, bool faceliveness=true, bool antiSpoof=true, bool removeFakeFace=false);
bool GetFaceParameters(float& knownPersonThreshold, bool& enableAgeGender, bool& enableFaceEmotions, bool& enableHeadPose, int &minFaceSize, float& faceDetectorScoreThreshold, bool& faceliveness, bool& antiSpoof, bool& removeFakeFace);
bool UpdateFaceQueue(int queueSize, int faceThresholdSize, bool enableFaceQueue);
bool GetFaceQueue(int& queueSize, int& faceThresholdSize, bool& enableFaceQueue);
// --- Inference pipeline (camera-agnostic overloads) ---
[[nodiscard]] std::vector<FaceResultObject> Recognize(const cv::Mat& frame); // Full pipeline; expected to return one entry per detected face
[[nodiscard]] std::vector<FaceResultObject> Inference(const cv::Mat& input);
[[nodiscard]] std::vector<FaceResultObject> Detect(const cv::Mat& input);
[[nodiscard]] std::vector<Object> FaceDetect(const cv::Mat& input);
// --- Inference pipeline (per-camera overloads; camera_id keys the CameraData state below) ---
[[nodiscard]] std::vector<FaceResultObject> Recognize(const cv::Mat& frame, const std::string& camera_id); // Full pipeline; expected to return one entry per detected face
[[nodiscard]] std::vector<FaceResultObject> Inference(const cv::Mat& input, const std::string& camera_id);
[[nodiscard]] std::vector<FaceResultObject> Detect(const cv::Mat& input, const std::string& camera_id);
[[nodiscard]] std::vector<Object> FaceDetect(const cv::Mat& input, const std::string& camera_id);
// --- JSON / string serialization helpers ---
[[nodiscard]] std::string FaceObjectsToJsonString(const std::vector<FaceResultObject>& faces);
[[nodiscard]] std::string FaceToJsonString(const std::vector<Object>& faces);
[[nodiscard]] std::string PolygonToString(const std::vector<cv::Point2f>& polygon);
[[nodiscard]] std::string KeypointsToString(const std::vector<float>& kps);
enum class LogLevel { Debug, Info, Warn, Error, Fatal };
void LogThreadSafe(const std::string& function, const std::string& message,
LogLevel level = LogLevel::Error);
void LogError(const std::string& function,const std::string& message,const std::string& camera_id);
void MarkAsUnknown(FaceResultObject& face);
ANSFacialRecognition();
~ANSFacialRecognition() noexcept;
void UnloadEngine();
void Destroy();
// Called by dllmain according to GPU topology (see commit note: 0 = bypass,
// 1 = one slot per GPU, -1 = elastic on-demand growth).
void SetMaxSlotsPerGpu(int n) { m_maxSlotsPerGpu = n; }
private:
int m_maxSlotsPerGpu{ 1 }; // Set by dllmain based on GPU topology
// Typed database accessors backing the string-based public overloads above.
int GetUser(int userId, UserRecord& userRecord);
int GetUser(int userId, const std::string& userCode,const std::string& userName, UserRecord& userRecord);
int GetUsers(std::vector<UserRecord>& userRecords, std::vector<int>& userIds);
int GetFace(int faceId, FaceRecord& faceRecord);
int GetFaces(int userId, std::vector<FaceRecord>& faceRecords);
std::vector<FaceResultObject> UpdateFaceAttributes(const std::vector<FaceResultObject>& resultObjects,
const std::string& camera_id = "");
// Filters raw detections (optionally enforcing _minFaceSize).
std::vector<Object> CheckFaces(const std::vector<Object>& faceObjects,bool checkFaceSize=true);
std::string GetOpenVINODevice();
ModelConfig CreateDetectorModelConfig();
// Per-model initialization helpers used by LoadEngine().
bool InitializeDetector();
bool InitializeRecognizer();
bool InitializeAntispoofingModel(const std::string& deviceName);
bool InitializeAgeGenderModel(const std::string& deviceName);
bool InitializeEmotionModel(const std::string& deviceName);
bool InitializeHeadPoseModel(const std::string& deviceName);
void ensureUniqueUserIdWithHighestConfidence(std::vector<FaceResultObject>& resultObjects);
Object GetLargestObject(const std::vector<Object>& objects);
bool AreFacesSimilar(const FaceResultObject& face1, const FaceResultObject& face2);
size_t GenerateFaceHash(const FaceResultObject& face, const std::vector<FaceResultObject>& detectedObjects);
// Returns faces seen at least occurrenceThreshold times across the queued frames.
std::vector<FaceResultObject> FindFrequentFaces(const std::deque<std::vector<FaceResultObject>>& faceQueue,
const std::vector<FaceResultObject>& detectedObjects, int occurrenceThreshold = 8);
protected:
bool _licenseValid;
std::string _licenseKey;
std::string _detectorFilePath;
std::string _recognizerFilePath;
std::string _recognizerModelFolder;
std::string _databaseFilePath;
int _precisionType; // Passed to Initialize; meaning of values defined in the .cpp
std::map<std::string, std::string> _userDict; // userCode/userId -> userName lookup (presumably — confirm against UpdateUserDictionary)
ANSCENTER::FRConfig _config;
std::unique_ptr<FaceDatabase> _db =std::make_unique<FaceDatabase>();
EngineType engineType;
// Attribute toggles mirroring the Initialize/UpdateParameters arguments.
bool _enableAgeGender;
bool _enableFaceEmotions;
bool _enableFaceLandmarks;
bool _enableHeadPose;
bool _enableAntiSpoof;
bool _enableFaceliveness;
bool _removeFakeFaces;
bool _isInitialized{false};
int _minFaceSize;
int _queueSize; // Number of frames to store in the queue
int _faceThresholdSize; // Number of frames to consider for face recognition
bool _enableFaceQueue;
double _faceDetectorScoreThreshold; // Face threshold score for face checker
int _attributeInterval{ 5 }; // Run slow attributes every N frames (1 = every frame)
FaceChecker faceChecker;
// Fine-grained mutexes for shared resources (one per model to keep inference
// on different models concurrent).
std::mutex _detectionMutex; // Protects face detector
std::mutex _recognitionMutex; // Protects face recognizer (Feature/Forward inference only)
std::mutex _databaseMutex; // For database operations
// Double-buffer synchronization: protects atomic swap of recognizer index + userDict.
// shared_lock for inference (concurrent reads), unique_lock for reload (exclusive write).
mutable std::shared_mutex _indexSwapMutex;
// Reload guard: prevents redundant index rebuilds.
// Set to true after any DB mutation; cleared by a successful Reload().
// If third-party code calls Reload() after a mutation that already auto-reloaded,
// the flag will be false and Reload() returns immediately (no-op).
std::atomic<bool> _reloadNeeded{ true };
std::mutex _ageGenderMutex; // For age/gender detector
std::mutex _emotionsMutex; // For emotions detector
std::mutex _antispoofMutex; // For antispoof detector
std::mutex _headPoseMutex; // For head pose detector
std::mutex _loggerMutex; // Protects logger (if not thread-safe)
std::mutex _faceQueueMutex; // Protects face queue operations
std::mutex _configMutex; // For configuration
std::mutex _cameraMutex; // For camera data
//std::unordered_map<std::string, std::deque<std::vector<FaceResultObject>>> _cameraFaceQueues;
std::recursive_mutex _mutex; // Coarse guard used by GetCameraData below
ANSCENTER::SPDLogger& _logger = ANSCENTER::SPDLogger::GetInstance("ANSFR");
std::shared_ptr<ov::Core> core; // Persistent OpenVINO core instance
std::unique_ptr<ANSFRBase>_recognizer = nullptr;
std::unique_ptr<ANSFDBase>_detector = nullptr;
std::unique_ptr<AntispoofingClassifier>_antiSpoofDetector = nullptr;
std::unique_ptr<AgeGenderDetection>_ageGenderDetector = nullptr;
std::unique_ptr<HeadPoseDetection>_headPoseDetector = nullptr;
std::unique_ptr<EmotionsDetection>_emotionsDetector = nullptr;
const size_t QUEUE_SIZE = 10; // Default for _queueSize
const size_t FACE_THRESHOLD_SIZE = 5; // Default for _faceThresholdSize
// Cached attribute JSON per tracked face (everything except head pose).
// Keyed by trackId from ANSMOT. Updated every _attributeInterval frames.
// On non-attribute frames, head pose is run fresh and spliced into this JSON.
struct CachedFaceAttributes {
std::string attributeJson; // Full JSON from FaceAttributeToJsonString
bool isUnknown = false; // Whether face was marked unknown (extreme pose etc.)
};
// Per-camera mutable pipeline state (detection queue + attribute cache).
struct CameraData
{
std::deque<std::vector<FaceResultObject>> _detectionQueue; // Stores the recent detection results
int attributeFrameCounter = 0; // Counts frames for attribute skip logic
std::unordered_map<int, CachedFaceAttributes> cachedAttributes; // trackId -> cached attrs
// Adaptive interval state
int currentAttributeInterval = 5; // Current adaptive interval
int stableFrameCount = 0; // Frames with no new/lost trackIds
int previousTrackIdCount = 0; // trackId count from last frame
// Resets all state back to defaults (e.g. on camera restart).
void clear()
{
for (auto& detectionVector : _detectionQueue)
{
detectionVector.clear();
}
_detectionQueue.clear();
attributeFrameCounter = 0;
cachedAttributes.clear();
currentAttributeInterval = 5;
stableFrameCount = 0;
previousTrackIdCount = 0;
}
};
std::unordered_map<std::string, CameraData> _cameras;
// Returns the per-camera state, creating a default entry on first use.
// NOTE(review): the returned reference is used after _mutex is released; this
// is only safe if _cameras entries are never erased while cameras are active
// (unordered_map insertion does not invalidate references) — confirm no code
// path erases from _cameras concurrently.
CameraData& GetCameraData(const std::string& cameraId) {
std::lock_guard<std::recursive_mutex> lock(_mutex);
// Use try_emplace to insert a default CameraData if cameraId does not exist
auto [iterator, inserted] = _cameras.try_emplace(cameraId, CameraData{});
return iterator->second; // Return the reference to CameraData
}
void EnqueueDetection(const std::vector<FaceResultObject>& detectedObjects, const std::string& cameraId);
std::deque<std::vector<FaceResultObject>> DequeueDetection(const std::string& cameraId);
std::vector<FaceResultObject> PostProcess(const std::vector<FaceResultObject>& detectedObjects, const std::string& cameraId);
std::vector<FaceResultObject> BuildFaceResultObjects(const std::vector<Object>& validFaces, const cv::Mat& originalInput, const std::string& camera_id);
void ProcessFaceAttributes(Face& face, const cv::Mat& mask);
// Validation helper methods
bool ValidateInitialization();
bool ValidateInput(const cv::Mat& input);
bool ValidateComponents();
// Frame preparation
cv::Mat PrepareInputFrame(const cv::Mat& input);
// Pipeline stage methods with timing
std::vector<Object> DetectFaces(const cv::Mat& frame, const std::string& camera_id);
std::vector<FaceResultObject> RecognizeFaces(const cv::Mat& frame, const std::vector<Object>& validFaces);
};
};
// ---------------------------------------------------------------------------
// Flat exported API (extern "C") wrapping ANSFacialRecognition for DLL consumers.
// NOTE(review): several exports return std::string or take std::string&/
// std::vector& parameters; C++ types across an extern "C" boundary are only
// callable from C++ built with the same toolchain/runtime — confirm there are
// no plain-C consumers of those entry points.
// ---------------------------------------------------------------------------
// Handle lifecycle: create -> load engine -> run -> release.
extern "C" ANSFR_API int CreateANSRFHandle(ANSCENTER::ANSFacialRecognition * *Handle,
const char* licenseKey,
const char* configFilePath,
const char* databaseFilePath,
const char* recogniserFilePath,
const char* detectorFilePath,
int precisionType=0,
float knownPersonThreshold=0.35,
int enableAgeGender=1,
int enableFaceEmotions=1,
int enableHeadPose=1,
int minFaceSize=30,
float faceDetectorThreshold=0.5,
int enableFaceLiveness=1,
int enableAntiSpoofing=1);
extern "C" ANSFR_API int LoadANSRFEngine(ANSCENTER::ANSFacialRecognition * *Handle);
extern "C" ANSFR_API int ReleaseANSRFHandle(ANSCENTER::ANSFacialRecognition * *Handle);
// Inference entry points: *_Binary variants take raw pixel buffers (width x height),
// the others take an encoded JPEG byte stream. All return a JSON result string.
extern "C" ANSFR_API std::string RunANSRFDetector(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSFR_API std::string RunANSRFDetectorBinary(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
extern "C" ANSFR_API std::string RunANSRFInference(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSFR_API std::string RunANSRFInferenceBinary(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
extern "C" ANSFR_API std::string RunANSRFRecognition(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSFR_API std::string RunANSRFRecognitionBinary(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
extern "C" ANSFR_API std::string RunANSRFFaceDetector(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
//// LabVIEW API: results are written into LabVIEW string handles (LStrHandle).
extern "C" ANSFR_API int RunDetector_LV(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult);
extern "C" ANSFR_API int RunInference_LV(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult);
extern "C" ANSFR_API int RunRecognition_LV(ANSCENTER::ANSFacialRecognition * *Handle, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle detectionResult);
extern "C" ANSFR_API int RunDetectorWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId,LStrHandle detectionResult);
extern "C" ANSFR_API int RunInferenceWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId,LStrHandle detectionResult);
extern "C" ANSFR_API int RunRecognitionWithCamId_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId,LStrHandle detectionResult);
extern "C" ANSFR_API int RunFaceDetection_LV(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength, const char* cameraId, LStrHandle detectionResult);
// *Complete_LV variants take a cv::Mat directly and can optionally return the
// annotated frame as a JPEG string (getJpegString / jpegImageSize).
extern "C" ANSFR_API int RunInferenceComplete_LV(ANSCENTER::ANSFacialRecognition** Handle, cv::Mat** cvImage, const char* cameraId, int getJpegString, int jpegImageSize, LStrHandle detectionResult, LStrHandle imageStr);
extern "C" ANSFR_API int RunFaceDetectionComplete_LV(ANSCENTER::ANSFacialRecognition** Handle, cv::Mat** cvImage, const char* cameraId, int getJpegString, int jpegImageSize,LStrHandle detectionResult, LStrHandle imageStr);
extern "C" ANSFR_API int RunFaceRecogniserComplete_LV(ANSCENTER::ANSFacialRecognition** Handle, cv::Mat** cvImage, const char* cameraId, int getJpegString, int jpegImageSize,LStrHandle detectionResult, LStrHandle imageStr);
// User management APIs
extern "C" ANSFR_API int InsertUser(ANSCENTER::ANSFacialRecognition * *Handle, const char* userCode, const char* userName);
extern "C" ANSFR_API int UpdateUser(ANSCENTER::ANSFacialRecognition * *Handle, int userId, const char* userCode, const char* userName);
extern "C" ANSFR_API int DeleteUser(ANSCENTER::ANSFacialRecognition * *Handle, int userId);
extern "C" ANSFR_API int DeleteUsers(ANSCENTER::ANSFacialRecognition * *Handle, int* userIds, int count);
extern "C" ANSFR_API int InsertFace(ANSCENTER::ANSFacialRecognition * *Handle, int userId, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSFR_API int InsertFaces(ANSCENTER::ANSFacialRecognition** Handle, int userId, unsigned char* jpeg_string, unsigned int bufferLength, LStrHandle faceIds);
extern "C" ANSFR_API int CheckFaceEmbedding(ANSCENTER::ANSFacialRecognition** Handle, unsigned char* jpeg_string, unsigned int bufferLength);
extern "C" ANSFR_API int InsertFaceBinary(ANSCENTER::ANSFacialRecognition * *Handle, int userId, unsigned char* jpeg_bytes, unsigned int width, unsigned int height);
extern "C" ANSFR_API int DeleteFace(ANSCENTER::ANSFacialRecognition * *Handle, int faceId); // faceId is obtained from InsertFace/GetFaces results
extern "C" ANSFR_API int Reload(ANSCENTER::ANSFacialRecognition * *Handle);
extern "C" ANSFR_API int UpdateParameters(ANSCENTER::ANSFacialRecognition * *Handle, float knownPersonThreshold, int enableAgeGender, int enableFaceEmotions, int enableHeadPose, int minFaceSize, float faceDetectorThreshold, int faceliveness, int antiSpoof, int removeFakeFaces);
// NOTE(review): exported name is misspelled ("GetParamters"); kept as-is for ABI compatibility.
extern "C" ANSFR_API int GetParamters(ANSCENTER::ANSFacialRecognition** Handle, LStrHandle faceParams);
extern "C" ANSFR_API int UpdateFaceQueue(ANSCENTER::ANSFacialRecognition** Handle, int queueSize, int numKnownFaceInQueue, int enableFaceQueue);
extern "C" ANSFR_API int GetFaceQueue(ANSCENTER::ANSFacialRecognition** Handle, LStrHandle faceQueue);
// String-based lookups (C++ callers only — std::string/std::vector parameters).
extern "C" ANSFR_API int GetUserString(ANSCENTER::ANSFacialRecognition * *Handle, int userId, std::string& userRecord);
extern "C" ANSFR_API int GetUsersString(ANSCENTER::ANSFacialRecognition * *Handle, std::string & userRecords, std::vector<int>& userIds);
extern "C" ANSFR_API int GetFaceString(ANSCENTER::ANSFacialRecognition * *Handle, int faceId, std::string & faceRecord);
extern "C" ANSFR_API int GetFacesString(ANSCENTER::ANSFacialRecognition * *Handle, int userId, std::string & faceRecords);
// LabVIEW-handle variants of the lookups above.
extern "C" ANSFR_API int GetUser(ANSCENTER::ANSFacialRecognition * *Handle,int userId, LStrHandle userRecord);
extern "C" ANSFR_API int GetUsers(ANSCENTER::ANSFacialRecognition * *Handle,LStrHandle userRecords);
extern "C" ANSFR_API int GetFace(ANSCENTER::ANSFacialRecognition * *Handle,int faceId, LStrHandle faceRecord);
extern "C" ANSFR_API int GetFaces(ANSCENTER::ANSFacialRecognition * *Handle,int userId, LStrHandle faceRecords);
extern "C" ANSFR_API int DeleteFacesByUser(ANSCENTER::ANSFacialRecognition * *Handle,int userId);
// Standalone utility: blur metric for a JPEG image (no handle required).
extern "C" ANSFR_API double BlurCalculation(unsigned char* jpeg_string, unsigned int bufferLength);
#endif