// ANSCORE/modules/ANSFR/ANSFRCommon.h
// (last modified 2026-03-28 16:54:11 +11:00)
#ifndef ANSFRCOMMON_H
#define ANSFRCOMMON_H
#define ANSF_API __declspec(dllexport)
#pragma once
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "NvInfer.h"
//#include <cublasLt.h>
#include "ANSLicense.h"
#include "LabVIEWHeader/extcode.h"
#include "Utility.h"
#include "macros.h"
#include "ANSEngineCommon.h"
#include "openvino/openvino.hpp"
using namespace nvinfer1;
// CUDACHECK: evaluate a CUDA call and abort the process on a non-zero
// return code. Wrapped in do { } while (0) so the macro behaves as a
// single statement after an if/else without braces.
// FIX: the failure message now includes __FILE__/__LINE__ so an aborted
// run points at the failing call site instead of an anonymous error code.
#define CUDACHECK(status) \
do\
{\
auto ret = (status);\
if (ret != 0)\
{\
std::cerr << "Cuda failure: " << ret << " at " << __FILE__ << ":" << __LINE__ << std::endl;\
abort();\
}\
} while (0)
namespace ANSCENTER {
// One registered user together with the face images enrolled for them.
struct UserRecord {
    int UserId;                // user identifier (presumably the database key — confirm)
    std::string UserCode;      // external/user-facing code for this user
    std::string UserName;      // display name
    std::vector<int> FaceIds;  // FaceRecord::FaceId values enrolled for this user
};
// One enrolled face image, linked back to its owning UserRecord.
struct FaceRecord {
    int FaceId;             // face identifier (referenced by UserRecord::FaceIds)
    int UserId;             // owning user's UserRecord::UserId
    std::string ImagePath;  // path to the stored face image on disk
};
// Raw face-detector output record: box, confidence, facial landmarks.
// alignas(float) keeps the struct layout float-aligned — presumably so a
// raw float output buffer can be reinterpreted as Detection[]; confirm
// against the detector's post-processing code.
struct alignas(float) Detection {
    float bbox[4];           //x1 y1 x2 y2
    float class_confidence;  // detection confidence score
    float landmark[10];      // 10 floats; presumably 5 (x, y) landmark points — confirm
};
// A face crop extracted from a frame (see ANSFRHelper::GetCroppedFaces),
// plus the crop's rectangle in the source image.
struct CroppedFace {
    cv::Mat face;     // cropped face image
    cv::Mat faceMat;  // second representation of the crop (resized/preprocessed — confirm)
    int x1, y1, x2, y2;  // crop rectangle corners in the source frame
};
// Integer bounding box with a confidence score.
struct Bbox {
    int x1, y1, x2, y2;  // top-left (x1, y1) and bottom-right (x2, y2) corners
    float score;         // detection confidence
};
// A file path paired with its class label (see ANSFRHelper::GetFilePaths).
struct Paths {
    std::string absPath;    // absolute path of the file
    std::string className;  // class/label name associated with the file
};
// Detector anchor box: center plus size, as fractions/scales
// (cx/cy center, sx/sy extents — exact normalization depends on the
// detector's decode step; confirm there).
struct AnchorBox {
    float cx;  // anchor center x
    float cy;  // anchor center y
    float sx;  // anchor size/scale x
    float sy;  // anchor size/scale y
};
// Runtime configuration for the face-recognition pipeline, populated from
// a config file via ANSFRHelper::LoadConfigFile().
struct FRConfig {
    std::string _databasePath;      // path to the user/face database
    int _videoFrameWidth;           // expected input video frame width
    int _videoFrameHeight;          // expected input video frame height

    // --- Face detector settings ---
    std::string _detEngineFile;     // detector engine file (TensorRT, by file usage — confirm)
    std::vector<int> _detInputShape;           // detector input tensor shape
    std::string _detInputName;                 // detector input binding name
    std::vector<std::string> _detOutputNames;  // detector output binding names
    int _detMaxBatchSize;           // max detector batch size
    float _detThresholdNMS;         // NMS threshold (see ANSFRHelper::NMS)
    float _detThresholdBbox;        // minimum box confidence to keep a detection

    // --- Face recognizer (embedding) settings ---
    std::vector<int> _recInputShape;  // recognizer input tensor shape
    int _recOutputDim;                // embedding output dimensionality
    std::string _recEngineFile;       // recognizer engine file
    int _maxFacesPerScene;            // cap on faces processed per frame
    float _knownPersonThreshold;      // similarity threshold to accept a known identity — confirm direction
    int _recMaxBatchSize;             // max recognizer batch size
    std::string _recInputName;        // recognizer input binding name
    std::string _recOutputName;       // recognizer output binding name

    // --- Embedding generation / cropping flags ---
    bool _gen;                    // presumably enables offline embedding generation — confirm
    std::string _gen_imgSource;   // image source used when _gen is set
    bool _gen_imgIsCropped;       // source images are already face crops
    bool _apiImageIsCropped;      // images supplied through the API are already face crops
};
// Head pose angles; r/p/y match HeadPoseDetection's outputAngleR/P/Y
// outputs (roll/pitch/yaw by name — units unspecified here; confirm).
struct HeadPoseResults {
    float angle_r;  // roll
    float angle_p;  // pitch
    float angle_y;  // yaw
};
// Output of AgeGenderDetection::runInfer.
struct AgeGenderResults {
    float age;       // estimated age
    float maleProb;  // probability the face is male
};
//// Class for face attribute.
//// Accumulates per-face attribute estimates (age/gender, emotions, head
//// pose, landmarks, anti-spoofing, liveness) for one detected face, plus
//// per-attribute enable flags. update*() setters store raw scores; the
//// getters below expose the derived values.
class ANSF_API Face {
public:
    //using Ptr = std::shared_ptr<Face>;
    // id: identifier assigned to this face; location: bounding box in the frame.
    explicit Face(size_t id, cv::Rect& location);

    // --- Attribute setters (store the latest inference results) ---
    void updateAge(float value);
    void updateGender(float value);            // single score; how it splits into male/female scores is defined in the .cpp — confirm
    void updateEmotions(std::map<std::string, float> values);
    void updateHeadPose(HeadPoseResults values);
    void updateLandmarks(std::vector<float> values);
    void updateRealFaceConfidence(float value);  // anti-spoofing confidence
    void updateFaceLiveness(int value);

    // --- Attribute getters ---
    int getAge();
    bool isMale();
    bool isReal();                       // anti-spoofing verdict (thresholding done in the .cpp — confirm)
    float getAntispoofingScore();
    int getFaceLiveness();
    std::map<std::string, float> getEmotions();
    std::pair<std::string, float> getMainEmotion();  // highest-scoring emotion and its score
    HeadPoseResults getHeadPose();
    const std::vector<float>& getLandmarks();
    size_t getId();

    // --- Per-attribute enable flags ---
    void ageGenderEnable(bool value);
    void emotionsEnable(bool value);
    void headPoseEnable(bool value);
    void landmarksEnable(bool value);
    void antispoofingEnable(bool value);
    void faceLivenessEnable(bool value);
    bool isAgeGenderEnabled();
    bool isEmotionsEnabled();
    bool isHeadPoseEnabled();
    bool isLandmarksEnabled();
    bool isAntispoofingEnabled();
    bool isFaceLivenessEnabled();

public:
    cv::Rect _location;     // face bounding box in the frame
    float _intensity_mean;  // mean image intensity of the face region (see calcMean)

private:
    size_t _id;
    float _age;
    float _maleScore;
    float _femaleScore;
    std::map<std::string, float> _emotions;  // emotion label -> score
    HeadPoseResults _headPose;
    std::vector<float> _landmarks;
    float _realFaceConfidence;  // anti-spoofing confidence
    int _faceLiveness;
    bool _isAgeGenderEnabled;
    bool _isEmotionsEnabled;
    bool _isHeadPoseEnabled;
    bool _isLandmarksEnabled;
    bool _isAntispoofingEnabled;
    bool _isFaceLivenessEnabled;
};
// Common base for the OpenVINO-backed attribute detectors below. Holds
// the inference request/tensor state they share; each subclass implements
// read() to load its own model.
class ANSF_API BaseDetection {
public:
    // pathToModel: model file to load in read(); doRawOutputMessages:
    // when true, presumably enables verbose raw-output logging — confirm in the .cpp.
    BaseDetection(const std::string& pathToModel, bool doRawOutputMessages);
    virtual ~BaseDetection() = default;
    // Reads/loads this detector's model via the given OpenVINO core.
    virtual std::shared_ptr<ov::Model> read(const ov::Core& core) = 0;
    // Whether this detector is active (criteria defined in the .cpp — confirm).
    bool enabled() const;

    ov::InferRequest request;   // inference request reused across calls
    ov::Tensor inTensor;        // input tensor bound to `request`
    std::string pathToModel;    // model file path passed to the constructor
    ov::Shape inShape;          // model input shape
    // NOTE(review): const data member — makes the class non-copy-assignable.
    const bool doRawOutputMessages;
};
// Head-pose estimator (OpenVINO): produces roll/pitch/yaw angles for a
// face image via runInfer().
class ANSF_API HeadPoseDetection :public BaseDetection {
public:
    std::recursive_mutex _mutex;  // serializes access to inference state
    // Output tensor names for the three angle heads.
    std::string outputAngleR;
    std::string outputAngleP;
    std::string outputAngleY;
    std::string _modelFilePath;
    //size_t enquedFaces;
    cv::Mat cameraMatrix;  // camera intrinsics (used for pose visualization/projection — confirm)

    HeadPoseDetection(const std::string& pathToModel, bool doRawOutputMessages);
    // Loads the head-pose model via the given OpenVINO core.
    virtual std::shared_ptr<ov::Model> read(const ov::Core& core) override;
    // Runs inference on `frame` (presumably a face crop — confirm) and
    // returns the estimated roll/pitch/yaw.
    HeadPoseResults runInfer(const cv::Mat& frame);
    //void submitRequest();
    //void enqueue(const cv::Mat& face);
    //HeadPoseResults getResult(int idx);
    //std::vector<HeadPoseResults> getAllResults();
};
// Age/gender estimator (OpenVINO): produces an AgeGenderResults (age and
// male probability) for a face image via runInfer().
class ANSF_API AgeGenderDetection :public BaseDetection {
public:
    std::recursive_mutex _mutex;  // serializes access to inference state
    // Output tensor names for the age and gender heads.
    std::string outputAge;
    std::string outputGender;
    //size_t enquedFaces;
    std::string _modelFilePath;

    AgeGenderDetection(const std::string& pathToModel,
        bool doRawOutputMessages);
    // Loads the age/gender model via the given OpenVINO core.
    virtual std::shared_ptr<ov::Model> read(const ov::Core& core) override;
    // Runs inference on `frame` (presumably a face crop — confirm) and
    // returns the estimated age and male probability.
    AgeGenderResults runInfer(const cv::Mat& frame);
    //void submitRequest();
    //void enqueue(const cv::Mat& face);
    //AgeGenderResults getResult(int idx);
    //std::vector<AgeGenderResults> getAllResults();
};
// Emotion classifier (OpenVINO): scores a face image against the fixed
// label set in `emotionsVec` via runInfer().
class ANSF_API EmotionsDetection : public BaseDetection {
public:
    std::recursive_mutex _mutex;  // serializes access to inference state
    //size_t enquedFaces;
    std::string _modelFilePath;
    // Emotion class labels, in the model's output order.
    const std::vector<std::string> emotionsVec = { "neutral", "happy", "sad", "surprise", "anger" };

    EmotionsDetection(const std::string& pathToModel,
        bool doRawOutputMessages);
    // Loads the emotions model via the given OpenVINO core.
    virtual std::shared_ptr<ov::Model> read(const ov::Core& core) override;
    // Runs inference on `frame` (presumably a face crop — confirm) and
    // returns a map of emotion label -> score.
    std::map<std::string, float> runInfer(const cv::Mat& frame);
    //void submitRequest();
    //void enqueue(const cv::Mat& face);
    //std::map<std::string, float> getResult(int idx);
    //std::vector<std::map<std::string, float>> getAllResults();
};
class FacialLandmarksDetection : public BaseDetection {
public:
//size_t enquedFaces;
std::vector<std::vector<float>> landmarks_results;
std::vector<cv::Rect> faces_bounding_boxes;
std::string _modelFilePath;
std::recursive_mutex _mutex;
FacialLandmarksDetection(const std::string& pathToModel,
bool doRawOutputMessages);
virtual std::shared_ptr<ov::Model> read(const ov::Core& core) override;
std::vector<float> runInfer(const cv::Mat& frame);
//void submitRequest();
//void enqueue(const cv::Mat& face);
//std::vector<float> getResult(int idx);
};
// Anti-spoofing classifier (OpenVINO): scores how likely a face image is
// a real (live) face rather than a spoof; the score is consumed via
// Face::updateRealFaceConfidence — confirm score direction in the .cpp.
class ANSF_API AntispoofingClassifier : public BaseDetection {
public:
    std::recursive_mutex _mutex;  // serializes access to inference state
    //size_t enquedFaces;
    std::string _modelFilePath;

    AntispoofingClassifier(const std::string& pathToModel,bool doRawOutputMessages);
    // Loads the anti-spoofing model via the given OpenVINO core.
    virtual std::shared_ptr<ov::Model> read(const ov::Core& core) override;
    // Runs inference on `frame` (presumably a face crop — confirm) and
    // returns the anti-spoofing confidence score.
    float runInfer(const cv::Mat& frame);
    //void submitRequest();
    //void enqueue(const cv::Mat& frame);
    //float getResult(int idx);
};
// Stateless helper collection for the face-recognition module: image
// prep, file-system walking, JSON serialization, detector post-processing
// (IoU/NMS), and TensorRT weight/layer utilities.
// NOTE(review): not marked ANSF_API — presumably internal to the DLL;
// confirm it is never called across the module boundary.
class ANSFRHelper {
public:
    // Serializes a cv::Mat to a raw byte buffer; bufferLengh receives the length.
    static unsigned char* CVMatToBytes(cv::Mat image, unsigned int& bufferLengh);
    // Crops each detected box from `input`, resized to resize_w x resize_h.
    static void GetCroppedFaces(const cv::Mat& input, std::vector<Object>& outputBbox, int resize_w, int resize_h, std::vector<struct CroppedFace>& croppedFaces);
    // Collects file paths (with class names) under rootPath.
    static void GetFilePaths(std::string rootPath, std::vector<struct Paths>& paths);
    // Parses configFile into `config`; returns false on failure — confirm.
    static bool LoadConfigFile(std::string configFile, FRConfig& config);
    static std::string StringCurrentDateTime(); // Return current datetime in string format

    // --- JSON serialization helpers ---
    static std::string FaceObjectsToJsonString(const std::vector<FaceResultObject>& faces);
    static std::string UserRecordToJsonString(const UserRecord userRecord);
    static std::string UserRecordsToJsonString(const std::vector<UserRecord> userRecords);
    static std::string FaceRecordToJsonString(const FaceRecord faceRecord);
    static std::string FaceRecordsToJsonString(const std::vector<FaceRecord> faceRecords);
    static std::string FaceAttributeToJsonString(Face face);

    // --- Image / detector post-processing ---
    // Resizes/pads `img` to inputW x inputH for network input.
    static cv::Mat PreprocessImg(cv::Mat& img, int inputW, int inputH);
    // Lists files in a directory; returns a status code — confirm convention.
    static int ReadFilesInDir(const char* pDirName, std::vector<std::string>& fileNames);
    // Adapts a bbox + 10-value landmark array to a rect in `img` coordinates.
    static cv::Rect GetRectAdaptLandmark(cv::Mat& img, int inputW, int inputH, float bBox[4], float lmk[10]);
    // Intersection-over-union of two x1,y1,x2,y2 boxes.
    static float IOU(float lbox[4], float rbox[4]);
    // Ordering predicate for Detection (used for NMS sorting — confirm).
    static bool CMP(const Detection& a, const Detection& b);
    // Non-maximum suppression over the raw detector output buffer.
    static void NMS(std::vector<Detection>& res, float* output, float nms_thresh = 0.4);

    // --- TensorRT network-building utilities ---
    // Loads a weight file into a name -> Weights map.
    static std::map<std::string, Weights> LoadWeights(const std::string file);
    static Weights GetWeights(std::map<std::string, Weights>& weightMap, std::string key);
    // Adds a BatchNorm2d equivalent as a TensorRT IScaleLayer.
    static IScaleLayer* AddBatchNorm2D(INetworkDefinition* network, std::map<std::string, Weights>& weightMap, ITensor& input, std::string lName, float eps);
};
// Intersection-over-union of two rectangles.
float calcIoU(cv::Rect& src, cv::Rect& dst);
// Mean intensity of `src` (feeds Face::_intensity_mean — confirm).
float calcMean(const cv::Mat& src);
// Finds the face in `faces` matching `rect` (matching criteria in the
// .cpp — confirm); returns the matched Face by value.
Face matchFace(cv::Rect rect, std::list<Face>& faces);
// Class to extend TensorRT logger
/* class TRTLogger : public nvinfer1::ILogger {
public:
void log(Severity severity, const char* msg) noexcept override {
if (severity <= Severity::kWARNING) {
std::cout << "TENSORRT_ENGINE:" << msg << std::endl;
}
}
void LogDebug(const std::string source, std::string message) {
std::cout << source << ":" << message << std::endl;
}
void LogInfo(const std::string source, std::string message) {
std::cout << source << ":" << message << std::endl;
}
void LogError(const std::string source, std::string message) {
std::cout << source << ":" << message << std::endl;
}
void LogFatal(const std::string source, std::string message) {
std::cout << source << ":" << message << std::endl;
}
~TRTLogger() {
}
};
class MatMul {
public:
MatMul();
~MatMul();
void Init(float* knownEmbeds, int numRow, int numCol);
void Calculate(float* embeds, int embedCount, float* outputs);
private:
cudaDataType_t cudaDataType = CUDA_R_32F;
cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;
cublasLtHandle_t ltHandle;
cublasOperation_t transa = CUBLAS_OP_T;
cublasOperation_t transb = CUBLAS_OP_N;
void* workspace;
const size_t workspaceSize = 1024 * 1024 * 4;
cudaStream_t stream;
float* dA, * dB, * dC;
const float alpha = 1, beta = 0;
int m, n, k, lda, ldb, ldc;
cublasLtMatmulDesc_t operationDesc = NULL;
cublasLtMatrixLayout_t Adesc = NULL;
cublasLtMatmulPreference_t preference = NULL;
};
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
{
public:
Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
virtual ~Int8EntropyCalibrator2();
int getBatchSize() const TRT_NOEXCEPT override;
bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
private:
int batchsize_;
int input_w_;
int input_h_;
int img_idx_;
std::string img_dir_;
std::vector<std::string> img_files_;
size_t input_count_;
std::string calib_table_name_;
const char* input_blob_name_;
bool read_cache_;
void* device_input_;
std::vector<char> calib_cache_;
};*/
}
#endif