// File: ANSCORE/modules/ANSODEngine/ANSOVFaceDetector.h
#ifndef ANSOVFaceDetector_H
#define ANSOVFaceDetector_H
#pragma once
#include <string.h>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <typeinfo>
#include <vector>
#include "ANSEngineCommon.h"
#include "cnn.hpp"
#include "ONNXEngine.h"
//#define USEOVFACEDETECTOR
//#define FACEDEBUG
namespace ANSCENTER {
// Face-detection engine built around an SCRFD ONNX detector, with an
// optional OpenVINO-based code path that is compiled in only when
// USEOVFACEDETECTOR is defined (see the commented-out #define above).
// Implements the ANSFDBase detector interface from ANSEngineCommon.h.
class ANSENGINE_API ANSOVFD :public ANSFDBase {
public:
// Initializes the engine from a password-protected model zip and fills
// labelMap with the model's class labels. Returns false on failure.
// (Exact license/config semantics live in ANSFDBase / the .cpp.)
virtual bool Initialize(std::string licenseKey, ModelConfig modelConfig, const std::string& modelZipFilePath, const std::string& modelZipPassword, std::string& labelMap) override;
// Loads (or reloads) the detector model from a password-protected zip.
virtual bool LoadModel(const std::string& modelZipFilePath, const std::string& modelZipPassword)override;
// Loads the model from an unpacked folder instead of a zip archive.
virtual bool LoadModelFromFolder(std::string licenseKey, ModelConfig modelConfig, std::string modelName, std::string className,const std::string& modelFolder, std::string& labelMap)override;
// Produces an optimized model (FP16 when fp16 is true) and reports the
// output location via optimizedModelFolder.
// NOTE(review): not marked `override` — confirm whether ANSFDBase
// declares this virtual; add `override` if so.
virtual bool OptimizeModel(bool fp16, std::string& optimizedModelFolder);
// Runs face detection on a single frame and returns the detected
// objects. Flag semantics (dynamic sizing, face validation, liveness
// check) are implemented in the .cpp — TODO confirm defaults there.
std::vector<Object> RunInference(const cv::Mat& input, bool useDynamicImage = true, bool validateFace=false, bool facelivenessCheck=true);
// Overload that additionally associates results with a camera id.
std::vector<Object> RunInference(const cv::Mat& input, const std::string& camera_id, bool useDynamicImage = true, bool validateFace=false, bool facelivenessCheck=true);
// Releases engine resources; returns false on failure.
bool Destroy();
~ANSOVFD();
private:
std::mutex _mtx; // presumably serializes inference calls — confirm in .cpp
std::string _modelFilePath;
std::unique_ptr<SCRFD> _face_detector = nullptr; // primary ONNX SCRFD detector
// DML device-lost recovery (see ANSONNXYOLO.h for rationale)
bool _dmlDeviceLost{ false };
std::string _scrfdModelPath; // cached for CPU fallback recreation
std::vector<Object> _movementObjects; // retained detections across frames (see _retainDetectedFaces)
int _retainDetectedFaces{ 0 };
// Internal inference worker shared by the public RunInference overloads.
std::vector<Object> Inference(const cv::Mat& input, const std::string& camera_id, bool useDynamicImage = true, bool validateFace=false);
#ifdef USEOVFACEDETECTOR
// ---- OpenVINO-only members (compiled when USEOVFACEDETECTOR is set) ----
std::string _landmarkModelFilePath;
std::unique_ptr<VectorCNN> _faceLandmark = nullptr; // facial-landmark sub-model
ov::InferRequest request;
ov::Tensor inTensor;
ov::Shape inShape;
ov::CompiledModel compiled_model_;
const size_t ndetections = 200; // cap on detections considered per frame
std::string output; // detector output tensor name — TODO confirm
std::string labels_output;
bool _useYoloFaceDetector{true}; // selects YOLO vs SSD OV pipeline below
size_t objectSize; // elements per detection in the output blob
float width;
float height;
size_t model_input_width;
size_t model_input_height;
// Bounding-box post-processing coefficients (enlarge + x/y shift).
float bb_enlarge_coefficient;
float bb_dx_coefficient;
float bb_dy_coefficient;
// Reads an OV model from disk and returns it for compilation.
std::shared_ptr<ov::Model> read(const ov::Core& core, std::string pathToModel);
// OpenVINO inference entry point; dispatches to SSD or YOLO path.
std::vector<Object> runOVInference(const cv::Mat& frame, const std::string& cameraId);
void InitialSSDModel(const std::string& deviceName);
void InitialYoloModel(const std::string& deviceName);
// Resizes/normalizes a frame into the model's input layout.
cv::Mat PreProcessing(const cv::Mat& source);
std::vector<Object> runYoloInference(const cv::Mat& input, const std::string& cameraId);
cv::Mat resized_frame_; // reused preprocessing buffer
cv::Point2f factor_; // input-to-model scale factors
cv::Size2f model_input_shape_;
cv::Size model_output_shape_;
bool InitFaceDetector();
#endif
};
}
#endif