// File: ANSCORE/ANSODEngine/ANSONNXPOSE.h
// ONNX Runtime based pose-estimation engine header (C++).
// NOTE(review): the original file began with non-code file-viewer residue
// ("Files", "81 lines", "3.9 KiB", "C++") that would not compile; it has been
// converted into this comment header.
#ifndef ANSONNXPOSE_H
#define ANSONNXPOSE_H
#pragma once
#include "ANSEngineCommon.h"
#include "engine.h"
namespace ANSCENTER {
class ANSENGINE_API ANSONNXPOSE : public ANSODBase {
public:
    // -- ANSODBase overrides -------------------------------------------------

    /// Validates the licence, loads the model from the encrypted zip archive
    /// and fills `labelMap` with the class-label mapping.
    /// Returns false on licence or model failure.
    virtual bool Initialize(std::string licenseKey, ModelConfig modelConfig, const std::string& modelZipFilePath, const std::string& modelZipPassword, std::string& labelMap) override;

    /// Loads (or reloads) the model weights from the encrypted zip archive.
    virtual bool LoadModel(const std::string& modelZipFilePath, const std::string& modelZipPassword) override;

    /// Variant of Initialize() that reads the model files from a plain folder
    /// instead of a password-protected archive.
    virtual bool LoadModelFromFolder(std::string licenseKey, ModelConfig modelConfig, std::string modelName, std::string className, const std::string& modelFolder, std::string& labelMap) override;

    /// Produces an optimized copy of the model (FP16 when requested) and
    /// reports the folder it was written to.
    /// NOTE(review): declared `virtual` but without `override`, unlike its
    /// siblings — confirm against ANSODBase whether it is meant to override.
    virtual bool OptimizeModel(bool fp16, std::string& optimizedModelFolder);

    // -- Inference -----------------------------------------------------------

    /// Runs pose inference on a single frame (no camera association).
    std::vector<Object> RunInference(const cv::Mat& input);
    /// Runs pose inference on a single frame, tagging results with `camera_id`.
    std::vector<Object> RunInference(const cv::Mat& input, const std::string& camera_id);

    /// Releases the resources owned by this instance.
    bool Destroy();
    ~ANSONNXPOSE();

    /// Unique per-instance id (see instanceCounter_).
    int getInstanceId() const { return instanceId_; }

private:
    std::string _modelFilePath;     // Path of the currently loaded model file
    bool _modelLoadValid{ false };  // FIX: was uninitialized — default to "no model loaded"
    bool _fp16{ false };            // Use FP16 weights when true

    /// Product of all dimensions in a tensor shape (element count).
    size_t vectorProduct(const std::vector<int64_t>& vector);

    /// Letterbox-resizes `image` into `outImage` to match `newShape`,
    /// padding with `color` (stride-aligned when `auto_` is set).
    void letterBox(const cv::Mat& image, cv::Mat& outImage,
                   const cv::Size& newShape,
                   const cv::Scalar& color = cv::Scalar(114, 114, 114),
                   bool auto_ = true,
                   bool scaleFill = false,
                   bool scaleUp = true,
                   int stride = 32);

    /// Non-maximum suppression over candidate boxes; indices of the surviving
    /// boxes are written to `indices`.
    void NMSBoxes(const std::vector<BoundingBox>& boundingBoxes,
                  const std::vector<float>& scores,
                  float scoreThreshold,
                  float nmsThreshold,
                  std::vector<int>& indices);

    /// Draws keypoint/skeleton overlays for detections above the thresholds.
    void drawPoseEstimation(cv::Mat& image,
                            const std::vector<Object>& detections,
                            float confidenceThreshold = 0.5,
                            float kptThreshold = 0.5);

    /// Runs a dummy inference so the first real frame does not pay the
    /// lazy-initialization cost.
    void warmupModel();

    /// Creates the ONNX Runtime session for `modelPath` (GPU when requested).
    bool Init(const std::string& modelPath, bool useGPU = true, int deviceId = 0);

    /// Converts a frame into the float blob fed to the model; returns the
    /// (letterboxed) image actually used as network input.
    cv::Mat preprocess(const cv::Mat& image, float*& blob, std::vector<int64_t>& inputTensorShape);

    /// Decodes raw output tensors into Object detections mapped back to the
    /// original image size, tagged with `camera_id`.
    std::vector<Object> postprocess(const cv::Size& originalImageSize, const cv::Size& resizedImageShape,
                                    const std::vector<Ort::Value>& outputTensors, const std::string& camera_id);

    /// preprocess + session run + postprocess for one frame.
    std::vector<Object> detect(const cv::Mat& image, const std::string& camera_id);

private:
    static std::atomic<int> instanceCounter_;       // Thread-safe counter
    int instanceId_{ 0 };                           // FIX: was uninitialized (no user-declared ctor)
    int deviceId_ = 0;                              // GPU device ordinal

    Ort::Env env{ nullptr };                        // ONNX Runtime environment
    Ort::SessionOptions sessionOptions{ nullptr };  // Session options for ONNX Runtime
    Ort::Session session{ nullptr };                // ONNX Runtime session for running inference

    bool isDynamicInputShape{};                     // Flag indicating if input shape is dynamic
    cv::Size inputImageShape;                       // Expected input image shape for the model

    // Allocated node-name storage plus the raw pointers the session API wants.
    std::vector<Ort::AllocatedStringPtr> inputNodeNameAllocatedStrings;
    std::vector<const char*> inputNames;
    std::vector<Ort::AllocatedStringPtr> outputNodeNameAllocatedStrings;
    std::vector<const char*> outputNames;
    size_t numInputNodes{ 0 };                      // FIX: was uninitialized, two names on one line
    size_t numOutputNodes{ 0 };                     // Number of output nodes in the model

    // 17-keypoint skeleton edges used by drawPoseEstimation().
    // NOTE(review): this is a per-instance const copy; consider
    // `inline static const` (C++17) to share one table — confirm the
    // project's language level before changing.
    const std::vector<std::pair<int, int>> POSE_SKELETON = {
        // Face connections
        {0,1}, {0,2}, {1,3}, {2,4},
        // Head-to-shoulder connections
        {3,5}, {4,6},
        // Arms
        {5,7}, {7,9}, {6,8}, {8,10},
        // Body
        {5,6}, {5,11}, {6,12}, {11,12},
        // Legs
        {11,13}, {13,15}, {12,14}, {14,16}
    };
};
}
#endif
// NOTE(review): a stray duplicate `#pragma once` appeared here, AFTER the
// closing `#endif` — a paste artifact; the guard is already declared at the
// top of the file, so it has been commented out:
// #pragma once