1913 lines
85 KiB
C++
1913 lines
85 KiB
C++
|
|
#include "ANSODTest.h"
|
||
|
|
#include "ANSCUSTOMPY.h"
|
||
|
|
#include "ANSMotionDetector.h"
|
||
|
|
#include "ANSONNXSAM3.h"
|
||
|
|
#include <thread>
|
||
|
|
#include <atomic>
|
||
|
|
#include <chrono>
|
||
|
|
#include <mutex>
|
||
|
|
//#include "ANSSAM3.h" // TensorRT headers conflict with Windows SDK (ACCESS_MASK)
|
||
|
|
|
||
|
|
// Defined in ANSSAM3-UnitTest.cpp (separate translation unit to avoid header conflict)
|
||
|
|
int SAM3TRT_UnitTest();
|
||
|
|
int SAM3TRT_ImageTest();
|
||
|
|
|
||
|
|
std::string ReadJsonFileContent(std::string filePath) {
    // Slurp the entire file into a string (text mode).
    // Returns an empty string when the file cannot be opened.
    std::ifstream input(filePath);
    std::ostringstream buffer;
    buffer << input.rdbuf();
    return buffer.str();
}
|
||
|
|
//int ParseJSonFile() {
|
||
|
|
// std::string jsonFile = "C:\\Programs\\DemoAssets\\modelConfig1.json";
|
||
|
|
// std::string jsonStringContent = ReadJsonFileContent(jsonFile);
|
||
|
|
// ANSCENTER::Params param= ANSCENTER::ANSUtilityHelper::ParseCustomParameters(jsonStringContent);
|
||
|
|
//
|
||
|
|
// string jsonConvertContent = ANSCENTER::ANSUtilityHelper::SerializeCustomParamters(param);
|
||
|
|
// std::cout << "JSON Content: " << jsonConvertContent << std::endl;
|
||
|
|
//
|
||
|
|
// return 0;
|
||
|
|
//
|
||
|
|
//}
|
||
|
|
int ANSCUSTOMPY_Test() {
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
infHandle = new ANSCENTER::ANSCUSTOMPY();
|
||
|
|
// Load model from folder
|
||
|
|
std::string modelFolder = "C:\\Programs\\PythonProjects\\YoloInference";
|
||
|
|
ANSCENTER::ModelConfig modelConfig;
|
||
|
|
modelConfig.modelConfThreshold = 0.5f;
|
||
|
|
modelConfig.modelMNSThreshold = 0.4f;
|
||
|
|
modelConfig.detectionScoreThreshold = 0.4f;
|
||
|
|
modelConfig.detectionType = ANSCENTER::DetectionType::DETECTION;
|
||
|
|
|
||
|
|
std::string labelMap;
|
||
|
|
infHandle->LoadModelFromFolder("", modelConfig, "", "", modelFolder, labelMap);
|
||
|
|
|
||
|
|
// Load image
|
||
|
|
cv::Mat image = cv::imread("C:\\Programs\\PythonProjects\\YoloInference\\bus.jpg");
|
||
|
|
std::vector<ANSCENTER::Object> results = infHandle->RunInference(image);
|
||
|
|
|
||
|
|
// Draw results
|
||
|
|
for (const auto& obj : results) {
|
||
|
|
cv::rectangle(image, cv::Point(obj.box.x, obj.box.y), cv::Point(obj.box.x + obj.box.width, obj.box.y + obj.box.height), cv::Scalar(0, 255, 0), 2);
|
||
|
|
cv::putText(image, std::to_string(obj.classId), cv::Point(obj.box.x, obj.box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1);
|
||
|
|
}
|
||
|
|
cv::imshow("Detection", image);
|
||
|
|
cv::waitKey(0);
|
||
|
|
// Release resources
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
// Release model
|
||
|
|
|
||
|
|
|
||
|
|
//Release
|
||
|
|
infHandle->Destroy();
|
||
|
|
delete infHandle;
|
||
|
|
}
|
||
|
|
int CUSTOMPYEngine(std::string modelFilePath, std::string videoFilePath) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.48;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.48;
|
||
|
|
int modelType = 18;//Custom python script model
|
||
|
|
int detectorType = 1; // Detection
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good())
|
||
|
|
{
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size();
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "Reading settings:" << std::endl;
|
||
|
|
int index = 0;
|
||
|
|
std::string paramString;
|
||
|
|
GetConfiguredParameters_CPP(&infHandle, paramString);
|
||
|
|
std::cout << "Configured Parameters: " << paramString << std::endl;
|
||
|
|
int setResult = SetODParameters(&infHandle, paramString.c_str());
|
||
|
|
if (setResult == 0) {
|
||
|
|
std::cout << "SetODParameters failed" << std::endl;
|
||
|
|
}
|
||
|
|
else {
|
||
|
|
std::cout << "SetODParameters success" << std::endl;
|
||
|
|
}
|
||
|
|
std::cout << "Reading video..." << std::endl;
|
||
|
|
while (true)
|
||
|
|
{
|
||
|
|
index++;
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) // if not success, break loop
|
||
|
|
{
|
||
|
|
std::cout << "\n Cannot read the video file. please check your video.\n";
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
if (index >= 10) break;
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
unsigned char* jpeg_string = ANSCENTER::ANSUtilityHelper::CVMatToBytes(frame, bufferLength);
|
||
|
|
int height = frame.rows;
|
||
|
|
int width = frame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceBinary(&infHandle, jpeg_string, width, height);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
delete jpeg_string;
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results"))
|
||
|
|
{
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
cv::rectangle(frame, cv::Rect(x, y, width, height), 123, 2);
|
||
|
|
cv::putText(frame, cv::format("%s:%d:%.2f", classes[class_id], class_id, prob), cv::Point(x, y - 5),
|
||
|
|
0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) // Wait for 'esc' key press to exit
|
||
|
|
{
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int CUSTOMPYEngine1(std::string modelFilePath, std::string videoFilePath) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.48;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.48;
|
||
|
|
int modelType = 18;//Custom python script model
|
||
|
|
int detectorType = 1; // Detection
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good())
|
||
|
|
{
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size();
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "Reading settings:" << std::endl;
|
||
|
|
int index = 0;
|
||
|
|
std::string paramString;
|
||
|
|
GetConfiguredParameters_CPP(&infHandle, paramString);
|
||
|
|
std::cout << "Configured Parameters: " << paramString << std::endl;
|
||
|
|
int setResult = SetODParameters(&infHandle, paramString.c_str());
|
||
|
|
if (setResult == 0) {
|
||
|
|
std::cout << "SetODParameters failed" << std::endl;
|
||
|
|
}
|
||
|
|
else {
|
||
|
|
std::cout << "SetODParameters success" << std::endl;
|
||
|
|
}
|
||
|
|
std::cout << "Reading video..." << std::endl;
|
||
|
|
while (true)
|
||
|
|
{
|
||
|
|
index++;
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) // if not success, break loop
|
||
|
|
{
|
||
|
|
std::cout << "\n Cannot read the video file. please check your video.\n";
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
if (index >= 10) break;
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
unsigned char* jpeg_string = ANSCENTER::ANSUtilityHelper::CVMatToBytes(frame, bufferLength);
|
||
|
|
int height = frame.rows;
|
||
|
|
int width = frame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceBinary(&infHandle, jpeg_string, width, height);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
delete jpeg_string;
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results"))
|
||
|
|
{
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
cv::rectangle(frame, cv::Rect(x, y, width, height), 123, 2);
|
||
|
|
cv::putText(frame, cv::format("%s:%d:%.2f", classes[class_id], class_id, prob), cv::Point(x, y - 5),
|
||
|
|
0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) // Wait for 'esc' key press to exit
|
||
|
|
{
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int CustomPyTest() {
    // Drive the custom-Python engine (yolov10 model) over a sample video.
    const std::string modelZip = "C:\\Programs\\PythonProjects\\CustomPyModel.zip";
    const std::string sampleVideo = "C:\\Programs\\DemoAssets\\Videos\\video_20.mp4";
    CUSTOMPYEngine(modelZip, sampleVideo);
    return 0;
}
|
||
|
|
int CustomPyTest1() {
    // Second entry point for the multithread test — same model and video,
    // routed through CUSTOMPYEngine1.
    const std::string modelZip = "C:\\Programs\\PythonProjects\\CustomPyModel.zip";
    const std::string sampleVideo = "C:\\Programs\\DemoAssets\\Videos\\video_20.mp4";
    CUSTOMPYEngine1(modelZip, sampleVideo);
    return 0;
}
|
||
|
|
int multithreadTest() {
|
||
|
|
/*CustomPyTest();*/
|
||
|
|
std::thread t1(CustomPyTest);
|
||
|
|
std::thread t2(CustomPyTest1);
|
||
|
|
//std::thread t3(CustomPyTest);
|
||
|
|
//std::thread t4(CustomPyTest);
|
||
|
|
//std::thread t5(CustomPyTest);
|
||
|
|
//std::thread t6(CustomPyTest);
|
||
|
|
|
||
|
|
t1.join();
|
||
|
|
t2.join();
|
||
|
|
//t3.join();
|
||
|
|
//t4.join();
|
||
|
|
//t5.join();
|
||
|
|
//t6.join();
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int CustomModelEngine(std::string modelFilePath, std::string videoFilePath) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.48;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.48;
|
||
|
|
int modelType = 16; // OPENVINO
|
||
|
|
int detectorType = 1; // Detection
|
||
|
|
// Optimise model
|
||
|
|
std::cout << "Optimizing model, please wait...." << std::endl;
|
||
|
|
std::string optmizedModelFolder = OptimizeModelStr(modelFilePath.c_str(), modelZipFilePassword.c_str(), modelType, 1,1);
|
||
|
|
std::cout << "Optimized model folder: " << optmizedModelFolder << std::endl;
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good()) {
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size() << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "end read video" << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) {
|
||
|
|
std::cout << "\n Cannot read the video file. Restarting...\n";
|
||
|
|
capture.set(cv::CAP_PROP_POS_FRAMES, 0); // Reset to the beginning of the video
|
||
|
|
continue;
|
||
|
|
}
|
||
|
|
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
cv::Mat croppedFrame = frame(cv::Rect(0, 0, 1920, 1080));
|
||
|
|
auto start1 = std::chrono::system_clock::now();
|
||
|
|
|
||
|
|
std::vector<uchar> imageData;
|
||
|
|
bool success = cv::imencode(".jpg", croppedFrame, imageData);
|
||
|
|
if (!success) {
|
||
|
|
std::cout << "Failed to encode the image" << std::endl;
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
std::string jpegImage(imageData.begin(), imageData.end());
|
||
|
|
auto end1 = std::chrono::system_clock::now();
|
||
|
|
auto elapsed1 = std::chrono::duration_cast<std::chrono::milliseconds>(end1 - start1);
|
||
|
|
//printf("Conversion Time = %lld ms\n", static_cast<long long int>(elapsed1.count()));
|
||
|
|
|
||
|
|
int height = croppedFrame.rows;
|
||
|
|
int width = croppedFrame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceFromCV(&infHandle, croppedFrame);//RunInferenceFromJpegString(&infHandle, jpegImage.c_str(), imageData.size());// , 100, 100, 0.2);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results")) {
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
cv::rectangle(croppedFrame, cv::Rect(x, y, width, height), 123, 2);
|
||
|
|
cv::putText(croppedFrame, cv::format("%s:%d:%.2f", classes[class_id], class_id, prob), cv::Point(x, y - 5),
|
||
|
|
0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", croppedFrame);
|
||
|
|
if (cv::waitKey(30) == 27) { // Wait for 'esc' key press to exit
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
int TrafficLightEngine(std::string modelFilePath, std::string videoFilePath) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.48;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.48;
|
||
|
|
int modelType = 16; // CUSTOM
|
||
|
|
int detectorType = 1; // Detection
|
||
|
|
// Optimise model
|
||
|
|
std::cout << "Optimizing model, please wait...." << std::endl;
|
||
|
|
std::string optmizedModelFolder = OptimizeModelStr(modelFilePath.c_str(), modelZipFilePassword.c_str(), modelType,1, 1);
|
||
|
|
std::cout << "Optimized model folder: " << optmizedModelFolder << std::endl;
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType,1);
|
||
|
|
if (labelMap.empty()) {
|
||
|
|
std::cout << "Failed to create ANSODHandle. Please check the model file path and parameters." << std::endl;
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good()) {
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size() << std::endl;
|
||
|
|
|
||
|
|
// Load model settings file and set parameters
|
||
|
|
std::string paramString;
|
||
|
|
std::string modelSettingsFile = "C:\\Programs\\DemoAssets\\TrafficLights\\TrafficLightSettings.json";
|
||
|
|
if (!std::filesystem::exists(modelSettingsFile)) {
|
||
|
|
std::cout << "Model settings file does not exist: " << modelSettingsFile << std::endl;
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
std::ifstream settingsFile(modelSettingsFile);
|
||
|
|
if (!settingsFile.is_open()) {
|
||
|
|
std::cout << "Failed to open model settings file: " << modelSettingsFile << std::endl;
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
paramString.assign((std::istreambuf_iterator<char>(settingsFile)), std::istreambuf_iterator<char>());
|
||
|
|
settingsFile.close();
|
||
|
|
|
||
|
|
int setparamValue= SetODParameters(&infHandle, paramString.c_str());
|
||
|
|
if (setparamValue == 0) {
|
||
|
|
std::cout << "SetODParameters failed" << std::endl;
|
||
|
|
}
|
||
|
|
else {
|
||
|
|
std::cout << "SetODParameters success" << std::endl;
|
||
|
|
}
|
||
|
|
while (true) {
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "end read video" << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) {
|
||
|
|
std::cout << "\n Cannot read the video file. Restarting...\n";
|
||
|
|
capture.set(cv::CAP_PROP_POS_FRAMES, 0); // Reset to the beginning of the video
|
||
|
|
continue;
|
||
|
|
}
|
||
|
|
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
cv::Mat croppedFrame = frame.clone();// (cv::Rect(0, 0, 1920, 1080));
|
||
|
|
auto start1 = std::chrono::system_clock::now();
|
||
|
|
|
||
|
|
std::vector<uchar> imageData;
|
||
|
|
bool success = cv::imencode(".jpg", croppedFrame, imageData);
|
||
|
|
if (!success) {
|
||
|
|
std::cout << "Failed to encode the image" << std::endl;
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
std::string jpegImage(imageData.begin(), imageData.end());
|
||
|
|
auto end1 = std::chrono::system_clock::now();
|
||
|
|
auto elapsed1 = std::chrono::duration_cast<std::chrono::milliseconds>(end1 - start1);
|
||
|
|
//printf("Conversion Time = %lld ms\n", static_cast<long long int>(elapsed1.count()));
|
||
|
|
|
||
|
|
int height = croppedFrame.rows;
|
||
|
|
int width = croppedFrame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceFromCV(&infHandle, croppedFrame);//RunInferenceFromJpegString(&infHandle, jpegImage.c_str(), imageData.size());// , 100, 100, 0.2);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results")) {
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
cv::rectangle(croppedFrame, cv::Rect(x, y, width, height), 123, 2);
|
||
|
|
cv::putText(croppedFrame, cv::format("%s:%d:%.2f", classes[class_id], class_id, prob), cv::Point(x, y - 5),
|
||
|
|
0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", croppedFrame);
|
||
|
|
if (cv::waitKey(30) == 27) { // Wait for 'esc' key press to exit
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
int HelMetDetection() {
    // Helmet-detection demo (yolov8); runs the GPU model variant.
    // The CPU model path is kept for quick switching during testing.
    std::string cpumodelFilePath = "C:\\Projects\\ANSVIS\\Models\\ANS_Helmet(CPU)_v1.0.zip";
    std::string gpumodelFilePath = "C:\\Projects\\ANSVIS\\Models\\ANS_Helmet(GPU)_v1.0.zip";
    std::string videoFile = "C:\\Programs\\DemoAssets\\Videos\\Helmet\\HM2.mp4";
    CustomModelEngine(gpumodelFilePath, videoFile);
    return 0;
}
|
||
|
|
int TrafficLight() {
    // Traffic-light + ALPR demo (yolov8) over a sample clip.
    std::string gpumodelFilePath = "C:\\Programs\\DemoAssets\\TrafficLights\\ANS_TrafficLightALPR_v1.0.zip";
    std::string videoFile = "C:\\Programs\\DemoAssets\\TrafficLights\\trafficlight1.mp4";
    TrafficLightEngine(gpumodelFilePath, videoFile);
    return 0;
}
|
||
|
|
int SAMS_UnitTest() {
    // SAM segmentation smoke test over a sample video.
    std::string samFile = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_SAM_v1.0.zip";
    std::string videoFile = "E:\\Programs\\DemoAssets\\Videos\\video_20.mp4";
    ANSSAMTest(samFile, videoFile);
    return 0;
}
|
||
|
|
int ANSMotionTest(std::string modelFilePath, std::string videoPath) {
|
||
|
|
ANSCENTER::ANSMOTIONDETECTOR infHandle;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string labelmap = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
ANSCENTER::ModelConfig modelConfig;
|
||
|
|
modelConfig.modelConfThreshold = 0.5f;
|
||
|
|
modelConfig.detectionScoreThreshold = 0.8f;
|
||
|
|
infHandle.Initialize(licenseKey, modelConfig, modelFilePath, modelZipFilePassword, labelmap);
|
||
|
|
|
||
|
|
cv::VideoCapture capture(videoPath);
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
while (true)
|
||
|
|
{
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) // if not success, break loop
|
||
|
|
{
|
||
|
|
std::cout << "\n Cannot read the video file. please check your video.\n";
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
std::vector<ANSCENTER::Object> masks = infHandle.RunInference(frame);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
for (int i = 0; i < masks.size(); i++) {
|
||
|
|
cv::rectangle(frame, masks.at(i).box, 123, 2);
|
||
|
|
|
||
|
|
//const cv::Point* pts = (const cv::Point*)cv::Mat(masks.at(i).polygon).data;
|
||
|
|
//int npts = cv::Mat(masks.at(i).polygon).rows;
|
||
|
|
//polylines(frame, &pts, &npts, 1, true, cv::Scalar(0, 255, 0), 1); // Green color, line thickness 3
|
||
|
|
|
||
|
|
}
|
||
|
|
// Resize to fit 1920x1080 window
|
||
|
|
|
||
|
|
cv::resize(frame, frame, cv::Size(1920, 1080));
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) // Wait for 'esc' key press to exit
|
||
|
|
{
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
infHandle.Destroy();
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int MotionDetectorEngine(std::string modelFilePath, std::string videoFilePath) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.5;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.5;
|
||
|
|
int modelType = 19; // OPENVINO
|
||
|
|
int detectorType = 1; // Detection
|
||
|
|
// Optimise model
|
||
|
|
std::cout << "Optimizing model, please wait...." << std::endl;
|
||
|
|
std::string optmizedModelFolder = OptimizeModelStr(modelFilePath.c_str(), modelZipFilePassword.c_str(), modelType,1, 1);
|
||
|
|
std::cout << "Optimized model folder: " << optmizedModelFolder << std::endl;
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good()) {
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size() << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "end read video" << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) {
|
||
|
|
std::cout << "\n Cannot read the video file. Restarting...\n";
|
||
|
|
capture.set(cv::CAP_PROP_POS_FRAMES, 0); // Reset to the beginning of the video
|
||
|
|
continue;
|
||
|
|
}
|
||
|
|
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
//cv::Mat croppedFrame = frame(cv::Rect(0, 0, 1920, 1080));
|
||
|
|
auto start1 = std::chrono::system_clock::now();
|
||
|
|
|
||
|
|
std::vector<uchar> imageData;
|
||
|
|
bool success = cv::imencode(".jpg", frame, imageData);
|
||
|
|
if (!success) {
|
||
|
|
std::cout << "Failed to encode the image" << std::endl;
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
std::string jpegImage(imageData.begin(), imageData.end());
|
||
|
|
auto end1 = std::chrono::system_clock::now();
|
||
|
|
auto elapsed1 = std::chrono::duration_cast<std::chrono::milliseconds>(end1 - start1);
|
||
|
|
printf("Conversion Time = %lld ms\n", static_cast<long long int>(elapsed1.count()));
|
||
|
|
|
||
|
|
int height = frame.rows;
|
||
|
|
int width = frame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceFromJpegString(&infHandle, jpegImage.c_str(), imageData.size(),"ID");// , 100, 100, 0.2);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results")) {
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
cv::rectangle(frame, cv::Rect(x, y, width, height), 123, 2);
|
||
|
|
cv::putText(frame, cv::format("%s:%d:%.2f", classes[class_id], class_id, prob), cv::Point(x, y - 5),
|
||
|
|
0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) { // Wait for 'esc' key press to exit
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
int ImageDetectionEngine(std::string modelFilePath, std::string imageFilePath, int engineType = 20, int modelDetectionType = 1) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.5;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.5;
|
||
|
|
int modelType = engineType; // ONNX CL
|
||
|
|
int detectorType = modelDetectionType; // Detection
|
||
|
|
// Optimise model
|
||
|
|
std::cout << "Optimizing model, please wait...." << std::endl;
|
||
|
|
std::string optmizedModelFolder = OptimizeModelStr(modelFilePath.c_str(), modelZipFilePassword.c_str(), modelType, modelDetectionType,1);
|
||
|
|
std::cout << "Optimized model folder: " << optmizedModelFolder << std::endl;
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good()) {
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size() << std::endl;
|
||
|
|
cv::Mat frame = cv::imread(imageFilePath);
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
//cv::Mat croppedFrame = frame(cv::Rect(0, 0, 1920, 1080));
|
||
|
|
auto start1 = std::chrono::system_clock::now();
|
||
|
|
std::vector<uchar> imageData;
|
||
|
|
bool success = cv::imencode(".jpg", frame, imageData);
|
||
|
|
if (!success) {
|
||
|
|
std::cout << "Failed to encode the image" << std::endl;
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
std::string jpegImage(imageData.begin(), imageData.end());
|
||
|
|
auto end1 = std::chrono::system_clock::now();
|
||
|
|
auto elapsed1 = std::chrono::duration_cast<std::chrono::milliseconds>(end1 - start1);
|
||
|
|
printf("Conversion Time = %lld ms\n", static_cast<long long int>(elapsed1.count()));
|
||
|
|
int height = frame.rows;
|
||
|
|
int width = frame.cols;
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceFromJpegString(&infHandle, jpegImage.c_str(), imageData.size(), "ID");// , 100, 100, 0.2);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
// Collect all detections first, then draw in two passes
|
||
|
|
// (single overlay for all polygons to avoid cumulative blending artifacts)
|
||
|
|
struct DetInfo {
|
||
|
|
int class_id; float prob;
|
||
|
|
cv::Rect box;
|
||
|
|
std::vector<cv::Point> polyPts; // empty if no polygon
|
||
|
|
};
|
||
|
|
std::vector<DetInfo> detections;
|
||
|
|
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results")) {
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
DetInfo d;
|
||
|
|
d.class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
d.prob = GetData<float>(result, "prob");
|
||
|
|
const auto w = GetData<float>(result, "width");
|
||
|
|
const auto h = GetData<float>(result, "height");
|
||
|
|
d.box = cv::Rect(static_cast<int>(x), static_cast<int>(y),
|
||
|
|
static_cast<int>(w), static_cast<int>(h));
|
||
|
|
|
||
|
|
const auto polygon_str = GetData<std::string>(result, "polygon");
|
||
|
|
if (!polygon_str.empty()) {
|
||
|
|
std::vector<float> coords;
|
||
|
|
std::stringstream pss(polygon_str);
|
||
|
|
std::string token;
|
||
|
|
while (std::getline(pss, token, ';')) {
|
||
|
|
if (!token.empty()) coords.push_back(std::stof(token));
|
||
|
|
}
|
||
|
|
// Auto-detect: if all values in [0,1] treat as normalized, else absolute
|
||
|
|
bool isNormalized = !coords.empty();
|
||
|
|
for (const auto& v : coords) {
|
||
|
|
if (v > 1.0f) { isNormalized = false; break; }
|
||
|
|
}
|
||
|
|
for (size_t p = 0; p + 1 < coords.size(); p += 2) {
|
||
|
|
int px = isNormalized ? static_cast<int>(coords[p] * frame.cols)
|
||
|
|
: static_cast<int>(coords[p]);
|
||
|
|
int py = isNormalized ? static_cast<int>(coords[p + 1] * frame.rows)
|
||
|
|
: static_cast<int>(coords[p + 1]);
|
||
|
|
d.polyPts.emplace_back(px, py);
|
||
|
|
}
|
||
|
|
if (d.polyPts.size() < 3) d.polyPts.clear();
|
||
|
|
}
|
||
|
|
detections.push_back(std::move(d));
|
||
|
|
}
|
||
|
|
|
||
|
|
// Pass 1: Draw all filled polygons on a single overlay
|
||
|
|
cv::Mat overlay = frame.clone();
|
||
|
|
bool anyPoly = false;
|
||
|
|
for (const auto& d : detections) {
|
||
|
|
if (!d.polyPts.empty()) {
|
||
|
|
anyPoly = true;
|
||
|
|
std::vector<std::vector<cv::Point>> polys = { d.polyPts };
|
||
|
|
cv::Scalar polyColor((d.class_id * 67 + 50) % 256,
|
||
|
|
(d.class_id * 123 + 100) % 256,
|
||
|
|
(d.class_id * 37 + 150) % 256);
|
||
|
|
cv::fillPoly(overlay, polys, polyColor);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
if (anyPoly) {
|
||
|
|
cv::addWeighted(overlay, 0.4, frame, 0.6, 0, frame);
|
||
|
|
}
|
||
|
|
|
||
|
|
// Pass 2: Draw outlines, boxes, and labels
|
||
|
|
auto drawDashedRect = [&](const cv::Rect& box, cv::Scalar color) {
|
||
|
|
int dashLen = 8, gapLen = 6;
|
||
|
|
auto drawDashedLine = [&](cv::Point p1, cv::Point p2) {
|
||
|
|
double dx = p2.x - p1.x, dy = p2.y - p1.y;
|
||
|
|
double len = std::sqrt(dx * dx + dy * dy);
|
||
|
|
if (len < 1) return;
|
||
|
|
dx /= len; dy /= len;
|
||
|
|
double d = 0;
|
||
|
|
bool draw = true;
|
||
|
|
while (d < len) {
|
||
|
|
double seg = draw ? dashLen : gapLen;
|
||
|
|
double dEnd = std::min(d + seg, len);
|
||
|
|
if (draw) {
|
||
|
|
cv::line(frame,
|
||
|
|
cv::Point(static_cast<int>(p1.x + d * dx), static_cast<int>(p1.y + d * dy)),
|
||
|
|
cv::Point(static_cast<int>(p1.x + dEnd * dx), static_cast<int>(p1.y + dEnd * dy)),
|
||
|
|
color, 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
d = dEnd;
|
||
|
|
draw = !draw;
|
||
|
|
}
|
||
|
|
};
|
||
|
|
cv::Point tl(box.x, box.y), tr(box.x + box.width, box.y);
|
||
|
|
cv::Point br(box.x + box.width, box.y + box.height), bl(box.x, box.y + box.height);
|
||
|
|
drawDashedLine(tl, tr); drawDashedLine(tr, br);
|
||
|
|
drawDashedLine(br, bl); drawDashedLine(bl, tl);
|
||
|
|
};
|
||
|
|
|
||
|
|
for (const auto& d : detections) {
|
||
|
|
if (!d.polyPts.empty()) {
|
||
|
|
// Polygon outline + dashed box
|
||
|
|
std::vector<std::vector<cv::Point>> polys = { d.polyPts };
|
||
|
|
cv::Scalar polyColor((d.class_id * 67 + 50) % 256,
|
||
|
|
(d.class_id * 123 + 100) % 256,
|
||
|
|
(d.class_id * 37 + 150) % 256);
|
||
|
|
cv::polylines(frame, polys, true, polyColor, 2, cv::LINE_AA);
|
||
|
|
drawDashedRect(d.box, cv::Scalar(0, 255, 0));
|
||
|
|
} else {
|
||
|
|
// Solid box
|
||
|
|
cv::rectangle(frame, d.box, cv::Scalar(0, 255, 0), 2);
|
||
|
|
}
|
||
|
|
std::string label = (d.class_id >= 0 && d.class_id < static_cast<int>(classes.size()))
|
||
|
|
? classes[d.class_id] : "?";
|
||
|
|
cv::putText(frame, cv::format("%s:%d:%.2f", label.c_str(), d.class_id, d.prob),
|
||
|
|
cv::Point(d.box.x, d.box.y - 5), 0, 0.6, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
//resize to fit 1920x1080 window
|
||
|
|
cv::resize(frame, frame, cv::Size(1920, 1080));
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
|
||
|
|
// Wait indefinitely for key press or window close
|
||
|
|
std::cout << "Press ESC to exit or close the window..." << std::endl;
|
||
|
|
while (true) {
|
||
|
|
int key = cv::waitKey(100); // Check every 100ms
|
||
|
|
if (key == 27) { // ESC key pressed
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
// Check if window was closed
|
||
|
|
if (cv::getWindowProperty("ANS Object Tracking", cv::WND_PROP_VISIBLE) < 1) {
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int VideoDetectorEngine(std::string modelFilePath, std::string videoFilePath, int engineType = 20, int modelDetectionType=1) {
|
||
|
|
boost::property_tree::ptree root;
|
||
|
|
boost::property_tree::ptree detectionObjects;
|
||
|
|
boost::property_tree::ptree pt;
|
||
|
|
std::vector<std::string> classes;
|
||
|
|
std::filesystem::path currentPath = std::filesystem::current_path();
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
std::string labelMap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
float detectionScoreThreshold = 0.3;
|
||
|
|
float modelConfThreshold = 0.48;
|
||
|
|
float modelNMSThreshold = 0.5;
|
||
|
|
int modelType = engineType; // OPENVINO
|
||
|
|
int detectorType = modelDetectionType; // Detection
|
||
|
|
// Optimise model
|
||
|
|
std::cout << "Optimizing model, please wait...." << std::endl;
|
||
|
|
std::string optmizedModelFolder = OptimizeModelStr(modelFilePath.c_str(), modelZipFilePassword.c_str(), modelType, modelDetectionType,1);
|
||
|
|
std::cout << "Optimized model folder: " << optmizedModelFolder << std::endl;
|
||
|
|
labelMap = CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(), detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType,1);
|
||
|
|
std::stringstream ss(labelMap);
|
||
|
|
while (ss.good()) {
|
||
|
|
std::string substr;
|
||
|
|
getline(ss, substr, ',');
|
||
|
|
classes.push_back(substr);
|
||
|
|
}
|
||
|
|
std::cout << "classes:" << classes.size() << std::endl;
|
||
|
|
|
||
|
|
// setup tracker
|
||
|
|
int trackerResult = SetTracker(&infHandle, 0, 1); // 0 for BYTETRACK
|
||
|
|
int frameNum = 0;
|
||
|
|
// Track per-trackId detection history for flickering analysis
|
||
|
|
std::unordered_map<int, std::vector<std::pair<int, bool>>> trackPresenceLog; // trackId -> [(frame, wasReal)]
|
||
|
|
while (true) {
|
||
|
|
std::cout << "begin read video" << std::endl;
|
||
|
|
cv::VideoCapture capture(videoFilePath);
|
||
|
|
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
std::cout << "end read video" << std::endl;
|
||
|
|
|
||
|
|
while (true) {
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) {
|
||
|
|
std::cout << "\n Cannot read the video file. Restarting...\n";
|
||
|
|
capture.set(cv::CAP_PROP_POS_FRAMES, 0); // Reset to the beginning of the video
|
||
|
|
frameNum = 0;
|
||
|
|
continue;
|
||
|
|
}
|
||
|
|
|
||
|
|
unsigned int bufferLength = 0;
|
||
|
|
//cv::Mat croppedFrame = frame(cv::Rect(0, 0, 1920, 1080));
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
std::vector<uchar> imageData;
|
||
|
|
bool success = cv::imencode(".jpg", frame, imageData);
|
||
|
|
if (!success) {
|
||
|
|
std::cout << "Failed to encode the image" << std::endl;
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
std::string jpegImage(imageData.begin(), imageData.end());
|
||
|
|
/* auto end1 = std::chrono::system_clock::now();
|
||
|
|
auto elapsed1 = std::chrono::duration_cast<std::chrono::milliseconds>(end1 - start1);
|
||
|
|
printf("Jpeg conversion Time = %lld ms\n", static_cast<long long int>(elapsed1.count()));*/
|
||
|
|
|
||
|
|
int height = frame.rows;
|
||
|
|
int width = frame.cols;
|
||
|
|
//auto start = std::chrono::system_clock::now();
|
||
|
|
/* measured work */
|
||
|
|
std::string detectionResult = RunInferenceFromJpegString(&infHandle, jpegImage.c_str(), imageData.size(), "ID");// , 100, 100, 0.2);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
frameNum++;
|
||
|
|
printf("Inference Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
if (!detectionResult.empty()) {
|
||
|
|
// COCO pose skeleton connections
|
||
|
|
const std::vector<std::pair<int, int>> POSE_SKELETON = {
|
||
|
|
{0,1}, {0,2}, {1,3}, {2,4}, // Face
|
||
|
|
{3,5}, {4,6}, // Head-to-shoulder
|
||
|
|
{5,7}, {7,9}, {6,8}, {8,10}, // Arms
|
||
|
|
{5,6}, {5,11}, {6,12}, {11,12}, // Body
|
||
|
|
{11,13}, {13,15}, {12,14}, {14,16} // Legs
|
||
|
|
};
|
||
|
|
// Colors for different limb groups
|
||
|
|
const std::vector<cv::Scalar> LIMB_COLORS = {
|
||
|
|
{255,0,0}, {255,0,0}, {255,0,0}, {255,0,0}, // Face - blue
|
||
|
|
{0,255,255}, {0,255,255}, // Head-shoulder - yellow
|
||
|
|
{0,255,0}, {0,255,0}, {0,165,255}, {0,165,255}, // Arms - green/orange
|
||
|
|
{255,255,0}, {255,0,255}, {255,0,255}, {255,255,0},// Body - cyan/magenta
|
||
|
|
{0,255,0}, {0,255,0}, {0,165,255}, {0,165,255} // Legs - green/orange
|
||
|
|
};
|
||
|
|
|
||
|
|
pt.clear();
|
||
|
|
std::stringstream ss;
|
||
|
|
ss.clear();
|
||
|
|
ss << detectionResult;
|
||
|
|
boost::property_tree::read_json(ss, pt);
|
||
|
|
|
||
|
|
// ── Stabilization debug counters ──
|
||
|
|
int realCount = 0, stabilizedCount = 0;
|
||
|
|
|
||
|
|
BOOST_FOREACH(const boost::property_tree::ptree::value_type & child, pt.get_child("results")) {
|
||
|
|
const boost::property_tree::ptree& result = child.second;
|
||
|
|
const auto class_id = GetData<int>(result, "class_id");
|
||
|
|
const auto track_id = GetData<int>(result, "track_id");
|
||
|
|
const auto x = GetData<float>(result, "x");
|
||
|
|
const auto y = GetData<float>(result, "y");
|
||
|
|
const auto prob = GetData<float>(result, "prob");
|
||
|
|
const auto width = GetData<float>(result, "width");
|
||
|
|
const auto height = GetData<float>(result, "height");
|
||
|
|
const auto kps_str = GetData<std::string>(result, "kps");
|
||
|
|
const auto extra_info = GetData<std::string>(result, "extra_info");
|
||
|
|
|
||
|
|
const auto polygon_str = GetData<std::string>(result, "polygon");
|
||
|
|
bool hasPolygon = !polygon_str.empty();
|
||
|
|
bool hasKeypoints = !kps_str.empty();
|
||
|
|
|
||
|
|
// ── Detect stabilized (ghost) vs real detection ──
|
||
|
|
bool isStabilized = (extra_info.find("stabilized") != std::string::npos);
|
||
|
|
if (isStabilized) stabilizedCount++; else realCount++;
|
||
|
|
|
||
|
|
// Log per-track presence for flickering analysis
|
||
|
|
if (track_id > 0) {
|
||
|
|
trackPresenceLog[track_id].push_back({frameNum, !isStabilized});
|
||
|
|
}
|
||
|
|
|
||
|
|
// Draw bounding box: dashed if polygon or keypoints present, solid otherwise
|
||
|
|
cv::Rect bbox(static_cast<int>(x), static_cast<int>(y),
|
||
|
|
static_cast<int>(width), static_cast<int>(height));
|
||
|
|
|
||
|
|
// Color: GREEN = real detection, YELLOW = stabilized (ghost)
|
||
|
|
cv::Scalar boxColor = isStabilized ? cv::Scalar(0, 255, 255) : cv::Scalar(0, 255, 0);
|
||
|
|
int boxThickness = isStabilized ? 1 : 2;
|
||
|
|
|
||
|
|
// Dashed helper lambda
|
||
|
|
auto drawDashedLine = [&](cv::Point p1, cv::Point p2, cv::Scalar color, int dashLen = 8, int gapLen = 6) {
|
||
|
|
float dx = static_cast<float>(p2.x - p1.x);
|
||
|
|
float dy = static_cast<float>(p2.y - p1.y);
|
||
|
|
float dist = std::sqrt(dx * dx + dy * dy);
|
||
|
|
if (dist < 1.f) return;
|
||
|
|
float ux = dx / dist, uy = dy / dist;
|
||
|
|
float drawn = 0.f;
|
||
|
|
bool dash = true;
|
||
|
|
while (drawn < dist) {
|
||
|
|
float seg = dash ? static_cast<float>(dashLen) : static_cast<float>(gapLen);
|
||
|
|
float end = std::min(drawn + seg, dist);
|
||
|
|
if (dash) {
|
||
|
|
cv::Point a(static_cast<int>(p1.x + ux * drawn), static_cast<int>(p1.y + uy * drawn));
|
||
|
|
cv::Point b(static_cast<int>(p1.x + ux * end), static_cast<int>(p1.y + uy * end));
|
||
|
|
cv::line(frame, a, b, color, 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
drawn = end;
|
||
|
|
dash = !dash;
|
||
|
|
}
|
||
|
|
};
|
||
|
|
|
||
|
|
if (isStabilized || hasPolygon || hasKeypoints) {
|
||
|
|
// Dashed box for stabilized ghosts, polygons, and keypoints
|
||
|
|
cv::Point tl = bbox.tl(), br = bbox.br();
|
||
|
|
cv::Point tr(br.x, tl.y), bl(tl.x, br.y);
|
||
|
|
drawDashedLine(tl, tr, boxColor);
|
||
|
|
drawDashedLine(tr, br, boxColor);
|
||
|
|
drawDashedLine(br, bl, boxColor);
|
||
|
|
drawDashedLine(bl, tl, boxColor);
|
||
|
|
} else {
|
||
|
|
cv::rectangle(frame, bbox, boxColor, boxThickness);
|
||
|
|
}
|
||
|
|
|
||
|
|
// Label: class:classId:conf:trackId (append [S] if stabilized)
|
||
|
|
std::string label = cv::format("%s:%d:%.2f:%d", classes[class_id].c_str(), class_id, prob, track_id);
|
||
|
|
if (isStabilized) label += "[S]";
|
||
|
|
cv::Scalar textColor = isStabilized ? cv::Scalar(0, 200, 200) : cv::Scalar(0, 0, 255);
|
||
|
|
cv::putText(frame, label, cv::Point(static_cast<int>(x), static_cast<int>(y) - 5),
|
||
|
|
0, 0.6, textColor, 1, cv::LINE_AA);
|
||
|
|
|
||
|
|
// Draw polygon if available (format: x1;y1;x2;y2;... normalized coords)
|
||
|
|
if (!polygon_str.empty()) {
|
||
|
|
// Parse semicolon-separated normalized coordinates
|
||
|
|
std::vector<cv::Point> polyPts;
|
||
|
|
std::stringstream pss(polygon_str);
|
||
|
|
std::string token;
|
||
|
|
std::vector<float> coords;
|
||
|
|
while (std::getline(pss, token, ';')) {
|
||
|
|
if (!token.empty())
|
||
|
|
coords.push_back(std::stof(token));
|
||
|
|
}
|
||
|
|
for (size_t p = 0; p + 1 < coords.size(); p += 2) {
|
||
|
|
int px = static_cast<int>(coords[p] * frame.cols);
|
||
|
|
int py = static_cast<int>(coords[p + 1] * frame.rows);
|
||
|
|
polyPts.emplace_back(px, py);
|
||
|
|
}
|
||
|
|
if (polyPts.size() >= 3) {
|
||
|
|
// Draw filled polygon with transparency
|
||
|
|
cv::Mat overlay = frame.clone();
|
||
|
|
std::vector<std::vector<cv::Point>> polys = { polyPts };
|
||
|
|
// Use class_id to generate a unique color per class
|
||
|
|
cv::Scalar polyColor(
|
||
|
|
(class_id * 67 + 50) % 256,
|
||
|
|
(class_id * 123 + 100) % 256,
|
||
|
|
(class_id * 37 + 150) % 256);
|
||
|
|
cv::fillPoly(overlay, polys, polyColor);
|
||
|
|
cv::addWeighted(overlay, 0.4, frame, 0.6, 0, frame);
|
||
|
|
// Draw polygon outline
|
||
|
|
cv::polylines(frame, polys, true, polyColor, 2, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
// Draw keypoints if available (format: x1;y1;score1;x2;y2;score2;... stride=3)
|
||
|
|
if (!kps_str.empty()) {
|
||
|
|
std::vector<float> kps = ANSCENTER::ANSUtilityHelper::StringToKeypoints(kps_str);
|
||
|
|
int numKeypoints = (int)kps.size() / 3; // stride of 3: x, y, confidence
|
||
|
|
|
||
|
|
// Collect keypoint positions and confidence
|
||
|
|
std::vector<cv::Point2f> keypointPts(numKeypoints);
|
||
|
|
std::vector<float> keypointConf(numKeypoints, 0.f);
|
||
|
|
for (int k = 0; k < numKeypoints; k++) {
|
||
|
|
keypointPts[k] = cv::Point2f(kps[k * 3], kps[k * 3 + 1]);
|
||
|
|
keypointConf[k] = kps[k * 3 + 2];
|
||
|
|
}
|
||
|
|
|
||
|
|
const float kpConfThreshold = 0.3f;
|
||
|
|
|
||
|
|
// Draw skeleton lines
|
||
|
|
for (size_t s = 0; s < POSE_SKELETON.size(); s++) {
|
||
|
|
int idx1 = POSE_SKELETON[s].first;
|
||
|
|
int idx2 = POSE_SKELETON[s].second;
|
||
|
|
if (idx1 < numKeypoints && idx2 < numKeypoints &&
|
||
|
|
keypointConf[idx1] > kpConfThreshold && keypointConf[idx2] > kpConfThreshold) {
|
||
|
|
cv::Scalar color = (s < LIMB_COLORS.size()) ? LIMB_COLORS[s] : cv::Scalar(200, 200, 200);
|
||
|
|
cv::line(frame, keypointPts[idx1], keypointPts[idx2], color, 2, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
// Draw keypoint circles on top of lines
|
||
|
|
for (int k = 0; k < numKeypoints; k++) {
|
||
|
|
if (keypointConf[k] > kpConfThreshold) {
|
||
|
|
cv::circle(frame, keypointPts[k], 4, cv::Scalar(0, 0, 255), -1, cv::LINE_AA);
|
||
|
|
cv::circle(frame, keypointPts[k], 4, cv::Scalar(255, 255, 255), 1, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
// ── Per-frame stabilization summary ──
|
||
|
|
printf(" Frame %d: %d real + %d stabilized = %d total detections\n",
|
||
|
|
frameNum, realCount, stabilizedCount, realCount + stabilizedCount);
|
||
|
|
|
||
|
|
// Print flickering report every 100 frames
|
||
|
|
if (frameNum % 100 == 0 && frameNum > 0) {
|
||
|
|
printf("\n=== FLICKERING REPORT (frame %d) ===\n", frameNum);
|
||
|
|
for (const auto& kv : trackPresenceLog) {
|
||
|
|
int trackId = kv.first;
|
||
|
|
const auto& history = kv.second;
|
||
|
|
if (history.size() < 5) continue; // skip short-lived tracks
|
||
|
|
int realFrames = 0, ghostFrames = 0, transitions = 0;
|
||
|
|
bool prevReal = history[0].second;
|
||
|
|
for (size_t i = 0; i < history.size(); ++i) {
|
||
|
|
if (history[i].second) realFrames++; else ghostFrames++;
|
||
|
|
if (i > 0 && history[i].second != prevReal) transitions++;
|
||
|
|
prevReal = history[i].second;
|
||
|
|
}
|
||
|
|
float stabilityPct = (history.size() > 0) ?
|
||
|
|
100.f * realFrames / static_cast<float>(history.size()) : 0.f;
|
||
|
|
// Only report tracks that have flickering (transitions > 2)
|
||
|
|
if (transitions > 2) {
|
||
|
|
printf(" Track %3d: %d frames (%d real, %d ghost), %d transitions, %.1f%% real\n",
|
||
|
|
trackId, (int)history.size(), realFrames, ghostFrames, transitions, stabilityPct);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
printf("=================================\n\n");
|
||
|
|
}
|
||
|
|
}
|
||
|
|
|
||
|
|
// Draw frame number on screen
|
||
|
|
cv::putText(frame, cv::format("Frame: %d", frameNum), cv::Point(10, 30),
|
||
|
|
cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255, 255, 255), 2);
|
||
|
|
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) { // Wait for 'esc' key press to exit
|
||
|
|
// Print final flickering report
|
||
|
|
printf("\n=== FINAL FLICKERING REPORT (frame %d) ===\n", frameNum);
|
||
|
|
for (const auto& kv : trackPresenceLog) {
|
||
|
|
int trackId = kv.first;
|
||
|
|
const auto& history = kv.second;
|
||
|
|
if (history.size() < 3) continue;
|
||
|
|
int realFrames = 0, ghostFrames = 0, transitions = 0;
|
||
|
|
bool prevReal = history[0].second;
|
||
|
|
for (size_t i = 0; i < history.size(); ++i) {
|
||
|
|
if (history[i].second) realFrames++; else ghostFrames++;
|
||
|
|
if (i > 0 && history[i].second != prevReal) transitions++;
|
||
|
|
prevReal = history[i].second;
|
||
|
|
}
|
||
|
|
float stabilityPct = (history.size() > 0) ?
|
||
|
|
100.f * realFrames / static_cast<float>(history.size()) : 0.f;
|
||
|
|
printf(" Track %3d: %d frames (%d real, %d ghost), %d transitions, %.1f%% real\n",
|
||
|
|
trackId, (int)history.size(), realFrames, ghostFrames, transitions, stabilityPct);
|
||
|
|
}
|
||
|
|
printf("===========================================\n");
|
||
|
|
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
std::cout << "End of program.\n";
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
}
|
||
|
|
int Motion_UnitTest() {
    // Smoke test: run the motion-detection pipeline against a recorded demo clip.
    // Alternate model/clip paths used during development are intentionally omitted;
    // see source history if another asset is needed.
    const std::string motionModelArchive = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_SAM_v1.0.zip";
    const std::string demoClip = "E:\\Programs\\DemoAssets\\Videos\\ANSVIS_Issues\\FGFire2.mp4";

    ANSMotionTest(motionModelArchive, demoClip);
    return 0;
}
|
||
|
|
int MotionEngine_UnitTest() {
    // Smoke test: drive the motion-detector engine with an empty model path
    // (engine defaults apply) against a fire/smoke demo recording.
    const std::string emptyModelPath = "";
    const std::string demoClip = "E:\\Programs\\DemoAssets\\Videos\\FireNSmoke\\ANSFireFull.mp4";

    MotionDetectorEngine(emptyModelPath, demoClip);
    return 0;
}
|
||
|
|
int ONNXCLE_Test() {
    // Single-image smoke test for the vehicle-colour classifier (engine type 20, yolov8).
    const std::string classifierArchive = "E:\\Programs\\DemoAssets\\ANSAIModels\\ClassifierModels\\ANSVehicleColour.zip";
    const std::string sampleImage = "E:\\Programs\\DemoAssets\\ANSAIModels\\ClassifierModels\\data\\IMG-20251014-WA0008.jpg";

    ImageDetectionEngine(classifierArchive, sampleImage, 20);
    return 0;
}
|
||
|
|
int ONNXPOSE_Test() {
    // Single-image smoke test for the pose-estimation model (engine type 21).
    const std::string poseArchive = "E:\\Programs\\DemoAssets\\ANSAIModels\\PoseEstimationModels\\ANSPOSEN.zip";
    const std::string sampleImage = "E:\\Programs\\DemoAssets\\ANSAIModels\\PoseEstimationModels\\openpose.jpg";

    ImageDetectionEngine(poseArchive, sampleImage, 21);
    return 0;
}
|
||
|
|
int ONNXPOSE_VideoTest() {
    // Video smoke test for the pose-estimation model.
    // Engine type 21 = ONNX; 22 would select the TensorRT build of the same model.
    const std::string poseArchive = "E:\\Programs\\DemoAssets\\ANSAIModels\\PoseEstimationModels\\ANSPOSEN.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\train.mp4";

    VideoDetectorEngine(poseArchive, sampleVideo, 21);
    return 0;
}
|
||
|
|
int Helmet_VideoTest() {
    // Video smoke test for the helmet-detection model (engine type 4, detector type 0).
    const std::string helmetArchive = "E:\\Programs\\DemoAssets\\TestInference\\ANS_helmet_1712(GPU)_1.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\Helmet\\HM1.mp4";

    VideoDetectorEngine(helmetArchive, sampleVideo, 4, 0);
    return 0;
}
|
||
|
|
int BristleTest() {
    // Single-image smoke test for the bristle-loss model.
    // Engine type 13 selects the corresponding SDK model type — TODO confirm which
    // backend that id maps to (the old comment said "ONNX POSE" but the same note
    // appears next to unrelated ids elsewhere in this file).
    const int bristleEngineType = 13;
    const std::string bristleArchive = "C:\\Programs\\DemoAssets\\ODHUB\\Bristle_Loss.zip";
    const std::string sampleImage = "C:\\Programs\\DemoAssets\\ODHUB\\data\\20251028_120112.PNG";

    ImageDetectionEngine(bristleArchive, sampleImage, bristleEngineType);
    return 0;
}
|
||
|
|
|
||
|
|
int Yolov11Test() {
    // Video smoke test for the fire-and-smoke YOLOv11 model (engine type 3).
    const int fireEngineType = 3;
    const std::string fireArchive = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_FireNSmoke_v2.0.zip";
    const std::string sampleImage = "C:\\ProgramData\\ANSCENTER\\Shared\\20260107_011644_762.jpg";
    const std::string sampleVideo = "C:\\ProgramData\\ANSCENTER\\Shared\\ANSFireFull.mp4";

    // Single-image variant kept for quick manual switching:
    //ImageDetectionEngine(fireArchive, sampleImage, fireEngineType);
    VideoDetectorEngine(fireArchive, sampleVideo, fireEngineType);
    return 0;
}
|
||
|
|
int Yolov12Test() {
    // Video smoke test for the generic YOLOv12 model (engine type 17).
    const int yolo12EngineType = 17;
    const std::string yolo12Archive = "C:\\ProgramData\\ANSCENTER\\Shared\\GenericYolov12.zip";
    const std::string sampleImage = "C:\\ProgramData\\ANSCENTER\\Shared\\20260107_011644_762.jpg";
    const std::string sampleVideo = "C:\\ProgramData\\ANSCENTER\\Shared\\test.mp4";

    // Single-image variant kept for quick manual switching:
    //ImageDetectionEngine(yolo12Archive, sampleImage, yolo12EngineType);
    VideoDetectorEngine(yolo12Archive, sampleVideo, yolo12EngineType);
    return 0;
}
|
||
|
|
int TensorRTTest() {
    // Video smoke test for the GPU (TensorRT) vehicle-detection model (engine type 4).
    const int trtEngineType = 4;
    const std::string vehicleArchive = "C:\\Projects\\ANSVIS\\Models\\ANS_VehicleDetection(GPU)_v1.0.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";

    VideoDetectorEngine(vehicleArchive, sampleVideo, trtEngineType);
    return 0;
}
|
||
|
|
int OpenVINOTest() {
    // Video smoke test for the CPU (OpenVINO) vehicle-detection model (engine type 5).
    const int openvinoEngineType = 5;
    const std::string vehicleArchive = "C:\\Projects\\ANSVIS\\Models\\ANS_VehicleDetection(CPU)_v1.0.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";

    VideoDetectorEngine(vehicleArchive, sampleVideo, openvinoEngineType);
    return 0;
}
|
||
|
|
int OpenVINOYolo10Test() {
    // Video smoke test for the generic CPU (OpenVINO) YOLOv10 model (engine type 15).
    const int yolo10EngineType = 15;
    const std::string genericArchive = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_GenericOD(CPU)_v1.0.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\video_20.mp4";

    VideoDetectorEngine(genericArchive, sampleVideo, yolo10EngineType);
    return 0;
}
|
||
|
|
int FireNSmokeCustomDetection() {
    // Video smoke test for the custom fire/smoke detection model (engine type 16).
    const int customEngineType = 16;
    const std::string fireArchive = "C:\\Projects\\ANSVIS\\Models\\ANS_FireSmoke_v2.0.zip";
    const std::string sampleVideo = "E:\\Programs\\DemoAssets\\Videos\\FireNSmoke\\ANSFireFull.mp4"; // passed

    VideoDetectorEngine(fireArchive, sampleVideo, customEngineType);
    return 0;
}
|
||
|
|
int TensorRT10Test() {
    // Video smoke test for the generic GPU model on TensorRT v10 (engine type 14).
    const int trt10EngineType = 14;
    const std::string genericArchive = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_GenericOD(GPU)_v1.0.zip";
    const std::string sampleVideo = "C:\\ProgramData\\ANSCENTER\\Shared\\classroom.mp4";

    VideoDetectorEngine(genericArchive, sampleVideo, trt10EngineType);
    return 0;
}
|
||
|
|
|
||
|
|
|
||
|
|
int SAM3ONNX_UnitTest() {
|
||
|
|
std::string modelFilePath = "C:\\Projects\\ANSVIS\\Models\\ANS_SAM_v3.0.zip";
|
||
|
|
std::string videoFile = "E:\\Programs\\DemoAssets\\Videos\\video_20.mp4";
|
||
|
|
//ANSSAMONNXTest(samFile, videoFile);
|
||
|
|
|
||
|
|
// Use LoadModelFromFolder to avoid zip extraction issues with 3.3GB data file
|
||
|
|
std::string modelFolder = "C:\\Projects\\ANSVIS\\Models\\ANS_SAM_v3.0";
|
||
|
|
ANSCENTER::ANSONNXSAM3 infHandle;
|
||
|
|
ANSCENTER::ModelConfig modelConfig;
|
||
|
|
modelConfig.modelConfThreshold = 0.5f;
|
||
|
|
std::string labelmap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
infHandle.Initialize(licenseKey, modelConfig, modelFilePath, modelZipFilePassword, labelmap);
|
||
|
|
infHandle.SetPrompt("person");
|
||
|
|
|
||
|
|
cv::VideoCapture capture(videoFile);
|
||
|
|
if (!capture.isOpened()) {
|
||
|
|
printf("could not read this video file...\n");
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
while (true) {
|
||
|
|
cv::Mat frame;
|
||
|
|
if (!capture.read(frame)) {
|
||
|
|
std::cout << "\n Cannot read the video file.\n";
|
||
|
|
break;
|
||
|
|
}
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
std::vector<ANSCENTER::Object> masks = infHandle.RunInference(frame);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
for (int i = 0; i < masks.size(); i++) {
|
||
|
|
cv::rectangle(frame, masks.at(i).box, 123, 2);
|
||
|
|
}
|
||
|
|
cv::imshow("ANS Object Tracking", frame);
|
||
|
|
if (cv::waitKey(30) == 27) break;
|
||
|
|
}
|
||
|
|
capture.release();
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
infHandle.Destroy();
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
int SAM3ONNX_ImageTest() {
|
||
|
|
std::string modelFilePath = "C:\\Projects\\ANSVIS\\Models\\ANS_SAM_v3.0.zip";
|
||
|
|
std::string imageFile = "C:\\Projects\\Research\\sam3onnx\\sam3-onnx\\images\\dog.jpg";
|
||
|
|
|
||
|
|
ANSCENTER::ANSONNXSAM3 infHandle;
|
||
|
|
ANSCENTER::ModelConfig modelConfig;
|
||
|
|
modelConfig.modelConfThreshold = 0.5f;
|
||
|
|
std::string labelmap;
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
infHandle.Initialize(licenseKey, modelConfig, modelFilePath, modelZipFilePassword, labelmap);
|
||
|
|
infHandle.SetPrompt("dog");
|
||
|
|
|
||
|
|
cv::Mat image = cv::imread(imageFile);
|
||
|
|
if (image.empty()) {
|
||
|
|
std::cerr << "SAM3ONNX_ImageTest: could not read image file: " << imageFile << "\n";
|
||
|
|
return -1;
|
||
|
|
}
|
||
|
|
|
||
|
|
auto start = std::chrono::system_clock::now();
|
||
|
|
std::vector<ANSCENTER::Object> masks = infHandle.RunInference(image);
|
||
|
|
auto end = std::chrono::system_clock::now();
|
||
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||
|
|
printf("Time = %lld ms\n", static_cast<long long int>(elapsed.count()));
|
||
|
|
|
||
|
|
for (size_t i = 0; i < masks.size(); i++) {
|
||
|
|
const auto& obj = masks[i];
|
||
|
|
// Draw bounding box
|
||
|
|
cv::Scalar boxColor(0, 255, 0);
|
||
|
|
cv::rectangle(image, obj.box, boxColor, 2);
|
||
|
|
|
||
|
|
// Draw label
|
||
|
|
std::string label = obj.className + " " + std::to_string(obj.confidence).substr(0, 4);
|
||
|
|
int baseline = 0;
|
||
|
|
cv::Size textSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseline);
|
||
|
|
cv::rectangle(image,
|
||
|
|
cv::Point(obj.box.x, obj.box.y - textSize.height - 4),
|
||
|
|
cv::Point(obj.box.x + textSize.width, obj.box.y),
|
||
|
|
boxColor, cv::FILLED);
|
||
|
|
cv::putText(image, label, cv::Point(obj.box.x, obj.box.y - 2),
|
||
|
|
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0), 1, cv::LINE_AA);
|
||
|
|
|
||
|
|
// Draw polygon (normalized coordinates)
|
||
|
|
if (obj.polygon.size() >= 3) {
|
||
|
|
std::vector<cv::Point> polyPts;
|
||
|
|
polyPts.reserve(obj.polygon.size());
|
||
|
|
for (const auto& pt : obj.polygon) {
|
||
|
|
polyPts.emplace_back(
|
||
|
|
static_cast<int>(pt.x * image.cols),
|
||
|
|
static_cast<int>(pt.y * image.rows));
|
||
|
|
}
|
||
|
|
// Semi-transparent filled polygon
|
||
|
|
cv::Mat overlay = image.clone();
|
||
|
|
std::vector<std::vector<cv::Point>> polys = { polyPts };
|
||
|
|
cv::Scalar polyColor((i * 67 + 50) % 256, (i * 123 + 100) % 256, (i * 37 + 150) % 256);
|
||
|
|
cv::fillPoly(overlay, polys, polyColor);
|
||
|
|
cv::addWeighted(overlay, 0.4, image, 0.6, 0, image);
|
||
|
|
cv::polylines(image, polys, true, polyColor, 2, cv::LINE_AA);
|
||
|
|
}
|
||
|
|
// Draw mask overlay if available
|
||
|
|
else if (!obj.mask.empty()) {
|
||
|
|
cv::Mat colorMask(obj.mask.size(), CV_8UC3, cv::Scalar((i * 67 + 50) % 256, (i * 123 + 100) % 256, (i * 37 + 150) % 256));
|
||
|
|
cv::Mat roiImg = image(obj.box);
|
||
|
|
cv::Mat maskBool;
|
||
|
|
if (obj.mask.type() != CV_8UC1)
|
||
|
|
obj.mask.convertTo(maskBool, CV_8UC1, 255.0);
|
||
|
|
else
|
||
|
|
maskBool = obj.mask;
|
||
|
|
colorMask.copyTo(roiImg, maskBool);
|
||
|
|
cv::addWeighted(roiImg, 0.4, image(obj.box), 0.6, 0, image(obj.box));
|
||
|
|
}
|
||
|
|
|
||
|
|
printf(" [%zu] %s conf=%.3f box=(%d,%d,%d,%d) polygon=%zu pts\n",
|
||
|
|
i, obj.className.c_str(), obj.confidence,
|
||
|
|
obj.box.x, obj.box.y, obj.box.width, obj.box.height,
|
||
|
|
obj.polygon.size());
|
||
|
|
}
|
||
|
|
cv::imshow("SAM3 ONNX Image Test", image);
|
||
|
|
cv::waitKey(0);
|
||
|
|
cv::destroyAllWindows();
|
||
|
|
infHandle.Destroy();
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
|
||
|
|
// Object-detection smoke test: generic YOLOv12 package over the demo road video.
int YOLO26ODYolo12Test() {
    const std::string packagedModel = "C:\\ProgramData\\ANSCENTER\\Shared\\GenericYolov12.zip";
    const std::string demoVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";
    const int engineKind = 3; // ONNX YOLO (30) RT YOLO (31)
    VideoDetectorEngine(packagedModel, demoVideo, engineKind);
    return 0;
}
|
||
|
|
// Object-detection smoke test: vehicle-detection package over the demo road video.
int YOLO26ODYolo11Test() {
    const std::string packagedModel = "C:\\Projects\\ANSVIS\\Models\\ANS_VehicleDetection_v2.0.zip";
    const std::string demoVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";
    const int engineKind = 31; // ONNX YOLO (30) RT YOLO (31)
    VideoDetectorEngine(packagedModel, demoVideo, engineKind);
    return 0;
}
|
||
|
|
// Object-detection smoke test: generic GPU OD package over the demo road video.
int YOLO26ODYolo10Test() {
    const std::string packagedModel = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_GenericOD(GPU)_v1.0.zip";
    const std::string demoVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";
    const int engineKind = 31; // ONNX YOLO (30) RT YOLO (31)
    VideoDetectorEngine(packagedModel, demoVideo, engineKind);
    return 0;
}
|
||
|
|
// Classification test with ONNX CL engine. Note that this model is not optimised for ONNX CL, so performance will be poor and may cause timeouts in some cases. It's recommended to use a smaller model for testing ONNX CL classification, or to optimise the model for ONNX CL using the OptimizeModelStr function before running this test.
|
||
|
|
int YOLO26CLYolo11Test() {
|
||
|
|
int modelType = 30; // ONNX YOLO (30) RT YOLO (31)
|
||
|
|
std::string onnxclmodelFilePath = "E:\\Programs\\DemoAssets\\ANSAIModels\\ClassifierModels\\ANSVehicleColour.zip";
|
||
|
|
std::string imageFile = "E:\\Programs\\DemoAssets\\ANSAIModels\\ClassifierModels\\data\\IMG-20251014-WA0002.jpg";
|
||
|
|
ImageDetectionEngine(onnxclmodelFilePath, imageFile, modelType);//yolov8
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
// Segmentation smoke test over the demo road video.
int YOLO26SEGYolo11Test() {
    const int engineKind = 30; // ONNX YOLO (30) RT YOLO (31)
    const std::string segModelZip = "C:\\Projects\\Research\\YoloSeg\\ANS_GenericSEG_v1.0.zip";
    const std::string demoVideo = "E:\\Programs\\DemoAssets\\Videos\\road.mp4";
    VideoDetectorEngine(segModelZip, demoVideo, engineKind); // 22 is RT, 21 is onnx
    return 0;
}
|
||
|
|
// Pose-estimation smoke test over the demo train video.
int YOLO26POSEYolo11Test() {
    const int engineKind = 31; // ONNX YOLO (30) RT YOLO (31)
    const std::string poseModelZip = "E:\\Programs\\DemoAssets\\ANSAIModels\\PoseEstimationModels\\ANSPOSEN.zip";
    const std::string demoVideo = "E:\\Programs\\DemoAssets\\Videos\\train.mp4";

    VideoDetectorEngine(poseModelZip, demoVideo, engineKind); // 22 is RT, 21 is onnx
    return 0;
}
|
||
|
|
|
||
|
|
// Oriented-bounding-box (OBB) detection smoke test on a single image.
int YOLO26OBBYolo11Test() {
    std::string onnxclmodelFilePath = "C:\\Projects\\Research\\YoloOBB\\ANS_GenericOBB_v1.0.zip";
    std::string imageFile = "C:\\Projects\\Research\\YoloOBB\\boats.jpg";
    // Test with ONNX first
    // NOTE(review): the banner says "ONNX" but passes modelType=31, which the
    // other tests in this file document as the TensorRT engine (ONNX is 30).
    // Confirm which engine is actually intended here.
    std::cout << "\n=== Testing OBB with ONNX (modelType=31) ===" << std::endl;
    ImageDetectionEngine(onnxclmodelFilePath, imageFile, 31);
    //// Test with TRT
    //std::cout << "\n=== Testing OBB with TRT (modelType=31) ===" << std::endl;
    //ImageDetectionEngine(onnxclmodelFilePath, imageFile, 31);
    return 0;
}
|
||
|
|
|
||
|
|
// Helmet-detection smoke test over a helmet demo video.
int HelmetDetectionTest() {
    // Custom engine type used by this packaged model; adjust if the model is
    // set up differently in the engine.
    const int engineKind = 16;
    const std::string helmetModelZip = "C:\\Projects\\ANSVIS\\Models\\ANS_Helmet_v2.0.zip";
    const std::string helmetVideo = "E:\\Programs\\DemoAssets\\Videos\\Helmet\\HM1.mp4";

    VideoDetectorEngine(helmetModelZip, helmetVideo, engineKind); // 22 is RT, 21 is onnx
    return 0;
}
|
||
|
|
|
||
|
|
|
||
|
|
// ============================================================================
// ElasticGPUTest — Verify elastic pool, round-robin dispatch, multi-GPU,
// concurrent inference, and idle-slot cleanup.
//
// Test matrix:
//   1. Single-handle creation → probe slot exists (capacity == 1)
//   2. Sequential inference → works on probe slot
//   3. Concurrent inference from N threads → pool grows on-demand
//   4. After burst → verify pool grew (capacity > 1)
//   5. Wait for idle cleanup → verify slots released (capacity == 1)
//   6. Post-cleanup inference → still works (probe slot alive)
//   7. Multi-GPU detection → reports all GPUs found
//   8. Clean release → no crash, no hang
//
// Returns the number of failed checks (0 == all passed, -1 on fatal setup
// failure). Several checks are "soft": they rely on the operator reading the
// pool's stdout log lines, since pool internals are not exposed via ANSODBase.
// ============================================================================
int ElasticGPUTest() {
    std::cout << "\n"
        << "============================================================\n"
        << " ElasticGPUTest — Elastic Pool + Round-Robin Verification\n"
        << "============================================================\n" << std::endl;

    // --- Config ---
    std::string modelFilePath = "C:\\ProgramData\\ANSCENTER\\Shared\\ANS_GenericOD(GPU)_v1.0.zip";
    std::string videoFile = "C:\\ProgramData\\ANSCENTER\\Shared\\classroom.mp4";
    std::string licenseKey = "";
    std::string modelZipFilePassword = "";
    float detectionScoreThreshold = 0.3f;
    float modelConfThreshold = 0.48f;
    float modelNMSThreshold = 0.5f;
    int modelType = 14; // TensorRT v10
    int detectorType = 1; // Detection

    const int NUM_CONCURRENT_THREADS = 4;
    const int FRAMES_PER_THREAD = 20;
    const int IDLE_WAIT_SECONDS = 75; // > 60s cleanup threshold

    int testsPassed = 0;
    int testsFailed = 0;

    // Small reporting helpers; capture the counters by reference.
    auto PASS = [&](const std::string& name) {
        testsPassed++;
        std::cout << " [PASS] " << name << std::endl;
    };
    auto FAIL = [&](const std::string& name, const std::string& reason) {
        testsFailed++;
        std::cout << " [FAIL] " << name << " — " << reason << std::endl;
    };

    // ================================================================
    // TEST 1: Create handle (should create probe slot, elastic mode)
    // ================================================================
    std::cout << "\n--- Test 1: Handle creation (elastic mode) ---\n" << std::endl;

    std::cout << "Optimizing model, please wait..." << std::endl;
    std::string optimizedFolder = OptimizeModelStr(
        modelFilePath.c_str(), modelZipFilePassword.c_str(),
        modelType, detectorType, 1);
    std::cout << "Optimized model folder: " << optimizedFolder << std::endl;

    ANSCENTER::ANSODBase* infHandle = nullptr;
    std::string labelMap = CreateANSODHandle(
        &infHandle, licenseKey.c_str(), modelFilePath.c_str(),
        modelZipFilePassword.c_str(), detectionScoreThreshold,
        modelConfThreshold, modelNMSThreshold, 1, modelType, detectorType, 1);

    if (infHandle == nullptr) {
        FAIL("Handle creation", "infHandle is nullptr");
        return -1;
    }
    PASS("Handle creation");

    // Parse classes (labelMap is a comma-separated class list)
    std::vector<std::string> classes;
    {
        std::stringstream ss(labelMap);
        std::string substr;
        while (std::getline(ss, substr, ',')) {
            classes.push_back(substr);
        }
    }
    std::cout << " Classes loaded: " << classes.size() << std::endl;

    // ================================================================
    // TEST 2: Load test frames from video
    // ================================================================
    std::cout << "\n--- Test 2: Load test frames ---\n" << std::endl;

    cv::VideoCapture capture(videoFile);
    if (!capture.isOpened()) {
        FAIL("Video loading", "Cannot open " + videoFile);
        ReleaseANSODHandle(&infHandle);
        return -1;
    }

    // Pre-load frames and encode as JPEG for thread safety:
    // each worker later decodes its own copy, so no cv::Mat is shared.
    const int TOTAL_FRAMES_NEEDED = NUM_CONCURRENT_THREADS * FRAMES_PER_THREAD;
    std::vector<std::vector<uchar>> jpegFrames;
    jpegFrames.reserve(TOTAL_FRAMES_NEEDED);

    for (int i = 0; i < TOTAL_FRAMES_NEEDED; i++) {
        cv::Mat frame;
        if (!capture.read(frame)) {
            // Short video: rewind once and keep reading.
            capture.set(cv::CAP_PROP_POS_FRAMES, 0);
            if (!capture.read(frame)) break;
        }
        std::vector<uchar> buf;
        cv::imencode(".jpg", frame, buf);
        jpegFrames.push_back(std::move(buf));
    }
    capture.release();

    if (jpegFrames.empty()) {
        FAIL("Frame loading", "No frames loaded");
        ReleaseANSODHandle(&infHandle);
        return -1;
    }
    PASS("Frame loading (" + std::to_string(jpegFrames.size()) + " frames)");

    // ================================================================
    // TEST 3: Sequential inference (single-threaded, probe slot only)
    // ================================================================
    std::cout << "\n--- Test 3: Sequential inference (probe slot) ---\n" << std::endl;

    int seqSuccessCount = 0;
    auto seqStart = std::chrono::steady_clock::now();
    for (int i = 0; i < 5; i++) {
        const auto& jpeg = jpegFrames[i % jpegFrames.size()];
        std::string result = RunInferenceFromJpegString(
            &infHandle,
            reinterpret_cast<const char*>(jpeg.data()),
            static_cast<unsigned long>(jpeg.size()),
            "seq_test");
        // Success heuristic: a non-empty JSON payload containing "results".
        if (!result.empty() && result.find("results") != std::string::npos) {
            seqSuccessCount++;
        }
    }
    auto seqEnd = std::chrono::steady_clock::now();
    double seqMs = std::chrono::duration<double, std::milli>(seqEnd - seqStart).count();

    if (seqSuccessCount == 5) {
        PASS("Sequential inference (5/5, " + std::to_string(static_cast<int>(seqMs)) + " ms total)");
    } else {
        FAIL("Sequential inference", std::to_string(seqSuccessCount) + "/5 succeeded");
    }

    // ================================================================
    // TEST 4: Concurrent inference (N threads → pool should grow)
    // ================================================================
    std::cout << "\n--- Test 4: Concurrent inference (" << NUM_CONCURRENT_THREADS
        << " threads x " << FRAMES_PER_THREAD << " frames) ---\n" << std::endl;

    std::atomic<int> totalSuccess{0};
    std::atomic<int> totalFailed{0};
    std::mutex printMutex;

    // Worker: each thread plays its own slice of the pre-encoded frames,
    // tagged with a distinct camera id so the pool sees independent streams.
    auto workerFn = [&](int threadId) {
        int localSuccess = 0;
        int localFailed = 0;

        for (int i = 0; i < FRAMES_PER_THREAD; i++) {
            int frameIdx = (threadId * FRAMES_PER_THREAD + i) % static_cast<int>(jpegFrames.size());
            const auto& jpeg = jpegFrames[frameIdx];

            std::string cameraId = "cam_" + std::to_string(threadId);
            std::string result = RunInferenceFromJpegString(
                &infHandle,
                reinterpret_cast<const char*>(jpeg.data()),
                static_cast<unsigned long>(jpeg.size()),
                cameraId.c_str());

            if (!result.empty() && result.find("results") != std::string::npos) {
                localSuccess++;
            } else {
                localFailed++;
            }
        }

        totalSuccess += localSuccess;
        totalFailed += localFailed;

        std::lock_guard<std::mutex> lk(printMutex);
        std::cout << " Thread " << threadId << ": "
            << localSuccess << "/" << FRAMES_PER_THREAD << " succeeded" << std::endl;
    };

    auto concStart = std::chrono::steady_clock::now();

    std::vector<std::thread> threads;
    for (int t = 0; t < NUM_CONCURRENT_THREADS; t++) {
        threads.emplace_back(workerFn, t);
    }
    for (auto& th : threads) {
        th.join();
    }

    auto concEnd = std::chrono::steady_clock::now();
    double concMs = std::chrono::duration<double, std::milli>(concEnd - concStart).count();
    int totalInferences = NUM_CONCURRENT_THREADS * FRAMES_PER_THREAD;

    std::cout << "\n Total: " << totalSuccess.load() << "/" << totalInferences
        << " succeeded, " << totalFailed.load() << " failed"
        << " (" << static_cast<int>(concMs) << " ms, "
        << static_cast<int>(concMs / totalInferences) << " ms/inference avg)" << std::endl;

    if (totalSuccess.load() == totalInferences) {
        PASS("Concurrent inference (" + std::to_string(totalInferences) + "/" +
            std::to_string(totalInferences) + ")");
    } else if (totalSuccess.load() > 0) {
        // Some rejections are OK if GPU is full — not a failure
        PASS("Concurrent inference (partial: " + std::to_string(totalSuccess.load()) +
            "/" + std::to_string(totalInferences) + ", some slots busy — expected)");
    } else {
        FAIL("Concurrent inference", "All inferences failed");
    }

    // ================================================================
    // TEST 5: Verify pool grew during concurrent burst
    // ================================================================
    std::cout << "\n--- Test 5: Pool growth verification ---\n" << std::endl;

    // Pool info is inside the Engine — we can't access it directly from ANSODBase.
    // But the pool prints "Info [Pool]: On-demand slot created/recycled" to stdout
    // during growth. We verify indirectly: if concurrent inference succeeded
    // with N threads simultaneously, the pool must have grown (or the probe slot
    // serialized all requests via the round-robin dispatch).
    std::cout << " (Check stdout above for 'Info [Pool]: On-demand slot' messages)" << std::endl;
    if (totalSuccess.load() > 0) {
        PASS("Pool growth (concurrent inference succeeded — pool dispatched correctly)");
    } else {
        FAIL("Pool growth", "No successful inferences during concurrent burst");
    }

    // ================================================================
    // TEST 6: Wait for idle cleanup (60s threshold + timer interval)
    // ================================================================
    std::cout << "\n--- Test 6: Idle cleanup (waiting " << IDLE_WAIT_SECONDS
        << "s for 60s cleanup timer) ---\n" << std::endl;

    std::cout << " Waiting..." << std::flush;
    for (int i = 0; i < IDLE_WAIT_SECONDS; i++) {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        if ((i + 1) % 10 == 0) {
            std::cout << " " << (i + 1) << "s" << std::flush;
        }
    }
    std::cout << " done." << std::endl;

    // The idle timer should have fired and released extra slots.
    // Check stdout for "Info [Pool]: Released N idle slot(s)" messages.
    std::cout << " (Check stdout above for 'Info [Pool]: Released' and 'Releasing idle slot' messages)" << std::endl;
    PASS("Idle cleanup wait completed (verify pool messages above)");

    // ================================================================
    // TEST 7: Post-cleanup inference (probe slot must still work)
    // ================================================================
    std::cout << "\n--- Test 7: Post-cleanup inference ---\n" << std::endl;

    int postCleanupSuccess = 0;
    for (int i = 0; i < 3; i++) {
        const auto& jpeg = jpegFrames[i % jpegFrames.size()];
        std::string result = RunInferenceFromJpegString(
            &infHandle,
            reinterpret_cast<const char*>(jpeg.data()),
            static_cast<unsigned long>(jpeg.size()),
            "post_cleanup");
        if (!result.empty() && result.find("results") != std::string::npos) {
            postCleanupSuccess++;
        }
    }

    if (postCleanupSuccess == 3) {
        PASS("Post-cleanup inference (3/3 — probe slot alive)");
    } else {
        FAIL("Post-cleanup inference", std::to_string(postCleanupSuccess) + "/3 succeeded");
    }

    // ================================================================
    // TEST 8: Multi-GPU detection (via nvidia-smi — no CUDA link needed)
    // ================================================================
    std::cout << "\n--- Test 8: Multi-GPU detection ---\n" << std::endl;

    // Use nvidia-smi to query GPUs without linking cuda_runtime
    int smiResult = system("nvidia-smi --query-gpu=index,name,memory.total,memory.free "
        "--format=csv,noheader,nounits");
    if (smiResult == 0) {
        PASS("GPU detection (see nvidia-smi output above)");
    } else {
        FAIL("GPU detection", "nvidia-smi not found or failed");
    }
    std::cout << " (Pool init messages above show which GPUs were used for slot creation)" << std::endl;

    // ================================================================
    // TEST 9: Clean release (no crash, no hang)
    // ================================================================
    std::cout << "\n--- Test 9: Handle release ---\n" << std::endl;

    auto releaseStart = std::chrono::steady_clock::now();
    int releaseResult = ReleaseANSODHandle(&infHandle);
    auto releaseEnd = std::chrono::steady_clock::now();
    double releaseMs = std::chrono::duration<double, std::milli>(releaseEnd - releaseStart).count();

    if (releaseResult == 0 || infHandle == nullptr) {
        PASS("Handle release (" + std::to_string(static_cast<int>(releaseMs)) + " ms)");
    } else {
        FAIL("Handle release", "ReleaseANSODHandle returned " + std::to_string(releaseResult));
    }

    // ================================================================
    // SUMMARY
    // ================================================================
    std::cout << "\n"
        << "============================================================\n"
        << " ElasticGPUTest RESULTS: " << testsPassed << " passed, "
        << testsFailed << " failed\n"
        << "============================================================\n" << std::endl;

    return testsFailed;
}
|
||
|
|
|
||
|
|
|
||
|
|
int FireNSmokeTest() {
|
||
|
|
int modelType = 16; // Custom model type for helmet detection, adjust as needed based on how the model is set up in the engine
|
||
|
|
std::string modelFilePath = "C:\\Projects\\ANSVIS\\Models\\ANS_FireSmoke_v2.0.zip";
|
||
|
|
std::string videoFile = "E:\\Programs\\DemoAssets\\Videos\\Helmet\\HM1.mp4";
|
||
|
|
|
||
|
|
|
||
|
|
ANSCENTER::ANSODBase* infHandle;
|
||
|
|
|
||
|
|
std::string licenseKey = "";
|
||
|
|
std::string modelZipFilePassword = "";
|
||
|
|
double detectionScoreThreshold = 0.3;
|
||
|
|
double modelConfThreshold = 0.48;
|
||
|
|
double modelNMSThreshold = 0.5;
|
||
|
|
int loadEngine = 0;
|
||
|
|
CreateANSODHandle(&infHandle, licenseKey.c_str(), modelFilePath.c_str(), modelZipFilePassword.c_str(),
|
||
|
|
detectionScoreThreshold, modelConfThreshold, modelNMSThreshold, 1, modelType, 1, loadEngine);
|
||
|
|
|
||
|
|
std::string stParam;
|
||
|
|
GetConfiguredParameters_CPP(&infHandle, stParam);
|
||
|
|
std::cout << "Configured parameters:\n" << stParam << std::endl;
|
||
|
|
|
||
|
|
ReleaseANSODHandle(&infHandle);
|
||
|
|
|
||
|
|
|
||
|
|
return 0;
|
||
|
|
}
|
||
|
|
// Entry point for the unit-test harness. Exactly one test routine is
// typically enabled at a time; the others are kept commented out so they
// can be switched on quickly during development.
int main()
{
    //FireNSmokeTest();
    //ElasticGPUTest();

    //YOLO26SEGYolo11Test();
    //YOLO26POSEYolo11Test();
    //YOLO26CLYolo11Test();
    //YOLO26ODYolo12Test();
    //YOLO26ODYolo11Test();
    //YOLO26ODYolo10Test();
    //YOLO26OBBYolo11Test();
    //SAM3ONNX_ImageTest(); // ORT reference — runs first, prints decoder input stats
    //SAM3TRT_ImageTest(); // TRT under test — compare decoder input stats with above
    // Currently enabled test:
    CustomModel_StressTest_FilePlayer(); // Multi-task stress test (LabVIEW flow)
    //SAM3TRT_UnitTest(); // TensorRT SAM3 test (in ANSSAM3-UnitTest.cpp)
    //TensorRT10Test();
    //FireNSmokeCustomDetection();
    //OpenVINOTest();
    //OpenVINOYolo10Test();
    //TensorRTTest();
    //BristleTest();//
    //Yolov12Test();
    //Yolov11Test();//
    //ONNXPOSE_VideoTest();
    //ONNXCLE_Test();
    //ONNXPOSE_Test();
    //MotionEngine_UnitTest();
    //Motion_UnitTest();
    //GenericModelTest();
    //RVATest();
    //SAMS_UnitTest();
    //TrafficLight();
    //Helmet_VideoTest();
    // HelMetDetection();
    // ONNXPOSE_VideoTest();

    //multithreadTest();
    //CustomPyTest();
    //for (int i = 0; i < 10; i++) {
    // CustomPyTest();
    // CustomPyTest();
    //}
    //ANSCUSTOMPY_Test();

    //ParseJSonFile();
    //TestODHUB();
    //TestYOLOV12();
    // GenericModelTest();
    //Yolov10RT_Test();
    //PersonHead();
    //RectifierTest();
    //LocSetTest();
    //PPETest();
    //ShutdownPythonEngine_CPP();
    return 0;
}
|
||
|
|
|