// File: ANSCustomModels/ANSCustomWeaponDetection/ANSCustomCodeWeaponDetection.cpp
// (stray file-browser header lines removed; they were not valid C++)

#include "ANSCustomCodeWeaponDetection.h"
//#define FNS_DEBUG
// Default constructor. Members rely on their in-class defaults (declared in
// the header); actual model creation and loading happens in Initialize().
ANSCustomWD::ANSCustomWD()
{
// Initialize the model
}
// Optimizes both the detector and the filter models (e.g. engine build).
// @param fp16  request half-precision (FP16) optimization when true.
// @return true only when BOTH models report success (result code 1);
//         false when either model is missing, optimization fails, or throws.
bool ANSCustomWD::OptimizeModel(bool fp16)
{
    try {
        if (this->_detector == nullptr || this->_filter == nullptr)
        {
            std::cout << "Model is not initialized." << std::endl;
            return false;
        }
        const int vResult = _detector->Optimize(fp16);
        const int tResult = _filter->Optimize(fp16);
        if (vResult != 1 || tResult != 1)
        {
            std::cout << "Model optimization failed." << std::endl;
            return false;
        }
        std::cout << "Model optimization successful." << std::endl;
        return true;
    }
    catch (const std::exception& e) {
        // Report the failure reason instead of silently swallowing it.
        std::cerr << "OptimizeModel exception: " << e.what() << std::endl;
        return false;
    }
    catch (...) {
        std::cerr << "OptimizeModel: unknown exception." << std::endl;
        return false;
    }
}
// Convenience overload: runs inference with a default camera identifier
// ("CustomCam") when the caller does not supply one.
std::vector<CustomObject> ANSCustomWD::RunInference(const cv::Mat& input)
{
return RunInference(input, "CustomCam");
}
bool ANSCustomWD::Destroy()
{
try {
this->_detector.reset();
this->_filter.reset();
return true;
}
catch (const std::exception& e) {
return false;
}
catch (...) {
return false;
}
}
// Loads the weapon detector and the person-filter models from modelDirectory.
// Selects the inference backend from the detected hardware (TensorRT on
// NVIDIA GPUs, otherwise OpenVINO/ONNX on CPU).
// @param modelDirectory          model folder path (supplied by ANSVIS).
// @param detectionScoreThreshold minimum confidence for reported detections.
// @param labelMap                [out] receives the detector's label map.
// @return true only when both models load successfully (result code 1).
bool ANSCustomWD::Initialize(const std::string& modelDirectory, float detectionScoreThreshold, std::string& labelMap)
{
    //1. The modelDirectory is supplied by ANSVIS and contains the path to the model files
    _modelDirectory = modelDirectory;
    _detectionScoreThreshold = detectionScoreThreshold;
    // Detector model configuration
    _detectorModelName = "train_last";
    _detectorClassName = "classes.names";
    // Filter model configuration
    _filterModelName = "filter";
    _filterClassName = "filter.names";
    // Create model instances
    _detector = ANSLIBPtr(ANSCENTER::ANSLIB::Create(), &ANSCENTER::ANSLIB::Destroy);
    _filter = ANSLIBPtr(ANSCENTER::ANSLIB::Create(), &ANSCENTER::ANSLIB::Destroy);
    // Check the hardware type and pick the matching backend. The model/detection
    // type codes are ANSLIB conventions passed to LoadModelFromFolder below.
    // (The original code assigned these fields three times; only the branch
    // below is effective, so the dead duplicate assignments were removed.)
    engineType = _detector->GetEngineType();
    if (engineType == 1) {
        // NVIDIA GPU: detector runs TensorRT (v8/v11), filter runs TensorRT (v12).
        _detectorModelType = 4;     // 4 = TensorRT YoloV11
        _detectorDetectionType = 1; // 1 = object detection
        _filterModelType = 4;       // 4 = TensorRT YoloV12
        _filterDetectionType = 1;   // 1 = object detection
        std::cout << "NVIDIA GPU detected. Using TensorRT" << std::endl;
    }
    else {
        // CPU: detector runs OpenVINO (v8/v11), filter runs ONNX (v12).
        _detectorModelType = 5;     // 5 = OpenVINO YoloV11
        _detectorDetectionType = 1; // 1 = object detection
        _filterModelType = 17;      // 17 = ONNX YoloV12
        _filterDetectionType = 1;   // 1 = object detection
        std::cout << "CPU detected. Using OpenVINO and ONNX" << std::endl;
    }
    // Clear parameters
    this->_params.ROI_Config.clear();
    this->_params.ROI_Options.clear();
    this->_params.Parameters.clear();
    this->_params.ROI_Values.clear();
    //2. User can start implementing the initialization logic here
    double _modelConfThreshold = 0.5;
    double _modelNMSThreshold = 0.5;
    std::string licenseKey = "";
#ifdef FNS_DEBUG // Corrected preprocessor directive
    _loadEngineOnCreate = true;
#endif
    int loadEngineOnCreation = _loadEngineOnCreate ? 1 : 0; // Load engine on creation
    int autoEngineDetection = 1; // Auto engine detection
    int detectorResult = _detector->LoadModelFromFolder(licenseKey.c_str(),
        _detectorModelName.c_str(),
        _detectorClassName.c_str(),
        detectionScoreThreshold,
        _modelConfThreshold,
        _modelNMSThreshold, autoEngineDetection,
        _detectorModelType,
        _detectorDetectionType,
        loadEngineOnCreation,
        _modelDirectory.c_str(),
        labelMap);
    int filterResult = _filter->LoadModelFromFolder(licenseKey.c_str(),
        _filterModelName.c_str(),
        _filterClassName.c_str(),
        _detectionScoreThreshold,
        _modelConfThreshold,
        _modelNMSThreshold, autoEngineDetection,
        _filterModelType,
        _filterDetectionType,
        loadEngineOnCreation,
        _modelDirectory.c_str(),
        _filterLabelMap);
    // Both models must load (return code 1) for initialization to succeed.
    return (detectorResult == 1) && (filterResult == 1);
}
// Destructor: releases both model instances via Destroy().
ANSCustomWD::~ANSCustomWD()
{
Destroy();
}
// Main inference entry point. Either re-scans a previously retained detection
// area or scans the next priority region of the frame, converts accepted
// detections to CustomObject results, and updates the no-detection state when
// nothing passes the score threshold. Guarded by the instance's recursive mutex.
// @param input      BGR frame; rejected when empty or smaller than 10x10.
// @param camera_id  identifier propagated into each result's cameraId.
// @return detections with confidence >= _detectionScoreThreshold (may be empty).
std::vector<CustomObject> ANSCustomWD::RunInference(const cv::Mat& input, const std::string& camera_id) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    // Early validation: reject tiny/empty frames and an uninitialized detector.
    if (input.empty() || input.cols < 10 || input.rows < 10) {
        return {};
    }
    if (!_detector) {
        return {};
    }
    try {
#ifdef FNS_DEBUG
        cv::Mat draw = input.clone();
#endif
        std::vector<ANSCENTER::Object> output;
        // A. Check if detected area is retained and valid
        if (_detectedArea.width > 50 && _detectedArea.height > 50) {
            output = ProcessExistingDetectedArea(input, camera_id
#ifdef FNS_DEBUG
                , draw
#endif
            );
        }
        else {
            // B. Find new detected area
            ProcessNewDetectedArea(input, camera_id, output
#ifdef FNS_DEBUG
                , draw
#endif
            );
        }
#ifdef FNS_DEBUG
        cv::imshow("Combined Detected Areas", draw);
        cv::waitKey(1);
#endif
        // Convert to CustomObjects, keeping only detections at/above threshold.
        std::vector<CustomObject> results;
        if (!output.empty()) {
            results.reserve(output.size());
            const float scoreThreshold = _detectionScoreThreshold;
            for (const auto& obj : output) {
                if (obj.confidence >= scoreThreshold) {
                    CustomObject customObj;
                    customObj.box = obj.box;
                    customObj.classId = obj.classId;
                    customObj.confidence = obj.confidence;
                    customObj.cameraId = obj.cameraId;
                    customObj.className = obj.className;
                    customObj.extraInfo = "Detection Score Threshold:" + std::to_string(scoreThreshold);
                    results.push_back(std::move(customObj));
                }
            }
        }
        if (results.empty()) {
            UpdateNoDetectionCondition();
        }
        return results;
    }
    catch (const std::exception& e) {
        // Log instead of silently swallowing the failure.
        std::cerr << "RunInference exception: " << e.what() << std::endl;
        return {};
    }
    catch (...) {
        // Previously non-standard exceptions escaped this method; match the
        // fail-safe contract of the other public entry points.
        return {};
    }
}
// Runs the person-filter model on the full frame and returns only the
// detections whose class id is 0 (person). Returns an empty vector when the
// filter model is not available.
std::vector<ANSCENTER::Object> ANSCustomWD::RunFilterGetPersons(const cv::Mat& frame) {
    std::vector<ANSCENTER::Object> persons;
    if (!_filter) return persons;
    std::vector<ANSCENTER::Object> detections;
    _filter->RunInference(frame, "cam", detections);
    for (std::size_t i = 0; i < detections.size(); ++i) {
        if (detections[i].classId == 0) { // class 0 == person
            persons.push_back(detections[i]);
        }
    }
    return persons;
}
// Re-scans the retained detection area (_detectedArea):
//  1. clamps the area to the frame and crops it,
//  2. runs the weapon detector on the crop,
//  3. gathers movement regions and person detections for cross-validation,
//  4. confirms surviving candidates via ProcessWeaponDetection.
// All boxes placed in the returned vector are in full-frame coordinates.
std::vector<ANSCENTER::Object> ANSCustomWD::ProcessExistingDetectedArea(
    const cv::Mat& frame,
    const std::string& camera_id
#ifdef FNS_DEBUG
    , cv::Mat& draw
#endif
) {
    std::vector<ANSCENTER::Object> output;
    // Validate _detectedArea is within frame bounds.
    cv::Rect frameRect(0, 0, frame.cols, frame.rows);
    _detectedArea &= frameRect;
    if (_detectedArea.width <= 0 || _detectedArea.height <= 0) {
        _detectedArea = cv::Rect();
        UpdateNoDetectionCondition();
        return output;
    }
#ifdef FNS_DEBUG
    cv::rectangle(draw, _detectedArea, cv::Scalar(0, 0, 255), 2); // RED
#endif
    cv::Mat activeROI = frame(_detectedArea);
    // Run weapon detection on the cropped ROI. The integer result code was
    // previously stored but never read; only the detection list is acted upon.
    std::vector<ANSCENTER::Object> detectedObjects;
    _detector->RunInference(activeROI, camera_id.c_str(), detectedObjects);
    if (detectedObjects.empty()) {
        UpdateNoDetectionCondition();
        return output;
    }
    // Run the person filter once for all candidates in this frame.
    std::vector<ANSCENTER::Object> personDetections = RunFilterGetPersons(frame);
    bool filterHadResults = !personDetections.empty() || !_filter;
    const float scoreThreshold = _detectionScoreThreshold;
    // Movement detection on the same crop (result code likewise unused).
    std::vector<ANSCENTER::Object> movementObjects;
    _detector->DetectMovement(activeROI, camera_id.c_str(), movementObjects);
    // Convert movement boxes from ROI-local to frame coordinates.
    std::vector<ANSCENTER::Object> weaponRects;
    weaponRects.reserve(movementObjects.size());
    for (const auto& rect : movementObjects) {
        ANSCENTER::Object mObj;
        mObj.box = rect.box;
        mObj.box.x += _detectedArea.x;
        mObj.box.y += _detectedArea.y;
        weaponRects.push_back(mObj);
    }
#ifdef FNS_DEBUG
    for (const auto& rect : weaponRects) {
        cv::rectangle(draw, rect.box, cv::Scalar(0, 255, 0), 2); // GREEN
    }
#endif
    for (auto& detectedObj : detectedObjects) {
        // Adjust to frame coordinates.
        detectedObj.box.x += _detectedArea.x;
        detectedObj.box.y += _detectedArea.y;
        detectedObj.cameraId = camera_id;
        if (detectedObj.confidence <= scoreThreshold) {
            UpdateNoDetectionCondition();
            continue;
        }
        // A candidate covering most of the tracking area is likely the area
        // itself rather than a weapon inside it — reject it.
        const float area_threshold = calculateIoU(_detectedArea, detectedObj.box);
        if (area_threshold >= 0.5f) {
            UpdateNoDetectionCondition();
            continue;
        }
#ifdef FNS_DEBUG
        cv::rectangle(draw, detectedObj.box, cv::Scalar(0, 255, 255), 2); // Yellow
#endif
        // Require some movement evidence overlapping the candidate.
        if (weaponRects.empty()) {
            UpdateNoDetectionCondition();
            continue;
        }
        if (!IsOverlapping(detectedObj, weaponRects, 0)) {
            UpdateNoDetectionCondition();
            continue;
        }
        // Process valid detection.
        if (ProcessWeaponDetection(frame, detectedObj, output, personDetections, filterHadResults
#ifdef FNS_DEBUG
            , draw
#endif
        )) {
            continue;
        }
    }
    return output;
}
// Decides whether a weapon candidate should be confirmed, using a running
// consecutive-confirmation counter (_realWeaponCheck) and, when available,
// person detections from the filter model.
// @param detectedObj       candidate in full-frame coordinates.
// @param output            receives the candidate when it is confirmed.
// @param personDetections  person boxes from RunFilterGetPersons (whole frame).
// @param filterHadResults  true when the filter found persons OR no filter exists.
// @return true when the detection was confirmed and added to `output`.
bool ANSCustomWD::ProcessWeaponDetection(
const cv::Mat& frame,
ANSCENTER::Object& detectedObj,
std::vector<ANSCENTER::Object>& output,
const std::vector<ANSCENTER::Object>& personDetections,
bool filterHadResults
#ifdef FNS_DEBUG
, cv::Mat& draw
#endif
) {
// Already established as real weapon after FILTERFRAMES consecutive confirmations
if (_realWeaponCheck > FILTERFRAMES) {
AddConfirmedWeaponDetection(frame, detectedObj, output);
return true;
}
// Filter not available - confirm directly
if (!_filter) {
AddConfirmedWeaponDetection(frame, detectedObj, output);
return true;
}
// Filter ran but detected nothing at all - treat as inconclusive
// Require more consecutive frames before confirming without person validation
if (!filterHadResults) {
_realWeaponCheck++;
if (_realWeaponCheck > FILTERFRAMES / 2) {
AddConfirmedWeaponDetection(frame, detectedObj, output);
return true;
}
return false;
}
#ifdef FNS_DEBUG
for (const auto& person : personDetections) {
cv::rectangle(draw, person.box, cv::Scalar(23, 25, 0), 2);
cv::putText(draw, "person",
cv::Point(person.box.x, person.box.y - 10),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 2);
}
#endif
// No persons found - weapon without person context
// Decay the counter instead of hard-resetting it.
if (personDetections.empty()) {
_realWeaponCheck = std::max(0, _realWeaponCheck - 1);
if (_realWeaponCheck <= 0) _isRealWeaponFrame = false;
return false;
}
// Check if weapon overlaps with ANY person (use meaningful IoU threshold)
if (IsOverlapping(detectedObj, personDetections, 0.05f)) {
AddConfirmedWeaponDetection(frame, detectedObj, output);
_realWeaponCheck++;
return true;
}
// Detection doesn't overlap with any person - decay the counter.
_realWeaponCheck = std::max(0, _realWeaponCheck - 1);
if (_realWeaponCheck <= 0) {
_isRealWeaponFrame = false;
}
return false;
}
// Records detectedObj as a confirmed weapon: appends it to `output`,
// re-centers the tracking ROI (_detectedArea) around it, and resets the
// retention/confirmation flags for the next frames.
void ANSCustomWD::AddConfirmedWeaponDetection(
    const cv::Mat& frame,
    ANSCENTER::Object& detectedObj,
    std::vector<ANSCENTER::Object>& output)
{
    output.push_back(detectedObj);
    UpdateActiveROI(frame, detectedObj);
    // Restart the retention window and mark this frame as a real detection.
    _retainDetectedArea = 0;
    _isWeaponDetected = true;
    _isRealWeaponFrame = true;
}
// Scans the next priority region of the frame for a weapon when no tracking
// area is currently retained. On a hit, re-centers _detectedArea around the
// highest-confidence candidate and validates it in the same frame via the
// person filter and ProcessWeaponDetection.
void ANSCustomWD::ProcessNewDetectedArea(
    const cv::Mat& frame,
    const std::string& camera_id,
    std::vector<ANSCENTER::Object>& output
#ifdef FNS_DEBUG
    , cv::Mat& draw
#endif
) {
    // Decay _realWeaponCheck gradually instead of hard reset.
    _isRealWeaponFrame = false;
    _realWeaponCheck = std::max(0, _realWeaponCheck - 1);
    // Divide the image into prioritized sections (populates the section cache;
    // the returned copy was previously stored in an unused local).
    divideImage(frame);
    // Advance the scan cursor: wrap back to the highest-priority region when
    // past the end (or uninitialized), otherwise move to the next priority.
    const int lowestPriority = getLowestPriorityRegion();
    if (_currentPriority > lowestPriority || _currentPriority == 0) {
        _currentPriority = getHighestPriorityRegion();
    }
    else {
        _currentPriority++;
    }
    _detectedArea = getRegionByPriority(_currentPriority);
#ifdef FNS_DEBUG
    cv::rectangle(draw, _detectedArea, cv::Scalar(0, 0, 255), 4); // RED
#endif
    // Validate _detectedArea is within frame bounds.
    cv::Rect frameRect(0, 0, frame.cols, frame.rows);
    _detectedArea &= frameRect;
    if (_detectedArea.width <= 50 || _detectedArea.height <= 50) {
        _detectedArea = cv::Rect();
        return;
    }
    cv::Mat detectedROI = frame(_detectedArea);
    // Result code intentionally ignored (was an unused local); only the
    // detection list is used.
    std::vector<ANSCENTER::Object> detectedObjects;
    _detector->RunInference(detectedROI, camera_id.c_str(), detectedObjects);
    if (detectedObjects.empty()) {
        _detectedArea = cv::Rect();
        return;
    }
    // Use a relaxed scan threshold (capped at 0.35) so weaker first sightings
    // can still seed a tracking area.
    const float scanThreshold = std::min(_detectionScoreThreshold, 0.35f);
    ANSCENTER::Object* bestDetection = nullptr;
    float maxScore = 0.0f;
    for (auto& detectedObj : detectedObjects) {
        // Translate to full-frame coordinates and tag with the camera id.
        detectedObj.box.x += _detectedArea.x;
        detectedObj.box.y += _detectedArea.y;
        detectedObj.cameraId = camera_id;
        if (detectedObj.confidence <= scanThreshold) {
            continue;
        }
        if (detectedObj.confidence > maxScore) {
            maxScore = detectedObj.confidence;
            bestDetection = &detectedObj;
        }
    }
    if (!bestDetection) {
        _detectedArea = cv::Rect();
        return;
    }
    // Set up tracking area around the best detection.
    UpdateDetectedAreaFromObject(frame, *bestDetection);
    _isWeaponDetected = true;
    _retainDetectedArea = 0;
    // Validate in the same frame — run filter to check for person overlap.
    std::vector<ANSCENTER::Object> personDetections = RunFilterGetPersons(frame);
    bool filterHadResults = !personDetections.empty() || !_filter;
    ProcessWeaponDetection(frame, *bestDetection, output, personDetections, filterHadResults
#ifdef FNS_DEBUG
        , draw
#endif
    );
}
// Centers _detectedArea — a square crop whose side length depends on the
// frame resolution — on the given detection, clamped to stay inside the frame.
void ANSCustomWD::UpdateDetectedAreaFromObject(const cv::Mat& frame, const ANSCENTER::Object& detectedObj) {
    const int longestSide = std::max(frame.cols, frame.rows);
    int side = 224;
    if (longestSide > 1920)      side = 640;
    else if (longestSide > 1280) side = 480;
    else if (longestSide > 640)  side = 320;
    // Cap the crop to the frame so the clamp range below stays non-negative (UB otherwise).
    side = std::min(side, std::min(frame.cols, frame.rows));
    if (side <= 0) {
        _detectedArea = cv::Rect();
        return;
    }
    const int centerX = detectedObj.box.x + detectedObj.box.width / 2;
    const int centerY = detectedObj.box.y + detectedObj.box.height / 2;
    _detectedArea.x = std::clamp(centerX - side / 2, 0, frame.cols - side);
    _detectedArea.y = std::clamp(centerY - side / 2, 0, frame.rows - side);
    _detectedArea.width = std::min(side, frame.cols - _detectedArea.x);
    _detectedArea.height = std::min(side, frame.rows - _detectedArea.y);
}
// Functions for screen size division
// Euclidean distance between `center` and the center point of `rect`.
double ANSCustomWD::calculateDistanceToCenter(const cv::Point& center, const cv::Rect& rect) {
    const cv::Point rectCenter(rect.x + rect.width / 2, rect.y + rect.height / 2);
    // std::hypot replaces sqrt(pow(..,2)+pow(..,2)): clearer, and avoids
    // overflow/underflow in the intermediate squares.
    return std::hypot(static_cast<double>(rectCenter.x - center.x),
                      static_cast<double>(rectCenter.y - center.y));
}
// Splits the image into a grid of sections and assigns each a scan priority:
// priority 1 is the section closest to the image center, ties broken
// top-to-bottom then left-to-right. Results are cached (cachedSections,
// keyed by previousImageSize) and reused until the image size changes.
std::vector<ANSCustomWD::ImageSection> ANSCustomWD::divideImage(const cv::Mat& image) {
if (image.empty()) {
std::cerr << "Error: Empty image!" << std::endl;
return cachedSections;
}
cv::Size currentSize(image.cols, image.rows);
// Check if the image size has changed
if (currentSize == previousImageSize) {
return cachedSections; // Return cached sections if size is the same
}
// Update previous size
previousImageSize = currentSize;
cachedSections.clear();
int width = image.cols;
int height = image.rows;
int maxDimension = std::max(width, height);
// Pick a section count from the image resolution (larger image => more sections).
int numSections = 10;// std::max(1, numSections); // Ensure at least 1 section
if (maxDimension <= 2560)numSections = 8;
if (maxDimension <= 1280)numSections = 6;
if (maxDimension <= 960)numSections = 4;
if (maxDimension <= 640)numSections = 2;
if (maxDimension <= 320)numSections = 1;
// NOTE(review): gridRows truncates sqrt(numSections), so gridRows*gridCols can
// exceed numSections (e.g. 10 -> 3x4 = 12 sections) — confirm this is intended.
int gridRows = std::sqrt(numSections);
int gridCols = (numSections + gridRows - 1) / gridRows; // Ensure all sections are covered
int sectionWidth = width / gridCols;
int sectionHeight = height / gridRows;
cv::Point imageCenter(width / 2, height / 2);
std::vector<std::pair<double, ImageSection>> distancePriorityList;
// Create sections and store their distance from the center
for (int r = 0; r < gridRows; ++r) {
for (int c = 0; c < gridCols; ++c) {
int x = c * sectionWidth;
int y = r * sectionHeight;
// Last row/column absorbs the integer-division remainder so the grid covers the frame.
int w = (c == gridCols - 1) ? width - x : sectionWidth;
int h = (r == gridRows - 1) ? height - y : sectionHeight;
ImageSection section(cv::Rect(x, y, w, h));
double distance = calculateDistanceToCenter(imageCenter, section.region);
distancePriorityList.emplace_back(distance, section);
}
}
// Sort sections based on distance from center, then top-to-bottom, then left-to-right
std::sort(distancePriorityList.begin(), distancePriorityList.end(),
[](const std::pair<double, ImageSection>& a, const std::pair<double, ImageSection>& b) {
if (std::abs(a.first - b.first) > 1e-5) {
return a.first < b.first; // Sort by closest distance to center
}
// If distance is the same, prioritize top to bottom, then left to right
return a.second.region.y == b.second.region.y
? a.second.region.x < b.second.region.x
: a.second.region.y < b.second.region.y;
});
// Assign priority (1 = closest to center) and rebuild the cache in order.
int priority = 1;
for (auto& entry : distancePriorityList) {
entry.second.priority = priority++;
cachedSections.push_back(entry.second);
}
return cachedSections;
}
// Returns the priority value of the section closest to the image center
// (front of the distance-sorted cache), or 0 when no sections are cached.
int ANSCustomWD::getHighestPriorityRegion() {
    return cachedSections.empty() ? 0 : cachedSections.front().priority;
}
// Returns the priority value of the section farthest from the image center
// (back of the distance-sorted cache), or 0 when no sections are cached.
int ANSCustomWD::getLowestPriorityRegion() {
    return cachedSections.empty() ? 0 : cachedSections.back().priority;
}
// Looks up the cached section carrying the given priority value.
// Returns an empty rectangle when no cached section has that priority.
cv::Rect ANSCustomWD::getRegionByPriority(int priority) {
    const auto match = std::find_if(cachedSections.begin(), cachedSections.end(),
        [priority](const ImageSection& section) { return section.priority == priority; });
    return match != cachedSections.end() ? match->region : cv::Rect();
}
// Called whenever a frame (or candidate) yields no confirmed weapon.
// Resets the consecutive-confirmation state; if a weapon was previously
// confirmed, the tracking area is retained for up to RETAINFRAMES frames
// before being cleared, otherwise it is cleared immediately.
void ANSCustomWD::UpdateNoDetectionCondition() {
_isRealWeaponFrame = false;
_realWeaponCheck = 0;
if (_isWeaponDetected) {
_retainDetectedArea++;
if (_retainDetectedArea >= RETAINFRAMES) {// clear the tracking area once RETAINFRAMES frames have elapsed
_detectedArea.width = 0;
_detectedArea.height = 0;
_retainDetectedArea = 0;
_isWeaponDetected = false;
}
}
else {
// No prior confirmed weapon: drop the tracking area right away.
_detectedArea.width = 0;
_detectedArea.height = 0;
_retainDetectedArea = 0;
}
}
// Intersection-over-Union of two axis-aligned rectangles, in [0, 1].
// Areas are accumulated in 64-bit to avoid int overflow on very large boxes
// (e.g. full-frame rectangles from high-resolution sources).
// Returns 0 when the union is empty or degenerate.
float ANSCustomWD::calculateIoU(const cv::Rect& box1, const cv::Rect& box2) {
    const int x1 = std::max(box1.x, box2.x);
    const int y1 = std::max(box1.y, box2.y);
    const int x2 = std::min(box1.x + box1.width, box2.x + box2.width);
    const int y2 = std::min(box1.y + box1.height, box2.y + box2.height);
    const long long intersectionArea =
        static_cast<long long>(std::max(0, x2 - x1)) * std::max(0, y2 - y1);
    const long long box1Area = static_cast<long long>(box1.width) * box1.height;
    const long long box2Area = static_cast<long long>(box2.width) * box2.height;
    const long long unionArea = box1Area + box2Area - intersectionArea;
    if (unionArea <= 0) return 0.0f;
    return static_cast<float>(intersectionArea) / static_cast<float>(unionArea);
}
// True when obj's box has IoU strictly greater than iouThreshold with any
// box in objectList; false when no box overlaps enough (or the list is empty).
bool ANSCustomWD::IsOverlapping(const ANSCENTER::Object& obj, const std::vector<ANSCENTER::Object>& objectList, float iouThreshold)
{
    return std::any_of(objectList.begin(), objectList.end(),
        [&](const ANSCENTER::Object& otherObj) {
            return calculateIoU(obj.box, otherObj.box) > iouThreshold;
        });
}
// Re-centers the tracking area (_detectedArea) on a confirmed detection,
// using a square crop sized by the frame resolution and clamped to the frame.
// NOTE: detectedObj is taken by value to match the header declaration.
void ANSCustomWD::UpdateActiveROI(const cv::Mat& frame, ANSCENTER::Object detectedObj) {
    const int frameMax = std::max(frame.cols, frame.rows);
    int side = 224;
    if (frameMax > 1920)      side = 640;
    else if (frameMax > 1280) side = 480;
    else if (frameMax > 640)  side = 320;
    // Cap the crop to the frame so the clamp range below stays non-negative.
    side = std::min(side, std::min(frame.cols, frame.rows));
    if (side <= 0) {
        _detectedArea = cv::Rect();
        return;
    }
    const int centerX = detectedObj.box.x + detectedObj.box.width / 2;
    const int centerY = detectedObj.box.y + detectedObj.box.height / 2;
    _detectedArea.x = std::clamp(centerX - side / 2, 0, frame.cols - side);
    _detectedArea.y = std::clamp(centerY - side / 2, 0, frame.rows - side);
    _detectedArea.width = std::min(side, frame.cols - _detectedArea.x);
    _detectedArea.height = std::min(side, frame.rows - _detectedArea.y);
}
// Populates `param` with this detector's configurable parameters and ROIs.
// Currently the implementation only clears the caller's containers; the
// commented-out blocks below are template examples kept for future use.
// Fixes the misspelled error messages ("ConfigureParamaters").
// @param param  [out] parameter container to (re)fill.
// @return true on success, false when an exception occurs.
bool ANSCustomWD::ConfigureParameters(CustomParams& param)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    try {
        // Clear current parameters
        param.ROI_Config.clear();
        param.ROI_Options.clear();
        param.Parameters.clear();
        param.ROI_Values.clear();
        //// Fill ROI_Config
        //if (this->_params.ROI_Config.empty()) {
        // // Traffic Light ROIs
        // param.ROI_Config.push_back({ true, false, false, 0, 10, "TrafficLight_One", "All Corners" });
        // param.ROI_Config.push_back({ true, false, false, 0, 10, "TrafficLight_Two", "All Corners" });
        // param.ROI_Config.push_back({ true, false, false, 0, 10, "TrafficLight_Three", "All Corners" });
        // // Vehicle Detector Zones
        // param.ROI_Config.push_back({ true, true, false, 0, 10, "CarZone_One", "All Corners" });
        // param.ROI_Config.push_back({ true, true, false, 0, 10, "CarZone_Two", "All Corners" });
        // param.ROI_Config.push_back({ true, true, false, 0, 10, "CarZone_Three", "All Corners" });
        // // Cross Line ROIs
        // param.ROI_Config.push_back({ false, false, true, 0, 10, "CrossLine_One", "All Corners" });
        // param.ROI_Config.push_back({ false, false, true, 0, 10, "CrossLine_Two", "All Corners" });
        // param.ROI_Config.push_back({ false, false, true, 0, 10, "CrossLine_Three", "All Corners" });
        //}
        //else {
        // // Reuse existing ROI_Config
        // param.ROI_Config = this->_params.ROI_Config;
        //}
        //// Safely reuse ROI_Values only if valid
        //size_t suspiciousCap = this->_params.ROI_Values.capacity();
        //if (!this->_params.ROI_Values.empty() && suspiciousCap < 10000) {
        // for (const auto& roi : this->_params.ROI_Values) {
        // CustomROIValue roiValue;
        // roiValue.Name = roi.Name;
        // roiValue.ROIPoints = roi.ROIPoints;
        // roiValue.ROIMatch = roi.ROIMatch;
        // roiValue.Option = roi.Option;
        // roiValue.OriginalImageSize = roi.OriginalImageSize;
        // param.ROI_Values.push_back(roiValue);
        // }
        //}
        //else {
        // // Use default harcoded values
        // param.ROI_Values = {
        // // TrafficLight
        // {"Centre Point", {{700, 100}, {950, 100}, {950, 200}, {700, 200}}, "Left side", "TrafficLight_One_1", 1920},
        // {"Centre Point", {{1000, 100}, {2000, 100}, {2000, 200}, {1000, 200}}, "Left side", "TrafficLight_Two_1", 1920},
        // {"Centre Point", {{2100, 100}, {2200, 100}, {2200, 200}, {2100, 200}}, "Left side", "TrafficLight_Three_1", 1920},
        // // VehicleDetector
        // {"Centre Point", {{700, 650}, {950, 650}, {950, 700}, {600, 700}}, "Inside ROI", "CarZone_One_1", 1920},
        // {"Centre Point", {{950, 650}, {1900, 650}, {1900, 770}, {950, 700}}, "Inside ROI", "CarZone_Two_1", 1920},
        // {"Centre Point", {{1900, 650}, {2150, 650}, {2150, 770}, {1900, 770}}, "Inside ROI", "CarZone_Three_1", 1920},
        // // CrossLine
        // {"Centre Point", {{600, 670}, {2150, 750}}, "Above", "CrossLine_One_1", 1920},
        // {"Centre Point", {{600, 670}, {2150, 750}}, "Left side", "CrossLine_Two_1", 1920},
        // {"Centre Point", {{600, 670}, {2150, 750}}, "Left side", "CrossLine_Three_1", 1920}
        // };
        //}
        //// Add ALPR parameter if ALPR is available
        //if (_ALPRVisible) {
        // CustomParameter stALPRParam;
        // stALPRParam.Name = "ALPR";
        // stALPRParam.DataType = "Boolean";
        // stALPRParam.NoOfDecimals = 0;
        // stALPRParam.MaxValue = 0;
        // stALPRParam.MinValue = 0;
        // stALPRParam.StartValue = "false";
        // stALPRParam.ListItems.clear();
        // stALPRParam.DefaultValue = "false";
        // stALPRParam.Value = _useALPR ? "true" : "false";
        // param.Parameters.push_back(stALPRParam);
        //}
        //// Add Display TL parameter
        //CustomParameter stTLParam;
        //stTLParam.Name = "Show Traffic Light";
        //stTLParam.DataType = "Boolean";
        //stTLParam.NoOfDecimals = 0;
        //stTLParam.MaxValue = 0;
        //stTLParam.MinValue = 0;
        //stTLParam.StartValue = "false";
        //stTLParam.ListItems.clear();
        //stTLParam.DefaultValue = "false";
        //stTLParam.Value = _displayTL ? "true" : "false";
        //param.Parameters.push_back(stTLParam);
        ////Add Traffic Light AI parameter
        //CustomParameter stTrafficLightAI;
        //stTrafficLightAI.Name = "TrafficLightAI";
        //stTrafficLightAI.DataType = "Boolean";
        //stTrafficLightAI.NoOfDecimals = 0;
        //stTrafficLightAI.MaxValue = 0;
        //stTrafficLightAI.MinValue = 0;
        //stTrafficLightAI.StartValue = "false";
        //stTrafficLightAI.ListItems.clear();
        //stTrafficLightAI.DefaultValue = "false";
        //stTrafficLightAI.Value = m_bTrafficLightAI ? "true" : "false";
        //param.Parameters.push_back(stTrafficLightAI);
        return true;
    }
    catch (const std::exception& e) {
        std::cerr << "Error in ConfigureParameters: " << e.what() << std::endl;
        return false;
    }
    catch (...) {
        std::cerr << "Unknown error in ConfigureParameters." << std::endl;
        return false;
    }
}