// NOTE(review): the lines below are file-viewer metadata that was pasted into
// the source by mistake; kept as a comment so the header still compiles.
// Files
// ANSCORE/ANSODEngine/ANSFireNSmoke.h
// 296 lines, 12 KiB, C++
#ifndef ANSFIRENSMOKE_H
#define ANSFIRENSMOKE_H
#pragma once
#include "ANSEngineCommon.h"
// Standard-library containers used throughout this header (std::queue,
// std::deque, std::map, std::vector, std::string, std::pair).
#include <deque>
#include <map>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <openvino/openvino.hpp>
#include "ANSYOLOV10OVOD.h"
#include "ANSYOLOV10RTOD.h"
namespace ANSCENTER
{
class ImageProcessor {
const int MAX_QUEUE_SIZE = 10;
const int THRESHOLD = 8;
public:
// Add image to the queue and process if queue has 10 images
std::vector<cv::Rect> addImageToQueue(const cv::Mat& image) {
width = image.cols;
height = image.rows;
// Check if queue size is already 10, if so, remove the oldest image
if (imageQueue.size() == MAX_QUEUE_SIZE) {
imageQueue.pop();
}
// Add the new image to the queue
imageQueue.push(image.clone());
// Process images if we have exactly 10 images in the queue
if (imageQueue.size() == MAX_QUEUE_SIZE) {
return processQueueImages();
}
// Return an empty vector if not yet ready to process
return std::vector<cv::Rect>();
}
private:
int width, height;
std::queue<cv::Mat> imageQueue;
// Sum all images in the queue, create a mask, and return array of bounding rectangles
std::vector<cv::Rect> processQueueImages() {
// Initialize the sum image with zeros
cv::Mat sumImage = cv::Mat::zeros(height, width, CV_32FC1);
// Use a temporary queue to preserve the original images
std::queue<cv::Mat> tempQueue = imageQueue;
// Sum up all images in the queue
while (!tempQueue.empty()) {
cv::Mat img = tempQueue.front();
tempQueue.pop();
// Accumulate image pixels in sumImage
sumImage += img;
}
// Threshold the summed image to create a binary mask
cv::Mat mask;
cv::threshold(sumImage, mask, THRESHOLD, 1, cv::THRESH_BINARY);
// Convert mask to 8-bit to find contours
mask.convertTo(mask, CV_8UC1);
// Find contours on the mask
std::vector<std::vector<cv::Point>> contours;
cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
// Extract bounding rectangles for each contour
std::vector<cv::Rect> boundingRects;
for (const auto& contour : contours) {
cv::Rect boundingRect = cv::boundingRect(contour);
boundingRects.push_back(boundingRect);
}
return boundingRects;
}
};
// Grey-Level Co-occurrence Matrix helper for a single image channel.
// Computed texture properties are cached in a name->value map whose keys
// encode the property, the matrix index, and the channel label.
class GLCM {
public:
    // channel is a short label (e.g. "R") embedded in every property key.
    explicit GLCM(const std::string& channel) : channel(channel) {}
    // Convert the channel to 8-bit and delegate to graycomatrix; defaults
    // request a single distance (5) at a single angle (0).
    std::vector<cv::Mat> getGLCM(const cv::Mat& imageChannel,
        const std::vector<int>& distances = { 5 },
        const std::vector<float>& angles = { 0.0f },// { 0.0f, CV_PI / 4.0f, CV_PI / 2.0f, 3.0f * CV_PI / 4.0f },
        int levels = 256,
        bool symmetric = false,
        bool normed = false) {
        // graycomatrix expects an 8-bit single-channel image.
        cv::Mat asU8;
        imageChannel.convertTo(asU8, CV_8U);
        return graycomatrix(asU8, distances, angles, levels, symmetric, normed);
    }
    // Compute one texture property over every GLCM and store each value
    // under the key "<prop><1-based index>_channel<channel>".
    void getStockPropFromGLCM(const std::vector<cv::Mat>& glcmMatrices, const std::string& prop) {
        const std::vector<double> values = graycoprops(glcmMatrices, prop);
        size_t index = 0;
        for (const double value : values) {
            ++index;
            props[prop + std::to_string(index) + "_channel" + channel] = value;
        }
    }
    // Read-only access to the cached property map.
    const std::map<std::string, double>& getProps() const {
        return props;
    }
private:
    std::string channel;
    std::map<std::string, double> props;
    void normalizeGLCM(cv::Mat& glcm);
    std::vector<cv::Mat> graycomatrix(const cv::Mat& image,
        const std::vector<int>& distances,
        const std::vector<float>& angles,
        int levels = 256,
        bool symmetric = false,
        bool normed = false);
    std::vector<double> graycoprops(const std::vector<cv::Mat>& glcmMatrices, const std::string& property);
};
// Extracts a Haralick texture feature vector (GLCM-based) from a 3-channel
// BGR image, one feature set per colour channel.
class HaralickFeatureExtractor {
public:
    HaralickFeatureExtractor() {
    }
    ~HaralickFeatureExtractor()
    {
    }
    /// Configure the colour space label set and the Haralick properties to
    /// extract. Each character of colourSpace becomes one channel label.
    /// Safe to call more than once (labels are rebuilt, not appended).
    void Init(const std::string& colourSpace = "RGB",
        const std::vector<std::string>& properties = { "energy","homogeneity","dissimilarity","contrast" })
    {
        props = properties;
        channels.clear(); // guard against duplicate labels on re-Init
        for (char channel : colourSpace) {
            channels.push_back(std::string(1, channel));
        }
    }
    /// Upscale the image so that its smaller dimension equals minSize while
    /// preserving aspect ratio. Images already larger than minSize in both
    /// dimensions (and empty images) are returned unchanged.
    cv::Mat resizeWithMinAspectRatio(const cv::Mat& inputImage, int minSize) {
        const int originalWidth = inputImage.cols;
        const int originalHeight = inputImage.rows;
        // Guard: an empty image cannot be resized and would divide by zero
        // in the aspect-ratio computation below.
        if (originalWidth <= 0 || originalHeight <= 0) return inputImage;
        if ((originalWidth > minSize) && (originalHeight > minSize)) return inputImage;
        const float aspectRatio = static_cast<float>(originalWidth) / static_cast<float>(originalHeight);
        int newWidth, newHeight;
        if (originalWidth > originalHeight) {
            // Height is the smaller side: pin it to minSize.
            newHeight = minSize;
            newWidth = static_cast<int>(minSize * aspectRatio);
        }
        else {
            // Width is the smaller (or equal) side: pin it to minSize.
            newWidth = minSize;
            newHeight = static_cast<int>(minSize / aspectRatio);
        }
        cv::Mat resizedImage;
        cv::resize(inputImage, resizedImage, cv::Size(newWidth, newHeight));
        return resizedImage;
    }
    /// Build the Haralick feature vector for a 3-channel BGR image.
    /// Returns an empty vector for non-3-channel input. Output order is
    /// channel-major (R, G, B), properties in the order configured by Init.
    std::vector<double> createFeatureVector(const cv::Mat& inputImage) {
        // 0. Ensure a minimum working resolution of 200 px on the short side.
        cv::Mat frame = resizeWithMinAspectRatio(inputImage, 200);
        // 1. Only 3-channel (BGR) images are supported.
        if (inputImage.channels() != 3) {
            return std::vector<double>();
        }
        cv::Mat imageArray;
        cv::cvtColor(frame, imageArray, cv::COLOR_BGR2RGB); // BGR -> RGB
        // Split into individual R, G, B planes.
        std::vector<cv::Mat> imageChannels(3);
        cv::split(imageArray, imageChannels);
        // One GLCM accumulator per channel, labelled R/G/B.
        GLCM glcmChannel0("R"); // Red channel
        GLCM glcmChannel1("G"); // Green channel
        GLCM glcmChannel2("B"); // Blue channel
        auto glcmChannel0Matx = glcmChannel0.getGLCM(imageChannels[0]);
        auto glcmChannel1Matx = glcmChannel1.getGLCM(imageChannels[1]);
        auto glcmChannel2Matx = glcmChannel2.getGLCM(imageChannels[2]);
        // Compute every configured property for every channel.
        for (const auto& prop : props) {
            glcmChannel0.getStockPropFromGLCM(glcmChannel0Matx, prop);
            glcmChannel1.getStockPropFromGLCM(glcmChannel1Matx, prop);
            glcmChannel2.getStockPropFromGLCM(glcmChannel2Matx, prop);
        }
        // 2. Flatten into a single vector. Only the first distance/angle
        // feature (key suffix "1_") is used, matching getGLCM's
        // single-distance/single-angle defaults.
        std::vector<double> featureVector;
        const GLCM* glcmByChannel[3] = { &glcmChannel0, &glcmChannel1, &glcmChannel2 };
        const char* channelNames[3] = { "R", "G", "B" };
        for (size_t channelIdx = 0; channelIdx < 3; ++channelIdx) {
            const auto& propsMap = glcmByChannel[channelIdx]->getProps();
            for (const auto& prop : props) {
                const std::string key = prop + "1_channel" + channelNames[channelIdx];
                const auto found = propsMap.find(key);
                if (found != propsMap.end()) {
                    featureVector.push_back(found->second);
                }
            }
        }
        // No explicit frame.release(): cv::Mat releases via RAII.
        return featureVector;
    }
private:
    std::vector<std::string> props;    // property names, in output order
    std::vector<std::string> channels; // one label per colour-space character
};
// Fire-and-smoke detection engine. Combines a YOLOv10 object detector
// (OpenVINO CPU path or an RT GPU path), HSV colour-range checks, Haralick
// texture features fed to an ANNHUB classifier, and temporal stabilisation
// of detections across frames.
class ANSENGINE_API ANSFIRENSMOKE :public ANSODBase
{
public:
// Initialise from an encrypted model package; fills labelMap with the model's class labels.
virtual bool Initialize(std::string licenseKey, ModelConfig modelConfig, const std::string& modelZipFilePath, const std::string& modelZipPassword, std::string& labelMap) override;
// Load the detector model from a password-protected zip archive.
virtual bool LoadModel(const std::string& modelZipFilePath, const std::string& modelZipPassword) override;
// Load the detector model from an already-extracted folder.
virtual bool LoadModelFromFolder(std::string licenseKey, ModelConfig modelConfig, std::string modelName, std::string className,const std::string& modelFolder, std::string& labelMap)override;
// Optimize the loaded model (optionally to FP16); outputs the folder holding the optimized model.
bool OptimizeModel(bool fp16, std::string& optimizedModelFolder);
// Run detection on a single frame.
std::vector<Object> RunInference(const cv::Mat& input);
// Run detection on a single frame, keyed by camera so per-camera temporal state can be kept.
std::vector<Object> RunInference(const cv::Mat& input, const std::string& camera_id);
// Release engine resources.
bool Destroy();
~ANSFIRENSMOKE();
private:
// HSV threshold used by the colour-based checks.
double _hsvThreshold;
// HSV colour ranges considered smoke-like / fire-like.
std::vector<Range> _smoke_colour;
std::vector<Range> _fire_colour;
// Path to the secondary classifier model used by _annhub.
std::string _classifierModelPath;
// CPU (OpenVINO) and GPU object detectors; _engineType selects which is used.
// NOTE(review): type is ANSOYOLOV10OVOD but the include is "ANSYOLOV10OVOD.h" — verify spelling against that header.
ANSOYOLOV10OVOD _cpuObjectDetector;
ANSYOLOV10RTOD _gpuObjectDetector;
EngineType _engineType;
// Temporal filters that keep per-frame smoke/fire masks and report stable regions.
ImageProcessor _smokeImageProcessor;
ImageProcessor _fireImageProcessor;
// Secondary classifier applied to Haralick features of candidate regions.
ANNHUBClassifier _annhub;
// Tuning constants for detection stabilisation and tracking.
const int SMOKE_THRESHOLD_SIZE = 4;
const int HISTORY_SIZE = 30;
const int MIN_MATCHES = 9;
const float IOU_THRESHOLD = 0.4;
const int MINIMUM_STABLE_DURATION = 30;
const float MAX_MOVEMENT = 40.0;
const float ALPHA = 0.7;
cv::Rect _detectedArea;// Area where fire and smoke are detected
cv::Rect _previousDetectedArea;// Detected area from the previous frame
// Frames remaining for which _detectedArea is kept alive without fresh detections.
int _retainDetectedArea{ 0 };
bool _isFireNSmokeDetected{ false };
bool _classifierInitialized{ false };
// Haralick texture feature extractor feeding the classifier.
HaralickFeatureExtractor _extractor;
// Detections with a persistence counter, and a queue of recent frames' detections for stabilisation.
std::vector<std::pair<Object, int>> _persistent_detections;
std::deque<std::vector<Object>> _stablization_queue;
private:
// True when pixels inside the frame matching the HSV range cover at least area_threshold of it.
bool MajorityColourInFrame(const cv::Mat frame, Range range, float area_threshold = 0.8);
bool MajorityColourInFrame(const cv::Mat frame, std::vector<Range> ranges, float area_threshold = 0.8);
// Colour check restricted to a bounding box within the frame.
bool DetectFireNSmokeColourInFrame(const cv::Mat& frame, const cv::Rect bBox, const std::vector<Range>& ranges, float area_threshold);
// Render the given rectangles as filled regions in a binary image of the given size.
cv::Mat CreateBinaryImageWithRects(int width, int height, const std::vector<cv::Rect>& rects);
cv::Point2f CalculateCentroid(const std::vector<cv::Rect>& group);
cv::Point2f CalculateGroupCenter(const std::vector<cv::Rect>& group);
std::vector<cv::Rect> CreateCoveringMaskRects(const cv::Mat& image, const std::vector<cv::Rect>& maskRects, float maxDistance);
// Post-process raw detections for one camera. NOTE(review): "Procecss" typo is kept — renaming would break the out-of-view definition.
std::vector<Object> ProcecssDetection(const std::vector<Object>& detectedObjects, const std::string& camera_id, int threshold = 8);
// Smallest square box (at least minSize per side) covering all detections.
cv::Rect CreateMinimumSquareBoundingBox(const std::vector<Object>& detectedObjects, int minSize = 640);
bool IsOverlap(const cv::Rect& target, const std::vector<Object>& rectArray);
bool IsFireDetected(const cv::Mat image, const cv::Rect bBox);
bool IsSmokeDetected(const cv::Mat image, const cv::Rect bBox);
// Intersection-over-union of two rectangles.
float calculateIOU(const cv::Rect& box_a, const cv::Rect& box_b);
// Suppress flickering detections using the per-camera stabilisation queue.
std::vector<Object> StablizeDetection(const std::vector<Object>& detectedObjects, const std::string& camera_id);
};
}
#endif