Fix mutex lock issues: guard model-loading paths with ModelLoadingGuard and replace per-inference locked validation with PreInferenceCheck so _mutex is not held across inference

This commit is contained in:
2026-04-13 19:48:32 +10:00
parent 56a8f09adf
commit 844d7396b2
30 changed files with 445 additions and 575 deletions

View File

@@ -58,6 +58,7 @@ namespace ANSCENTER
}
bool TENSORRTOD::LoadModel(const std::string& modelZipFilePath, const std::string& modelZipPassword) {
std::lock_guard<std::recursive_mutex> lock(_mutex);
ModelLoadingGuard mlg(_modelLoading);
try {
_isFixedBatch = false;
bool result = ANSODBase::LoadModel(modelZipFilePath, modelZipPassword);
@@ -151,6 +152,7 @@ namespace ANSCENTER
}
bool TENSORRTOD::LoadModelFromFolder(std::string licenseKey, ModelConfig modelConfig, std::string modelName, std::string className, const std::string& modelFolder, std::string& labelMap) {
std::lock_guard<std::recursive_mutex> lock(_mutex);
ModelLoadingGuard mlg(_modelLoading);
try
{
_isFixedBatch = false;
@@ -256,6 +258,7 @@ namespace ANSCENTER
}
bool TENSORRTOD::Initialize(std::string licenseKey, ModelConfig modelConfig, const std::string& modelZipFilePath, const std::string& modelZipPassword, std::string& labelMap) {
std::lock_guard<std::recursive_mutex> lock(_mutex);
ModelLoadingGuard mlg(_modelLoading);
try {
const bool engineAlreadyLoaded = _modelLoadValid && _isInitialized && m_trtEngine != nullptr;
_modelLoadValid = false;
@@ -364,31 +367,7 @@ namespace ANSCENTER
{
// Validate state under a brief lock — do NOT hold across DetectObjects so that
// the Engine pool can run concurrent inferences on different GPU slots.
{
std::lock_guard<std::recursive_mutex> lock(_mutex);
if (!_modelLoadValid) {
_logger.LogError("TENSORRTOD::RunInference",
"Cannot load TensorRT model", __FILE__, __LINE__);
return {};
}
if (!_licenseValid) {
_logger.LogError("TENSORRTOD::RunInference",
"Invalid license", __FILE__, __LINE__);
return {};
}
if (!_isInitialized) {
_logger.LogError("TENSORRTOD::RunInference",
"Model not initialized", __FILE__, __LINE__);
return {};
}
if (inputImgBGR.empty() || inputImgBGR.cols < 10 || inputImgBGR.rows < 10) {
return {};
}
} // mutex released here — DetectObjects manages its own locking per phase
if (!PreInferenceCheck("TENSORRTOD::RunInference")) return {};
try {
return DetectObjects(inputImgBGR, camera_id);
@@ -401,34 +380,7 @@ namespace ANSCENTER
std::vector<std::vector<Object>> TENSORRTOD::RunInferencesBatch(const std::vector<cv::Mat>& inputs, const std::string& camera_id) {
// Validate state under a brief lock — do NOT hold across DetectObjectsBatch so that
// the Engine pool can serve concurrent batch requests on different GPU slots.
{
std::lock_guard<std::recursive_mutex> lock(_mutex);
// Validate model, license, and initialization
if (!_modelLoadValid) {
this->_logger.LogFatal("TENSORRTOD::RunInferenceBatch",
"Cannot load the TensorRT model. Please check if it exists", __FILE__, __LINE__);
return {};
}
if (!_licenseValid) {
this->_logger.LogFatal("TENSORRTOD::RunInferenceBatch",
"Runtime license is not valid or expired. Please contact ANSCENTER", __FILE__, __LINE__);
return {};
}
if (!_isInitialized) {
this->_logger.LogFatal("TENSORRTOD::RunInferenceBatch",
"Initialisation is not valid or expired. Please contact ANSCENTER", __FILE__, __LINE__);
return {};
}
// Validate inputs
if (inputs.empty()) {
this->_logger.LogFatal("TENSORRTOD::RunInferenceBatch",
"Input images vector is empty", __FILE__, __LINE__);
return {};
}
} // mutex released here — DetectObjectsBatch manages its own GPU dispatch
if (!PreInferenceCheck("TENSORRTOD::RunInferencesBatch")) return {};
try {
if (_isFixedBatch) return ANSODBase::RunInferencesBatch(inputs, camera_id);