Fix ALPR batch inference and memory leak

This commit is contained in:
2026-04-15 09:23:05 +10:00
parent 7778f8c214
commit b05c49ad93
9 changed files with 686 additions and 83 deletions

View File

@@ -143,7 +143,15 @@
"Bash(cmd.exe //c \"cd /d C:\\\\\\\\Projects\\\\\\\\CLionProjects\\\\\\\\ANSCORE\\\\\\\\cmake-build-release && call \\\\\"C:\\\\\\\\Program Files\\\\\\\\Microsoft Visual Studio\\\\\\\\2022\\\\\\\\Community\\\\\\\\VC\\\\\\\\Auxiliary\\\\\\\\Build\\\\\\\\vcvars64.bat\\\\\" && cmake --build . --target ANSLPR-UnitTest\")", "Bash(cmd.exe //c \"cd /d C:\\\\\\\\Projects\\\\\\\\CLionProjects\\\\\\\\ANSCORE\\\\\\\\cmake-build-release && call \\\\\"C:\\\\\\\\Program Files\\\\\\\\Microsoft Visual Studio\\\\\\\\2022\\\\\\\\Community\\\\\\\\VC\\\\\\\\Auxiliary\\\\\\\\Build\\\\\\\\vcvars64.bat\\\\\" && cmake --build . --target ANSLPR-UnitTest\")",
"Bash(cmd.exe //c \"C:\\\\\\\\Projects\\\\\\\\CLionProjects\\\\\\\\ANSCORE\\\\\\\\cmake-build-release\\\\\\\\__build_check.bat\")", "Bash(cmd.exe //c \"C:\\\\\\\\Projects\\\\\\\\CLionProjects\\\\\\\\ANSCORE\\\\\\\\cmake-build-release\\\\\\\\__build_check.bat\")",
"Bash(cmd.exe //c \"tasklist\")", "Bash(cmd.exe //c \"tasklist\")",
"Bash(cmd.exe //c \"taskkill /F /PID 45704\")" "Bash(cmd.exe //c \"taskkill /F /PID 45704\")",
"Bash(ls CMakeLists.txt cmake-build-* build)",
"Bash(cmake --build . --target ANSOCR)",
"Bash(cmd.exe /c '\"C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\VC\\\\Auxiliary\\\\Build\\\\vcvars64.bat\" >nul 2>&1 && cd cmake-build-release && cmake --build . --target ANSOCR 2>&1')",
"Bash(cmd.exe /c 'call \"C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\VC\\\\Auxiliary\\\\Build\\\\vcvars64.bat\" && cd cmake-build-release && cmake --build . --target ANSOCR 2>&1')",
"Bash(powershell -Command '& { & '\\\\''C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\\\Tools\\\\Launch-VsDevShell.ps1'\\\\'' -Arch amd64 -HostArch amd64 > $null 2>&1; Set-Location '\\\\''C:\\\\Projects\\\\CLionProjects\\\\ANSCORE\\\\cmake-build-release'\\\\''; cmake --build . --target ANSOCR 2>&1 | Select-Object -Last 60 }')",
"Bash(powershell -Command '& { & '\\\\''C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\\\Tools\\\\Launch-VsDevShell.ps1'\\\\'' -Arch amd64 -HostArch amd64 > $null 2>&1; Set-Location '\\\\''C:\\\\Projects\\\\CLionProjects\\\\ANSCORE\\\\cmake-build-release'\\\\''; cmake --build . --target ANSLPR 2>&1 | Select-Object -Last 40 }')",
"Bash(powershell -Command '& { & '\\\\''C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\\\Tools\\\\Launch-VsDevShell.ps1'\\\\'' -Arch amd64 -HostArch amd64 > $null 2>&1; Set-Location '\\\\''C:\\\\Projects\\\\CLionProjects\\\\ANSCORE\\\\cmake-build-release'\\\\''; cmake --build . --target ANSLPR 2>&1 | Select-Object -Last 30 }')",
"Bash(powershell -Command '& { & '\\\\''C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\\\Tools\\\\Launch-VsDevShell.ps1'\\\\'' -Arch amd64 -HostArch amd64 > $null 2>&1; dumpbin /exports '\\\\''C:\\\\Projects\\\\CLionProjects\\\\ANSCORE\\\\cmake-build-release\\\\bin\\\\ANSLPR.dll'\\\\'' 2>&1 | Select-String '\\\\''RunInferencesBatch'\\\\'' }')"
] ]
} }
} }

View File

@@ -304,11 +304,18 @@ namespace ANSCENTER {
p.framesSinceLastSeen++; p.framesSinceLastSeen++;
} }
// Periodic pruning: remove stale entries // Periodic pruning: remove stale entries.
static thread_local int pruneCounterById = 0; // NOTE: previously used `static thread_local int pruneCounterById`.
pruneCounterById++; // LabVIEW dispatches calls across a worker-thread pool, so the
if (pruneCounterById >= 30 && plates.size() > 20) { // thread-local counter fragmented across threads and pruning
pruneCounterById = 0; // effectively stopped firing — `trackedPlatesById[cameraId]` then
// grew unbounded (one entry per frame when trackIds never repeat),
// which manifested as a slow LabVIEW-side memory leak. The counter
// is now a plain instance member guarded by `_mutex` (taken above),
// so every 30th call across the whole engine triggers a prune pass.
_pruneCounterById++;
if (_pruneCounterById >= 30 && plates.size() > 20) {
_pruneCounterById = 0;
int staleThreshold = maxFrames * 3; int staleThreshold = maxFrames * 3;
for (auto it = plates.begin(); it != plates.end(); ) { for (auto it = plates.begin(); it != plates.end(); ) {
if (it->second.framesSinceLastSeen > staleThreshold) { if (it->second.framesSinceLastSeen > staleThreshold) {
@@ -521,6 +528,36 @@ namespace ANSCENTER {
ANSALPR::~ANSALPR(){}; ANSALPR::~ANSALPR(){};
bool ANSALPR::Destroy() { return true; }; bool ANSALPR::Destroy() { return true; };
// Fallback batch implementation: iterates RunInference over each vehicle
// crop one at a time. Subclasses are expected to override this with a
// genuinely batched path (see ANSALPR_OD::RunInferencesBatch and
// ANSALPR_OCR::RunInferencesBatch) that issues a single LP-detect and a
// single OCR call per frame instead of N. This fallback still honours the
// "plate bboxes are returned in full-frame coordinates" contract, so a
// subclass that never overrides it produces valid output.
std::vector<Object> ANSALPR::RunInferencesBatch(
    const cv::Mat& input,
    const std::vector<cv::Rect>& vehicleBoxes,
    const std::string& cameraId)
{
    std::vector<Object> results;
    if (vehicleBoxes.empty() || input.empty()) return results;

    // Clip every requested ROI against the frame before cropping.
    const cv::Rect bounds(0, 0, input.cols, input.rows);
    results.reserve(vehicleBoxes.size());

    for (const auto& vehicleBox : vehicleBoxes) {
        const cv::Rect clipped = vehicleBox & bounds;
        if (clipped.width <= 0 || clipped.height <= 0) continue;

        const cv::Mat roi = input(clipped);
        // Per-crop inference; detections come back in crop-local
        // coordinates and are shifted into frame coordinates here.
        for (auto& det : RunInference(roi, cameraId)) {
            det.box.x += clipped.x;
            det.box.y += clipped.y;
            results.push_back(std::move(det));
        }
    }
    return results;
}
std::vector<cv::Rect> ANSCENTER::ANSALPR::GetBoundingBoxes(const std::string& strBBoxes) { std::vector<cv::Rect> ANSCENTER::ANSALPR::GetBoundingBoxes(const std::string& strBBoxes) {
std::vector<cv::Rect> bBoxes; std::vector<cv::Rect> bBoxes;
bBoxes.clear(); bBoxes.clear();

View File

@@ -50,6 +50,7 @@ namespace ANSCENTER
}; };
// cameraId → (trackId → tracked plate) // cameraId → (trackId → tracked plate)
std::unordered_map<std::string, std::unordered_map<int, TrackedPlateById>> trackedPlatesById; std::unordered_map<std::string, std::unordered_map<int, TrackedPlateById>> trackedPlatesById;
int _pruneCounterById = 0; // counts checkPlateByTrackId calls for periodic pruning
public: public:
void Init(int framesToStore = MAX_ALPR_FRAME); void Init(int framesToStore = MAX_ALPR_FRAME);
@@ -100,6 +101,28 @@ namespace ANSCENTER
[[nodiscard]] virtual bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult) = 0; [[nodiscard]] virtual bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult) = 0;
[[nodiscard]] virtual bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult,const std::string & cameraId) = 0; [[nodiscard]] virtual bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult,const std::string & cameraId) = 0;
[[nodiscard]] virtual std::vector<Object> RunInference(const cv::Mat& input, const std::string &cameraId) = 0; [[nodiscard]] virtual std::vector<Object> RunInference(const cv::Mat& input, const std::string &cameraId) = 0;
/// Stateless batch inference for pipeline mode.
/// For each vehicle ROI in `vehicleBoxes` (in FRAME coordinates), crop
/// the vehicle, run LP detection and text recognition, and return
/// detected plates in FULL-FRAME coordinates.
///
/// Tracker, voting, spatial dedup, and per-camera accumulating state
/// are all bypassed — this is the fast-path for callers that already
/// have precise vehicle bboxes and want raw per-frame results with no
/// cross-frame memory. The implementation issues a single batched
/// `_lpDetector->RunInferencesBatch` call for detection and a single
/// batched recognizer call for OCR, so the ORT/TRT allocator sees
/// exactly one shape per frame regardless of how many vehicles the
/// caller passes.
///
/// Default implementation falls back to calling `RunInference` in a
/// loop per crop so older subclasses keep compiling.
[[nodiscard]] virtual std::vector<Object> RunInferencesBatch(
const cv::Mat& input,
const std::vector<cv::Rect>& vehicleBoxes,
const std::string& cameraId);
[[nodiscard]] std::string VectorDetectionToJsonString(const std::vector<Object>& dets); [[nodiscard]] std::string VectorDetectionToJsonString(const std::vector<Object>& dets);
void SetPlateFormats(const std::vector<std::string>& formats); void SetPlateFormats(const std::vector<std::string>& formats);
void SetPlateFormat(const std::string& format); void SetPlateFormat(const std::string& format);
@@ -167,6 +190,17 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV(ANSCENTER::ANSALPR
extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_CPP(ANSCENTER::ANSALPR** Handle, cv::Mat** cvImage, const char* cameraId, int getJpegString, int jpegImageSize,std::string& detectionResult, std::string& imageStr); extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_CPP(ANSCENTER::ANSALPR** Handle, cv::Mat** cvImage, const char* cameraId, int getJpegString, int jpegImageSize,std::string& detectionResult, std::string& imageStr);
extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV(ANSCENTER::ANSALPR** Handle, cv::Mat** cvImage, const char* cameraId, int maxImageSize,const char* strBboxes, LStrHandle detectionResult); extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV(ANSCENTER::ANSALPR** Handle, cv::Mat** cvImage, const char* cameraId, int maxImageSize,const char* strBboxes, LStrHandle detectionResult);
// Dedicated pipeline-mode batch inference:
// - always runs with tracker OFF, voting OFF, spatial dedup OFF
// - issues ONE batched LP-detect call and ONE batched recognizer call
// across every vehicle in `strBboxes`, instead of looping per crop
// - returns plate bboxes in the caller's resized coordinate space (same
// semantics as ANSALPR_RunInferencesComplete_LV)
// Use this in LabVIEW whenever the caller already has vehicle bboxes and
// wants raw per-frame results. Fixes the cross-frame allocator churn that
// causes ANSALPR_OCR memory growth under the per-bbox loop path.
extern "C" ANSLPR_API int ANSALPR_RunInferencesBatch_LV(ANSCENTER::ANSALPR** Handle, cv::Mat** cvImage, const char* cameraId, int maxImageSize, const char* strBboxes, LStrHandle detectionResult);
// Get/Set format // Get/Set format
extern "C" ANSLPR_API int ANSALPR_SetFormat(ANSCENTER::ANSALPR** Handle, const char* format); extern "C" ANSLPR_API int ANSALPR_SetFormat(ANSCENTER::ANSALPR** Handle, const char* format);
extern "C" ANSLPR_API int ANSALPR_SetFormats(ANSCENTER::ANSALPR** Handle, const char* formats);// comma separated formats extern "C" ANSLPR_API int ANSALPR_SetFormats(ANSCENTER::ANSALPR** Handle, const char* formats);// comma separated formats

View File

@@ -953,6 +953,163 @@ namespace ANSCENTER
return {}; return {};
} }
// ── Stateless batched inference for pipeline mode ───────────────────
// Caller supplies a full frame + a list of vehicle ROIs in FRAME
// coordinates. We run ONE LP-detect call across all vehicle crops and
// ONE text-recognizer call across every resulting plate (with the same
// 2-row split heuristic as ANSALPR_OCR::RunInference), and NO tracker,
// voting, spatial dedup, or per-camera accumulating state. This is the
// drop-in replacement for the per-bbox loop inside
// ANSALPR_RunInferencesComplete_LV (pipeline mode) and is exported as
// ANSALPR_RunInferencesBatch_LV / _V2 in dllmain.cpp. Calling this on
// ANSALPR_OCR avoids the ORT/TRT per-shape allocator churn that
// causes unbounded memory growth when the loop version is used.
//
// @param input        full BGR (or grayscale) frame
// @param vehicleBoxes vehicle ROIs in FRAME coordinates
// @param cameraId     camera identifier passed through to the detectors
// @return detected plates (className = raw OCR text, box in FRAME
//         coordinates); empty vector on invalid state, bad input, or
//         any exception (which is logged, never propagated)
std::vector<Object> ANSALPR_OCR::RunInferencesBatch(
    const cv::Mat& input,
    const std::vector<cv::Rect>& vehicleBoxes,
    const std::string& cameraId)
{
    // Guard clauses: license, init, frame sanity, engine pointers.
    if (!_licenseValid) {
        this->_logger.LogError("ANSALPR_OCR::RunInferencesBatch", "Invalid license", __FILE__, __LINE__);
        return {};
    }
    if (!_isInitialized) {
        this->_logger.LogError("ANSALPR_OCR::RunInferencesBatch", "Model is not initialized", __FILE__, __LINE__);
        return {};
    }
    if (input.empty() || input.cols < 5 || input.rows < 5) return {};
    if (!_lpDetector) {
        this->_logger.LogFatal("ANSALPR_OCR::RunInferencesBatch", "_lpDetector is null", __FILE__, __LINE__);
        return {};
    }
    if (!_ocrEngine) {
        this->_logger.LogFatal("ANSALPR_OCR::RunInferencesBatch", "_ocrEngine is null", __FILE__, __LINE__);
        return {};
    }
    if (vehicleBoxes.empty()) return {};
    try {
        // Promote grayscale input to BGR once (matches RunInference).
        // `frame` aliases either the converted copy or the caller's Mat.
        cv::Mat localFrame;
        if (input.channels() == 1) {
            cv::cvtColor(input, localFrame, cv::COLOR_GRAY2BGR);
        }
        const cv::Mat& frame = (input.channels() == 1) ? localFrame : input;
        // ── 1. Clamp and crop vehicle ROIs ────────────────────────
        // Degenerate ROIs (≤5 px after clamping) are dropped; `clamped`
        // keeps the frame-space rect for each surviving crop so the
        // final plate boxes can be shifted back to frame coordinates.
        const cv::Rect frameRect(0, 0, frame.cols, frame.rows);
        std::vector<cv::Mat> vehicleCrops;
        std::vector<cv::Rect> clamped;
        vehicleCrops.reserve(vehicleBoxes.size());
        clamped.reserve(vehicleBoxes.size());
        for (const auto& r : vehicleBoxes) {
            cv::Rect c = r & frameRect;
            if (c.width <= 5 || c.height <= 5) continue;
            vehicleCrops.emplace_back(frame(c));
            clamped.push_back(c);
        }
        if (vehicleCrops.empty()) return {};
        // ── 2. ONE batched LP detection call across all vehicles ──
        std::vector<std::vector<Object>> lpBatch =
            _lpDetector->RunInferencesBatch(vehicleCrops, cameraId);
        // ── 3. Flatten plates, splitting 2-row plates into top/bot ─
        // Same aspect-ratio heuristic as ANSALPR_OCR::RunInference:
        // narrow plates (aspect < 2.0) are split horizontally into two
        // recognizer crops, wide plates stay as one. Each PlateMeta's
        // `cropIndices` records which entries of `allCrops` belong to
        // that plate, so the per-crop OCR outputs can be stitched back
        // into one combined string per plate in step 5.
        struct PlateMeta {
            size_t vehIdx; // index into vehicleCrops / clamped
            Object lpObj; // LP detection in VEHICLE-local coords
            cv::Mat plateROI; // full plate crop (kept for colour)
            std::vector<size_t> cropIndices; // indices into allCrops below
        };
        std::vector<cv::Mat> allCrops;
        std::vector<PlateMeta> metas;
        allCrops.reserve(lpBatch.size() * 2);
        metas.reserve(lpBatch.size());
        for (size_t v = 0; v < lpBatch.size() && v < vehicleCrops.size(); ++v) {
            const cv::Mat& veh = vehicleCrops[v];
            const cv::Rect vehRect(0, 0, veh.cols, veh.rows);
            for (const auto& lp : lpBatch[v]) {
                // LP boxes come back in vehicle-local coordinates;
                // clamp to the crop before taking the ROI.
                cv::Rect lpBox = lp.box & vehRect;
                if (lpBox.width <= 0 || lpBox.height <= 0) continue;
                cv::Mat plateROI = veh(lpBox);
                PlateMeta pm;
                pm.vehIdx = v;
                pm.lpObj = lp;
                pm.plateROI = plateROI;
                const float aspect =
                    static_cast<float>(plateROI.cols) /
                    std::max(1, plateROI.rows);
                if (aspect < 2.0f && plateROI.rows >= 24) {
                    // Likely a 2-row plate: OCR top and bottom halves
                    // separately (rows >= 24 so each half stays legible).
                    const int halfH = plateROI.rows / 2;
                    pm.cropIndices.push_back(allCrops.size());
                    allCrops.push_back(plateROI(cv::Rect(0, 0, plateROI.cols, halfH)));
                    pm.cropIndices.push_back(allCrops.size());
                    allCrops.push_back(plateROI(cv::Rect(0, halfH, plateROI.cols, plateROI.rows - halfH)));
                } else {
                    pm.cropIndices.push_back(allCrops.size());
                    allCrops.push_back(plateROI);
                }
                metas.push_back(std::move(pm));
            }
        }
        if (allCrops.empty()) return {};
        // ── 4. ONE batched recognizer call across every plate ────
        // NOTE(review): assumes the recognizer buckets crops by width
        // internally (1-2 ORT Runs regardless of plate count) — confirm
        // against the RecognizeTextBatch implementation.
        auto ocrResults = _ocrEngine->RecognizeTextBatch(allCrops);
        // ── 5. Assemble — NO tracker, NO voting, NO dedup ────────
        // Join the per-crop OCR lines with a space (top row first for
        // split plates, since its crop index was pushed first).
        std::vector<Object> output;
        output.reserve(metas.size());
        for (const auto& pm : metas) {
            std::string combined;
            for (size_t c : pm.cropIndices) {
                if (c >= ocrResults.size()) continue;
                const std::string& line = ocrResults[c].first;
                if (line.empty()) continue;
                if (!combined.empty()) combined += " ";
                combined += line;
            }
            if (combined.empty()) continue;
            Object out = pm.lpObj;
            out.className = combined; // raw OCR — no ALPRChecker
            out.cameraId = cameraId;
            // Vehicle-local → frame coordinates.
            out.box.x += clamped[pm.vehIdx].x;
            out.box.y += clamped[pm.vehIdx].y;
            // Colour lookup — text-keyed cache, bounded.
            std::string colour = DetectLPColourCached(
                pm.plateROI, cameraId, out.className);
            if (!colour.empty()) out.extraInfo = "color:" + colour;
            output.push_back(std::move(out));
        }
        return output;
    }
    catch (const cv::Exception& e) {
        this->_logger.LogFatal("ANSALPR_OCR::RunInferencesBatch",
            std::string("OpenCV Exception: ") + e.what(), __FILE__, __LINE__);
    }
    catch (const std::exception& e) {
        this->_logger.LogFatal("ANSALPR_OCR::RunInferencesBatch",
            e.what(), __FILE__, __LINE__);
    }
    catch (...) {
        this->_logger.LogFatal("ANSALPR_OCR::RunInferencesBatch",
            "Unknown exception occurred", __FILE__, __LINE__);
    }
    // Reached only via the catch handlers above.
    return {};
}
// ── Inference wrappers ─────────────────────────────────────────────── // ── Inference wrappers ───────────────────────────────────────────────
bool ANSALPR_OCR::Inference(const cv::Mat& input, std::string& lprResult) { bool ANSALPR_OCR::Inference(const cv::Mat& input, std::string& lprResult) {
if (input.empty()) return false; if (input.empty()) return false;

View File

@@ -135,6 +135,10 @@ namespace ANSCENTER
[[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect>& Bbox, std::string& lprResult) override; [[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect>& Bbox, std::string& lprResult) override;
[[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect>& Bbox, std::string& lprResult, const std::string& cameraId) override; [[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect>& Bbox, std::string& lprResult, const std::string& cameraId) override;
[[nodiscard]] std::vector<Object> RunInference(const cv::Mat& input, const std::string& cameraId) override; [[nodiscard]] std::vector<Object> RunInference(const cv::Mat& input, const std::string& cameraId) override;
[[nodiscard]] std::vector<Object> RunInferencesBatch(
const cv::Mat& input,
const std::vector<cv::Rect>& vehicleBoxes,
const std::string& cameraId) override;
[[nodiscard]] bool Destroy() override; [[nodiscard]] bool Destroy() override;
/// Propagate country to inner OCR engine so ALPR post-processing /// Propagate country to inner OCR engine so ALPR post-processing

View File

@@ -2617,6 +2617,124 @@ namespace ANSCENTER {
return {}; return {};
} }
// ── Stateless batched inference for pipeline mode ───────────────────
// Caller supplies a full frame + a list of vehicle ROIs in FRAME
// coordinates. We run ONE LP-detect call across all vehicle crops and
// ONE char-OCR batch across every resulting plate, with NO tracker,
// voting, spatial dedup, or per-camera accumulating state. This is the
// drop-in replacement for the per-bbox loop inside
// ANSALPR_RunInferencesComplete_LV (pipeline mode) and is exported as
// ANSALPR_RunInferencesBatch_LV / _V2 in dllmain.cpp.
//
// @param input        full frame
// @param vehicleBoxes vehicle ROIs in FRAME coordinates
// @param cameraId     camera identifier passed through to the detectors
// @return detected plates (className = raw OCR text, box in FRAME
//         coordinates); empty vector on invalid state, bad input, OCR
//         batch-size mismatch, or any exception (logged, not rethrown)
std::vector<Object> ANSALPR_OD::RunInferencesBatch(
    const cv::Mat& input,
    const std::vector<cv::Rect>& vehicleBoxes,
    const std::string& cameraId)
{
    // Guard clauses: license/validity/init state, frame sanity,
    // detector pointers, and a non-empty ROI list.
    if (!_licenseValid || !valid || !_isInitialized) {
        this->_logger.LogWarn("ANSALPR_OD::RunInferencesBatch",
            "Invalid state: license=" + std::to_string(_licenseValid) +
            " valid=" + std::to_string(valid) +
            " init=" + std::to_string(_isInitialized), __FILE__, __LINE__);
        return {};
    }
    if (input.empty() || input.cols < 5 || input.rows < 5) return {};
    if (!this->_lpDetector || !this->_ocrDetector) {
        this->_logger.LogFatal("ANSALPR_OD::RunInferencesBatch",
            "Detector instances are null", __FILE__, __LINE__);
        return {};
    }
    if (vehicleBoxes.empty()) return {};
    try {
        // ── 1. Clamp and crop vehicle ROIs ────────────────────────
        // Degenerate ROIs (≤5 px after clamping) are dropped; `clamped`
        // keeps each surviving rect in frame space so the plate boxes
        // can be shifted back to frame coordinates in step 5.
        const cv::Rect frameRect(0, 0, input.cols, input.rows);
        std::vector<cv::Mat> vehicleCrops;
        std::vector<cv::Rect> clamped;
        vehicleCrops.reserve(vehicleBoxes.size());
        clamped.reserve(vehicleBoxes.size());
        for (const auto& r : vehicleBoxes) {
            cv::Rect c = r & frameRect;
            if (c.width <= 5 || c.height <= 5) continue;
            vehicleCrops.emplace_back(input(c));
            clamped.push_back(c);
        }
        if (vehicleCrops.empty()) return {};
        // ── 2. ONE batched LP detection call across all vehicles ──
        // NOTE(review): assumes the underlying detector path is a
        // fixed-shape YOLO, so this is a single ORT/TRT call regardless
        // of how many vehicles the caller passed — confirm against
        // ANSODBase::RunInferencesBatch.
        std::vector<std::vector<Object>> lpBatch =
            _lpDetector->RunInferencesBatch(vehicleCrops, cameraId);
        // ── 3. Flatten detected plates, keeping back-reference ───
        // metas[i] records which vehicle crop alignedLPRBatch[i] came
        // from, plus the LP detection in vehicle-local coordinates.
        struct PlateMeta {
            size_t vehIdx; // index into vehicleCrops / clamped
            Object lpObj; // LP detection in VEHICLE-local coords
        };
        std::vector<cv::Mat> alignedLPRBatch;
        std::vector<PlateMeta> metas;
        alignedLPRBatch.reserve(lpBatch.size() * 2);
        metas.reserve(lpBatch.size() * 2);
        for (size_t v = 0; v < lpBatch.size() && v < vehicleCrops.size(); ++v) {
            const cv::Mat& veh = vehicleCrops[v];
            const cv::Rect vehRect(0, 0, veh.cols, veh.rows);
            for (const auto& lp : lpBatch[v]) {
                cv::Rect lpBox = lp.box & vehRect;
                if (lpBox.width <= 0 || lpBox.height <= 0) continue;
                alignedLPRBatch.emplace_back(veh(lpBox));
                metas.push_back({ v, lp });
            }
        }
        if (alignedLPRBatch.empty()) return {};
        // ── 4. ONE batched char-OCR call across every plate ──────
        // A size mismatch means the OCR batch cannot be aligned with
        // `metas`, so the whole frame's results are discarded (warned).
        std::vector<std::string> ocrTextBatch =
            DetectLicensePlateStringBatch(alignedLPRBatch, cameraId);
        if (ocrTextBatch.size() != alignedLPRBatch.size()) {
            this->_logger.LogWarn("ANSALPR_OD::RunInferencesBatch",
                "Char OCR batch size mismatch", __FILE__, __LINE__);
            return {};
        }
        // ── 5. Assemble — NO tracker, NO voting, NO dedup ────────
        std::vector<Object> output;
        output.reserve(alignedLPRBatch.size());
        for (size_t i = 0; i < alignedLPRBatch.size(); ++i) {
            const std::string& text = ocrTextBatch[i];
            if (text.empty()) continue; // plate with no readable text
            Object out = metas[i].lpObj;
            out.className = text; // raw OCR — no ALPRChecker
            out.cameraId = cameraId;
            // Vehicle-local → frame coordinates.
            out.box.x += clamped[metas[i].vehIdx].x;
            out.box.y += clamped[metas[i].vehIdx].y;
            // Colour lookup — uses text-keyed cache, bounded by
            // COLOUR_CACHE_MAX_SIZE, no per-frame growth.
            std::string colour = DetectLPColourCached(
                alignedLPRBatch[i], cameraId, out.className);
            if (!colour.empty()) out.extraInfo = "color:" + colour;
            output.push_back(std::move(out));
        }
        return output;
    }
    catch (const cv::Exception& e) {
        this->_logger.LogFatal("ANSALPR_OD::RunInferencesBatch",
            std::string("OpenCV Exception: ") + e.what(), __FILE__, __LINE__);
    }
    catch (const std::exception& e) {
        this->_logger.LogFatal("ANSALPR_OD::RunInferencesBatch",
            e.what(), __FILE__, __LINE__);
    }
    catch (...) {
        this->_logger.LogFatal("ANSALPR_OD::RunInferencesBatch",
            "Unknown exception occurred", __FILE__, __LINE__);
    }
    // Reached only via the catch handlers above.
    return {};
}
std::vector<std::string> ANSALPR_OD::DetectLPColourDetectorBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId) { std::vector<std::string> ANSALPR_OD::DetectLPColourDetectorBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId) {
// Early validation - no lock needed for immutable config // Early validation - no lock needed for immutable config
if (_lpColourModelConfig.detectionScoreThreshold <= 0.0f || !_lpColourDetector) { if (_lpColourModelConfig.detectionScoreThreshold <= 0.0f || !_lpColourDetector) {

View File

@@ -234,6 +234,10 @@ namespace ANSCENTER
[[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult, const std::string & cameraId) override; [[nodiscard]] bool Inference(const cv::Mat& input, const std::vector<cv::Rect> & Bbox, std::string& lprResult, const std::string & cameraId) override;
[[nodiscard]] std::vector<Object> RunInferenceSingleFrame(const cv::Mat& input, const std::string& cameraId); [[nodiscard]] std::vector<Object> RunInferenceSingleFrame(const cv::Mat& input, const std::string& cameraId);
[[nodiscard]] std::vector<Object> RunInference(const cv::Mat& input, const std::string& cameraId) override; [[nodiscard]] std::vector<Object> RunInference(const cv::Mat& input, const std::string& cameraId) override;
[[nodiscard]] std::vector<Object> RunInferencesBatch(
const cv::Mat& input,
const std::vector<cv::Rect>& vehicleBoxes,
const std::string& cameraId) override;
[[nodiscard]] std::vector<std::string> DetectLicensePlateStringBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId); [[nodiscard]] std::vector<std::string> DetectLicensePlateStringBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId);
[[nodiscard]] std::vector<std::string> DetectLPColourDetectorBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId); [[nodiscard]] std::vector<std::string> DetectLPColourDetectorBatch(const std::vector<cv::Mat>& lprROIs, const std::string& cameraId);
[[nodiscard]] std::string ProcessSingleOCRResult(const std::vector<Object>& ocrOutput); [[nodiscard]] std::string ProcessSingleOCRResult(const std::vector<Object>& ocrOutput);

View File

@@ -104,6 +104,19 @@ public:
ALPRHandleGuard& operator=(const ALPRHandleGuard&) = delete; ALPRHandleGuard& operator=(const ALPRHandleGuard&) = delete;
}; };
// RAII guard — sets the per-thread current GPU frame pointer and always
// clears it on scope exit, even if the wrapped inference call throws.
// Without this, a throwing RunInference leaves tl_currentGpuFrame pointing
// at a GpuFrameData that may be freed before the next call on this thread,
// causing use-after-free or stale NV12 data on subsequent frames.
class GpuFrameScope {
public:
explicit GpuFrameScope(GpuFrameData* f) { tl_currentGpuFrame() = f; }
~GpuFrameScope() { tl_currentGpuFrame() = nullptr; }
GpuFrameScope(const GpuFrameScope&) = delete;
GpuFrameScope& operator=(const GpuFrameScope&) = delete;
};
BOOL APIENTRY DllMain( HMODULE hModule, BOOL APIENTRY DllMain( HMODULE hModule,
DWORD ul_reason_for_call, DWORD ul_reason_for_call,
LPVOID lpReserved LPVOID lpReserved
@@ -465,9 +478,6 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV(
try { try {
const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref
// Set thread-local NV12 frame data for fast-path inference
// Cleared after first RunInference to prevent NV12 mismatch on cropped sub-images (OCR, etc.)
tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
int originalWidth = localImage.cols; int originalWidth = localImage.cols;
int originalHeight = localImage.rows; int originalHeight = localImage.rows;
@@ -476,8 +486,13 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV(
return -2; return -2;
} }
std::vector<ANSCENTER::Object> outputs = engine->RunInference(localImage, cameraId); std::vector<ANSCENTER::Object> outputs;
tl_currentGpuFrame() = nullptr; {
// Scoped NV12 fast-path pointer; cleared on any exit path (normal or
// throw) so the next call on this thread cannot see a stale frame.
GpuFrameScope _gfs(ANSGpuFrameRegistry::instance().lookup(*cvImage));
outputs = engine->RunInference(localImage, cameraId);
}
bool getJpeg = (getJpegString == 1); bool getJpeg = (getJpegString == 1);
std::string stImage; std::string stImage;
@@ -567,15 +582,17 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_CPP(ANSCENTER::ANSALP
try { try {
const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref
// Set thread-local NV12 frame data for fast-path inference
// Cleared after first RunInference to prevent NV12 mismatch on cropped sub-images (OCR, etc.)
tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
int originalWidth = localImage.cols; int originalWidth = localImage.cols;
int originalHeight = localImage.rows; int originalHeight = localImage.rows;
int maxImageSize = originalWidth; int maxImageSize = originalWidth;
std::vector<ANSCENTER::Object> outputs = engine->RunInference(localImage, cameraId); std::vector<ANSCENTER::Object> outputs;
{
// Scoped NV12 fast-path pointer; cleared on any exit path (normal or throw).
GpuFrameScope _gfs(ANSGpuFrameRegistry::instance().lookup(*cvImage));
outputs = engine->RunInference(localImage, cameraId);
}
bool getJpeg = (getJpegString == 1); bool getJpeg = (getJpegString == 1);
std::string stImage; std::string stImage;
@@ -646,9 +663,6 @@ extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV(
try { try {
const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref
// Set thread-local NV12 frame data for fast-path inference
// Cleared after first RunInference to prevent NV12 mismatch on cropped sub-images (OCR, etc.)
tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
std::vector<ANSCENTER::Object> objectDetectionResults; std::vector<ANSCENTER::Object> objectDetectionResults;
std::vector<cv::Rect> bBox = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes); std::vector<cv::Rect> bBox = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes);
@@ -659,10 +673,14 @@ extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV(
const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0; const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0;
if (bBox.empty()) { if (bBox.empty()) {
// Full-frame path: NV12 fast-path is only safe for the full-frame
// inference call. Scope the TL pointer so it is cleared on any exit
// path (normal or thrown) and is NOT seen by any subsequent
// cropped-image inference (which would mismatch the NV12 cache).
GpuFrameScope _gfs(ANSGpuFrameRegistry::instance().lookup(*cvImage));
objectDetectionResults = engine->RunInference(localImage, cameraId); objectDetectionResults = engine->RunInference(localImage, cameraId);
} }
tl_currentGpuFrame() = nullptr; // Clear before crop-based inference else {
if (!bBox.empty()) {
for (const auto& rect : bBox) { for (const auto& rect : bBox) {
cv::Rect scaledRect; cv::Rect scaledRect;
scaledRect.x = static_cast<int>(rect.x * scaleFactor); scaledRect.x = static_cast<int>(rect.x * scaleFactor);
@@ -708,6 +726,103 @@ extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV(
} }
} }
// Dedicated pipeline-mode export. Unlike ANSALPR_RunInferencesComplete_LV
// this function:
// 1. Always runs with tracker OFF, voting OFF, dedup OFF — it targets
//    callers that already have precise per-vehicle bboxes and want raw,
//    stateless results.
// 2. Issues ONE batched LP-detect call and ONE batched recognizer call
//    per frame via ANSALPR::RunInferencesBatch, instead of looping
//    engine->RunInference once per crop. This eliminates the per-shape
//    ORT/TRT allocator churn that causes ANSALPR_OCR memory growth when
//    the legacy pipeline-mode loop is used under LabVIEW worker threads.
// 3. Does NOT touch tl_currentGpuFrame — the caller is working with
//    cropped regions, not the NV12-keyed source Mat, so the fast-path
//    pointer is meaningless here. Eliminates a class of UAF risk.
// Output coordinates are rescaled back into the caller's resized space,
// matching the convention used by ANSALPR_RunInferencesComplete_LV.
//
// @param Handle          ALPR engine handle (double pointer, LabVIEW style)
// @param cvImage         full frame (double pointer to cv::Mat)
// @param cameraId        camera identifier string
// @param maxImageSize    width of the caller's resized coordinate space;
//                        <= 0 disables scaling (scale factor 1.0)
// @param strBboxes       serialized vehicle bboxes in the resized space
// @param detectionResult LabVIEW string handle receiving the result JSON
// @return  1 = success, JSON written into detectionResult
//          0 = no result (empty JSON, handle-resize failure, or exception)
//         -1 = null engine handle, -2 = null/empty image, -3 = busy handle
extern "C" ANSLPR_API int ANSALPR_RunInferencesBatch_LV(
    ANSCENTER::ANSALPR** Handle,
    cv::Mat** cvImage,
    const char* cameraId,
    int maxImageSize,
    const char* strBboxes,
    LStrHandle detectionResult)
{
    if (!Handle || !*Handle) return -1;
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    // Guard keeps the engine pinned for the duration of the call.
    ALPRHandleGuard guard(AcquireALPRHandle(*Handle));
    if (!guard) return -3;
    auto* engine = guard.get();
    try {
        const cv::Mat& frame = **cvImage;
        const int frameW = frame.cols;
        const int frameH = frame.rows;
        if (frameW <= 0 || frameH <= 0) return -2;
        // Same scaling convention as ANSALPR_RunInferencesComplete_LV:
        // the bboxes in `strBboxes` are in a resized coordinate space;
        // scale them up to the full frame for the crop, then rescale the
        // plate outputs back to the caller's space.
        const double scale =
            (maxImageSize > 0) ? static_cast<double>(frameW) / maxImageSize : 1.0;
        std::vector<cv::Rect> rawBoxes = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes);
        std::vector<cv::Rect> scaledBoxes;
        scaledBoxes.reserve(rawBoxes.size());
        const cv::Rect frameRect(0, 0, frameW, frameH);
        for (const auto& r : rawBoxes) {
            cv::Rect s(
                static_cast<int>(r.x * scale),
                static_cast<int>(r.y * scale),
                static_cast<int>(r.width * scale),
                static_cast<int>(r.height * scale));
            s &= frameRect; // clamp to the frame; drop degenerate rects
            if (s.width > 0 && s.height > 0) scaledBoxes.push_back(s);
        }
        // Empty bbox list → fall through to full-frame RunInference so
        // existing LabVIEW code that passes "" still works, matching the
        // behaviour of ANSALPR_RunInferencesComplete_LV.
        std::vector<ANSCENTER::Object> results;
        if (scaledBoxes.empty()) {
            results = engine->RunInference(frame, cameraId);
        } else {
            results = engine->RunInferencesBatch(frame, scaledBoxes, cameraId);
        }
        // Rescale plate boxes back into the caller's resized space.
        // (scale is exactly 1.0 when maxImageSize <= 0 or equals frameW,
        // so this float compare is safe here.)
        if (scale != 1.0) {
            const double inv = 1.0 / scale;
            for (auto& o : results) {
                o.box.x = static_cast<int>(o.box.x * inv);
                o.box.y = static_cast<int>(o.box.y * inv);
                o.box.width = static_cast<int>(o.box.width * inv);
                o.box.height = static_cast<int>(o.box.height * inv);
            }
        }
        std::string json = engine->VectorDetectionToJsonString(results);
        if (json.empty()) return 0;
        // Resize the LabVIEW string handle to hold the JSON payload,
        // then write length-prefixed bytes (LStr layout: int32 cnt + data).
        const int size = static_cast<int>(json.length());
        MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (err != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, json.c_str(), size);
        return 1;
    }
    // Exceptions must not cross the C ABI back into LabVIEW; report
    // "no result" (0) instead of a negative argument-error code.
    catch (const std::exception& /*ex*/) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
extern "C" ANSLPR_API int ANSALPR_SetFormat(ANSCENTER::ANSALPR** Handle, const char* format) { extern "C" ANSLPR_API int ANSALPR_SetFormat(ANSCENTER::ANSALPR** Handle, const char* format) {
if (!Handle || !*Handle) return -1; if (!Handle || !*Handle) return -1;
@@ -1042,9 +1157,6 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV_V2(
try { try {
const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref
// Set thread-local NV12 frame data for fast-path inference
// Cleared after first RunInference to prevent NV12 mismatch on cropped sub-images (OCR, etc.)
tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
int originalWidth = localImage.cols; int originalWidth = localImage.cols;
int originalHeight = localImage.rows; int originalHeight = localImage.rows;
@@ -1053,8 +1165,12 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV_V2(
return -2; return -2;
} }
std::vector<ANSCENTER::Object> outputs = engine->RunInference(localImage, cameraId); std::vector<ANSCENTER::Object> outputs;
tl_currentGpuFrame() = nullptr; {
// Scoped NV12 fast-path pointer; cleared on any exit path (normal or throw).
GpuFrameScope _gfs(ANSGpuFrameRegistry::instance().lookup(*cvImage));
outputs = engine->RunInference(localImage, cameraId);
}
bool getJpeg = (getJpegString == 1); bool getJpeg = (getJpegString == 1);
std::string stImage; std::string stImage;
@@ -1132,6 +1248,85 @@ extern "C" ANSLPR_API int ANSALPR_RunInferenceComplete_LV_V2(
} }
} }
// V2 uint64_t handle variant of ANSALPR_RunInferencesBatch_LV.
// See the non-V2 version for semantics — identical behaviour, differs only
// in how the caller passes the ALPR engine handle (by value instead of via
// a LabVIEW Handle** pointer-to-pointer).
//
// Returns:  1 = success (JSON written into detectionResult)
//           0 = empty JSON, LabVIEW handle-resize failure, or a swallowed
//               exception (matches the sibling exports' convention)
//          -1 = null engine handle      -2 = null/empty image
//          -3 = handle not registered   -4 = null LabVIEW output handle
extern "C" ANSLPR_API int ANSALPR_RunInferencesBatch_LV_V2(
    uint64_t handleVal,
    cv::Mat** cvImage,
    const char* cameraId,
    int maxImageSize,
    const char* strBboxes,
    LStrHandle detectionResult)
{
    ANSCENTER::ANSALPR* _v2Direct = reinterpret_cast<ANSCENTER::ANSALPR*>(handleVal);
    if (_v2Direct == nullptr) return -1;
    if (!cvImage || !(*cvImage) || (*cvImage)->empty()) return -2;
    // Reject a null output handle up front: DSSetHandleSize on a null
    // LStrHandle would crash the LabVIEW runtime instead of failing cleanly.
    if (!detectionResult) return -4;
    ALPRHandleGuard guard(AcquireALPRHandle(_v2Direct));
    if (!guard) return -3;
    auto* engine = guard.get();
    try {
        const cv::Mat& frame = **cvImage;
        const int frameW = frame.cols;
        const int frameH = frame.rows;
        if (frameW <= 0 || frameH <= 0) return -2;
        // Bboxes arrive in a resized coordinate space; scale up to the full
        // frame for cropping, then rescale outputs back (see non-V2 docs).
        const double scale =
            (maxImageSize > 0) ? static_cast<double>(frameW) / maxImageSize : 1.0;
        std::vector<cv::Rect> rawBoxes = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes);
        std::vector<cv::Rect> scaledBoxes;
        scaledBoxes.reserve(rawBoxes.size());
        const cv::Rect frameRect(0, 0, frameW, frameH);
        for (const auto& r : rawBoxes) {
            cv::Rect s(
                static_cast<int>(r.x * scale),
                static_cast<int>(r.y * scale),
                static_cast<int>(r.width * scale),
                static_cast<int>(r.height * scale));
            s &= frameRect;   // clip to the frame; degenerate boxes dropped below
            if (s.width > 0 && s.height > 0) scaledBoxes.push_back(s);
        }
        // Empty bbox list → full-frame RunInference, so callers passing ""
        // keep working, matching ANSALPR_RunInferencesComplete_LV_V2.
        std::vector<ANSCENTER::Object> results;
        if (scaledBoxes.empty()) {
            results = engine->RunInference(frame, cameraId);
        } else {
            results = engine->RunInferencesBatch(frame, scaledBoxes, cameraId);
        }
        // Rescale plate boxes back into the caller's resized space.
        if (scale != 1.0) {
            const double inv = 1.0 / scale;
            for (auto& o : results) {
                o.box.x = static_cast<int>(o.box.x * inv);
                o.box.y = static_cast<int>(o.box.y * inv);
                o.box.width = static_cast<int>(o.box.width * inv);
                o.box.height = static_cast<int>(o.box.height * inv);
            }
        }
        std::string json = engine->VectorDetectionToJsonString(results);
        if (json.empty()) return 0;
        const int size = static_cast<int>(json.length());
        MgErr err = DSSetHandleSize(detectionResult, sizeof(int32) + size * sizeof(uChar));
        if (err != noErr) return 0;
        (*detectionResult)->cnt = size;
        memcpy((*detectionResult)->str, json.c_str(), size);
        return 1;
    }
    catch (const std::exception& /*ex*/) {
        return 0;
    }
    catch (...) {
        return 0;
    }
}
extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV_V2( extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV_V2(
uint64_t handleVal, uint64_t handleVal,
cv::Mat** cvImage, cv::Mat** cvImage,
@@ -1150,9 +1345,6 @@ extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV_V2(
try { try {
const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref const cv::Mat& localImage = **cvImage; // No clone — RunInference takes const ref
// Set thread-local NV12 frame data for fast-path inference
// Cleared after first RunInference to prevent NV12 mismatch on cropped sub-images (OCR, etc.)
tl_currentGpuFrame() = ANSGpuFrameRegistry::instance().lookup(*cvImage);
std::vector<ANSCENTER::Object> objectDetectionResults; std::vector<ANSCENTER::Object> objectDetectionResults;
std::vector<cv::Rect> bBox = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes); std::vector<cv::Rect> bBox = ANSCENTER::ANSALPR::GetBoundingBoxes(strBboxes);
@@ -1163,10 +1355,14 @@ extern "C" ANSLPR_API int ANSALPR_RunInferencesComplete_LV_V2(
const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0; const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0;
if (bBox.empty()) { if (bBox.empty()) {
// Full-frame path: NV12 fast-path is only safe for the full-frame
// inference call. Scope the TL pointer so it is cleared on any exit
// path (normal or thrown) and is NOT seen by any subsequent
// cropped-image inference (which would mismatch the NV12 cache).
GpuFrameScope _gfs(ANSGpuFrameRegistry::instance().lookup(*cvImage));
objectDetectionResults = engine->RunInference(localImage, cameraId); objectDetectionResults = engine->RunInference(localImage, cameraId);
} }
tl_currentGpuFrame() = nullptr; // Clear before crop-based inference else {
if (!bBox.empty()) {
for (const auto& rect : bBox) { for (const auto& rect : bBox) {
cv::Rect scaledRect; cv::Rect scaledRect;
scaledRect.x = static_cast<int>(rect.x * scaleFactor); scaledRect.x = static_cast<int>(rect.x * scaleFactor);

View File

@@ -85,6 +85,19 @@ public:
OCRHandleGuard& operator=(const OCRHandleGuard&) = delete; OCRHandleGuard& operator=(const OCRHandleGuard&) = delete;
}; };
// RAII guard — sets the per-thread current GPU frame pointer and always
// clears it on scope exit, even if the wrapped inference call throws.
// Without this, a throwing RunInference leaves tl_currentGpuFrame pointing
// at a GpuFrameData that may be freed before the next call on this thread,
// causing use-after-free or stale NV12 data on subsequent frames.
class GpuFrameScope {
public:
explicit GpuFrameScope(GpuFrameData* f) { tl_currentGpuFrame() = f; }
~GpuFrameScope() { tl_currentGpuFrame() = nullptr; }
GpuFrameScope(const GpuFrameScope&) = delete;
GpuFrameScope& operator=(const GpuFrameScope&) = delete;
};
BOOL APIENTRY DllMain(HMODULE hModule, BOOL APIENTRY DllMain(HMODULE hModule,
DWORD ul_reason_for_call, DWORD ul_reason_for_call,
LPVOID lpReserved LPVOID lpReserved
@@ -265,13 +278,40 @@ extern "C" ANSOCR_API int CreateANSOCRHandleEx(ANSCENTER::ANSOCRBase** Handle,
if (useDilation == 1)modelConfig.useDilation = true; if (useDilation == 1)modelConfig.useDilation = true;
modelConfig.limitSideLen = limitSideLen; modelConfig.limitSideLen = limitSideLen;
int result = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, engineMode); int result = (*Handle)->Initialize(licenseKey, modelConfig, modelFilePath, modelFileZipPassword, engineMode);
if (!result) {
// Initialize failed — tear down the engine we just registered so
// the caller, who sees a 0 return and (typically) does not call
// ReleaseANSOCRHandle, does not leak the engine + registry entry.
if (UnregisterOCRHandle(*Handle)) {
try { (*Handle)->Destroy(); } catch (...) {}
delete *Handle;
}
*Handle = nullptr;
return 0;
}
return result; return result;
} }
} }
catch (std::exception& e) { catch (std::exception& e) {
// Partially-constructed engine may already be registered — unwind it
// so the leak does not accumulate across repeated failed Create calls.
if (Handle && *Handle) {
if (UnregisterOCRHandle(*Handle)) {
try { (*Handle)->Destroy(); } catch (...) {}
delete *Handle;
}
*Handle = nullptr;
}
return 0; return 0;
} }
catch (...) { catch (...) {
if (Handle && *Handle) {
if (UnregisterOCRHandle(*Handle)) {
try { (*Handle)->Destroy(); } catch (...) {}
delete *Handle;
}
*Handle = nullptr;
}
return 0; return 0;
} }
} }
@@ -804,10 +844,11 @@ extern "C" ANSOCR_API int RunInferenceComplete_LV(
if (originalWidth == 0 || originalHeight == 0) return -2; if (originalWidth == 0 || originalHeight == 0) return -2;
tl_currentGpuFrame() = gpuFrame; std::vector<ANSCENTER::OCRObject> outputs;
std::vector<ANSCENTER::OCRObject> outputs = engine->RunInference(localImage, cameraId); {
tl_currentGpuFrame() = nullptr; GpuFrameScope _gfs(gpuFrame);
outputs = engine->RunInference(localImage, cameraId);
}
bool getJpeg = (getJpegString == 1); bool getJpeg = (getJpegString == 1);
std::string stImage; std::string stImage;
@@ -897,7 +938,8 @@ extern "C" ANSOCR_API int RunInferencesComplete_LV(ANSCENTER::ANSOCRBase** Handl
const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0; const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0;
tl_currentGpuFrame() = gpuFrame; {
GpuFrameScope _gfs(gpuFrame);
if (bBox.empty()) { if (bBox.empty()) {
objectDetectionResults = engine->RunInference(localImage, cameraId); objectDetectionResults = engine->RunInference(localImage, cameraId);
} }
@@ -926,7 +968,7 @@ extern "C" ANSOCR_API int RunInferencesComplete_LV(ANSCENTER::ANSOCRBase** Handl
} }
} }
} }
tl_currentGpuFrame() = nullptr; }
std::string stDetectionResult = ANSCENTER::ANSOCRUtility::OCRDetectionToJsonString(objectDetectionResults); std::string stDetectionResult = ANSCENTER::ANSOCRUtility::OCRDetectionToJsonString(objectDetectionResults);
if (stDetectionResult.empty()) return 0; if (stDetectionResult.empty()) return 0;
@@ -1087,9 +1129,11 @@ extern "C" ANSOCR_API int RunInferenceComplete_LV_V2(
if (originalWidth == 0 || originalHeight == 0) return -2; if (originalWidth == 0 || originalHeight == 0) return -2;
tl_currentGpuFrame() = gpuFrame; std::vector<ANSCENTER::OCRObject> outputs;
std::vector<ANSCENTER::OCRObject> outputs = engine->RunInference(localImage, cameraId); {
tl_currentGpuFrame() = nullptr; GpuFrameScope _gfs(gpuFrame);
outputs = engine->RunInference(localImage, cameraId);
}
bool getJpeg = (getJpegString == 1); bool getJpeg = (getJpegString == 1);
std::string stImage; std::string stImage;
@@ -1181,7 +1225,8 @@ extern "C" ANSOCR_API int RunInferencesComplete_LV_V2(uint64_t handleVal, cv::Ma
const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0; const double scaleFactor = (maxImageSize > 0) ? static_cast<double>(originalWidth) / maxImageSize : 1.0;
tl_currentGpuFrame() = gpuFrame; {
GpuFrameScope _gfs(gpuFrame);
if (bBox.empty()) { if (bBox.empty()) {
objectDetectionResults = engine->RunInference(localImage, cameraId); objectDetectionResults = engine->RunInference(localImage, cameraId);
} }
@@ -1210,7 +1255,7 @@ extern "C" ANSOCR_API int RunInferencesComplete_LV_V2(uint64_t handleVal, cv::Ma
} }
} }
} }
tl_currentGpuFrame() = nullptr; }
std::string stDetectionResult = ANSCENTER::ANSOCRUtility::OCRDetectionToJsonString(objectDetectionResults); std::string stDetectionResult = ANSCENTER::ANSOCRUtility::OCRDetectionToJsonString(objectDetectionResults);
if (stDetectionResult.empty()) return 0; if (stDetectionResult.empty()) return 0;