Files
ANSCORE/ANSODEngine/patch_tensorrtcl.py

705 lines
36 KiB
Python
Raw Normal View History

2026-03-28 16:54:11 +11:00
import re, sys

# Target C++ source to patch.  Allow an optional command-line override
# (python patch_tensorrtcl.py <path>) so the script is not tied to one
# machine's checkout layout; with no argument the original hard-coded
# path is used, which preserves the previous behavior exactly.
CPP = sys.argv[1] if len(sys.argv) > 1 else r'C:\Projects\ANLS\ANSLIB\ANSODEngine\ANSTENSORRTCL.cpp'

# Read the whole file as raw bytes so every patch below is byte-exact:
# the payloads embed explicit \r\n line endings and tab indentation.
with open(CPP, 'rb') as f:
    data = f.read()
print('File size:', len(data))

# Sanity-check the line-ending convention; all anchors/payloads assume CRLF.
CRLF = b'\r\n' in data
print('CRLF:', CRLF)

# Patch failures accumulate here; the file is only rewritten at the end
# if this list stays empty (all-or-nothing patching).
errors = []
def replace_once(data, old, new, label):
    """Replace a byte sequence that must occur exactly once.

    If *old* is found a number of times other than one, the mismatch is
    recorded in the global ``errors`` list and *data* is returned
    untouched; otherwise the single occurrence is swapped for *new*.
    """
    hits = data.count(old)
    if hits == 1:
        return data.replace(old, new)
    errors.append(f'{label}: found {hits}, expected 1')
    return data
def replace_all(data, old, new, expected, label):
    """Replace every occurrence of *old*, requiring exactly *expected* hits.

    A count mismatch is logged to the global ``errors`` list and the
    input bytes are handed back unmodified; on a match every occurrence
    is replaced with *new*.
    """
    hits = data.count(old)
    if hits == expected:
        return data.replace(old, new)
    errors.append(f'{label}: found {hits}, expected {expected}')
    return data
def replace_slice(data, start_anchor, end_anchor, new_content, label):
    """Swap the span [start_anchor .. end_anchor) for *new_content*.

    The span begins at the first byte of *start_anchor* and ends just
    before the first *end_anchor* found after it — the end anchor itself
    is kept, so payloads must re-include the start anchor text if it
    should survive.  A missing anchor is logged to the global ``errors``
    list and *data* is returned unchanged.
    """
    begin = data.find(start_anchor)
    if begin < 0:
        errors.append(f'{label}: start anchor not found: {start_anchor[:60]}')
        return data
    finish = data.find(end_anchor, begin + len(start_anchor))
    if finish < 0:
        errors.append(f'{label}: end anchor not found: {end_anchor[:60]}')
        return data
    print(f' {label}: [{begin}:{finish}] ({finish-begin} bytes) -> {len(new_content)} bytes')
    return data[:begin] + new_content + data[finish:]
# ──────────────────────────────────────────────────────────────────────────────
# Step 1: Add #include <future>
# ──────────────────────────────────────────────────────────────────────────────
# Insert <future> right after the cudaimgproc include; the std::async /
# std::future code injected by Steps 8 and 10 needs it.
data = replace_once(data,
    b'#include <opencv2/cudaimgproc.hpp>\r\n',
    b'#include <opencv2/cudaimgproc.hpp>\r\n#include <future>\r\n',
    'Step 1: Add #include <future>')
# NOTE(review): 'done' is printed even if replace_once recorded an error;
# the final error report at the bottom is the authoritative status.
print('Step 1: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 2: buildLoadNetwork — add m_maxSlotsPerGpu (3 calls)
# ──────────────────────────────────────────────────────────────────────────────
# Append m_maxSlotsPerGpu as a new trailing argument at all three call sites.
# NOTE(review): assumes buildLoadNetwork's declaration already accepts this
# extra parameter (patched elsewhere or defaulted) — confirm in the header.
data = replace_all(data,
    b'buildLoadNetwork(_modelFilePath, SUB_VALS, DIV_VALS, NORMALIZE);',
    b'buildLoadNetwork(_modelFilePath, SUB_VALS, DIV_VALS, NORMALIZE, m_maxSlotsPerGpu);',
    3, 'Step 2: buildLoadNetwork add m_maxSlotsPerGpu')
print('Step 2: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 2b: Remove _isFixedBatch = false; from LoadModel and LoadModelFromFolder
# Use surrounding context to avoid matching inside 3-tab Initialize line
# ──────────────────────────────────────────────────────────────────────────────
# The trailing 'std::lock_guard' context pins the match to the two 2-tab
# occurrences; the 3-tab copy in Initialize is handled separately in Step 2c.
data = replace_all(data,
    b'\t\t_isFixedBatch = false;\r\n\t\tstd::lock_guard',
    b'\t\tstd::lock_guard',
    2, 'Step 2b: Remove _isFixedBatch=false in LoadModel/LoadModelFromFolder')
print('Step 2b: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 2c: Remove _isFixedBatch = false; from Initialize (3-tab indent)
# ──────────────────────────────────────────────────────────────────────────────
# Deletes the single remaining (3-tab) assignment; together with Step 2b this
# retires _isFixedBatch entirely (verified by the stale-name scan at the end).
data = replace_all(data,
    b'\t\t\t_isFixedBatch = false;\r\n',
    b'',
    1, 'Step 2c: Remove _isFixedBatch=false in Initialize')
print('Step 2c: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 3: RunInference(camera_id) — release mutex before DetectObjects
# ──────────────────────────────────────────────────────────────────────────────
# Replace the whole RunInference(camera_id) body (everything up to — but not
# including — the RunInferencesBatch declaration, which replace_slice keeps as
# the end anchor) with a version that validates state under a short-lived
# lock, then calls DetectObjects with the mutex released.
data = replace_slice(data,
    b'TENSORRTCL::RunInference(const cv::Mat& inputImgBGR,const std::string& camera_id)\r\n',
    b'\tstd::vector<std::vector<Object>> TENSORRTCL::RunInferencesBatch(',
    b'TENSORRTCL::RunInference(const cv::Mat& inputImgBGR,const std::string& camera_id)\r\n'
    b'\t{\r\n'
    b'\t\t// Validate state under brief lock\r\n'
    b'\t\t{\r\n'
    b'\t\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\t\tif (!_modelLoadValid) {\r\n'
    b'\t\t\t\t_logger.LogError("TENSORRTCL::RunInference",\r\n'
    b'\t\t\t\t\t"Cannot load the TensorRT model. Please check if it exists",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (!_licenseValid) {\r\n'
    b'\t\t\t\t_logger.LogError("TENSORRTCL::RunInference",\r\n'
    b'\t\t\t\t\t"Runtime license is not valid or expired. Please contact ANSCENTER",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (!_isInitialized) {\r\n'
    b'\t\t\t\t_logger.LogError("TENSORRTCL::RunInference",\r\n'
    b'\t\t\t\t\t"Model is not initialized",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (inputImgBGR.empty() || inputImgBGR.cols < 5 || inputImgBGR.rows < 5) {\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\t\t}\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\treturn DetectObjects(inputImgBGR, camera_id);\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (const std::exception& e) {\r\n'
    b'\t\t\t_logger.LogFatal("TENSORRTCL::RunInference", e.what(), __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 3: RunInference brief lock')
print('Step 3: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 4: RunInferencesBatch — brief lock, remove _isFixedBatch fallback
# ──────────────────────────────────────────────────────────────────────────────
# Same pattern as Step 3, for the batch entry point: validation under a brief
# lock, then DetectObjectsBatch unlocked.  The destructor declaration is the
# (preserved) end anchor.
data = replace_slice(data,
    b'TENSORRTCL::RunInferencesBatch(const std::vector<cv::Mat>& inputs, const std::string& camera_id) {\r\n',
    b'\tTENSORRTCL::~TENSORRTCL() {',
    b'TENSORRTCL::RunInferencesBatch(const std::vector<cv::Mat>& inputs, const std::string& camera_id) {\r\n'
    b'\t\t// Validate state under brief lock\r\n'
    b'\t\t{\r\n'
    b'\t\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\t\tif (!_modelLoadValid) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::RunInferencesBatch",\r\n'
    b'\t\t\t\t\t"Cannot load the TensorRT model. Please check if it exists", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (!_licenseValid) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::RunInferencesBatch",\r\n'
    b'\t\t\t\t\t"Runtime license is not valid or expired. Please contact ANSCENTER", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (!_isInitialized) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::RunInferencesBatch",\r\n'
    b'\t\t\t\t\t"Engine not initialized", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (inputs.empty()) return {};\r\n'
    b'\t\t}\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\treturn DetectObjectsBatch(inputs, camera_id);\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (const std::exception& e) {\r\n'
    b'\t\t\tthis->_logger.LogFatal("TENSORRTCL::RunInferencesBatch", e.what(), __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\t};\r\n'
    b'\t',
    'Step 4: RunInferencesBatch brief lock')
print('Step 4: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 5: DetectObjects — three-phase mutex
# ──────────────────────────────────────────────────────────────────────────────
# Rewrite DetectObjects as: (1) preprocess under a brief lock, (2) run
# inference with the mutex released so the engine pool can dispatch to an
# idle GPU slot, (3) postprocess under a brief lock.  Pairs with Steps 6/7,
# which add the ImageMetadata out-parameters the new body relies on.
data = replace_slice(data,
    b'TENSORRTCL::DetectObjects(const cv::Mat& inputImage, const std::string& camera_id) {\r\n',
    b'\tstd::vector<std::vector<cv::cuda::GpuMat>> TENSORRTCL::Preprocess(',
    b'TENSORRTCL::DetectObjects(const cv::Mat& inputImage, const std::string& camera_id) {\r\n'
    b'\t\t// Phase 1: Preprocess under brief lock\r\n'
    b'\t\tImageMetadata meta;\r\n'
    b'\t\tstd::vector<std::vector<cv::cuda::GpuMat>> input;\r\n'
    b'\t\t{\r\n'
    b'\t\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\t\tinput = Preprocess(inputImage, meta);\r\n'
    b'\t\t}\r\n'
    b'\t\tif (input.empty()) return {};\r\n'
    b'\r\n'
    b'\t\t// Phase 2: Inference \xe2\x80\x94 mutex released; pool dispatches to idle GPU slot\r\n'
    b'\t\tstd::vector<std::vector<std::vector<float>>> featureVectors;\r\n'
    b'\t\tauto succ = m_trtEngine->runInference(input, featureVectors);\r\n'
    b'\t\tif (!succ) {\r\n'
    b'\t\t\tthis->_logger.LogFatal("TENSORRTCL::DetectObjects", "Error running inference", __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t\t// Phase 3: Postprocess under brief lock\r\n'
    b'\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\tstd::vector<float> featureVector;\r\n'
    b'\t\tEngine<float>::transformOutput(featureVectors, featureVector);\r\n'
    b'\t\treturn Postprocess(featureVector, camera_id, meta);\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 5: DetectObjects three-phase mutex')
print('Step 5: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 6: Preprocess — add outMeta param, remove lock, replace member writes
# ──────────────────────────────────────────────────────────────────────────────
# New signature takes ImageMetadata& outMeta; the former m_imgWidth/m_imgHeight/
# m_ratio member writes become outMeta fields so Preprocess is stateless and
# needs no lock of its own (callers hold the mutex — see Step 5).
# NOTE(review): maxImageSize and 'processed' in the payload appear unused —
# harmless, but candidates for cleanup upstream.
data = replace_slice(data,
    b'TENSORRTCL::Preprocess(const cv::Mat& inputImage) {\r\n',
    b'\tstd::vector<Object> TENSORRTCL::Postprocess(',
    b'TENSORRTCL::Preprocess(const cv::Mat& inputImage, ImageMetadata& outMeta) {\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\tif (!_licenseValid) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::Preprocess", "Invalid license", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif (inputImage.empty()) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::Preprocess", "Input image is empty", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tif ((inputImage.cols < 5) || (inputImage.rows < 5)) {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::Preprocess",\r\n'
    b'\t\t\t\t\t"Input image is too small (Width: " + std::to_string(inputImage.cols) +\r\n'
    b'\t\t\t\t\t", Height: " + std::to_string(inputImage.rows) + ")",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Populate the input vectors\r\n'
    b'\t\t\tconst auto& inputDims = m_trtEngine->getInputDims();\r\n'
    b'\t\t\tconst int inputH = inputDims[0].d[1];\r\n'
    b'\t\t\tconst int inputW = inputDims[0].d[2];\r\n'
    b'\r\n'
    b'\t\t\t// Upload the image to GPU memory\r\n'
    b'\t\t\tcv::cuda::Stream stream; // Create a custom stream\r\n'
    b'\t\t\tcv::cuda::GpuMat img;\r\n'
    b'\r\n'
    b'\t\t\tif (inputImage.channels() == 1) {\r\n'
    b'\t\t\t\t// Convert grayscale to 3-channel BGR before uploading\r\n'
    b'\t\t\t\tcv::Mat img3Channel;\r\n'
    b'\t\t\t\tcv::cvtColor(inputImage, img3Channel, cv::COLOR_GRAY2BGR);\r\n'
    b'\t\t\t\timg.upload(img3Channel, stream);\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\timg.upload(inputImage, stream);\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Convert BGR to RGB\r\n'
    b'\t\t\tcv::cuda::GpuMat imgRGB;\r\n'
    b'\t\t\tcv::cuda::cvtColor(img, imgRGB, cv::COLOR_BGR2RGB, 0, stream);\r\n'
    b'\t\t\tstream.waitForCompletion();\r\n'
    b'\r\n'
    b'\t\t\t// These parameters will be used in the post-processing stage\r\n'
    b'\t\t\toutMeta.imgHeight = imgRGB.rows;\r\n'
    b'\t\t\toutMeta.imgWidth = imgRGB.cols;\r\n'
    b'\r\n'
    b'\t\t\tif (outMeta.imgHeight <= 0 || outMeta.imgWidth <= 0) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::Preprocess", "Image height or width is zero", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tint maxImageSize = std::max(outMeta.imgHeight, outMeta.imgWidth);\r\n'
    b'\t\t\tcv::cuda::GpuMat processed = imgRGB;\r\n'
    b'\r\n'
    b'\t\t\tif (outMeta.imgHeight > 0 && outMeta.imgWidth > 0) {\r\n'
    b'\t\t\t\toutMeta.ratio = 1.f / std::min(inputDims[0].d[2] / static_cast<float>(imgRGB.cols),\r\n'
    b'\t\t\t\t\tinputDims[0].d[1] / static_cast<float>(imgRGB.rows));\r\n'
    b'\r\n'
    b'\t\t\t\tcv::cuda::GpuMat resized = imgRGB;\r\n'
    b'\r\n'
    b'\t\t\t\t// Resize to the model\'s expected input size while maintaining aspect ratio with padding\r\n'
    b'\t\t\t\tif (resized.rows != inputDims[0].d[1] || resized.cols != inputDims[0].d[2]) {\r\n'
    b'\t\t\t\t\tresized = Engine<float>::resizeKeepAspectRatioPadRightBottom(imgRGB, inputDims[0].d[1], inputDims[0].d[2]);\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\t// Convert to format expected by our inference engine\r\n'
    b'\t\t\t\tstd::vector<cv::cuda::GpuMat> input{ std::move(resized) };\r\n'
    b'\t\t\t\tstd::vector<std::vector<cv::cuda::GpuMat>> inputs{ std::move(input) };\r\n'
    b'\t\t\t\treturn inputs;\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\tthis->_logger.LogFatal("TENSORRTCL::Preprocess",\r\n'
    b'\t\t\t\t\t"Image height or width is zero after processing (Width: " + std::to_string(outMeta.imgWidth) +\r\n'
    b'\t\t\t\t\t", Height: " + std::to_string(outMeta.imgHeight) + ")",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (const std::exception& e) {\r\n'
    b'\t\t\tthis->_logger.LogFatal("TENSORRTCL::Preprocess", e.what(), __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 6: Preprocess outMeta')
print('Step 6: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 7: Postprocess — add meta param, remove lock, replace member refs
# ──────────────────────────────────────────────────────────────────────────────
# New signature takes the ImageMetadata produced by Step 6's Preprocess, so
# the image-dimension members (and the internal lock) are no longer needed.
# NOTE(review): the "Postproces" log tag in the catch payload is a typo that
# is reproduced verbatim (runtime string) — fix at the source if desired.
data = replace_slice(data,
    b'TENSORRTCL::Postprocess(std::vector<float>& featureVector, const std::string& camera_id) {\r\n',
    b'\tstd::vector<std::vector<Object>> TENSORRTCL::DetectObjectsBatch(',
    b'TENSORRTCL::Postprocess(std::vector<float>& featureVector, const std::string& camera_id, const ImageMetadata& meta) {\r\n'
    b'\t\tstd::vector<Object> outputs;\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\tauto max_idx = std::max_element(featureVector.begin(), featureVector.end());\r\n'
    b'\t\t\tint class_id = static_cast<int>(std::distance(featureVector.begin(), max_idx));\r\n'
    b'\t\t\tfloat score = *max_idx;\r\n'
    b'\t\t\tint classNameSize = _classes.size();\r\n'
    b'\t\t\tObject clsResult;\r\n'
    b'\t\t\tclsResult.classId = class_id;\r\n'
    b'\t\t\tif (!_classes.empty()) {\r\n'
    b'\t\t\t\tif (clsResult.classId < classNameSize) {\r\n'
    b'\t\t\t\t\tclsResult.className = _classes[clsResult.classId];\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t\telse {\r\n'
    b'\t\t\t\t\tclsResult.className = _classes[classNameSize - 1]; // Use last valid class name if out of range\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\tclsResult.className = "Unknown"; // Fallback if _classes is empty\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\r\n'
    b'\t\t\tclsResult.confidence = score;\r\n'
    b'\t\t\tif (meta.imgWidth > 20 && meta.imgHeight > 20) {\r\n'
    b'\t\t\t\tclsResult.box = cv::Rect(10, 10, meta.imgWidth - 20, meta.imgHeight - 20);\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\tclsResult.box = cv::Rect(0, 0, meta.imgWidth, meta.imgHeight);\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\tclsResult.polygon = ANSUtilityHelper::RectToNormalizedPolygon(clsResult.box, meta.imgWidth, meta.imgHeight);\r\n'
    b'\t\t\tclsResult.cameraId = camera_id;\r\n'
    b'\t\t\toutputs.push_back(clsResult);\r\n'
    b'\t\t\treturn outputs;\r\n'
    b'\t\t\t//EnqueueDetection(objects, camera_id);\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (std::exception& e) {\r\n'
    b'\t\t\tthis->_logger.LogFatal("TENSORRTCL::Postproces", e.what(), __FILE__, __LINE__);\r\n'
    b'\t\t\treturn outputs;\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 7: Postprocess meta')
print('Step 7: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 8: DetectObjectsBatch — auto-split + parallel postprocess, no _isFixedBatch
# ──────────────────────────────────────────────────────────────────────────────
# Full rewrite: (a) oversize batches are split into maxBatch-sized chunks and
# recursively dispatched via std::async (each lambda copies its chunk, so it
# owns its inputs); (b) the in-capacity path runs the same three-phase
# lock/unlock/lock scheme as Step 5; (c) per-image postprocessing also fans
# out through std::async.  Requires <future> from Step 1 and the new
# PreprocessBatch/PostprocessBatch signatures from Steps 9/10.
data = replace_slice(data,
    b'TENSORRTCL::DetectObjectsBatch(const std::vector<cv::Mat>& inputImages,const std::string& camera_id)\r\n',
    b'\tstd::vector<std::vector<cv::cuda::GpuMat>> TENSORRTCL::PreprocessBatch(',
    b'TENSORRTCL::DetectObjectsBatch(const std::vector<cv::Mat>& inputImages, const std::string& camera_id)\r\n'
    b'\t{\r\n'
    b'\t\t// Validate under brief lock\r\n'
    b'\t\t{\r\n'
    b'\t\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\t\tif (inputImages.empty()) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::DetectObjectsBatch",\r\n'
    b'\t\t\t\t\t"Empty input images vector", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t\t// Auto-split if batch exceeds engine capacity\r\n'
    b'\t\tconst int maxBatch = m_options.maxBatchSize > 0 ? m_options.maxBatchSize : 1;\r\n'
    b'\t\tif (static_cast<int>(inputImages.size()) > maxBatch) {\r\n'
    b'\t\t\tconst size_t numImages = inputImages.size();\r\n'
    b'\t\t\tstd::vector<std::vector<cv::Mat>> chunks;\r\n'
    b'\t\t\tfor (size_t start = 0; start < numImages; start += static_cast<size_t>(maxBatch)) {\r\n'
    b'\t\t\t\tconst size_t end = std::min(start + static_cast<size_t>(maxBatch), numImages);\r\n'
    b'\t\t\t\tchunks.emplace_back(inputImages.begin() + start, inputImages.begin() + end);\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\tstd::vector<std::future<std::vector<std::vector<Object>>>> futures;\r\n'
    b'\t\t\tfutures.reserve(chunks.size());\r\n'
    b'\t\t\tfor (size_t i = 0; i < chunks.size(); ++i) {\r\n'
    b'\t\t\t\tfutures.push_back(std::async(std::launch::async,\r\n'
    b'\t\t\t\t\t[this, c = chunks[i], cid = camera_id]() {\r\n'
    b'\t\t\t\t\t\treturn DetectObjectsBatch(c, cid);\r\n'
    b'\t\t\t\t\t}));\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\tstd::vector<std::vector<Object>> allResults;\r\n'
    b'\t\t\tallResults.reserve(numImages);\r\n'
    b'\t\t\tfor (auto& fut : futures) {\r\n'
    b'\t\t\t\tauto chunkResults = fut.get();\r\n'
    b'\t\t\t\tfor (auto& r : chunkResults) allResults.push_back(std::move(r));\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\treturn allResults;\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t\t_logger.LogInfo("TENSORRTCL::DetectObjectsBatch",\r\n'
    b'\t\t\t"Processing batch of " + std::to_string(inputImages.size()) + " images",\r\n'
    b'\t\t\t__FILE__, __LINE__);\r\n'
    b'\r\n'
    b'\t\t// Phase 1: Preprocess under brief lock\r\n'
    b'\t\tBatchMetadata metadata;\r\n'
    b'\t\tstd::vector<std::vector<cv::cuda::GpuMat>> inputs;\r\n'
    b'\t\t{\r\n'
    b'\t\t\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\r\n'
    b'\t\t\tinputs = PreprocessBatch(inputImages, metadata);\r\n'
    b'\t\t}\r\n'
    b'\t\tif (inputs.empty() || inputs[0].empty()) {\r\n'
    b'\t\t\t_logger.LogFatal("TENSORRTCL::DetectObjectsBatch",\r\n'
    b'\t\t\t\t"Preprocessing failed", __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t\t// Phase 2: Inference \xe2\x80\x94 mutex released; pool dispatches to idle GPU slot\r\n'
    b'\t\tstd::vector<std::vector<std::vector<float>>> featureVectors;\r\n'
    b'\t\tbool succ = m_trtEngine->runInference(inputs, featureVectors);\r\n'
    b'\t\tif (!succ) {\r\n'
    b'\t\t\t_logger.LogFatal("TENSORRTCL::DetectObjectsBatch",\r\n'
    b'\t\t\t\t"Error running batch inference", __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\r\n'
    b'\t\t// Phase 3: Parallel postprocessing\r\n'
    b'\t\tconst size_t numBatch = featureVectors.size();\r\n'
    b'\t\tstd::vector<std::vector<Object>> batchDetections(numBatch);\r\n'
    b'\t\tstd::vector<std::future<std::vector<Object>>> postFutures;\r\n'
    b'\t\tpostFutures.reserve(numBatch);\r\n'
    b'\r\n'
    b'\t\tfor (size_t batchIdx = 0; batchIdx < numBatch; ++batchIdx) {\r\n'
    b'\t\t\tconst auto& batchOutput = featureVectors[batchIdx];\r\n'
    b'\t\t\tstd::vector<float> fv = batchOutput.empty() ? std::vector<float>{} : batchOutput[0];\r\n'
    b'\t\t\tpostFutures.push_back(std::async(std::launch::async,\r\n'
    b'\t\t\t\t[this, fv = std::move(fv), cid = camera_id, idx = batchIdx, &metadata]() mutable {\r\n'
    b'\t\t\t\t\treturn PostprocessBatch(fv, cid, idx, metadata);\r\n'
    b'\t\t\t\t}));\r\n'
    b'\t\t}\r\n'
    b'\t\tfor (size_t i = 0; i < numBatch; ++i)\r\n'
    b'\t\t\tbatchDetections[i] = postFutures[i].get();\r\n'
    b'\r\n'
    b'\t\t_logger.LogInfo("TENSORRTCL::DetectObjectsBatch",\r\n'
    b'\t\t\t"Batch processing complete. Images: " + std::to_string(numBatch),\r\n'
    b'\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\treturn batchDetections;\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 8: DetectObjectsBatch rewrite')
print('Step 8: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 9: PreprocessBatch — add outMetadata param, remove lock, rename members
# ──────────────────────────────────────────────────────────────────────────────
# Batch analogue of Step 6: per-image sizes/ratios go into the caller-supplied
# BatchMetadata (replacing m_batchImgHeights/m_batchImgWidths/m_batchRatios),
# all images are uploaded/resized on one CUDA stream, and the result is a
# single batched input vector.  Caller (Step 8) holds the mutex.
data = replace_slice(data,
    b'TENSORRTCL::PreprocessBatch(const std::vector<cv::Mat>& inputImages)\r\n',
    b'\tstd::vector<Object> TENSORRTCL::PostprocessBatch(',
    b'TENSORRTCL::PreprocessBatch(const std::vector<cv::Mat>& inputImages, BatchMetadata& outMetadata)\r\n'
    b'\t{\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\t// Validate license\r\n'
    b'\t\t\tif (!_licenseValid) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t\t"Invalid license", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Validate input\r\n'
    b'\t\t\tif (inputImages.empty()) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t\t"Input images vector is empty", __FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn {};\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tsize_t batchSize = inputImages.size();\r\n'
    b'\r\n'
    b'\t\t\t// Get model input dimensions\r\n'
    b'\t\t\tconst auto& inputDims = m_trtEngine->getInputDims();\r\n'
    b'\t\t\tconst int inputH = inputDims[0].d[1];\r\n'
    b'\t\t\tconst int inputW = inputDims[0].d[2];\r\n'
    b'\r\n'
    b'\t\t\t_logger.LogInfo("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t"Preprocessing " + std::to_string(batchSize) + " images to " +\r\n'
    b'\t\t\t\tstd::to_string(inputW) + "x" + std::to_string(inputH),\r\n'
    b'\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\r\n'
    b'\t\t\t// Create CUDA stream for async operations\r\n'
    b'\t\t\tcv::cuda::Stream stream;\r\n'
    b'\r\n'
    b'\t\t\t// Store ALL images in a SINGLE batch vector\r\n'
    b'\t\t\tstd::vector<cv::cuda::GpuMat> batchedImages;\r\n'
    b'\t\t\tbatchedImages.reserve(batchSize);\r\n'
    b'\r\n'
    b'\t\t\t// Store image dimensions for postprocessing\r\n'
    b'\t\t\toutMetadata.imgHeights.clear();\r\n'
    b'\t\t\toutMetadata.imgWidths.clear();\r\n'
    b'\t\t\toutMetadata.ratios.clear();\r\n'
    b'\t\t\toutMetadata.imgHeights.reserve(batchSize);\r\n'
    b'\t\t\toutMetadata.imgWidths.reserve(batchSize);\r\n'
    b'\t\t\toutMetadata.ratios.reserve(batchSize);\r\n'
    b'\r\n'
    b'\t\t\t// Process each image\r\n'
    b'\t\t\tfor (size_t i = 0; i < batchSize; ++i) {\r\n'
    b'\t\t\t\tconst cv::Mat& inputImage = inputImages[i];\r\n'
    b'\r\n'
    b'\t\t\t\t// Validate individual image\r\n'
    b'\t\t\t\tif (inputImage.empty()) {\r\n'
    b'\t\t\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t\t\t"Input image at index " + std::to_string(i) + " is empty",\r\n'
    b'\t\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\t\treturn {};\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\tif (inputImage.cols < 5 || inputImage.rows < 5) {\r\n'
    b'\t\t\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t\t\t"Image at index " + std::to_string(i) +\r\n'
    b'\t\t\t\t\t\t" is too small (Width: " + std::to_string(inputImage.cols) +\r\n'
    b'\t\t\t\t\t\t", Height: " + std::to_string(inputImage.rows) + ")",\r\n'
    b'\t\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\t\treturn {};\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\t// Upload to GPU\r\n'
    b'\t\t\t\tcv::cuda::GpuMat img;\r\n'
    b'\t\t\t\tif (inputImage.channels() == 1) {\r\n'
    b'\t\t\t\t\t// Convert grayscale to BGR\r\n'
    b'\t\t\t\t\tcv::Mat img3Channel;\r\n'
    b'\t\t\t\t\tcv::cvtColor(inputImage, img3Channel, cv::COLOR_GRAY2BGR);\r\n'
    b'\t\t\t\t\timg.upload(img3Channel, stream);\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t\telse {\r\n'
    b'\t\t\t\t\timg.upload(inputImage, stream);\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\t// Convert BGR to RGB\r\n'
    b'\t\t\t\tcv::cuda::GpuMat imgRGB;\r\n'
    b'\t\t\t\tcv::cuda::cvtColor(img, imgRGB, cv::COLOR_BGR2RGB, 0, stream);\r\n'
    b'\r\n'
    b'\t\t\t\t// Store original dimensions\r\n'
    b'\t\t\t\tint imgHeight = imgRGB.rows;\r\n'
    b'\t\t\t\tint imgWidth = imgRGB.cols;\r\n'
    b'\r\n'
    b'\t\t\t\tif (imgHeight <= 0 || imgWidth <= 0) {\r\n'
    b'\t\t\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\t\t\t"Image at index " + std::to_string(i) + " has zero height or width",\r\n'
    b'\t\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\t\treturn {};\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\toutMetadata.imgHeights.push_back(imgHeight);\r\n'
    b'\t\t\t\toutMetadata.imgWidths.push_back(imgWidth);\r\n'
    b'\r\n'
    b'\t\t\t\t// Calculate resize ratio\r\n'
    b'\t\t\t\tfloat ratio = 1.f / std::min(\r\n'
    b'\t\t\t\t\tinputDims[0].d[2] / static_cast<float>(imgRGB.cols),\r\n'
    b'\t\t\t\t\tinputDims[0].d[1] / static_cast<float>(imgRGB.rows)\r\n'
    b'\t\t\t\t);\r\n'
    b'\t\t\t\toutMetadata.ratios.push_back(ratio);\r\n'
    b'\r\n'
    b'\t\t\t\t// Resize maintaining aspect ratio with padding\r\n'
    b'\t\t\t\tcv::cuda::GpuMat resized;\r\n'
    b'\t\t\t\tif (imgRGB.rows != inputDims[0].d[1] || imgRGB.cols != inputDims[0].d[2]) {\r\n'
    b'\t\t\t\t\tresized = Engine<float>::resizeKeepAspectRatioPadRightBottom(\r\n'
    b'\t\t\t\t\t\timgRGB, inputDims[0].d[1], inputDims[0].d[2]\r\n'
    b'\t\t\t\t\t);\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t\telse {\r\n'
    b'\t\t\t\t\tresized = imgRGB;\r\n'
    b'\t\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t\t// Add to batch\r\n'
    b'\t\t\t\tbatchedImages.push_back(std::move(resized));\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Wait for all GPU operations to complete\r\n'
    b'\t\t\tstream.waitForCompletion();\r\n'
    b'\r\n'
    b'\t\t\t// Return as single batched input\r\n'
    b'\t\t\tstd::vector<std::vector<cv::cuda::GpuMat>> result;\r\n'
    b'\t\t\tresult.push_back(std::move(batchedImages));\r\n'
    b'\r\n'
    b'\t\t\treturn result;\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (const std::exception& e) {\r\n'
    b'\t\t\t_logger.LogFatal("TENSORRTCL::PreprocessBatch",\r\n'
    b'\t\t\t\te.what(), __FILE__, __LINE__);\r\n'
    b'\t\t\treturn {};\r\n'
    b'\t\t}\r\n'
    b'\t}\r\n'
    b'\t',
    'Step 9: PreprocessBatch outMetadata')
print('Step 9: done')
# ──────────────────────────────────────────────────────────────────────────────
# Step 10: PostprocessBatch — add metadata param, remove lock, rename members
# ──────────────────────────────────────────────────────────────────────────────
# Batch analogue of Step 7: dimensions are read from the const BatchMetadata
# instead of members, making the function safe to run from Step 8's async
# postprocess workers without a lock.  End anchor b'\t\r\n}' is the tail of
# the last function before the closing namespace/class brace at EOF.
data = replace_slice(data,
    b'TENSORRTCL::PostprocessBatch(std::vector<float>& featureVector,const std::string& camera_id,size_t batchIdx)\r\n',
    b'\t\r\n}',
    b'TENSORRTCL::PostprocessBatch(std::vector<float>& featureVector, const std::string& camera_id, size_t batchIdx, const BatchMetadata& metadata)\r\n'
    b'\t{\r\n'
    b'\t\tstd::vector<Object> outputs;\r\n'
    b'\r\n'
    b'\t\ttry {\r\n'
    b'\t\t\t// Validate batch index\r\n'
    b'\t\t\tif (batchIdx >= metadata.imgHeights.size() ||\r\n'
    b'\t\t\t\tbatchIdx >= metadata.imgWidths.size()) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::PostprocessBatch",\r\n'
    b'\t\t\t\t\t"Batch index " + std::to_string(batchIdx) +\r\n'
    b'\t\t\t\t\t" out of range (stored " + std::to_string(metadata.imgHeights.size()) + " images)",\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn outputs;\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Validate feature vector\r\n'
    b'\t\t\tif (featureVector.empty()) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::PostprocessBatch",\r\n'
    b'\t\t\t\t\t"Feature vector is empty for batch index " + std::to_string(batchIdx),\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn outputs;\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Get image dimensions for this batch index\r\n'
    b'\t\t\tint imgHeight = metadata.imgHeights[batchIdx];\r\n'
    b'\t\t\tint imgWidth = metadata.imgWidths[batchIdx];\r\n'
    b'\r\n'
    b'\t\t\t// Find max element (classification result)\r\n'
    b'\t\t\tauto max_idx = std::max_element(featureVector.begin(), featureVector.end());\r\n'
    b'\t\t\tif (max_idx == featureVector.end()) {\r\n'
    b'\t\t\t\t_logger.LogFatal("TENSORRTCL::PostprocessBatch",\r\n'
    b'\t\t\t\t\t"Failed to find max element in feature vector for batch index " +\r\n'
    b'\t\t\t\t\tstd::to_string(batchIdx),\r\n'
    b'\t\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\t\treturn outputs;\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tint class_id = static_cast<int>(std::distance(featureVector.begin(), max_idx));\r\n'
    b'\t\t\tfloat score = *max_idx;\r\n'
    b'\r\n'
    b'\t\t\t// Create object result\r\n'
    b'\t\t\tObject clsResult;\r\n'
    b'\t\t\tclsResult.classId = class_id;\r\n'
    b'\r\n'
    b'\t\t\t// Get class name\r\n'
    b'\t\t\tint classNameSize = static_cast<int>(_classes.size());\r\n'
    b'\t\t\tif (!_classes.empty()) {\r\n'
    b'\t\t\t\tif (class_id >= 0 && class_id < classNameSize) {\r\n'
    b'\t\t\t\t\tclsResult.className = _classes[class_id];\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t\telse {\r\n'
    b'\t\t\t\t\tclsResult.className = _classes[classNameSize - 1];\r\n'
    b'\r\n'
    b'\t\t\t\t}\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\tclsResult.className = "Unknown";\r\n'
    b'\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\tclsResult.confidence = score;\r\n'
    b'\r\n'
    b'\t\t\t// Create bounding box with margins\r\n'
    b'\t\t\tif (imgWidth > 20 && imgHeight > 20) {\r\n'
    b'\t\t\t\tclsResult.box = cv::Rect(10, 10, imgWidth - 20, imgHeight - 20);\r\n'
    b'\t\t\t}\r\n'
    b'\t\t\telse {\r\n'
    b'\t\t\t\tclsResult.box = cv::Rect(0, 0, imgWidth, imgHeight);\r\n'
    b'\t\t\t}\r\n'
    b'\r\n'
    b'\t\t\t// Convert to normalized polygon\r\n'
    b'\t\t\tclsResult.polygon = ANSUtilityHelper::RectToNormalizedPolygon(\r\n'
    b'\t\t\t\tclsResult.box, imgWidth, imgHeight\r\n'
    b'\t\t\t);\r\n'
    b'\r\n'
    b'\t\t\tclsResult.cameraId = camera_id;\r\n'
    b'\r\n'
    b'\t\t\toutputs.push_back(std::move(clsResult));\r\n'
    b'\r\n'
    b'\t\t\treturn outputs;\r\n'
    b'\t\t}\r\n'
    b'\t\tcatch (const std::exception& e) {\r\n'
    b'\t\t\t_logger.LogFatal("TENSORRTCL::PostprocessBatch",\r\n'
    b'\t\t\t\t"Error for batch index " + std::to_string(batchIdx) + ": " + e.what(),\r\n'
    b'\t\t\t\t__FILE__, __LINE__);\r\n'
    b'\t\t\treturn outputs;\r\n'
    b'\t\t}\r\n'
    b'\t}\r\n',
    'Step 10: PostprocessBatch metadata')
print('Step 10: done')
# ──────────────────────────────────────────────────────────────────────────────
# Verification: check no old member names remain
# ──────────────────────────────────────────────────────────────────────────────
# Post-patch sanity scan: the rewrites above were supposed to retire these
# member names entirely; any survivor means a patch missed a call site and
# blocks the write-back below.
stale = [b'm_ratio', b'm_imgWidth', b'm_imgHeight',
         b'm_batchImgHeights', b'm_batchImgWidths', b'm_batchRatios',
         b'_isFixedBatch']
for s in stale:
    count = data.count(s)
    if count > 0:
        errors.append(f'Stale member {s.decode()} still appears {count}x')
# ──────────────────────────────────────────────────────────────────────────────
# Result
# ──────────────────────────────────────────────────────────────────────────────
# All-or-nothing: the target file is rewritten only if every patch step and
# the stale-name scan succeeded; otherwise report and exit non-zero so a
# calling build script can detect the failure.  No backup copy is kept —
# the original is assumed to be under version control.
if errors:
    print('\nERRORS:')
    for e in errors:
        print(' ', e)
    print('\nFile NOT written.')
    sys.exit(1)
else:
    with open(CPP, 'wb') as f:
        f.write(data)
    print(f'\nAll OK. File written ({len(data)} bytes).')