Remove [Engine] and [EnginePoolManager] debug log messages
Cleaned up verbose engine telemetry emitted to stdout/stderr and the Windows Event Viewer. Removes logEngineEvent/logEvent calls (and their diagnostic-only locals) across the TensorRT engine load, build, run, multi-GPU, and pool-manager paths, plus the now-unused logEvent helper in EnginePoolManager. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -486,10 +486,6 @@ bool Engine<T>::runInference(const std::vector<std::vector<cv::cuda::GpuMat>>& i
     // valid here. Guard against the (unlikely) edge case where runInference is
     // called before loadNetwork succeeds.
     if (!m_streamInitialized || !m_inferenceStream) {
-        std::string errMsg = "Error: Inference stream not initialised. "
-                             "Call loadNetwork() / buildLoadNetwork() before runInference().";
-        std::cout << errMsg << std::endl;
-        logEngineEvent("[Engine] runInference: " + errMsg, true);
         return false;
     }
 
@@ -902,20 +898,6 @@ bool Engine<T>::runInference(const std::vector<std::vector<cv::cuda::GpuMat>>& i
     }
     if (!success) {
         ANS_DBG("TRT_Engine", "ERROR: enqueueV3 FAILED batch=%d", batchSize);
-        std::string debugInfo = "[Engine] runInference FAIL: enqueue returned false, batch="
-            + std::to_string(batchSize)
-            + ", dimsSpecified=" + (m_context->allInputDimensionsSpecified() ? "YES" : "NO");
-        for (size_t i = 0; i < m_IOTensorNames.size(); ++i) {
-            auto shape = m_context->getTensorShape(m_IOTensorNames[i].c_str());
-            debugInfo += ", tensor'" + m_IOTensorNames[i] + "'=[";
-            for (int j = 0; j < shape.nbDims; ++j) {
-                if (j > 0) debugInfo += ",";
-                debugInfo += std::to_string(shape.d[j]);
-            }
-            debugInfo += "]";
-        }
-        std::cout << debugInfo << std::endl;
-        logEngineEvent(debugInfo, true);
         return false;
     }
 
@@ -933,11 +915,6 @@ bool Engine<T>::runInference(const std::vector<std::vector<cv::cuda::GpuMat>>& i
                                      m_inferenceStream);
 
             if (copyErr != cudaSuccess) {
-                std::string errMsg = "[Engine] runInference FAIL: cudaMemcpyAsync output "
-                    + std::to_string(outputIdx) + " batch " + std::to_string(batch)
-                    + ": " + cudaGetErrorString(copyErr);
-                std::cout << errMsg << std::endl;
-                logEngineEvent(errMsg, true);
                 return false;
             }
         }
@@ -953,10 +930,6 @@ bool Engine<T>::runInference(const std::vector<std::vector<cv::cuda::GpuMat>>& i
         if (syncErr != cudaSuccess) {
             ANS_DBG("TRT_Engine", "ERROR: cudaStreamSync FAILED err=%d (%s)",
                     (int)syncErr, cudaGetErrorString(syncErr));
-            std::string errMsg = "[Engine] runInference FAIL: cudaStreamSynchronize: "
-                + std::string(cudaGetErrorString(syncErr));
-            std::cout << errMsg << std::endl;
-            logEngineEvent(errMsg, true);
             return false;
         }
     }
Reference in New Issue
Block a user