Disable NV12 path for ANSCV by default. Currently uses cv::Mat** directly
This commit is contained in:
@@ -1250,50 +1250,25 @@ cv::Mat CVideoPlayer::avframeNV12ToCvMat(const AVFrame* frame)
|
||||
m_nv12OrigWidth = width;
|
||||
m_nv12OrigHeight = height;
|
||||
|
||||
// Display optimization: resize NV12 planes to max 1080p before color conversion.
|
||||
// For 4K (3840x2160), this reduces pixel count by 4x:
|
||||
// - 4K NV12→BGR: ~13-76ms on slow CPU (Xeon 2GHz), ~2ms on fast CPU
|
||||
// - 1080p NV12→BGR: ~3-5ms on slow CPU, ~0.5ms on fast CPU
|
||||
// The full-res NV12 is preserved separately for inference (m_currentNV12Frame).
|
||||
const int MAX_DISPLAY_HEIGHT = 1080;
|
||||
bool needsResize = (height > MAX_DISPLAY_HEIGHT);
|
||||
// Return full-resolution BGR image.
|
||||
// No forced downscale — LabVIEW manages display resolution via SetDisplayResolution().
|
||||
// If the caller needs a specific display size, SetDisplayResolution(w, h) applies
|
||||
// resizing in GetImage() at the ANSRTSP/ANS*Client level after this returns.
|
||||
|
||||
// Store original NV12 dimensions for inference coordinate mapping
|
||||
m_nv12OrigWidth = width;
|
||||
m_nv12OrigHeight = height;
|
||||
|
||||
cv::Mat yPlane(height, width, CV_8UC1, frame->data[0], frame->linesize[0]);
|
||||
cv::Mat uvPlane(height / 2, width / 2, CV_8UC2, frame->data[1], frame->linesize[1]);
|
||||
|
||||
if (needsResize) {
|
||||
// Scale to fit within 1080p, maintaining aspect ratio
|
||||
double scale = (double)MAX_DISPLAY_HEIGHT / height;
|
||||
int dstW = (int)(width * scale) & ~1; // even width for NV12
|
||||
int dstH = (int)(height * scale) & ~1; // even height for NV12
|
||||
cv::Mat bgrImage;
|
||||
cv::cvtColorTwoPlane(yPlane, uvPlane, bgrImage, cv::COLOR_YUV2BGR_NV12);
|
||||
|
||||
cv::Mat yResized, uvResized;
|
||||
cv::resize(yPlane, yResized, cv::Size(dstW, dstH), 0, 0, cv::INTER_LINEAR);
|
||||
cv::resize(uvPlane, uvResized, cv::Size(dstW / 2, dstH / 2), 0, 0, cv::INTER_LINEAR);
|
||||
|
||||
cv::Mat bgrImage;
|
||||
cv::cvtColorTwoPlane(yResized, uvResized, bgrImage, cv::COLOR_YUV2BGR_NV12);
|
||||
|
||||
if (m_nImageQuality == 1) {
|
||||
bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0);
|
||||
}
|
||||
return bgrImage;
|
||||
}
|
||||
|
||||
// No resize needed (already <= 1080p)
|
||||
if (m_nImageQuality == 0) {
|
||||
cv::Mat bgrImage;
|
||||
cv::cvtColorTwoPlane(yPlane, uvPlane, bgrImage, cv::COLOR_YUV2BGR_NV12);
|
||||
return bgrImage;
|
||||
}
|
||||
|
||||
// Quality path with range expansion
|
||||
{
|
||||
cv::Mat bgrImage;
|
||||
cv::cvtColorTwoPlane(yPlane, uvPlane, bgrImage, cv::COLOR_YUV2BGR_NV12);
|
||||
if (m_nImageQuality == 1) {
|
||||
bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0);
|
||||
return bgrImage;
|
||||
}
|
||||
return bgrImage;
|
||||
}
|
||||
catch (const std::exception& e) {
|
||||
std::cerr << "Exception in avframeNV12ToCvMat: " << e.what() << std::endl;
|
||||
@@ -1861,6 +1836,12 @@ double CVideoPlayer::getFrameRate()
|
||||
|
||||
return 0;
|
||||
}
|
||||
void CVideoPlayer::setTargetFPS(double intervalMs)
|
||||
{
|
||||
std::lock_guard<std::recursive_mutex> lock(_mutex);
|
||||
m_targetIntervalMs = intervalMs;
|
||||
m_targetFPSInitialized = false; // reset timing on change
|
||||
}
|
||||
void CVideoPlayer::playVideo(uint8* data, int len, uint32 ts, uint16 seq)
|
||||
{
|
||||
if (m_bRecording)
|
||||
@@ -2080,6 +2061,25 @@ void CVideoPlayer::onVideoFrame(AVFrame* frame)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Frame rate limiting ---
|
||||
// Skip post-decode processing (clone, queue push, CUDA clone) if not enough
|
||||
// time has elapsed since the last processed frame. The decode itself still
|
||||
// runs for every packet to maintain the H.264/H.265 reference frame chain.
|
||||
if (m_targetIntervalMs > 0.0) {
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
if (!m_targetFPSInitialized) {
|
||||
m_lastProcessedTime = now;
|
||||
m_targetFPSInitialized = true;
|
||||
} else {
|
||||
auto elapsed = std::chrono::duration<double, std::milli>(now - m_lastProcessedTime).count();
|
||||
if (elapsed < m_targetIntervalMs) {
|
||||
return; // Skip this frame — too soon
|
||||
}
|
||||
}
|
||||
m_lastProcessedTime = now;
|
||||
}
|
||||
// --- End frame rate limiting ---
|
||||
|
||||
// Push frame to queue; during settle period getImage() will ignore the queue
|
||||
// and keep returning the last good cached image
|
||||
g_frameQueue.pushFrame(frame); // pushFrame() clones the frame internally
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <turbojpeg.h>
|
||||
#include <chrono>
|
||||
|
||||
typedef struct
|
||||
{
|
||||
@@ -146,6 +147,7 @@ public:
|
||||
}
|
||||
// Image quality mode: 0=fast (OpenCV BT.601, ~2ms), 1=quality (sws BT.709+range, ~12ms)
|
||||
virtual void setImageQuality(int mode) { m_nImageQuality = mode; }
|
||||
void setTargetFPS(double intervalMs); // Set minimum interval between processed frames in ms (0 = no limit, 100 = ~10 FPS)
|
||||
virtual void setRtpMulticast(BOOL flag) {}
|
||||
virtual void setRtpOverUdp(BOOL flag) {}
|
||||
|
||||
@@ -266,6 +268,11 @@ protected:
|
||||
int m_cleanFrameCount = 0; // Count of clean frames after keyframe
|
||||
static const int SETTLE_FRAME_COUNT = 5; // Number of clean frames before delivering new frames
|
||||
|
||||
// Frame rate limiting — skip post-decode processing for frames beyond target interval
|
||||
double m_targetIntervalMs = 100.0; // default 100ms (~10 FPS), 0 = no limit (process all frames)
|
||||
std::chrono::steady_clock::time_point m_lastProcessedTime; // timestamp of last processed frame
|
||||
bool m_targetFPSInitialized = false; // first-frame flag
|
||||
|
||||
BOOL m_bPlaying;
|
||||
BOOL m_bPaused;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user