From f068eb716fbfc6352f0f858a0ee3aa564edc60b3 Mon Sep 17 00:00:00 2001 From: Tuan Nghia Nguyen Date: Wed, 22 Apr 2026 18:25:38 +1000 Subject: [PATCH] Add more yuv conversion --- MediaClient/media/video_player.cpp | 525 ++++++++++++++++++++++++++++- MediaClient/media/video_player.h | 10 +- 2 files changed, 522 insertions(+), 13 deletions(-) diff --git a/MediaClient/media/video_player.cpp b/MediaClient/media/video_player.cpp index 59f2734..ebfd2cc 100644 --- a/MediaClient/media/video_player.cpp +++ b/MediaClient/media/video_player.cpp @@ -61,8 +61,15 @@ extern std::atomic g_contiguousBytesInFlight; // in avframeYUV420PToCvMat with a direct I420→RGB24 (== OpenCV BGR memory // order) call. When the submodule isn't checked out, ANSCORE_HAS_LIBYUV is // not defined and we fall back to the pre-libyuv path. +// +// Extended coverage: I420/J420/I422/J422/I444/J444/I010/I210 → BGR. +// 4:2:2 and 4:4:4 8-bit formats have direct ToRGB24 converters. +// 10-bit formats (I010/I210) require a two-step chain: +// I010/I210ToARGB → ARGBToRGB24 (drops alpha byte, no channel swap). #if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV -#include "libyuv/convert_argb.h" // libyuv::I420ToRGB24 +#include "libyuv/convert.h" // I212ToI422, I412ToI444 (12→8-bit downconvert) +#include "libyuv/convert_argb.h" // I420/J420/I422/I444/I010/I210/I410/I012 → ARGB/RGB24 +#include "libyuv/convert_from_argb.h" // ARGBToRGB24 (for 10/12-bit paths) #endif @@ -1356,22 +1363,32 @@ cv::Mat CVideoPlayer::avframeYUV420PToCvMat(const AVFrame* frame) { } #if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV - // libyuv path: direct I420 (3 strided planes) → RGB24 (== BGR in memory - // order for libyuv, matches cv::Mat CV_8UC3 default). No staging buffer, - // no memcpy, no cv::cvtColor — one SIMD-optimized sweep. + // libyuv path: direct I420/J420 (3 strided planes) → RGB24 (== BGR in + // memory order for libyuv, matches cv::Mat CV_8UC3 default). 
No staging + buffer, no memcpy, no cv::cvtColor — one SIMD-optimized sweep. // // libyuv's "RGB24" is B,G,R per pixel in memory (see RGB24ToARGBRow_C // in libyuv/source/row_common.cc where src[0]=b, src[1]=g, src[2]=r). // That matches OpenCV's BGR layout — safe to wrap in CV_8UC3. + // + // YUVJ420P signals full-range ("JPEG-style"); use J420ToRGB24 so the + // luma/chroma offsets are not applied. Otherwise studio-range I420. cv::Mat bgrImage(height, width, CV_8UC3); - int ret = libyuv::I420ToRGB24( - frame->data[0], frame->linesize[0], - frame->data[1], frame->linesize[1], - frame->data[2], frame->linesize[2], - bgrImage.data, static_cast<int>(bgrImage.step), - width, height); + int ret = (frame->format == AV_PIX_FMT_YUVJ420P) + ? libyuv::J420ToRGB24( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + bgrImage.data, static_cast<int>(bgrImage.step), + width, height) + : libyuv::I420ToRGB24( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); if (ret != 0) { - std::cerr << "libyuv::I420ToRGB24 failed with ret=" << ret << std::endl; + std::cerr << "libyuv::I420/J420ToRGB24 failed with ret=" << ret << std::endl; return cv::Mat(); } if (m_nImageQuality == 1) { @@ -1469,6 +1486,428 @@ cv::Mat CVideoPlayer::avframeYUV420PToCvMat(const AVFrame* frame) { } } +// YUV422P / YUVJ422P → BGR via libyuv I422ToRGB24. +// libyuv provides only studio-range I422ToRGB24 (no J422ToRGB24), so for +// full-range YUVJ422P we route through J422ToARGB + ARGBToRGB24 to keep +// the correct full-range luma coefficients.
+cv::Mat CVideoPlayer::avframeYUV422PToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + cv::Mat bgrImage(height, width, CV_8UC3); + int ret; + if (frame->format == AV_PIX_FMT_YUVJ422P) { + // Full-range: J422ToARGB (BGRA in memory) → ARGBToRGB24 (BGR) + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + int r1 = libyuv::J422ToARGB( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + s_argbBuf.data(), argbStride, width, height); + if (r1 != 0) { + std::cerr << "libyuv::J422ToARGB failed ret=" << r1 << std::endl; + return cv::Mat(); + } + ret = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + } else { + ret = libyuv::I422ToRGB24( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + } + if (ret != 0) { + std::cerr << "libyuv::I422/J422ToRGB24 failed ret=" << ret << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV422PToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV444P / YUVJ444P → BGR via libyuv I444ToRGB24 (studio) or J444ToARGB + +// ARGBToRGB24 (full-range — no direct J444ToRGB24 in libyuv).
+cv::Mat CVideoPlayer::avframeYUV444PToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + cv::Mat bgrImage(height, width, CV_8UC3); + int ret; + if (frame->format == AV_PIX_FMT_YUVJ444P) { + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + int r1 = libyuv::J444ToARGB( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + s_argbBuf.data(), argbStride, width, height); + if (r1 != 0) { + std::cerr << "libyuv::J444ToARGB failed ret=" << r1 << std::endl; + return cv::Mat(); + } + ret = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + } else { + ret = libyuv::I444ToRGB24( + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2], + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + } + if (ret != 0) { + std::cerr << "libyuv::I444/J444ToRGB24 failed ret=" << ret << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV444PToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV420P10LE → BGR via libyuv I010ToARGB (BGRA in memory) → ARGBToRGB24 (BGR). +// 10-bit planar: Y/U/V are uint16 little-endian with 10 valid bits; FFmpeg's +// linesize[] is in bytes, libyuv expects it in uint16 elements → divide by 2.
+cv::Mat CVideoPlayer::avframeYUV420P10LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + + // Thread-local ARGB staging — ~8MB @ 1080p, ~32MB @ 4K. Reused across + // calls to avoid malloc+free on the hot path. + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + + int r1 = libyuv::I010ToARGB( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + s_argbBuf.data(), argbStride, width, height); + if (r1 != 0) { + std::cerr << "libyuv::I010ToARGB failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::ARGBToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV420P10LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV422P10LE → BGR via libyuv I210ToARGB → ARGBToRGB24.
+cv::Mat CVideoPlayer::avframeYUV422P10LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + + int r1 = libyuv::I210ToARGB( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + s_argbBuf.data(), argbStride, width, height); + if (r1 != 0) { + std::cerr << "libyuv::I210ToARGB failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::ARGBToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV422P10LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV444P10LE → BGR via libyuv I410ToARGBMatrix + ARGBToRGB24. +// Only the Matrix variant is provided by libyuv for 10-bit 4:4:4, so pass +// an explicit BT.601 constants block (matches the implicit default used by +// the non-Matrix ToARGB calls above).
+cv::Mat CVideoPlayer::avframeYUV444P10LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + + int r1 = libyuv::I410ToARGBMatrix( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + s_argbBuf.data(), argbStride, + &libyuv::kYuvI601Constants, width, height); + if (r1 != 0) { + std::cerr << "libyuv::I410ToARGBMatrix failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::ARGBToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV444P10LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV420P12LE → BGR via libyuv I012ToARGBMatrix + ARGBToRGB24. +// libyuv's 12-bit path truncates the low bits to 8-bit ARGB internally — fine +// for inference which is 8-bit anyway.
+cv::Mat CVideoPlayer::avframeYUV420P12LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + + static thread_local std::vector<uint8_t> s_argbBuf; + const size_t argbBytes = static_cast<size_t>(width) * height * 4; + if (s_argbBuf.size() < argbBytes) s_argbBuf.resize(argbBytes); + const int argbStride = width * 4; + + int r1 = libyuv::I012ToARGBMatrix( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + s_argbBuf.data(), argbStride, + &libyuv::kYuvI601Constants, width, height); + if (r1 != 0) { + std::cerr << "libyuv::I012ToARGBMatrix failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::ARGBToRGB24( + s_argbBuf.data(), argbStride, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::ARGBToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV420P12LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV422P12LE → BGR via libyuv I212ToI422 (12→8-bit 4:2:2 downconvert) then +// I422ToRGB24. libyuv has no direct I212ToARGB, so the two-step SIMD chain +// is the fast path. Intermediate I422 staging = 2*W*H bytes (≈16 MB @ 4K).
+cv::Mat CVideoPlayer::avframeYUV422P12LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + const int uvW = width / 2; + + // Thread-local 8-bit I422 staging: [Y: W×H][U: W/2×H][V: W/2×H] + static thread_local std::vector<uint8_t> s_i422Buf; + const size_t i422Bytes = static_cast<size_t>(width) * height + + static_cast<size_t>(uvW) * height * 2; + if (s_i422Buf.size() < i422Bytes) s_i422Buf.resize(i422Bytes); + + uint8_t* y8 = s_i422Buf.data(); + uint8_t* u8 = y8 + static_cast<size_t>(width) * height; + uint8_t* v8 = u8 + static_cast<size_t>(uvW) * height; + + int r1 = libyuv::I212ToI422( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + y8, width, + u8, uvW, + v8, uvW, + width, height); + if (r1 != 0) { + std::cerr << "libyuv::I212ToI422 failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::I422ToRGB24( + y8, width, + u8, uvW, + v8, uvW, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::I422ToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV422P12LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + +// YUV444P12LE → BGR via libyuv I412ToI444 (12→8-bit 4:4:4 downconvert) then +// I444ToRGB24. Intermediate I444 staging = 3*W*H bytes (≈24 MB @ 4K).
+cv::Mat CVideoPlayer::avframeYUV444P12LEToCvMat(const AVFrame* frame) { + try { + if (!frame || frame->width <= 0 || frame->height <= 0) return cv::Mat(); +#if defined(ANSCORE_HAS_LIBYUV) && ANSCORE_HAS_LIBYUV + const int width = frame->width; + const int height = frame->height; + + // Thread-local 8-bit I444 staging: [Y: W×H][U: W×H][V: W×H] + static thread_local std::vector<uint8_t> s_i444Buf; + const size_t planeBytes = static_cast<size_t>(width) * height; + const size_t i444Bytes = planeBytes * 3; + if (s_i444Buf.size() < i444Bytes) s_i444Buf.resize(i444Bytes); + + uint8_t* y8 = s_i444Buf.data(); + uint8_t* u8 = y8 + planeBytes; + uint8_t* v8 = u8 + planeBytes; + + int r1 = libyuv::I412ToI444( + reinterpret_cast<const uint16_t*>(frame->data[0]), frame->linesize[0] / 2, + reinterpret_cast<const uint16_t*>(frame->data[1]), frame->linesize[1] / 2, + reinterpret_cast<const uint16_t*>(frame->data[2]), frame->linesize[2] / 2, + y8, width, + u8, width, + v8, width, + width, height); + if (r1 != 0) { + std::cerr << "libyuv::I412ToI444 failed ret=" << r1 << std::endl; + return cv::Mat(); + } + + cv::Mat bgrImage(height, width, CV_8UC3); + int r2 = libyuv::I444ToRGB24( + y8, width, + u8, width, + v8, width, + bgrImage.data, static_cast<int>(bgrImage.step), + width, height); + if (r2 != 0) { + std::cerr << "libyuv::I444ToRGB24 failed ret=" << r2 << std::endl; + return cv::Mat(); + } + if (m_nImageQuality == 1) { + bgrImage.convertTo(bgrImage, -1, 255.0 / 219.0, -16.0 * 255.0 / 219.0); + } + return bgrImage; +#else + return avframeAnyToCvmat(frame); +#endif + } + catch (const std::exception& e) { + std::cerr << "Exception in avframeYUV444P12LEToCvMat: " << e.what() << std::endl; + return cv::Mat(); + } +} + cv::Mat CVideoPlayer::avframeToCVMat(const AVFrame* pFrame) { // No _mutex here: caller (getImage) releases the mutex before invoking this // so the expensive NV12/YUV420P→BGR conversion does not block onVideoFrame.
@@ -1514,12 +1953,74 @@ cv::Mat CVideoPlayer::avframeToCVMat(const AVFrame* pFrame) { case AV_PIX_FMT_YUVJ420P: if (logThis) { ANS_DBG("MEDIA_Convert", - "DISPATCH call#%llu fmt=%s %dx%d -> avframeYUV420PToCvMat (SW-decode path)", + "DISPATCH call#%llu fmt=%s %dx%d -> avframeYUV420PToCvMat (libyuv SW path)", (unsigned long long)dispN, (pFrame->format == AV_PIX_FMT_YUVJ420P) ? "YUVJ420P" : "YUV420P", pFrame->width, pFrame->height); } return avframeYUV420PToCvMat(pFrame); + case AV_PIX_FMT_YUV422P: + case AV_PIX_FMT_YUVJ422P: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=%s %dx%d -> avframeYUV422PToCvMat (libyuv SW path)", + (unsigned long long)dispN, + (pFrame->format == AV_PIX_FMT_YUVJ422P) ? "YUVJ422P" : "YUV422P", + pFrame->width, pFrame->height); + } + return avframeYUV422PToCvMat(pFrame); + case AV_PIX_FMT_YUV444P: + case AV_PIX_FMT_YUVJ444P: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=%s %dx%d -> avframeYUV444PToCvMat (libyuv SW path)", + (unsigned long long)dispN, + (pFrame->format == AV_PIX_FMT_YUVJ444P) ? 
"YUVJ444P" : "YUV444P", + pFrame->width, pFrame->height); + } + return avframeYUV444PToCvMat(pFrame); + case AV_PIX_FMT_YUV420P10LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV420P10LE %dx%d -> avframeYUV420P10LEToCvMat (libyuv SW path)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV420P10LEToCvMat(pFrame); + case AV_PIX_FMT_YUV422P10LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV422P10LE %dx%d -> avframeYUV422P10LEToCvMat (libyuv SW path)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV422P10LEToCvMat(pFrame); + case AV_PIX_FMT_YUV444P10LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV444P10LE %dx%d -> avframeYUV444P10LEToCvMat (libyuv SW path)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV444P10LEToCvMat(pFrame); + case AV_PIX_FMT_YUV420P12LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV420P12LE %dx%d -> avframeYUV420P12LEToCvMat (libyuv SW path)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV420P12LEToCvMat(pFrame); + case AV_PIX_FMT_YUV422P12LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV422P12LE %dx%d -> avframeYUV422P12LEToCvMat (libyuv SW path, 12→8→BGR)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV422P12LEToCvMat(pFrame); + case AV_PIX_FMT_YUV444P12LE: + if (logThis) { + ANS_DBG("MEDIA_Convert", + "DISPATCH call#%llu fmt=YUV444P12LE %dx%d -> avframeYUV444P12LEToCvMat (libyuv SW path, 12→8→BGR)", + (unsigned long long)dispN, pFrame->width, pFrame->height); + } + return avframeYUV444P12LEToCvMat(pFrame); default: if (logThis) { const char* name = av_get_pix_fmt_name((AVPixelFormat)pFrame->format); diff --git a/MediaClient/media/video_player.h b/MediaClient/media/video_player.h index cf57e56..468ea91 100644 --- 
a/MediaClient/media/video_player.h +++ b/MediaClient/media/video_player.h @@ -252,7 +252,15 @@ protected: cv::Mat avframeAnyToCvmat(const AVFrame* frame); cv::Mat avframeNV12ToCvMat(const AVFrame* frame); - cv::Mat avframeYUV420PToCvMat(const AVFrame* frame); // YUV420P/YUVJ420P → BGR (OpenCV, no sws_scale) + cv::Mat avframeYUV420PToCvMat(const AVFrame* frame); // YUV420P/YUVJ420P → BGR (libyuv I420/J420ToRGB24) + cv::Mat avframeYUV422PToCvMat(const AVFrame* frame); // YUV422P/YUVJ422P → BGR (libyuv I422ToRGB24 / J422ToARGB+ARGBToRGB24) + cv::Mat avframeYUV444PToCvMat(const AVFrame* frame); // YUV444P/YUVJ444P → BGR (libyuv I444ToRGB24 / J444ToARGB+ARGBToRGB24) + cv::Mat avframeYUV420P10LEToCvMat(const AVFrame* frame); // YUV420P10LE → BGR (libyuv I010ToARGB + ARGBToRGB24) + cv::Mat avframeYUV422P10LEToCvMat(const AVFrame* frame); // YUV422P10LE → BGR (libyuv I210ToARGB + ARGBToRGB24) + cv::Mat avframeYUV444P10LEToCvMat(const AVFrame* frame); // YUV444P10LE → BGR (libyuv I410ToARGBMatrix + ARGBToRGB24) + cv::Mat avframeYUV420P12LEToCvMat(const AVFrame* frame); // YUV420P12LE → BGR (libyuv I012ToARGBMatrix + ARGBToRGB24) + cv::Mat avframeYUV422P12LEToCvMat(const AVFrame* frame); // YUV422P12LE → BGR (libyuv I212ToI422 + I422ToRGB24) + cv::Mat avframeYUV444P12LEToCvMat(const AVFrame* frame); // YUV444P12LE → BGR (libyuv I412ToI444 + I444ToRGB24) cv::Mat avframeYUVJ420PToCvmat(const AVFrame* frame); cv::Mat avframeToCVMat(const AVFrame* frame);