6811 lines
226 KiB
C++
6811 lines
226 KiB
C++
#include "ANSOpenCV.h"
#include "ANSMatRegistry.h"
#include <nivision.h>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <json.hpp>
#include "boost/property_tree/ptree.hpp"
#include "boost/property_tree/json_parser.hpp"
#include "boost/foreach.hpp"
#include "ReadBarcode.h"
#include "sys_inc.h"
#include "rtsp_cln.h"
#include "hqueue.h"
#include "http.h"
#include "http_parse.h"
#include "rtsp_player.h"
#include <filesystem>
#include <chrono>
#include <mutex>
#include <turbojpeg.h>
#include <nvjpeg.h>
#include <cuda_runtime.h>
#include "ANSCVVendorGate.h"
#include <thread>
#include <future>
#include <algorithm>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/dnn.hpp>

// SIMD includes
#ifdef __AVX2__
#include <immintrin.h>
#elif defined(__SSE2__)
#include <emmintrin.h>
#endif

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/error.h>
#include <libavutil/imgutils.h>
}
|
||
std::mutex imageMutex; // Global mutex for thread safety
// Timed variant — presumably for callers that need a bounded wait
// (try_lock_for); its usage is not visible in this chunk.
std::timed_mutex timeImageMutex;

// Process-wide license state. Written once by VerifyGlobalLicense()
// (guarded by the once_flag below), then read by every ANSOPENCV instance.
static bool ansCVLicenceValid = false;
// Global once_flag to protect license checking
static std::once_flag ansCVLicenseOnceFlag;
|
||
|
||
//template <typename T>
|
||
//T GetData(const boost::property_tree::ptree& pt, const std::string& key)
|
||
//{
|
||
// T ret;
|
||
// if (boost::optional<T> data = pt.get_optional<T>(key))
|
||
// {
|
||
// ret = data.get();
|
||
// }
|
||
// return ret;
|
||
//}
|
||
namespace fs = std::filesystem;
|
||
|
||
namespace ANSCENTER
|
||
{
|
||
// Global function to verify license only once
|
||
static void VerifyGlobalLicense(const std::string& licenseKey) {
|
||
ansCVLicenceValid = ANSCENTER::ANSLicenseHelper::LicenseVerification(licenseKey, 1007, "ANSCV");
|
||
if (!ansCVLicenceValid) { // we also support ANSTS license
|
||
ansCVLicenceValid = ANSCENTER::ANSLicenseHelper::LicenseVerification(licenseKey, 1003, "ANSVIS");//Default productId=1003 (ANSVIS)
|
||
}
|
||
if (!ansCVLicenceValid) { // we also support ANSTS license
|
||
ansCVLicenceValid = ANSCENTER::ANSLicenseHelper::LicenseVerification(licenseKey, 1008, "ANSTS");//Default productId=1008 (ANSTS)
|
||
}
|
||
}
|
||
// Initializes the TurboJPEG compressor handle and pre-allocates a reusable
// 2 MB output buffer so compress() usually avoids per-call allocation.
// NOTE(review): both failure paths call std::exit(1), terminating the whole
// process — confirm that is acceptable for library consumers.
TurboJpegCompressor::TurboJpegCompressor() {
    _handle = tjInitCompress();
    if (!_handle) {
        std::cerr << "Failed to initialize TurboJPEG: " << tjGetErrorStr() << std::endl;
        std::exit(1);
    }

    // Pre-allocate buffer to avoid tjAlloc/tjFree overhead
    _bufferSize = 2 * 1024 * 1024; // 2MB - should handle most images
    _buffer = static_cast<unsigned char*>(tjAlloc(_bufferSize));
    if (!_buffer) {
        std::cerr << "Failed to allocate JPEG buffer" << std::endl;
        tjDestroy(_handle);
        std::exit(1);
    }
}
|
||
// Releases the pre-allocated JPEG buffer and the compressor handle.
// Pointers are nulled afterwards so repeated destruction logic is harmless.
TurboJpegCompressor::~TurboJpegCompressor() noexcept {
    if (_buffer) {
        tjFree(_buffer);
        _buffer = nullptr;
    }
    if (_handle) {
        tjDestroy(_handle);
        _handle = nullptr;
    }
}
|
||
|
||
// Your original logic with minimal optimizations
|
||
std::string TurboJpegCompressor::compress(const cv::Mat& image, int quality) {
|
||
if (image.empty()) {
|
||
std::cerr << "Error: Input image is empty!" << std::endl;
|
||
return "";
|
||
}
|
||
int pixelFormat;
|
||
int subsampling;
|
||
|
||
if (image.type() == CV_8UC1) {
|
||
pixelFormat = TJPF_GRAY;
|
||
subsampling = TJSAMP_GRAY;
|
||
}
|
||
else if (image.type() == CV_8UC3) {
|
||
pixelFormat = TJPF_BGR;
|
||
subsampling = TJSAMP_420;
|
||
}
|
||
else {
|
||
std::cerr << "Error: Unsupported image format!" << std::endl;
|
||
return "";
|
||
}
|
||
|
||
// Try with pre-allocated buffer first (fastest path)
|
||
unsigned long jpegSize = _bufferSize;
|
||
if (tjCompress2(_handle, image.data, image.cols, 0, image.rows, pixelFormat,
|
||
&_buffer, &jpegSize, subsampling, quality,
|
||
TJFLAG_FASTDCT | TJFLAG_FASTUPSAMPLE) >= 0) {
|
||
return std::string(reinterpret_cast<char*>(_buffer), jpegSize);
|
||
}
|
||
|
||
// Fallback to dynamic allocation (your original method)
|
||
unsigned char* jpegBuf = nullptr;
|
||
jpegSize = 0;
|
||
if (tjCompress2(_handle, image.data, image.cols, 0, image.rows, pixelFormat,
|
||
&jpegBuf, &jpegSize, subsampling, quality,
|
||
TJFLAG_FASTDCT | TJFLAG_FASTUPSAMPLE) < 0) {
|
||
std::cerr << "JPEG compression failed: " << tjGetErrorStr() << std::endl;
|
||
return "";
|
||
}
|
||
|
||
std::string jpegString(reinterpret_cast<char*>(jpegBuf), jpegSize);
|
||
tjFree(jpegBuf);
|
||
|
||
// If we needed more space, expand our buffer for next time
|
||
if (jpegSize > _bufferSize) {
|
||
tjFree(_buffer);
|
||
_bufferSize = jpegSize * 2;
|
||
_buffer = static_cast<unsigned char*>(tjAlloc(_bufferSize));
|
||
}
|
||
|
||
return jpegString;
|
||
}
|
||
// ── NvJpegCompressor: GPU-accelerated JPEG (NVIDIA only) ──
|
||
|
||
// Sets up the nvJPEG encoder (handle, state, params, CUDA stream).
// GPU encoding is only attempted on NVIDIA hardware; otherwise the object
// stays in the !_valid state and compress() returns "".
NvJpegCompressor::NvJpegCompressor() {
    if (!anscv_vendor_gate::IsNvidiaGpuAvailable()) return;

    // The members appear to be stored as opaque pointers in the header
    // (hence these reinterpret_casts back to the real nvJPEG/CUDA types) —
    // TODO confirm against the class declaration.
    auto handle = reinterpret_cast<nvjpegHandle_t*>(&_nvHandle);
    auto state = reinterpret_cast<nvjpegEncoderState_t*>(&_encState);
    auto params = reinterpret_cast<nvjpegEncoderParams_t*>(&_encParams);
    auto stream = reinterpret_cast<cudaStream_t*>(&_stream);

    // Any failure aborts construction; cleanup() releases whatever was
    // created so far, _valid stays false, and callers fall back to CPU.
    if (nvjpegCreateSimple(handle) != NVJPEG_STATUS_SUCCESS) return;
    if (nvjpegEncoderStateCreate(*handle, state, nullptr) != NVJPEG_STATUS_SUCCESS) { cleanup(); return; }
    if (nvjpegEncoderParamsCreate(*handle, params, nullptr) != NVJPEG_STATUS_SUCCESS) { cleanup(); return; }
    if (cudaStreamCreate(stream) != cudaSuccess) { cleanup(); return; }

    _valid = true;
}
|
||
|
||
// Releases all nvJPEG/CUDA resources; safe even if construction failed part-way.
NvJpegCompressor::~NvJpegCompressor() noexcept { cleanup(); }
|
||
|
||
// Tears down every resource in reverse order of creation (see the ctor):
// GPU buffer, stream, encoder params, encoder state, then the library
// handle. Each pointer is nulled so cleanup() is safe to call repeatedly.
void NvJpegCompressor::cleanup() noexcept {
    if (_gpuBuffer) { cudaFree(_gpuBuffer); _gpuBuffer = nullptr; _gpuBufferSize = 0; }
    if (_stream) { cudaStreamDestroy(reinterpret_cast<cudaStream_t>(_stream)); _stream = nullptr; }
    if (_encParams) { nvjpegEncoderParamsDestroy(reinterpret_cast<nvjpegEncoderParams_t>(_encParams)); _encParams = nullptr; }
    if (_encState) { nvjpegEncoderStateDestroy(reinterpret_cast<nvjpegEncoderState_t>(_encState)); _encState = nullptr; }
    if (_nvHandle) { nvjpegDestroy(reinterpret_cast<nvjpegHandle_t>(_nvHandle)); _nvHandle = nullptr; }
    _valid = false;
}
|
||
|
||
// Encodes an 8-bit BGR cv::Mat to JPEG on the GPU via nvJPEG.
// Returns the JPEG bytes, or "" when the encoder is unavailable, the input
// is unsupported, or any CUDA/nvJPEG call fails (caller falls back to CPU).
//
// Fix: the upload copies width*height*3 contiguous bytes, so a
// non-continuous Mat (e.g. a ROI view) must be packed first — previously
// such input read the wrong (and potentially out-of-bounds) memory.
std::string NvJpegCompressor::compress(const cv::Mat& image, int quality) {
    if (!_valid || image.empty()) return "";

    // Only support BGR 8-bit (the common path)
    if (image.type() != CV_8UC3) return "";

    cv::Mat packed = image.isContinuous() ? image : image.clone();

    auto handle = reinterpret_cast<nvjpegHandle_t>(_nvHandle);
    auto state = reinterpret_cast<nvjpegEncoderState_t>(_encState);
    auto params = reinterpret_cast<nvjpegEncoderParams_t>(_encParams);
    auto stream = reinterpret_cast<cudaStream_t>(_stream);

    const int width = packed.cols;
    const int height = packed.rows;
    const size_t imageSize = static_cast<size_t>(width) * height * 3;

    // Reuse GPU buffer, grow if needed
    if (imageSize > _gpuBufferSize) {
        if (_gpuBuffer) cudaFree(_gpuBuffer);
        // Allocate with 25% headroom to reduce reallocations
        _gpuBufferSize = imageSize + imageSize / 4;
        if (cudaMalloc(&_gpuBuffer, _gpuBufferSize) != cudaSuccess) {
            _gpuBuffer = nullptr;
            _gpuBufferSize = 0;
            return "";
        }
    }

    // Upload interleaved BGR to GPU
    if (cudaMemcpy(_gpuBuffer, packed.data, imageSize, cudaMemcpyHostToDevice) != cudaSuccess)
        return "";

    // Configure encoder
    if (nvjpegEncoderParamsSetQuality(params, quality, stream) != NVJPEG_STATUS_SUCCESS) return "";
    if (nvjpegEncoderParamsSetSamplingFactors(params, NVJPEG_CSS_420, stream) != NVJPEG_STATUS_SUCCESS) return "";
    if (nvjpegEncoderParamsSetOptimizedHuffman(params, 1, stream) != NVJPEG_STATUS_SUCCESS) return "";

    // Set up nvjpegImage_t for interleaved BGR: single plane, pitch = row bytes
    nvjpegImage_t nv_image = {};
    nv_image.channel[0] = _gpuBuffer;
    nv_image.pitch[0] = static_cast<unsigned int>(width * 3);

    // Encode
    if (nvjpegEncodeImage(handle, state, params, &nv_image,
        NVJPEG_INPUT_BGRI, width, height, stream) != NVJPEG_STATUS_SUCCESS)
        return "";

    // Query the compressed size (nullptr destination = size-only query)
    size_t jpegSize = 0;
    if (nvjpegEncodeRetrieveBitstream(handle, state, nullptr, &jpegSize, stream) != NVJPEG_STATUS_SUCCESS)
        return "";

    // Retrieve bitstream directly into the result string
    std::string jpegStr(jpegSize, '\0');
    if (nvjpegEncodeRetrieveBitstream(handle, state,
        reinterpret_cast<unsigned char*>(jpegStr.data()), &jpegSize, stream) != NVJPEG_STATUS_SUCCESS)
        return "";

    // Wait for the async retrieve to finish before the caller reads the bytes
    if (cudaStreamSynchronize(stream) != cudaSuccess)
        return "";

    jpegStr.resize(jpegSize);
    return jpegStr;
}
|
||
|
||
// ── NvJpegPool: VRAM-scaled pool of GPU encoders, lock-free acquire ──
|
||
|
||
int NvJpegPool::detectPoolSize() {
|
||
// Query VRAM via CUDA and scale: 1 encoder per 2 GB, min 1
|
||
int deviceCount = 0;
|
||
if (cudaGetDeviceCount(&deviceCount) != cudaSuccess || deviceCount <= 0)
|
||
return 0;
|
||
|
||
cudaDeviceProp prop{};
|
||
if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess)
|
||
return 0;
|
||
|
||
size_t vramGB = prop.totalGlobalMem / (1024ULL * 1024ULL * 1024ULL);
|
||
int pool = static_cast<int>(vramGB / 2);
|
||
if (pool < 1) pool = 1;
|
||
|
||
ANS_DBG("ANSCV", "NvJpegPool: GPU=%s, VRAM=%zuGB, poolSize=%d", prop.name, vramGB, pool);
|
||
return pool;
|
||
}
|
||
|
||
// Meyers singleton: the C++11 magic static guarantees thread-safe,
// lazy, one-time construction of the pool.
NvJpegPool& NvJpegPool::Instance() {
    static NvJpegPool instance;
    return instance;
}
|
||
|
||
// Builds the encoder pool on NVIDIA hardware. Each slot gets its own
// NvJpegCompressor and a matching "in use" flag; the pool counts as
// available as soon as at least one encoder initializes successfully.
NvJpegPool::NvJpegPool() {
    if (!anscv_vendor_gate::IsNvidiaGpuAvailable()) return;

    _poolSize = detectPoolSize();
    if (_poolSize <= 0) return;

    _encoders.resize(_poolSize);
    _inUse = std::make_unique<std::atomic<bool>[]>(_poolSize);

    for (int slot = 0; slot < _poolSize; ++slot) {
        _inUse[slot].store(false, std::memory_order_relaxed);
        auto encoder = std::make_unique<NvJpegCompressor>();
        if (encoder->isValid()) {
            _encoders[slot] = std::move(encoder);
            _available = true; // at least one encoder works
        }
        // Failed encoders leave their slot empty (nullptr).
    }

    ANS_DBG("ANSCV", "NvJpegPool: initialized %d encoder(s), available=%d", _poolSize, _available ? 1 : 0);
}
|
||
|
||
// Attempts GPU JPEG encoding using the first idle encoder in the pool.
// Returns "" when the pool is unavailable, all slots are busy, or the
// encode fails — in every case the caller falls back to TurboJPEG.
//
// Fix: the slot flag is now released even if compress() throws (e.g.
// std::bad_alloc while building the result string); previously the
// encoder would have been leaked as permanently "busy".
std::string NvJpegPool::tryCompress(const cv::Mat& image, int quality) {
    if (!_available) return "";

    // Lock-free slot acquisition: claim the first idle encoder via CAS.
    for (int i = 0; i < _poolSize; ++i) {
        if (!_encoders[i]) continue;
        bool expected = false;
        if (_inUse[i].compare_exchange_strong(expected, true, std::memory_order_acquire)) {
            std::string result;
            try {
                result = _encoders[i]->compress(image, quality);
            }
            catch (...) {
                _inUse[i].store(false, std::memory_order_release);
                throw;
            }
            _inUse[i].store(false, std::memory_order_release);
            return result; // may be empty on encode failure — caller falls back
        }
    }
    return ""; // All slots busy — caller falls back to TurboJPEG
}
|
||
|
||
// ── Unified entry point: nvJPEG pool on NVIDIA, TurboJPEG otherwise ──
|
||
|
||
// Unified JPEG-compression entry point. The nvJPEG GPU pool path is
// currently disabled (commented out below); every call therefore uses
// the CPU TurboJPEG encoder.
std::string CompressJpegToString(const cv::Mat& image, int quality) {
    // Try GPU path first (returns "" if non-NVIDIA, pool full, or encode fails)
    //std::string result = NvJpegPool::Instance().tryCompress(image, quality);
    //if (!result.empty()) return result;

    // CPU fallback — always available. thread_local gives each thread its
    // own compressor instance, so no locking is needed here.
    static thread_local TurboJpegCompressor compressor;
    return compressor.compress(image, quality);
}
|
||
// Runs the process-wide license verification exactly once (std::call_once
// on the global flag), then copies the cached global result into this
// instance's _licenseValid.
void ANSOPENCV::CheckLicense() {
    try {
        // Check once globally; subsequent callers skip straight to the read.
        std::call_once(ansCVLicenseOnceFlag, [this]() {
            VerifyGlobalLicense(_licenseKey);
        });

        // Update this instance's local license flag
        _licenseValid = ansCVLicenceValid;
    }
    catch (const std::exception& e) {
        this->_logger.LogFatal("ANSOPENCV::CheckLicense. Error:", e.what(), __FILE__, __LINE__);
    }
}
|
||
// Stores the license key (first caller wins) and validates it via
// CheckLicense(). Returns true when the process holds a valid license.
bool ANSOPENCV::Init(std::string licenseKey) {
    // Fast path: license already verified globally by an earlier call.
    // NOTE(review): this read of the global flag is unsynchronized; it is
    // only safe if call_once in CheckLicense publishes the write — verify.
    if (ansCVLicenceValid) { // Global check
        _licenseValid = true;
        return true;
    }

    {
        std::lock_guard<std::recursive_mutex> lock(_mutex);
        // The key is write-once: later differing keys are rejected with a warning.
        if (_licenseKey.empty()) {
            _licenseKey = licenseKey;
        }
        else if (_licenseKey != licenseKey) {
            std::cerr << "Warning: Attempt to reset license key with a different value!" << std::endl;
        }
    }

    CheckLicense();
    return _licenseValid;
}
|
||
//std::string ANSOPENCV::EncodeJpegString(const cv::Mat& img, int quality) {
|
||
// std::lock_guard<std::recursive_mutex> lock(_mutex);
|
||
// tjhandle _jpegCompressor = nullptr;
|
||
// unsigned char* jpegBuf = nullptr;
|
||
// try {
|
||
// _jpegCompressor = tjInitCompress();
|
||
// if (!_jpegCompressor) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Failed to initialize TurboJPEG compressor.", tjGetErrorStr(), __FILE__, __LINE__);
|
||
// return "";
|
||
// }
|
||
// int maxBufferSize = img.cols * img.rows * 3;
|
||
// jpegBuf = new unsigned char[maxBufferSize]; // Pre-allocated buffer
|
||
// long unsigned int jpegSize = maxBufferSize; // Size of the JPEG image (output)
|
||
// int subsamp = TJSAMP_444; // Chroma subsampling: TJSAMP_444, TJSAMP_422, TJSAMP_420, etc.
|
||
// int pixelFormat = img.channels() == 3 ? TJPF_BGR : TJPF_GRAY; // Pixel format based on channels
|
||
|
||
// // Compress the image into the pre-allocated buffer
|
||
// int result = tjCompress2(_jpegCompressor, img.data, img.cols, 0, img.rows, pixelFormat,
|
||
// &jpegBuf, &jpegSize, subsamp, quality, TJFLAG_FASTDCT);
|
||
|
||
// // Handle compression errors
|
||
// if (result != 0) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Compression error:", tjGetErrorStr(), __FILE__, __LINE__);
|
||
// if (jpegBuf) {
|
||
// tjFree(jpegBuf); // Free the buffer if allocated
|
||
// }
|
||
// tjDestroy(_jpegCompressor); // Destroy the TurboJPEG compressor
|
||
// return "";
|
||
// }
|
||
// // Create a string from the JPEG buffer
|
||
// std::string jpegString(reinterpret_cast<char*>(jpegBuf), jpegSize);
|
||
// // Clean up resources
|
||
// tjFree(jpegBuf);
|
||
// tjDestroy(_jpegCompressor);
|
||
// return jpegString;
|
||
// }
|
||
// catch (std::exception& e) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString:", e.what(), __FILE__, __LINE__);
|
||
// // Clean up resources in case of an exception
|
||
// if (jpegBuf) {
|
||
// tjFree(jpegBuf);
|
||
// }
|
||
// if (_jpegCompressor) {
|
||
// tjDestroy(_jpegCompressor);
|
||
// }
|
||
// }
|
||
// // Return an empty string in case of failure
|
||
// return "";
|
||
//}
|
||
//std::string ANSOPENCV::EncodeJpegString(const cv::Mat& img, int quality) {
|
||
// std::lock_guard<std::recursive_mutex> lock(_mutex);
|
||
// tjhandle _jpegCompressor = nullptr;
|
||
// unsigned char* jpegBuf = nullptr;
|
||
|
||
// try {
|
||
// // Validate input
|
||
// if (img.empty()) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Empty image.", "", __FILE__, __LINE__);
|
||
// return "";
|
||
// }
|
||
|
||
// // Determine pixel format
|
||
// int pixelFormat;
|
||
// if (img.channels() == 1) {
|
||
// pixelFormat = TJPF_GRAY;
|
||
// }
|
||
// else if (img.channels() == 3) {
|
||
// pixelFormat = TJPF_BGR;
|
||
// }
|
||
// else if (img.channels() == 4) {
|
||
// pixelFormat = TJPF_BGRA;
|
||
// }
|
||
// else {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Unsupported channel count: " + std::to_string(img.channels()), "", __FILE__, __LINE__);
|
||
// return "";
|
||
// }
|
||
|
||
// _jpegCompressor = tjInitCompress();
|
||
// if (!_jpegCompressor) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Failed to initialize TurboJPEG compressor.", tjGetErrorStr(), __FILE__, __LINE__);
|
||
// return "";
|
||
// }
|
||
|
||
// // Correct buffer size based on actual channels
|
||
// unsigned long maxBufferSize = tjBufSize(img.cols, img.rows, TJSAMP_444);
|
||
// jpegBuf = new unsigned char[maxBufferSize];
|
||
// unsigned long jpegSize = maxBufferSize;
|
||
|
||
// int subsamp = (pixelFormat == TJPF_GRAY) ? TJSAMP_GRAY : TJSAMP_444;
|
||
|
||
// int pitch = img.step[0];
|
||
// int result = tjCompress2(_jpegCompressor, img.data, img.cols, pitch, img.rows, pixelFormat,
|
||
// &jpegBuf, &jpegSize, subsamp, quality, TJFLAG_FASTDCT);
|
||
|
||
// if (result != 0) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString. Compression error:", tjGetErrorStr(), __FILE__, __LINE__);
|
||
// tjFree(jpegBuf);
|
||
// tjDestroy(_jpegCompressor);
|
||
// return "";
|
||
// }
|
||
|
||
// std::string jpegString(reinterpret_cast<char*>(jpegBuf), jpegSize);
|
||
|
||
// tjFree(jpegBuf);
|
||
// tjDestroy(_jpegCompressor);
|
||
|
||
// return jpegString;
|
||
// }
|
||
// catch (std::exception& e) {
|
||
// this->_logger.LogError("ANSOPENCV::EncodeJpegString:", e.what(), __FILE__, __LINE__);
|
||
// if (jpegBuf) tjFree(jpegBuf);
|
||
// if (_jpegCompressor) tjDestroy(_jpegCompressor);
|
||
// }
|
||
|
||
// return "";
|
||
//}
|
||
|
||
// Encodes a 1-, 3-, or 4-channel 8-bit cv::Mat into an in-memory JPEG.
// Returns the JPEG bytes as a std::string, or "" on any failure; never throws.
//
// Improvements over the previous version:
//  - RAII (unique_ptr with tjDestroy/tjFree deleters) replaces the manual
//    cleanup that was duplicated across five exit paths.
//  - TJFLAG_NOREALLOC: the buffer is already worst-case sized via
//    tjBufSize(), and the flag prevents TurboJPEG from reallocating the
//    buffer behind the owning unique_ptr.
std::string ANSOPENCV::EncodeJpegString(const cv::Mat& img, int quality) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    try {
        // Validate input
        if (img.empty()) {
            this->_logger.LogError("ANSOPENCV::EncodeJpegString. Empty image.", "", __FILE__, __LINE__);
            return "";
        }

        // tjCompress2 is called with pitch 0 (tightly packed rows), so a
        // ROI/submatrix view must be packed into continuous memory first.
        cv::Mat continuous_img = img.isContinuous() ? img : img.clone();

        // Determine pixel format / chroma subsampling from the channel count.
        int pixelFormat;
        int subsamp;
        const int channels = continuous_img.channels();
        if (channels == 1) {
            pixelFormat = TJPF_GRAY;
            subsamp = TJSAMP_GRAY;
        }
        else if (channels == 3) {
            pixelFormat = TJPF_BGR;
            subsamp = TJSAMP_444;
        }
        else if (channels == 4) {
            pixelFormat = TJPF_BGRA;
            subsamp = TJSAMP_444;
        }
        else {
            this->_logger.LogError("ANSOPENCV::EncodeJpegString. Unsupported channel count: " +
                std::to_string(channels), "", __FILE__, __LINE__);
            return "";
        }

        // RAII: the compressor handle is destroyed on every exit path.
        std::unique_ptr<void, decltype(&tjDestroy)> compressor(tjInitCompress(), &tjDestroy);
        if (!compressor) {
            this->_logger.LogError("ANSOPENCV::EncodeJpegString. Failed to initialize TurboJPEG compressor.",
                tjGetErrorStr(), __FILE__, __LINE__);
            return "";
        }

        // Worst-case output size for this geometry and subsampling.
        const unsigned long maxBufferSize = tjBufSize(continuous_img.cols, continuous_img.rows, subsamp);
        std::unique_ptr<unsigned char, decltype(&tjFree)> buffer(
            tjAlloc(static_cast<int>(maxBufferSize)), &tjFree);
        if (!buffer) {
            this->_logger.LogError("ANSOPENCV::EncodeJpegString. Failed to allocate JPEG buffer.", "", __FILE__, __LINE__);
            return "";
        }

        unsigned long jpegSize = maxBufferSize;
        unsigned char* rawBuffer = buffer.get(); // NOREALLOC below: pointer cannot change

        int result = tjCompress2(compressor.get(), continuous_img.data, continuous_img.cols, 0,
            continuous_img.rows, pixelFormat,
            &rawBuffer, &jpegSize, subsamp, quality, TJFLAG_FASTDCT | TJFLAG_NOREALLOC);

        if (result != 0) {
            this->_logger.LogError("ANSOPENCV::EncodeJpegString. Compression error (format=" +
                std::to_string(pixelFormat) + ", subsamp=" + std::to_string(subsamp) +
                ", quality=" + std::to_string(quality) + "):",
                tjGetErrorStr(), __FILE__, __LINE__);
            return "";
        }

        // Copy the compressed bytes out; buffer/compressor free themselves.
        return std::string(reinterpret_cast<char*>(rawBuffer), jpegSize);
    }
    catch (const std::exception& e) {
        this->_logger.LogError("ANSOPENCV::EncodeJpegString:", e.what(), __FILE__, __LINE__);
    }
    catch (...) {
        this->_logger.LogError("ANSOPENCV::EncodeJpegString: Unknown exception", "", __FILE__, __LINE__);
    }

    return "";
}
|
||
// Serializes a cv::Mat to JPEG bytes (quality 100) for transport/storage.
// Returns "" for invalid input or on any failure.
std::string ANSOPENCV::MatToBinaryData(const cv::Mat& image) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    // Check if the image is empty or has invalid data (u is the Mat's
    // underlying UMatData bookkeeping pointer).
    if (image.empty() || !image.data || !image.u) {
        return "";
    }
    try {
        // Encode the image to a memory buffer
        return EncodeJpegString(image, 100);
    }
    catch (const std::exception& e) {
        this->_logger.LogFatal("ANSOPENCV::MatToBinaryData. Exception occurred:", e.what(), __FILE__, __LINE__);
    }
    catch (...) {
        // Fixed: the log tag previously said "ANSWEBCAMPlayer::MatToBinaryData"
        // (copy-paste error from another class).
        this->_logger.LogFatal("ANSOPENCV::MatToBinaryData.", "Unknown exception occurred.", __FILE__, __LINE__);
    }

    // Return an empty string in case of failure
    return "";
}
|
||
// Resizes inputFrame to exactly width x height, choosing an interpolation
// mode suited to the scaling direction. Without a valid license, with empty
// input, invalid dimensions, or on any error, outputFrame becomes a shallow
// copy of inputFrame.
void ANSOPENCV::ImageResize(const cv::Mat& inputFrame, int width, int height, cv::Mat& outputFrame) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid) {
        outputFrame = inputFrame;
        return;
    }

    if (inputFrame.empty()) {
        outputFrame = inputFrame;
        return;
    }

    // Validate dimensions
    if (width <= 0 || height <= 0) {
        std::cerr << "Error: Invalid dimensions in ImageResize (" << width << "x" << height << ")" << std::endl;
        outputFrame = inputFrame;
        return;
    }

    // Check if resize is actually needed
    if (inputFrame.cols == width && inputFrame.rows == height) {
        outputFrame = inputFrame;
        return;
    }

    try {
        // Choose interpolation based on scaling direction.
        // Fixed: previously called unqualified min(), which only compiled
        // because some macro or using-declaration happened to be in scope.
        const double scaleX = static_cast<double>(width) / inputFrame.cols;
        const double scaleY = static_cast<double>(height) / inputFrame.rows;
        const double scale = std::min(scaleX, scaleY);

        // Downscaling: INTER_AREA (best quality and fast).
        // Upscaling: INTER_LINEAR (good balance).
        const int interpolation = (scale < 1.0) ? cv::INTER_AREA : cv::INTER_LINEAR;

        cv::resize(inputFrame, outputFrame, cv::Size(width, height), 0, 0, interpolation);
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ImageResize: " << e.what() << std::endl;
        outputFrame = inputFrame;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ImageResize: " << e.what() << std::endl;
        outputFrame = inputFrame;
    }
    catch (...) {
        std::cerr << "Unknown exception in ImageResize" << std::endl;
        outputFrame = inputFrame;
    }
}
|
||
// Resizes inputFrame to the requested width while preserving its aspect
// ratio. Without a valid license, with invalid input, or when the width
// already matches, outputFrame becomes a shallow copy of inputFrame.
void ANSOPENCV::ImageResizeWithRatio(const cv::Mat& inputFrame, int width, cv::Mat& outputFrame)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    // Pass-through cases: no license, bad input, or nothing to do.
    if (!_licenseValid || inputFrame.empty() || width <= 0 || inputFrame.cols == width) {
        outputFrame = inputFrame; // Shallow copy (fast)
        return;
    }

    try {
        // height = width / (cols / rows), rounded to the nearest pixel.
        const double aspectRatio = static_cast<double>(inputFrame.cols) / inputFrame.rows;
        const int height = static_cast<int>(std::round(width / aspectRatio));

        if (height <= 0) {
            std::cerr << "Error: Calculated height is invalid in ImageResizeWithRatio! "
                      << "Width: " << width << ", AspectRatio: " << aspectRatio << std::endl;
            outputFrame = inputFrame;
            return;
        }

        // INTER_AREA when shrinking, INTER_LINEAR when enlarging.
        const int interpolation = (width < inputFrame.cols) ? cv::INTER_AREA : cv::INTER_LINEAR;
        cv::resize(inputFrame, outputFrame, cv::Size(width, height), 0, 0, interpolation);
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV error in ImageResizeWithRatio: " << e.what() << std::endl;
        outputFrame = inputFrame; // Shallow copy fallback
    }
    catch (...) {
        std::cerr << "Unknown error in ImageResizeWithRatio!" << std::endl;
        outputFrame = inputFrame; // Shallow copy fallback
    }
}
|
||
// Returns a copy of image where each rectangle in `objects` has been
// Gaussian-blurred in place (45x45 kernel). Rectangles are clipped to the
// image bounds; out-of-frame rectangles are skipped.
cv::Mat ANSOPENCV::BlurObjects(const cv::Mat& image, const std::vector<cv::Rect>& objects) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    if (!_licenseValid || image.empty()) return image;

    cv::Mat result = image.clone();
    const cv::Size kernelSize(45, 45);
    const cv::Rect frame(0, 0, image.cols, image.rows);

    for (const auto& obj : objects) {
        const cv::Rect roi = obj & frame; // clip ROI to the image
        if (roi.area() > 0) {
            cv::GaussianBlur(result(roi), result(roi), kernelSize, 0);
        }
    }

    return result;
}
|
||
// Returns image with everything OUTSIDE the given rectangles blurred:
// the whole frame is Gaussian-blurred (45x45), then each valid rectangle
// is restored from the original, leaving only the background blurred.
cv::Mat ANSOPENCV::BlurBackground(const cv::Mat& image, const std::vector<cv::Rect>& objects) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    if (!_licenseValid || image.empty()) return image;

    cv::Mat blurred;
    cv::GaussianBlur(image, blurred, cv::Size(45, 45), 0);

    const cv::Rect frame(0, 0, image.cols, image.rows);
    for (const auto& obj : objects) {
        const cv::Rect roi = obj & frame; // clip ROI to the image
        if (roi.area() > 0) {
            image(roi).copyTo(blurred(roi)); // restore sharp foreground
        }
    }

    return blurred;
}
|
||
// Converts a 3- or 4-channel image to grayscale. A 1-channel input is
// returned as an independent clone; unsupported channel counts (or an
// invalid license / empty input) return the original image unchanged.
cv::Mat ANSOPENCV::ToGray(const cv::Mat& image) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) return image;

    const int channels = image.channels();
    cv::Mat gray;

    if (channels == 3) {
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    }
    else if (channels == 4) {
        cv::cvtColor(image, gray, cv::COLOR_BGRA2GRAY);
    }
    else if (channels == 1) {
        gray = image.clone(); // already gray — return a copy, not a view
    }
    else {
        std::cerr << "Error: Unsupported image format. Expected 1, 3, or 4 channels." << std::endl;
        return image;
    }

    return gray;
}
|
||
// Denoises an image with a bilateral filter (edge-preserving smoothing).
// Chosen over NLMeans for speed (comment in the original: 10-50x faster).
// Returns the input unchanged when unlicensed or empty.
cv::Mat ANSOPENCV::ImageDenoise(const cv::Mat& image) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image;
    }

    cv::Mat result;
    // diameter=9, sigmaColor=75, sigmaSpace=75 — same tuning as before.
    cv::bilateralFilter(image, result, 9, 75.0, 75.0);
    return result;
}
|
||
// Crops a region from inputImage. resizeROI may be expressed in the
// coordinate space of a resized copy whose width was originalImageSize;
// in that case the ROI is rescaled to this image's resolution first.
// Returns a deep copy of the cropped region, or an empty Mat on failure.
cv::Mat ANSOPENCV::ImageCrop(const cv::Mat& inputImage, const cv::Rect& resizeROI, int originalImageSize) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    // License validation
    if (!_licenseValid) {
        std::cerr << "Error: License is not valid in ImageCrop." << std::endl;
        return cv::Mat(); // Return empty Mat explicitly
    }

    // Early validation checks
    if (inputImage.empty()) {
        std::cerr << "Error: Input image is empty!" << std::endl;
        return cv::Mat();
    }

    if (resizeROI.width <= 0 || resizeROI.height <= 0) {
        std::cerr << "Error: Invalid ROI size! Width=" << resizeROI.width
                  << ", Height=" << resizeROI.height << std::endl;
        return cv::Mat();
    }

    try {
        const int originalWidth = inputImage.cols;
        const int originalHeight = inputImage.rows;

        // Scale ROI if originalImageSize is provided and differs from this width
        cv::Rect roi = resizeROI;
        if (originalImageSize > 0 && originalImageSize != originalWidth) {
            const double scale = static_cast<double>(originalWidth) / originalImageSize;

            // std::round gives more accurate scaling than truncation
            roi.x = static_cast<int>(std::round(resizeROI.x * scale));
            roi.y = static_cast<int>(std::round(resizeROI.y * scale));
            roi.width = static_cast<int>(std::round(resizeROI.width * scale));
            roi.height = static_cast<int>(std::round(resizeROI.height * scale));

            // Ensure dimensions are still positive after rounding
            if (roi.width <= 0 || roi.height <= 0) {
                std::cerr << "Error: Scaled ROI has invalid dimensions!" << std::endl;
                return cv::Mat();
            }
        }

        // Containment check: the ROI must lie fully inside the image.
        // (Fixed: imageBounds was previously computed but never used; the
        // intersection test is equivalent to the four manual comparisons.)
        const cv::Rect imageBounds(0, 0, originalWidth, originalHeight);
        if ((roi & imageBounds) != roi) {
            std::cerr << "Error: ROI exceeds image boundaries! "
                      << "ROI=[" << roi.x << "," << roi.y << ","
                      << roi.width << "," << roi.height << "], "
                      << "Image=[" << originalWidth << "x" << originalHeight << "]" << std::endl;
            return cv::Mat();
        }

        // Deep copy so the result stays valid independently of inputImage.
        return inputImage(roi).clone();
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSOPENCV::ImageCrop: " << e.what() << std::endl;
        return cv::Mat();
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSOPENCV::ImageCrop: " << e.what() << std::endl;
        return cv::Mat();
    }
}
|
||
// Attempts to "repair" an image by inpainting: detects edges with Canny
// and inpaints using the inverted edge map as the mask. Returns the input
// unchanged when unlicensed, empty, or on an OpenCV error.
cv::Mat ANSOPENCV::ImageRepair(const cv::Mat& image) {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image;
    }

    try {
        // Grayscale copy is used only for edge detection; inpainting below
        // still operates on the original (possibly color) image.
        cv::Mat grayImage;
        if (image.channels() == 1) {
            grayImage = image;
        }
        else {
            cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY);
        }

        // Use more aggressive Canny thresholds to detect fewer edges
        cv::Mat edges;
        cv::Canny(grayImage, edges, 80, 200, 3, false); // Higher thresholds = fewer edges

        // NOTE(review): inverting the edge map means the inpaint mask marks
        // all NON-edge pixels for repainting — i.e. most of the image is
        // reconstructed and only edge pixels are kept as source data.
        // Verify this mask polarity is intended (an edge-only mask is the
        // more common choice for scratch/defect repair).
        cv::Mat mask;
        cv::bitwise_not(edges, mask);

        cv::Mat inpaintedImage;
        const int inpaintRadius = 2; // Smaller radius = faster
        cv::inpaint(image, mask, inpaintedImage, inpaintRadius, cv::INPAINT_NS); // NS is faster

        return inpaintedImage;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ImageRepairFast: " << e.what() << std::endl;
        return image;
    }
}
|
||
std::string ANSOPENCV::PatternMatches(cv::Mat& image, cv::Mat& templateImage, double threshold) {
    // Finds all occurrences of templateImage inside image using normalised
    // cross-correlation (TM_CCOEFF_NORMED). Locations whose score exceeds
    // `threshold` are collected, de-duplicated with NMS (IoU 0.5) and
    // returned as a JSON detection list. On any failure an empty result
    // list is serialised instead of throwing.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    std::vector<DetectionObject> detectedObjects;

    // Early validation
    if (!_licenseValid || image.empty() || templateImage.empty()) {
        return VectorDetectionToJsonString(detectedObjects);
    }

    // The template must fit inside the search image.
    if (templateImage.cols > image.cols || templateImage.rows > image.rows) {
        std::cerr << "Error: Template is larger than image in PatternMatches!" << std::endl;
        return VectorDetectionToJsonString(detectedObjects);
    }

    try {
        // Perform template matching. TM_CCOEFF_NORMED already yields scores
        // in [-1, 1], so no extra normalisation pass is required.
        // (Removed: result_cols/result_rows were computed but never used.)
        cv::Mat result;
        cv::matchTemplate(image, templateImage, result, cv::TM_CCOEFF_NORMED);

        // Binarise the score map so findNonZero can collect every location
        // above threshold without a manual pixel loop.
        cv::Mat thresholdedResult;
        cv::threshold(result, thresholdedResult, threshold, 1.0, cv::THRESH_BINARY);

        std::vector<cv::Point> locations;
        cv::findNonZero(thresholdedResult, locations);

        // Convert each match location into a DetectionObject.
        // Reserve exactly what we need (was a magic reserve(100)).
        detectedObjects.reserve(locations.size());
        const int templateWidth = templateImage.cols;
        const int templateHeight = templateImage.rows;

        for (const auto& loc : locations) {
            DetectionObject detectionObject;
            detectionObject.classId = 0;
            detectionObject.className = "Matched Object";
            detectionObject.confidence = result.at<float>(loc.y, loc.x); // actual correlation score
            detectionObject.box = cv::Rect(loc.x, loc.y, templateWidth, templateHeight);
            detectedObjects.push_back(detectionObject);
        }

        // Collapse clusters of overlapping matches.
        if (!detectedObjects.empty()) {
            NonMaximumSuppression(detectedObjects, 0.5);
        }

        return VectorDetectionToJsonString(detectedObjects);
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in PatternMatches: " << e.what() << std::endl;
        return VectorDetectionToJsonString(detectedObjects);
    }
}
|
||
std::string ANSOPENCV::QRDecoder(const cv::Mat& image) {
    // Decodes every barcode/QR code found in `image` using ZXing and returns
    // the detections serialised as JSON ({"results":[...]}). Returns "" when
    // the license is invalid, the input is empty or an unsupported format,
    // nothing was decoded, or an exception occurred.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return "";
    }

    try {
        // Convert to grayscale efficiently (ZXing consumes luminance data).
        cv::Mat grayImage;
        if (image.channels() == 1) {
            grayImage = image; // Already grayscale, no conversion needed
        }
        else if (image.channels() == 3) {
            cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY);
        }
        else if (image.channels() == 4) {
            cv::cvtColor(image, grayImage, cv::COLOR_BGRA2GRAY);
        }
        else {
            std::cerr << "Error: Unsupported image format in QRDecoder" << std::endl;
            return "";
        }

        const int width = grayImage.cols;
        const int height = grayImage.rows;

        // Create ZXing image view over the grayscale buffer (no copy).
        const ZXing::ImageView barcodeImage(grayImage.data, width, height,
            ZXing::ImageFormat::Lum);

        // Configure options (keep Any format for flexibility)
        const auto options = ZXing::ReaderOptions()
            .setFormats(ZXing::BarcodeFormat::Any)
            .setTryHarder(false) // Set false for faster decoding
            .setTryRotate(false); // Set false if rotation not needed

        // Decode barcodes
        const auto barcodes = ZXing::ReadBarcodes(barcodeImage, options);

        // Early exit if no barcodes found
        if (barcodes.empty()) {
            return "";
        }

        // Reserve space for detected objects
        std::vector<DetectionObject> detectedObjects;
        detectedObjects.reserve(barcodes.size());

        // Process each barcode: decoded text goes into className, the
        // symbology name into extraInfo, and the format enum into classId.
        for (const auto& b : barcodes) {
            DetectionObject detectedObject;
            detectedObject.classId = static_cast<int>(b.format());
            detectedObject.className = b.text();
            detectedObject.confidence = 1.0; // ZXing reports no score; treat as certain
            detectedObject.extraInfo = ZXing::ToString(b.format());

            // Calculate an axis-aligned bounding box from the 4 corner points.
            const ZXing::Position& pos = b.position();
            if (pos.size() == 4) {
                // Use std::minmax_element for cleaner code
                auto [minX, maxX] = std::minmax_element(
                    pos.begin(), pos.end(),
                    [](const auto& a, const auto& b) { return a.x < b.x; }
                );
                auto [minY, maxY] = std::minmax_element(
                    pos.begin(), pos.end(),
                    [](const auto& a, const auto& b) { return a.y < b.y; }
                );

                const int xmin = minX->x;
                const int ymin = minY->y;
                const int bwidth = maxX->x - xmin;
                const int bheight = maxY->y - ymin;

                // Validate bounding box. On failure the default (empty) box
                // is kept, but the detection is still reported below.
                if (bwidth > 0 && bheight > 0) {
                    detectedObject.box = cv::Rect(xmin, ymin, bwidth, bheight);
                }
                else {
                    std::cerr << "Warning: Invalid bounding box calculated for barcode" << std::endl;
                }
            }

            detectedObjects.push_back(std::move(detectedObject));
        }

        // Apply NMS only if multiple detections
        if (detectedObjects.size() > 1) {
            NonMaximumSuppression(detectedObjects, 0.5);
        }

        return VectorDetectionToJsonString(detectedObjects);
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in QRDecoder: " << e.what() << std::endl;
        return "";
    }
}
|
||
std::string ANSOPENCV::QRDecoderWithBBox(const cv::Mat& image, int maxImageSize, const std::vector<cv::Rect>& bBox) {
    // Decodes barcodes/QR codes only inside the given candidate regions.
    // The incoming rectangles are assumed to be expressed against an image
    // of width `maxImageSize` (a previously resized frame); they are scaled
    // into this image's coordinates, decoded with ZXing, and the resulting
    // boxes are transformed back into the caller's coordinate space.
    // Falls back to full-image QRDecoder when no regions are supplied.
    // Returns "" on invalid license, empty/unsupported input, or exception.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return "";
    }

    // Early fallback if no bounding boxes
    if (bBox.empty()) {
        return QRDecoder(image);
    }

    try {
        const int originalWidth = image.cols;
        const int originalHeight = image.rows;
        // Maps caller coordinates -> this image's coordinates.
        const double scaleFactor = (maxImageSize > 0) ?
            static_cast<double>(originalWidth) / maxImageSize : 1.0;

        // Pre-calculate inverse scale factor (avoid repeated division);
        // maps this image's coordinates back to caller coordinates.
        const double invScaleFactor = 1.0 / scaleFactor;

        // Pre-calculate image bounds
        const cv::Rect imageBounds(0, 0, originalWidth, originalHeight);

        // Convert to grayscale once for all crops (major optimization)
        cv::Mat grayImage;
        if (image.channels() == 1) {
            grayImage = image;
        }
        else if (image.channels() == 3) {
            cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY);
        }
        else if (image.channels() == 4) {
            cv::cvtColor(image, grayImage, cv::COLOR_BGRA2GRAY);
        }
        else {
            std::cerr << "Error: Unsupported image format in QRDecoderWithBBox" << std::endl;
            return "";
        }

        // Reserve space for detected objects
        std::vector<DetectionObject> detectedObjects;
        detectedObjects.reserve(bBox.size() * 2); // Assume ~2 codes per box on average

        // Configure ZXing options once
        const auto options = ZXing::ReaderOptions()
            .setFormats(ZXing::BarcodeFormat::Any)
            .setTryHarder(false)
            .setTryRotate(false);

        // Process each bounding box
        for (const auto& rect : bBox) {
            // Scale the bounding box with rounding for better accuracy
            cv::Rect scaledRect;
            scaledRect.x = static_cast<int>(std::round(rect.x * scaleFactor));
            scaledRect.y = static_cast<int>(std::round(rect.y * scaleFactor));
            scaledRect.width = static_cast<int>(std::round(rect.width * scaleFactor));
            scaledRect.height = static_cast<int>(std::round(rect.height * scaleFactor));

            // Clamp to image bounds (cv::Rect intersection operator)
            scaledRect &= imageBounds;

            // Skip invalid regions
            if (scaledRect.width <= 0 || scaledRect.height <= 0) {
                continue;
            }

            // Crop from grayscale image (no need to convert again).
            // NOTE(review): this is a view into grayImage, so the row stride
            // is the full image width; ZXing's ImageView below is given only
            // width/height -- presumably it assumes a contiguous buffer.
            // Verify crops decode correctly, or pass the row stride.
            const cv::Mat croppedGray = grayImage(scaledRect);

            // Create ZXing image view
            const ZXing::ImageView barcodeImage(croppedGray.data,
                croppedGray.cols,
                croppedGray.rows,
                ZXing::ImageFormat::Lum);

            // Decode barcodes in this region
            const auto barcodes = ZXing::ReadBarcodes(barcodeImage, options);

            // Process each detected barcode
            for (const auto& b : barcodes) {
                DetectionObject detectedObject;
                detectedObject.classId = static_cast<int>(b.format());
                detectedObject.className = b.text();
                detectedObject.confidence = 1.0;
                detectedObject.extraInfo = ZXing::ToString(b.format());

                const ZXing::Position& pos = b.position();
                if (pos.size() == 4) {
                    // Calculate bounding box using minmax_element
                    auto [minX, maxX] = std::minmax_element(
                        pos.begin(), pos.end(),
                        [](const auto& a, const auto& b) { return a.x < b.x; }
                    );
                    auto [minY, maxY] = std::minmax_element(
                        pos.begin(), pos.end(),
                        [](const auto& a, const auto& b) { return a.y < b.y; }
                    );

                    const int xmin = minX->x;
                    const int ymin = minY->y;
                    const int bwidth = maxX->x - xmin;
                    const int bheight = maxY->y - ymin;

                    // Validate dimensions; detections with a degenerate box
                    // are dropped entirely (unlike QRDecoder, which keeps them).
                    if (bwidth > 0 && bheight > 0) {
                        // Transform crop-local coordinates back to the
                        // caller's (pre-resize) coordinate space.
                        detectedObject.box = cv::Rect(
                            static_cast<int>(std::round((xmin + scaledRect.x) * invScaleFactor)),
                            static_cast<int>(std::round((ymin + scaledRect.y) * invScaleFactor)),
                            static_cast<int>(std::round(bwidth * invScaleFactor)),
                            static_cast<int>(std::round(bheight * invScaleFactor))
                        );

                        detectedObjects.push_back(std::move(detectedObject));
                    }
                }
            }
        }

        // Apply NMS only if multiple detections
        if (detectedObjects.size() > 1) {
            NonMaximumSuppression(detectedObjects, 0.5);
        }

        return VectorDetectionToJsonString(detectedObjects);
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in QRDecoderWithBBox: " << e.what() << std::endl;
        return "";
    }
}
|
||
std::string ANSOPENCV::MatToBase64(const cv::Mat& image) {
    // Encodes the image as a maximum-quality JPEG and returns the raw
    // binary bytes in a std::string. Despite the historical name, the
    // result is NOT Base64-encoded. Returns "" on invalid license, empty
    // input, failed compression, or exception.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return "";
    }

    try {
        const std::string encoded = CompressJpegToString(image, 100);
        if (!encoded.empty()) {
            return encoded;
        }
        std::cerr << "Error: JPEG compression failed in MatToBase64!" << std::endl;
        return "";
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in MatToBase64: " << e.what() << std::endl;
        return "";
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in MatToBase64: " << e.what() << std::endl;
        return "";
    }
}
|
||
cv::Mat ANSOPENCV::ImageDarkEnhancement(const cv::Mat& img, double brightnessScaleFactor) {
    // Brightens a dark image by multiplying every pixel value by
    // brightnessScaleFactor (saturating at the type's maximum).
    // Factors <= 1.0 are treated as "not set" and default to 1.5.
    // Returns the input unchanged on invalid license, empty input, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || img.empty()) {
        return img; // Shallow copy (fast)
    }

    // Validate and set default scale factor
    if (brightnessScaleFactor <= 1.0) {
        brightnessScaleFactor = 1.5;
    }

    try {
        // convertTo applies the same linear gain to every channel, so one
        // call covers both grayscale and colour inputs. (The original had a
        // separate single-channel branch that executed identical code.)
        cv::Mat enhancedImage;
        img.convertTo(enhancedImage, -1, brightnessScaleFactor, 0);
        return enhancedImage;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ImageDarkEnhancement: " << e.what() << std::endl;
        return img;
    }
}
|
||
cv::Mat ANSOPENCV::ImageContrastEnhancement(const cv::Mat& src) {
    // Enhances local contrast with CLAHE (clip limit 2.0, 8x8 tiles).
    // Grayscale images are equalised directly; colour images are equalised
    // on the luma (Y) plane in YUV space so hues are preserved.
    // NOTE: 4-channel input loses its alpha channel -- the result is
    // 3-channel BGR (behaviour preserved from the original implementation).
    // Unsupported channel counts and errors return the input unchanged.
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    double clipLimit = 2.0;
    if (!_licenseValid || src.empty()) {
        return src;
    }

    try {
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(clipLimit, cv::Size(8, 8));

        // Applies CLAHE to the Y plane of a 3-channel BGR image.
        // (The original duplicated this sequence for the 3- and 4-channel
        // branches; factored out here.)
        const auto equalizeBgr = [&clahe](const cv::Mat& bgr) {
            cv::Mat yuv;
            cv::cvtColor(bgr, yuv, cv::COLOR_BGR2YUV);

            std::vector<cv::Mat> channels;
            cv::split(yuv, channels);

            clahe->apply(channels[0], channels[0]);

            cv::merge(channels, yuv);
            cv::Mat out;
            cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR);
            return out;
        };

        cv::Mat dst;
        if (src.channels() == 1) {
            clahe->apply(src, dst);
        }
        else if (src.channels() == 3) {
            dst = equalizeBgr(src);
        }
        else if (src.channels() == 4) {
            // Drop alpha first (see NOTE above), then equalise as BGR.
            cv::Mat bgr;
            cv::cvtColor(src, bgr, cv::COLOR_BGRA2BGR);
            dst = equalizeBgr(bgr);
        }
        else {
            return src;
        }

        return dst;
    }
    catch (const cv::Exception& e) {
        // Fixed: log message previously said "ImageContrastEnhancementCLAHE".
        std::cerr << "OpenCV exception in ImageContrastEnhancement: " << e.what() << std::endl;
        return src;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::ImageWhiteBalance(const cv::Mat& src) {
    // Gray-world white balance: each BGR channel is scaled so that its mean
    // matches the overall mean intensity. Only 3-channel images are
    // processed; anything else (and any error) returns the input unchanged.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || src.empty()) {
        return src; // shallow copy, cheap
    }

    if (src.channels() != 3) {
        std::cerr << "Warning: ImageWhiteBalance only works with 3-channel images" << std::endl;
        return src;
    }

    try {
        // Per-channel means computed in a single pass over the image.
        const cv::Scalar channelMeans = cv::mean(src);
        const double meanB = channelMeans[0];
        const double meanG = channelMeans[1];
        const double meanR = channelMeans[2];
        const double grayMean = (meanB + meanG + meanR) / 3.0;

        // Near-black channels would produce enormous gains; skip instead.
        if (meanB < 1.0 || meanG < 1.0 || meanR < 1.0) {
            std::cerr << "Warning: Very dark image in ImageWhiteBalance, skipping" << std::endl;
            return src;
        }

        std::vector<cv::Mat> planes(3);
        cv::split(src, planes);

        // Saturating 8-bit gain per channel; no float intermediate needed.
        planes[0].convertTo(planes[0], CV_8U, grayMean / meanB);
        planes[1].convertTo(planes[1], CV_8U, grayMean / meanG);
        planes[2].convertTo(planes[2], CV_8U, grayMean / meanR);

        cv::Mat balanced;
        cv::merge(planes, balanced);
        return balanced;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ImageWhiteBalance: " << e.what() << std::endl;
        return src;
    }
}
|
||
std::vector<cv::Rect> ANSOPENCV::GetBoundingBoxes(std::string strBBoxes) {
|
||
std::lock_guard<std::recursive_mutex> lock(_mutex);
|
||
std::vector<cv::Rect> bBoxes;
|
||
if (!_licenseValid) return bBoxes;
|
||
|
||
bBoxes.clear();
|
||
|
||
try {
|
||
// Parse JSON string using nlohmann::json
|
||
nlohmann::json j = nlohmann::json::parse(strBBoxes);
|
||
|
||
// Check if "results" exists and is an array
|
||
if (j.contains("results") && j["results"].is_array()) {
|
||
// Iterate through the results array
|
||
for (const auto& result : j["results"]) {
|
||
// Extract values with type checking
|
||
if (result.contains("x") && result.contains("y") &&
|
||
result.contains("width") && result.contains("height")) {
|
||
|
||
const auto x = result["x"].get<float>();
|
||
const auto y = result["y"].get<float>();
|
||
const auto width = result["width"].get<float>();
|
||
const auto height = result["height"].get<float>();
|
||
|
||
cv::Rect rectTemp;
|
||
rectTemp.x = static_cast<int>(x);
|
||
rectTemp.y = static_cast<int>(y);
|
||
rectTemp.width = static_cast<int>(width);
|
||
rectTemp.height = static_cast<int>(height);
|
||
bBoxes.push_back(rectTemp);
|
||
}
|
||
}
|
||
}
|
||
}
|
||
catch (const nlohmann::json::exception& e) {
|
||
// Handle JSON parsing errors
|
||
// You might want to log this error or handle it according to your needs
|
||
// For now, we'll just return an empty vector
|
||
bBoxes.clear();
|
||
}
|
||
|
||
return bBoxes;
|
||
}
|
||
|
||
cv::Mat ANSOPENCV::RotateImage(const cv::Mat& image, double angle) {
    // Rotates the image by `angle` degrees about its centre, expanding the
    // output canvas so no corners are clipped (exposed area is black).
    // Exact multiples of 90 degrees take a lossless fast path via
    // cv::rotate. Returns the input unchanged on invalid license, empty
    // input, near-zero angle, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image; // Shallow copy (fast)
    }

    try {
        // Normalize angle to [-180, 180] range
        angle = std::fmod(angle, 360.0);
        if (angle > 180.0) angle -= 360.0;
        if (angle < -180.0) angle += 360.0;

        // Fast path for common angles (exact 90-degree multiples);
        // tolerance of 0.01 degrees treats float noise as exact.
        if (std::abs(angle) < 0.01) {
            return image; // No rotation needed
        }

        // Use faster built-in functions for 90-degree rotations
        if (std::abs(angle - 90.0) < 0.01) {
            cv::Mat rotated;
            cv::rotate(image, rotated, cv::ROTATE_90_CLOCKWISE);
            return rotated;
        }
        if (std::abs(angle + 90.0) < 0.01 || std::abs(angle - 270.0) < 0.01) {
            cv::Mat rotated;
            cv::rotate(image, rotated, cv::ROTATE_90_COUNTERCLOCKWISE);
            return rotated;
        }
        if (std::abs(std::abs(angle) - 180.0) < 0.01) {
            cv::Mat rotated;
            cv::rotate(image, rotated, cv::ROTATE_180);
            return rotated;
        }

        // General rotation for arbitrary angles
        const cv::Point2f center(image.cols / 2.0f, image.rows / 2.0f);

        // Get rotation matrix
        cv::Mat rotationMatrix = cv::getRotationMatrix2D(center, angle, 1.0);

        // Calculate the bounding box that fully contains the rotated image.
        const cv::Rect2f bbox = cv::RotatedRect(cv::Point2f(), image.size(), angle).boundingRect2f();

        // Shift the transform so the rotated content is centred in the
        // enlarged canvas rather than clipped at the original origin.
        rotationMatrix.at<double>(0, 2) += bbox.width / 2.0 - center.x;
        rotationMatrix.at<double>(1, 2) += bbox.height / 2.0 - center.y;

        // Perform rotation with optimized interpolation
        cv::Mat rotatedImage;
        cv::warpAffine(image, rotatedImage, rotationMatrix, bbox.size(),
            cv::INTER_LINEAR, // Faster than INTER_CUBIC, still good quality
            cv::BORDER_CONSTANT,
            cv::Scalar(0, 0, 0));

        return rotatedImage;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in RotateImage: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::FlipImage(const cv::Mat& image, int flipCode) {
    // Mirrors the image using cv::flip.
    //   flipCode > 0 : flip horizontally (mirror left-right)
    //   flipCode = 0 : flip vertically (mirror top-bottom)
    //   flipCode < 0 : flip both axes (equivalent to 180-degree rotation)
    // Returns the input unchanged on invalid license, empty input, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image; // Shallow copy (fast)
    }

    // Out-of-range codes are clamped to the nearest valid value.
    // (Fixed: the warning previously claimed "Using modulo" although the
    // code clamps, which misled anyone reading the log.)
    if (flipCode < -1 || flipCode > 1) {
        std::cerr << "Warning: flipCode should be -1, 0, or 1. Clamping to valid range." << std::endl;
        if (flipCode > 1) flipCode = 1;
        if (flipCode < -1) flipCode = -1;
    }

    try {
        cv::Mat flippedImage;
        cv::flip(image, flippedImage, flipCode);
        return flippedImage;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in FlipImage: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::ShiftImage(const cv::Mat& image, int shiftX, int shiftY) {
    // Translates the image by (shiftX, shiftY) pixels, expanding the canvas
    // so no content is cropped; newly exposed area is filled with black.
    // Returns the input unchanged on invalid license, empty input, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    if (!_licenseValid) return image;
    if (image.empty()) return image;

    try {
        // The canvas grows by the magnitude of each shift.
        const int newWidth = image.cols + std::abs(shiftX);
        const int newHeight = image.rows + std::abs(shiftY);

        // For negative shifts, pre-offset so the net translation is >= 0 and
        // the content stays inside the enlarged canvas.
        const int translateX = (shiftX < 0) ? std::abs(shiftX) : 0;
        const int translateY = (shiftY < 0) ? std::abs(shiftY) : 0;

        // 2x3 affine translation matrix.
        const cv::Mat translationMatrix = (cv::Mat_<double>(2, 3) <<
            1, 0, translateX + shiftX,
            0, 1, translateY + shiftY);

        cv::Mat shiftedImage;
        cv::warpAffine(image, shiftedImage, translationMatrix, cv::Size(newWidth, newHeight),
            cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));

        return shiftedImage;
    }
    catch (const cv::Exception& e) {
        // Added: every other image operation in this class guards OpenCV
        // calls with try/catch; this one previously did not.
        std::cerr << "OpenCV exception in ShiftImage: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::AddGaussianNoise(const cv::Mat& image, double mean, double stddev) {
    // Adds additive Gaussian noise N(mean, stddev) to every pixel. The
    // computation is done in float precision and saturated back to the
    // source depth. Returns the input unchanged when the license is
    // invalid, the image is empty, the distribution is degenerate at zero,
    // or an OpenCV error occurs.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image;
    }

    // A negative spread makes no sense; treat it as zero.
    if (stddev < 0) {
        stddev = 0;
    }

    // Nothing to add when both parameters are zero.
    if (stddev == 0 && mean == 0) {
        return image;
    }

    try {
        // Promote to float so noise can go negative without early clipping.
        cv::Mat workBuffer;
        image.convertTo(workBuffer, CV_32F);

        // Per-pixel Gaussian field with the same channel count.
        cv::Mat gaussian(image.size(), CV_32FC(image.channels()));
        cv::randn(gaussian, mean, stddev);

        workBuffer += gaussian;

        // convertTo saturates out-of-range values back into the source type.
        cv::Mat output;
        workBuffer.convertTo(output, image.type());
        return output;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in AddGaussianNoiseFast: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::AddSaltAndPepperNoise(const cv::Mat& image, double amount) {
    // Forces a random fraction `amount` of pixels to pure white ("salt") or
    // pure black ("pepper"), half of the fraction each, using vectorised
    // mask operations. Returns the input unchanged on invalid license,
    // empty input, amount <= 0, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image;
    }

    // amount <= 0 means no noise; >= 1 is clamped just below full coverage.
    if (amount <= 0 || amount >= 1) {
        if (amount <= 0) return image;
        amount = 0.99;
    }

    try {
        cv::Mat result = image.clone();

        // One uniform random value per pixel drives both masks.
        cv::Mat uniform(image.size(), CV_32F);
        cv::randu(uniform, 0, 1.0);

        const double saltCut = 1.0 - (amount / 2.0); // top amount/2 percentile
        const double pepperCut = amount / 2.0;       // bottom amount/2 percentile
        const cv::Mat saltMask = uniform > saltCut;
        const cv::Mat pepperMask = uniform < pepperCut;

        // A single-channel mask applies across all channels of the target.
        result.setTo(cv::Scalar::all(255), saltMask);
        result.setTo(cv::Scalar::all(0), pepperMask);

        return result;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in AddSaltAndPepperNoiseFast: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
cv::Mat ANSOPENCV::AddSpeckleNoise(const cv::Mat& image, double stddev) {
    // Applies multiplicative (speckle) noise: out = in * (1 + N(0, stddev)).
    // Returns the input unchanged on invalid license, empty input,
    // stddev == 0 (identity), negative stddev (rejected), or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid || image.empty()) {
        return image; // shallow copy, cheap
    }

    // A negative spread is a caller error.
    if (stddev < 0) {
        std::cerr << "Error: stddev must be non-negative in AddSpeckleNoise" << std::endl;
        return image;
    }

    // Zero spread: multiplying by exactly 1 everywhere -- nothing to do.
    if (stddev == 0) {
        return image;
    }

    try {
        // Float precision for the multiplicative calculation.
        cv::Mat asFloat;
        image.convertTo(asFloat, CV_32F);

        // Zero-mean Gaussian field, then shifted so it multiplies around 1.0
        // (~99.7% of factors land in [1 - 3*sigma, 1 + 3*sigma]).
        cv::Mat factor(image.size(), CV_32FC(image.channels()));
        cv::randn(factor, 0, stddev);
        factor += 1.0;

        cv::Mat product;
        cv::multiply(asFloat, factor, product);

        // Saturating conversion back to the source depth.
        cv::Mat result;
        product.convertTo(result, image.type());
        return result;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in AddSpeckleNoise: " << e.what() << std::endl;
        return image;
    }
}
|
||
|
||
void ANSOPENCV::InitCameraNetwork() {
    // One-time initialisation of the camera networking stack: sockets
    // first, then the buffer pools used by the RTSP and HTTP layers.
    // 200 is the number of pre-allocated buffers per pool -- presumably
    // sized for the expected stream count; confirm against the pool APIs.
    network_init();
    sys_buf_init(200);
    rtsp_parse_buf_init(200);
    http_msg_buf_init(200);
}
|
||
void ANSOPENCV::DeinitCameraNetwork() {
    // Tears down the camera networking stack in the exact reverse order of
    // InitCameraNetwork: buffer pools first, sockets last.
    http_msg_buf_deinit();
    rtsp_parse_buf_deinit();
    sys_buf_deinit();
    network_deinit();
}
|
||
|
||
cv::Mat ANSOPENCV::resizeImageToFit(const cv::Mat& inputImage, int maxWidth, int maxHeight, int& newWidth, int& newHeight) {
    // Downscales inputImage (if needed) so it fits within maxWidth x
    // maxHeight while preserving aspect ratio; never upscales. The final
    // dimensions are reported through newWidth/newHeight. On bad input the
    // original image (or an empty Mat) is returned and the out-params are
    // set to a sensible fallback.
    if (inputImage.empty()) {
        // Fixed: the message previously mentioned an invalid license,
        // but this function performs no license check.
        std::cerr << "Error: Input image is empty!" << std::endl;
        newWidth = 0;
        newHeight = 0;
        return cv::Mat();
    }

    // Validate max dimensions
    if (maxWidth <= 0 || maxHeight <= 0) {
        std::cerr << "Error: Invalid max dimensions in resizeImageToFit" << std::endl;
        newWidth = inputImage.cols;
        newHeight = inputImage.rows;
        return inputImage;
    }

    const int width = inputImage.cols;
    const int height = inputImage.rows;

    // If image already fits, return as-is (shallow copy: fast, safe for
    // read-only use).
    if (width <= maxWidth && height <= maxHeight) {
        newWidth = width;
        newHeight = height;
        return inputImage;
    }

    try {
        // Compute optimal scale factor maintaining aspect ratio.
        // (min/max are used unqualified to match the rest of this file.)
        const double scale = min(static_cast<double>(maxWidth) / width,
            static_cast<double>(maxHeight) / height);

        // Use rounding for more accurate dimensions
        newWidth = static_cast<int>(std::round(width * scale));
        newHeight = static_cast<int>(std::round(height * scale));

        // Ensure dimensions are at least 1x1
        newWidth = max(1, newWidth);
        newHeight = max(1, newHeight);

        // INTER_AREA for downscaling, INTER_LINEAR otherwise.
        const int interpolation = (scale < 1.0) ? cv::INTER_AREA : cv::INTER_LINEAR;

        cv::Mat resizedImage;
        cv::resize(inputImage, resizedImage, cv::Size(newWidth, newHeight), 0, 0, interpolation);

        return resizedImage;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in resizeImageToFit: " << e.what() << std::endl;
        newWidth = width;
        newHeight = height;
        return inputImage;
    }
}
|
||
|
||
std::string ANSOPENCV::VectorDetectionToJsonString(const std::vector<DetectionObject>& dets)
|
||
{
|
||
if (dets.empty()) {
|
||
return R"({"results":[]})";
|
||
}
|
||
try {
|
||
nlohmann::json root;
|
||
auto& results = root["results"] = nlohmann::json::array();
|
||
|
||
for (const auto& det : dets) {
|
||
results.push_back({
|
||
{"class_id", std::to_string(det.classId)},
|
||
{"track_id", std::to_string(det.trackId)},
|
||
{"class_name", det.className},
|
||
{"prob", std::to_string(det.confidence)},
|
||
{"x", std::to_string(det.box.x)},
|
||
{"y", std::to_string(det.box.y)},
|
||
{"width", std::to_string(det.box.width)},
|
||
{"height", std::to_string(det.box.height)},
|
||
{"mask", ""}, // TODO: convert masks to comma separated string
|
||
{"extra_info",det.extraInfo}
|
||
});
|
||
}
|
||
return root.dump();
|
||
}
|
||
catch (const std::exception& e) {
|
||
// Add your error logging here if needed
|
||
return R"({"results":[],"error":"Serialization failed"})";
|
||
}
|
||
}
|
||
|
||
double ANSOPENCV::CalculateIoU(const cv::Rect& box1, const cv::Rect& box2) {
    // Intersection-over-Union of two axis-aligned rectangles, in [0, 1].
    // (Uses the class's recursive mutex so it can be called from
    // NonMaximumSuppression, which also holds the lock.)
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    int x1 = max(box1.x, box2.x);
    int y1 = max(box1.y, box2.y);
    int x2 = min(box1.x + box1.width, box2.x + box2.width);
    int y2 = min(box1.y + box1.height, box2.y + box2.height);

    int intersectionArea = max(0, x2 - x1) * max(0, y2 - y1);
    int box1Area = box1.width * box1.height;
    int box2Area = box2.width * box2.height;

    // Fixed: when both boxes are degenerate (zero area) the union is zero
    // and the old code divided by zero (NaN / undefined behaviour).
    int unionArea = box1Area + box2Area - intersectionArea;
    if (unionArea <= 0) {
        return 0.0;
    }
    return static_cast<double>(intersectionArea) / unionArea;
}
|
||
void ANSOPENCV::NonMaximumSuppression(std::vector<DetectionObject>& detectedObjects, double iouThreshold) {
|
||
std::lock_guard<std::recursive_mutex> lock(_mutex);
|
||
std::sort(detectedObjects.begin(), detectedObjects.end(),
|
||
[](const DetectionObject& a, const DetectionObject& b) {
|
||
return a.confidence > b.confidence;
|
||
});
|
||
|
||
std::vector<DetectionObject> finalDetections;
|
||
while (!detectedObjects.empty()) {
|
||
finalDetections.push_back(detectedObjects.front());
|
||
detectedObjects.erase(detectedObjects.begin());
|
||
|
||
for (auto it = detectedObjects.begin(); it != detectedObjects.end(); ) {
|
||
if (CalculateIoU(finalDetections.back().box, it->box) > iouThreshold) {
|
||
it = detectedObjects.erase(it);
|
||
}
|
||
else {
|
||
++it;
|
||
}
|
||
}
|
||
}
|
||
|
||
detectedObjects = std::move(finalDetections);
|
||
}
|
||
|
||
cv::Mat ANSOPENCV::ImageResizeV2(const cv::Mat& inputImage, int resizeWidth, int originalImageSize) {
    // Downscales inputImage to a target width derived from resizeWidth,
    // rescaling the request when originalImageSize differs from the actual
    // width (i.e. the caller measured against a resized frame). Never
    // upscales; aspect ratio is preserved. Returns an empty Mat on invalid
    // license, bad input, or error.
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (!_licenseValid) {
        std::cerr << "Error: License is not valid in ImageResizeV2." << std::endl;
        return cv::Mat();
    }
    if (inputImage.empty()) {
        std::cerr << "Error: Input image is empty!" << std::endl;
        return cv::Mat();
    }
    if (resizeWidth <= 0) {
        std::cerr << "Error: Invalid width provided for resizing!" << std::endl;
        return cv::Mat();
    }

    try {
        const int srcWidth = inputImage.cols;
        const int srcHeight = inputImage.rows;

        // Re-express the requested width in this image's coordinate space
        // when the caller's reference size differs.
        int goalWidth = resizeWidth;
        if (originalImageSize > 0 && originalImageSize != srcWidth) {
            const double ratio = static_cast<double>(srcWidth) / originalImageSize;
            goalWidth = static_cast<int>(std::round(resizeWidth * ratio));
        }

        // Never upscale: return the original (shallow copy) instead.
        if (goalWidth >= srcWidth) {
            return inputImage;
        }

        // Keep the aspect ratio.
        const int goalHeight = static_cast<int>(std::round(
            goalWidth * static_cast<double>(srcHeight) / srcWidth
        ));

        if (goalWidth <= 0 || goalHeight <= 0) {
            std::cerr << "Error: Computed dimensions invalid ("
                << goalWidth << "x" << goalHeight << ")" << std::endl;
            return cv::Mat();
        }

        // INTER_AREA gives the best quality for downscaling.
        cv::Mat shrunk;
        cv::resize(inputImage, shrunk, cv::Size(goalWidth, goalHeight),
            0, 0, cv::INTER_AREA);
        return shrunk;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ImageResizeV2: " << e.what() << std::endl;
        return cv::Mat();
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ImageResizeV2: " << e.what() << std::endl;
        return cv::Mat();
    }
}
|
||
|
||
bool ANSOPENCV::resizeImage(cv::Mat& inputImage, int resizeWidth, int originalImageSize) {
    // Downscale `inputImage` in place so its width becomes `resizeWidth`,
    // preserving the aspect ratio. When `originalImageSize` is positive and
    // differs from the actual image width, `resizeWidth` is first rescaled by
    // actualWidth / originalImageSize (the caller expressed the target width
    // relative to a different reference width). Upscaling is never performed:
    // if the computed target is >= the current width the image is left
    // untouched and the call still succeeds.
    //
    // Returns true on success (including the no-op case), false on empty
    // input, non-positive width, or an OpenCV failure.
    if (inputImage.empty()) {
        std::cerr << "Error: Input image is empty!" << std::endl;
        return false;
    }

    if (resizeWidth <= 0) {
        std::cerr << "Error: Invalid width provided for resizing!" << std::endl;
        return false;
    }

    try {
        const int srcW = inputImage.cols;
        const int srcH = inputImage.rows;

        // Re-express the requested width in this image's own scale when it
        // was given relative to `originalImageSize`.
        int dstW = resizeWidth;
        if (originalImageSize > 0 && originalImageSize != srcW) {
            const double ratio = static_cast<double>(srcW) / originalImageSize;
            dstW = static_cast<int>(std::round(resizeWidth * ratio));
        }

        // Same size or larger: deliberately skip — this helper only shrinks.
        if (dstW >= srcW) {
            return true;
        }

        // Derive the height from the source aspect ratio.
        const int dstH = static_cast<int>(std::round(
            dstW * static_cast<double>(srcH) / srcW));

        if (dstW <= 0 || dstH <= 0) {
            std::cerr << "Error: Computed dimensions invalid ("
                      << dstW << "x" << dstH << ")" << std::endl;
            return false;
        }

        // INTER_AREA is the preferred interpolation for downscaling (fast,
        // avoids moiré artifacts).
        cv::resize(inputImage, inputImage, cv::Size(dstW, dstH),
                   0, 0, cv::INTER_AREA);

        return true;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in resizeImage: " << e.what() << std::endl;
        return false;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in resizeImage: " << e.what() << std::endl;
        return false;
    }
}
|
||
|
||
bool ANSOPENCV::cropImage(cv::Mat& inputImage, const cv::Rect& resizeROI, int originalImageSize) {
    // Crop `inputImage` in place to `resizeROI`. When `originalImageSize` is
    // positive and differs from the actual image width, the ROI is first
    // rescaled by actualWidth / originalImageSize (the ROI was expressed
    // against a differently-sized reference image). The ROI is then clamped
    // to the image bounds before cropping.
    //
    // Returns true on success, false on empty input, a degenerate ROI, an
    // ROI entirely outside the image, or an OpenCV failure.
    if (inputImage.empty()) {
        std::cerr << "Error: Input image is empty!" << std::endl;
        return false;
    }

    if (resizeROI.width <= 0 || resizeROI.height <= 0) {
        std::cerr << "Error: Invalid ROI size! Width and height must be greater than zero." << std::endl;
        return false;
    }

    try {
        const int imgW = inputImage.cols;
        const int imgH = inputImage.rows;

        // Re-express the ROI in this image's own scale when needed.
        cv::Rect region = resizeROI;
        if (originalImageSize > 0 && originalImageSize != imgW) {
            const double ratio = static_cast<double>(imgW) / originalImageSize;
            // std::round for the least lossy integer scaling.
            region.x      = static_cast<int>(std::round(resizeROI.x * ratio));
            region.y      = static_cast<int>(std::round(resizeROI.y * ratio));
            region.width  = static_cast<int>(std::round(resizeROI.width * ratio));
            region.height = static_cast<int>(std::round(resizeROI.height * ratio));
        }

        // Clamp: origin into the image first, then the extent relative to
        // the (already clamped) origin.
        region.x = max(0, region.x);
        region.y = max(0, region.y);
        region.width = min(region.width, imgW - region.x);
        region.height = min(region.height, imgH - region.y);

        // A non-positive extent after clamping means the ROI lies outside
        // the image entirely.
        if (region.width <= 0 || region.height <= 0) {
            std::cerr << "Error: ROI exceeds image boundaries! ROI("
                      << region.x << "," << region.y << "," << region.width << "," << region.height
                      << ") vs Image(" << imgW << "x" << imgH << ")" << std::endl;
            return false;
        }

        // clone() materialises the sub-view into its own buffer; assigning a
        // plain view back into `inputImage` would leave the caller holding a
        // reference into memory that may be released.
        inputImage = inputImage(region).clone();

        return true;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in cropImage: " << e.what() << std::endl;
        return false;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in cropImage: " << e.what() << std::endl;
        return false;
    }
}
|
||
|
||
// Encode all images in `imageFolder` (jpg/jpeg/png/bmp, sorted by filename,
// one image = one frame) into an MP4 at `outputVideoPath` using OpenCV's
// VideoWriter. Frames wider than `maxWidth` (if > 0) are downscaled to fit,
// preserving aspect ratio; `fps` is clamped to [1, 60]; output is capped at
// 5 minutes of frames. Returns true on success, false on any failure.
//
// Thread-safety: serialised per output file via a process-wide map of
// timed mutexes; a second thread targeting the same file fails fast after
// MUTEX_TIMEOUT_MS rather than blocking indefinitely.
bool ANSOPENCV::ImagesToMP4(const std::string& imageFolder,
    const std::string& outputVideoPath,
    int maxWidth, int fps) {
    // Per-output-file mutex for thread safety across concurrent conversions.
    // NOTE(review): entries are never erased, so this map grows by one entry
    // per distinct output path for the lifetime of the process.
    static std::mutex mapMutex;
    static std::map<std::string, std::unique_ptr<std::timed_mutex>> fileMutexes;

    std::shared_ptr<std::timed_mutex> fileMutex;
    {
        std::lock_guard<std::mutex> lock(mapMutex);
        // Canonicalise the parent directory so different spellings of the same
        // output path share one mutex.
        // NOTE(review): std::filesystem::canonical throws if the parent
        // directory does not exist, and this call sits outside the try block
        // below — such a failure propagates to the caller as an exception
        // rather than a `false` return. TODO confirm intended.
        std::string canonicalPath = std::filesystem::canonical(
            std::filesystem::path(outputVideoPath).parent_path()).string() +
            "/" + std::filesystem::path(outputVideoPath).filename().string();

        if (fileMutexes.find(canonicalPath) == fileMutexes.end()) {
            fileMutexes[canonicalPath] = std::make_unique<std::timed_mutex>();
        }
        // Non-owning alias (no-op deleter): the map retains ownership; the
        // shared_ptr only lets us hold the mutex pointer past the map lock.
        fileMutex = std::shared_ptr<std::timed_mutex>(
            fileMutexes[canonicalPath].get(), [](std::timed_mutex*) {});
    }

    std::unique_lock<std::timed_mutex> lock(*fileMutex, std::defer_lock);
    if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
        std::cerr << "Error: Another thread is writing to " << outputVideoPath << std::endl;
        return false;
    }

    // Declared outside the try so the catch handlers can release it.
    cv::VideoWriter videoWriter;

    try {
        // Clamp FPS to [1, 60].
        // NOTE(review): unqualified max/min — presumably the <windows.h>
        // macros on this platform; verify against build configuration.
        fps = max(1, min(60, fps));

        // Collect all image files matching the supported extensions.
        std::vector<cv::String> imageFiles;
        const std::vector<std::string> extensions = { "*.jpg", "*.jpeg", "*.png", "*.bmp" };

        for (const auto& ext : extensions) {
            std::vector<cv::String> temp;
            cv::glob(imageFolder + "/" + ext, temp, false);
            imageFiles.insert(imageFiles.end(),
                std::make_move_iterator(temp.begin()),
                std::make_move_iterator(temp.end()));
        }

        if (imageFiles.empty()) {
            std::cerr << "Error: No images found in folder: " << imageFolder << std::endl;
            return false;
        }

        // Sort for consistent (lexicographic) frame ordering.
        std::sort(imageFiles.begin(), imageFiles.end());

        // Cap at 5 minutes max duration (fps frames/sec * 300 sec).
        const int maxFrames = fps * 300;
        if (static_cast<int>(imageFiles.size()) > maxFrames) {
            std::cout << "Warning: Truncating from " << imageFiles.size()
                << " to " << maxFrames << " images (5-minute limit at "
                << fps << " FPS)" << std::endl;
            imageFiles.resize(maxFrames);
        }

        const int numImages = static_cast<int>(imageFiles.size());

        // Read first image to determine the video dimensions; all later
        // frames are resized to match it.
        cv::Mat firstImage = cv::imread(imageFiles[0], cv::IMREAD_COLOR);
        if (firstImage.empty()) {
            std::cerr << "Error: Could not read first image: " << imageFiles[0] << std::endl;
            return false;
        }

        int videoWidth = firstImage.cols;
        int videoHeight = firstImage.rows;
        bool needsResize = false;

        if (maxWidth > 0 && firstImage.cols > maxWidth) {
            // Scale down to fit within maxWidth, preserving aspect ratio.
            double scale = static_cast<double>(maxWidth) / firstImage.cols;
            videoWidth = static_cast<int>(std::round(firstImage.cols * scale));
            videoHeight = static_cast<int>(std::round(firstImage.rows * scale));
            needsResize = true;
        }

        // Force even dimensions (required for H.264 4:2:0 chroma subsampling).
        videoWidth = (videoWidth / 2) * 2;
        videoHeight = (videoHeight / 2) * 2;

        if (videoWidth < 2 || videoHeight < 2) {
            std::cerr << "Error: Resulting video dimensions too small: "
                << videoWidth << "x" << videoHeight << std::endl;
            return false;
        }

        std::cout << "[Thread " << std::this_thread::get_id() << "] "
            << "Image: " << firstImage.cols << "x" << firstImage.rows
            << " -> Video: " << videoWidth << "x" << videoHeight
            << " | " << numImages << " frames @ " << fps << " FPS"
            << " (~" << (numImages / fps) << "s)" << std::endl;

        firstImage.release();

        // Ensure .mp4 extension on the output path.
        std::string mp4OutputPath = outputVideoPath;
        if (mp4OutputPath.size() < 4 ||
            mp4OutputPath.substr(mp4OutputPath.size() - 4) != ".mp4") {
            mp4OutputPath += ".mp4";
        }

        // ---- libx264 tuning for smaller files at preserved quality ----
        // OpenCV's FFmpeg wrapper (>= 4.6) reads OPENCV_FFMPEG_WRITER_OPTIONS
        // at open() time and forwards the options to the encoder. Key points:
        //
        // video_codec;libx264 — FORCE libx264 encoder by name. On Windows,
        //   the default H.264 encoder registered in opencv_videoio_ffmpeg is
        //   often OpenH264, which silently ignores crf/preset/tune. Without
        //   this override, the tuning below has no effect.
        // crf;26              — smaller than default 23, still visually good
        // preset;slow         — better compression efficiency
        // tune;stillimage     — optimised for mostly-static frames
        // movflags;+faststart — moov atom at front (good for HTTP streaming)
        //
        // We set the env var only around the H.264 codec attempts and restore
        // it immediately so the MP4V/MJPG fallback path is never polluted by
        // video_codec;libx264 (which would be a codec-id mismatch).
        constexpr const char* kWriterOptsEnv = "OPENCV_FFMPEG_WRITER_OPTIONS";
        constexpr const char* kX264WriterOpts =
            "video_codec;libx264|crf;26|preset;slow|tune;stillimage|movflags;+faststart";

        // Remember any pre-existing value so we can restore it afterwards.
        std::string prevWriterOpts;
        bool hadPrevWriterOpts = false;
        if (const char* prev = std::getenv(kWriterOptsEnv)) {
            prevWriterOpts = prev;
            hadPrevWriterOpts = true;
        }

        auto setX264Opts = [&]() {
            _putenv_s(kWriterOptsEnv, kX264WriterOpts);
        };
        auto restoreOpts = [&]() {
            if (hadPrevWriterOpts) {
                _putenv_s(kWriterOptsEnv, prevWriterOpts.c_str());
            } else {
                // NOTE(review): _putenv_s with "" removes the variable from
                // the CRT environment on Windows — presumably the intent.
                _putenv_s(kWriterOptsEnv, "");
            }
        };

        // Try codecs in order of preference.
        // avc1 / H264 / x264 route to libx264 via FFmpeg when forced via the
        // video_codec option above. MP4V and MJPG are last-resort fallbacks —
        // they produce substantially larger files.
        const std::vector<std::pair<std::string, int>> codecs = {
            {"avc1", cv::VideoWriter::fourcc('a', 'v', 'c', '1')},
            {"H264", cv::VideoWriter::fourcc('H', '2', '6', '4')},
            {"x264", cv::VideoWriter::fourcc('x', '2', '6', '4')},
            {"MP4V", cv::VideoWriter::fourcc('M', 'P', '4', 'V')},
            {"MJPG", cv::VideoWriter::fourcc('M', 'J', 'P', 'G')}
        };

        bool codecFound = false;
        std::string usedCodec;
        for (const auto& [name, fourcc] : codecs) {
            const bool isH264Family =
                (name == "avc1" || name == "H264" || name == "x264");

            // Env var must be set/cleared BEFORE open(), which is when the
            // wrapper reads it.
            if (isH264Family) {
                setX264Opts();
            } else {
                restoreOpts();
            }

            videoWriter.open(mp4OutputPath, fourcc, fps,
                cv::Size(videoWidth, videoHeight), true);

            if (videoWriter.isOpened()) {
                std::cout << "Using codec: " << name
                    << (isH264Family ? " (libx264 forced, crf=26, preset=slow, tune=stillimage)" : "")
                    << std::endl;
                usedCodec = name;
                codecFound = true;
                break;
            }
            videoWriter.release();
        }

        // Always restore the env var after we're done — don't leak the
        // libx264 override into the rest of the process.
        restoreOpts();

        if (!codecFound) {
            std::cerr << "Error: Could not open video writer with any codec!" << std::endl;
            return false;
        }

        // Warn loudly if we fell through to a non-H.264 fallback — these
        // produce files many times larger than H.264 at similar quality.
        if (usedCodec == "MP4V" || usedCodec == "MJPG") {
            std::cerr << "Warning: H.264 (libx264) encoder unavailable, fell back to "
                << usedCodec << ". Output file will be significantly larger. "
                << "Check that opencv_videoio_ffmpeg is present and that the "
                << "bundled FFmpeg was built with libx264 support." << std::endl;
        }

        // Hint for non-FFmpeg backends (e.g. MJPG fallback on some platforms).
        // Ignored by libx264 which is controlled via the env var above.
        videoWriter.set(cv::VIDEOWRITER_PROP_QUALITY, 85.0);

        // Pre-allocate reusable matrices to avoid per-frame allocation.
        cv::Mat img;
        cv::Mat resizedImg;

        // 1 image = 1 frame; unreadable files are skipped with a warning.
        for (int i = 0; i < numImages; ++i) {
            img = cv::imread(imageFiles[i], cv::IMREAD_COLOR);
            if (img.empty()) {
                std::cerr << "Warning: Could not read: " << imageFiles[i] << std::endl;
                continue;
            }

            // Resize whenever this frame doesn't already match the video
            // dimensions (images in the folder may vary in size).
            if (needsResize || img.cols != videoWidth || img.rows != videoHeight) {
                cv::resize(img, resizedImg, cv::Size(videoWidth, videoHeight),
                    0, 0, cv::INTER_AREA);
                videoWriter.write(resizedImg);
            }
            else {
                videoWriter.write(img);
            }

            img.release();
        }

        resizedImg.release();
        videoWriter.release();

        std::cout << "Video created: " << mp4OutputPath
            << " (" << numImages << " frames, "
            << fps << " FPS, ~" << (numImages / fps) << "s)" << std::endl;

        return true;
    }
    catch (const cv::Exception& e) {
        videoWriter.release();
        std::cerr << "OpenCV exception: " << e.what() << std::endl;
        return false;
    }
    catch (const std::exception& e) {
        videoWriter.release();
        std::cerr << "Exception: " << e.what() << std::endl;
        return false;
    }
}
|
||
// ================================================================
// ImagesToMP4FF — Direct FFmpeg (libav*) encoder pipeline
// ================================================================
// Encoder preference: libx265 (HEVC) > libx264 (H.264) > mpeg4.
// Produces substantially smaller files than ImagesToMP4 at
// equivalent quality because we drive libx265/libx264 directly,
// bypassing OpenCV's VideoWriter + opencv_videoio_ffmpeg wrapper
// (which on Windows often ends up using OpenH264 with no tunables).
//
// Threading: mutex-per-output-file, independent of ImagesToMP4's
// mutex map, so the two functions can coexist without interference.
// ================================================================
// Parameters mirror ImagesToMP4: images in `imageFolder` become one
// frame each; `maxWidth` (if > 0) caps the video width; `fps` is
// clamped to [1, 60]; output capped at 5 minutes. Returns true on
// success, false on any failure.
bool ANSOPENCV::ImagesToMP4FF(const std::string& imageFolder,
    const std::string& outputVideoPath,
    int maxWidth, int fps) {

    // ---- Per-output-file mutex (independent of ImagesToMP4's map) ----
    // NOTE(review): entries are never erased; the map grows for the
    // lifetime of the process.
    static std::mutex mapMutexFF;
    static std::map<std::string, std::unique_ptr<std::timed_mutex>> fileMutexesFF;

    std::shared_ptr<std::timed_mutex> fileMutex;
    {
        std::lock_guard<std::mutex> lock(mapMutexFF);
        // NOTE(review): std::filesystem::canonical throws if the output's
        // parent directory does not exist; this sits outside the try block
        // below, so that failure surfaces as an exception, not `false`.
        std::string canonicalPath = std::filesystem::canonical(
            std::filesystem::path(outputVideoPath).parent_path()).string() +
            "/" + std::filesystem::path(outputVideoPath).filename().string();

        if (fileMutexesFF.find(canonicalPath) == fileMutexesFF.end()) {
            fileMutexesFF[canonicalPath] = std::make_unique<std::timed_mutex>();
        }
        // Non-owning alias (no-op deleter): the map keeps ownership.
        fileMutex = std::shared_ptr<std::timed_mutex>(
            fileMutexesFF[canonicalPath].get(), [](std::timed_mutex*) {});
    }

    std::unique_lock<std::timed_mutex> lock(*fileMutex, std::defer_lock);
    if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
        std::cerr << "Error: Another thread is writing to " << outputVideoPath << std::endl;
        return false;
    }

    // ---- RAII bag for FFmpeg resources -------------------------------
    // Every early `return false` and every exception path below relies on
    // this destructor to release the FFmpeg objects and close the output
    // file handle; release order matters (frame/packet before codec
    // context, avio before format context).
    struct FFState {
        AVFormatContext* fmt_ctx = nullptr;
        AVCodecContext* codec_ctx = nullptr;
        AVFrame* frame = nullptr;
        AVPacket* pkt = nullptr;
        SwsContext* sws = nullptr;
        ~FFState() {
            if (sws) { sws_freeContext(sws); sws = nullptr; }
            if (frame) { av_frame_free(&frame); }
            if (pkt) { av_packet_free(&pkt); }
            if (codec_ctx) { avcodec_free_context(&codec_ctx); }
            if (fmt_ctx) {
                // Only close the AVIO handle we opened ourselves (muxers
                // flagged AVFMT_NOFILE manage their own I/O).
                if (fmt_ctx->pb && !(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
                    avio_closep(&fmt_ctx->pb);
                }
                avformat_free_context(fmt_ctx);
                fmt_ctx = nullptr;
            }
        }
    } ff;

    // Translate an FFmpeg error code into a readable message.
    auto ffErr = [](int err) -> std::string {
        char buf[AV_ERROR_MAX_STRING_SIZE] = {0};
        av_strerror(err, buf, sizeof(buf));
        return std::string(buf);
    };

    try {
        // Clamp FPS to [1, 60].
        // NOTE(review): unqualified max/min — presumably the <windows.h>
        // macros on this platform; verify against build configuration.
        fps = max(1, min(60, fps));

        // ---- Collect image files ----
        std::vector<cv::String> imageFiles;
        const std::vector<std::string> extensions = { "*.jpg", "*.jpeg", "*.png", "*.bmp" };
        for (const auto& ext : extensions) {
            std::vector<cv::String> temp;
            cv::glob(imageFolder + "/" + ext, temp, false);
            imageFiles.insert(imageFiles.end(),
                std::make_move_iterator(temp.begin()),
                std::make_move_iterator(temp.end()));
        }
        if (imageFiles.empty()) {
            std::cerr << "Error: No images found in folder: " << imageFolder << std::endl;
            return false;
        }
        // Lexicographic sort gives deterministic frame ordering.
        std::sort(imageFiles.begin(), imageFiles.end());

        // Cap at 5 minutes max duration.
        const int maxFrames = fps * 300;
        if (static_cast<int>(imageFiles.size()) > maxFrames) {
            std::cout << "Warning: Truncating from " << imageFiles.size()
                << " to " << maxFrames << " images (5-minute limit at "
                << fps << " FPS)" << std::endl;
            imageFiles.resize(maxFrames);
        }
        const int numImages = static_cast<int>(imageFiles.size());

        // ---- First image -> dimensions ----
        cv::Mat firstImage = cv::imread(imageFiles[0], cv::IMREAD_COLOR);
        if (firstImage.empty()) {
            std::cerr << "Error: Could not read first image: " << imageFiles[0] << std::endl;
            return false;
        }

        int videoWidth = firstImage.cols;
        int videoHeight = firstImage.rows;
        bool needsResize = false;
        if (maxWidth > 0 && firstImage.cols > maxWidth) {
            // Fit within maxWidth, preserving aspect ratio.
            double scale = static_cast<double>(maxWidth) / firstImage.cols;
            videoWidth = static_cast<int>(std::round(firstImage.cols * scale));
            videoHeight = static_cast<int>(std::round(firstImage.rows * scale));
            needsResize = true;
        }
        // Force even dims (required for YUV420P chroma subsampling).
        videoWidth = (videoWidth / 2) * 2;
        videoHeight = (videoHeight / 2) * 2;
        if (videoWidth < 2 || videoHeight < 2) {
            std::cerr << "Error: Resulting video dimensions too small: "
                << videoWidth << "x" << videoHeight << std::endl;
            return false;
        }

        std::cout << "[FF Thread " << std::this_thread::get_id() << "] "
            << "Image: " << firstImage.cols << "x" << firstImage.rows
            << " -> Video: " << videoWidth << "x" << videoHeight
            << " | " << numImages << " frames @ " << fps << " FPS"
            << " (~" << (numImages / fps) << "s)" << std::endl;
        firstImage.release();

        // Ensure .mp4 extension on the output path.
        std::string mp4OutputPath = outputVideoPath;
        if (mp4OutputPath.size() < 4 ||
            mp4OutputPath.substr(mp4OutputPath.size() - 4) != ".mp4") {
            mp4OutputPath += ".mp4";
        }

        // ---- Encoder selection ----
        // Each candidate carries its quality knobs; empty string = knob not
        // used for that encoder.
        struct EncChoice {
            const char* name;    // FFmpeg encoder name for lookup
            const char* display; // human-readable label for logs
            const char* crf;     // empty = no crf (bitrate-targeted)
            const char* preset;  // empty = no preset
            const char* tune;    // empty = no tune
            bool isHEVC;         // true -> needs the hvc1 codec tag below
        };
        const std::vector<EncChoice> encChoices = {
            // HEVC: CRF 28 ≈ H.264 CRF 23 in perceived quality.
            // libx265 has no 'stillimage' tune; default is fine.
            { "libx265", "HEVC/H.265 (libx265)", "28", "slow", "", true },
            // H.264: CRF 26 with stillimage tune for slideshow content.
            { "libx264", "H.264 (libx264)", "26", "slow", "stillimage", false },
            // MPEG-4 Part 2 fallback: uses bitrate, not CRF. Larger output.
            { "mpeg4", "MPEG-4 Part 2 (native FFmpeg)", "", "", "", false },
        };

        // Pick the first encoder the linked FFmpeg build actually provides.
        const AVCodec* codec = nullptr;
        const EncChoice* chosen = nullptr;
        for (const auto& e : encChoices) {
            if (const AVCodec* c = avcodec_find_encoder_by_name(e.name)) {
                codec = c;
                chosen = &e;
                break;
            }
        }
        if (!codec) {
            std::cerr << "[FFmpeg] Error: no suitable encoder available "
                "(tried libx265, libx264, mpeg4). Bundled FFmpeg was built "
                "without any of these encoders." << std::endl;
            return false;
        }
        std::cout << "[FFmpeg] Using encoder: " << chosen->display;
        if (chosen->crf[0]) std::cout << " crf=" << chosen->crf;
        if (chosen->preset[0]) std::cout << " preset=" << chosen->preset;
        if (chosen->tune[0]) std::cout << " tune=" << chosen->tune;
        std::cout << std::endl;

        int ret = 0;

        // ---- 1. Allocate output format context (MP4 muxer) ----
        ret = avformat_alloc_output_context2(&ff.fmt_ctx, nullptr, "mp4", mp4OutputPath.c_str());
        if (ret < 0 || !ff.fmt_ctx) {
            std::cerr << "[FFmpeg] avformat_alloc_output_context2 failed: " << ffErr(ret) << std::endl;
            return false;
        }

        // ---- 2. New stream (owned by fmt_ctx) ----
        AVStream* stream = avformat_new_stream(ff.fmt_ctx, nullptr);
        if (!stream) {
            std::cerr << "[FFmpeg] avformat_new_stream failed" << std::endl;
            return false;
        }

        // ---- 3. Codec context ----
        ff.codec_ctx = avcodec_alloc_context3(codec);
        if (!ff.codec_ctx) {
            std::cerr << "[FFmpeg] avcodec_alloc_context3 failed" << std::endl;
            return false;
        }
        ff.codec_ctx->width = videoWidth;
        ff.codec_ctx->height = videoHeight;
        ff.codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
        ff.codec_ctx->time_base = AVRational{ 1, fps };  // 1 tick = 1 frame
        ff.codec_ctx->framerate = AVRational{ fps, 1 };
        ff.codec_ctx->gop_size = fps * 2;  // keyframe every ~2s
        ff.codec_ctx->max_b_frames = 2;

        // HEVC in MP4: force 'hvc1' codec tag so QuickTime and Apple
        // players accept the file. Without this, libx265 defaults to
        // 'hev1' which some players refuse.
        if (chosen->isHEVC) {
            ff.codec_ctx->codec_tag = MKTAG('h', 'v', 'c', '1');
        }

        // MP4 muxer requires global header flag for most codecs
        if (ff.fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
            ff.codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }

        // MPEG-4 fallback: set a modest target bitrate (no CRF support)
        if (std::strcmp(chosen->name, "mpeg4") == 0) {
            ff.codec_ctx->bit_rate = 1500000; // ~1.5 Mbps
        }

        // ---- 4. Encoder-private options ----
        AVDictionary* encOpts = nullptr;
        if (chosen->crf[0]) av_dict_set(&encOpts, "crf", chosen->crf, 0);
        if (chosen->preset[0]) av_dict_set(&encOpts, "preset", chosen->preset, 0);
        if (chosen->tune[0]) av_dict_set(&encOpts, "tune", chosen->tune, 0);

        // ---- 5. Open encoder ----
        ret = avcodec_open2(ff.codec_ctx, codec, &encOpts);
        if (ret < 0) {
            std::cerr << "[FFmpeg] avcodec_open2 failed: " << ffErr(ret) << std::endl;
            av_dict_free(&encOpts);
            return false;
        }
        // avcodec_open2 leaves unconsumed options in the dictionary —
        // report any the encoder silently ignored.
        if (encOpts) {
            AVDictionaryEntry* e = nullptr;
            while ((e = av_dict_get(encOpts, "", e, AV_DICT_IGNORE_SUFFIX))) {
                std::cerr << "[FFmpeg] Warning: encoder ignored option "
                    << e->key << "=" << e->value << std::endl;
            }
            av_dict_free(&encOpts);
        }

        // ---- 6. Copy codec params -> stream ----
        ret = avcodec_parameters_from_context(stream->codecpar, ff.codec_ctx);
        if (ret < 0) {
            std::cerr << "[FFmpeg] avcodec_parameters_from_context failed: " << ffErr(ret) << std::endl;
            return false;
        }
        stream->time_base = ff.codec_ctx->time_base;

        // ---- 7. Open output file ----
        if (!(ff.fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ff.fmt_ctx->pb, mp4OutputPath.c_str(), AVIO_FLAG_WRITE);
            if (ret < 0) {
                std::cerr << "[FFmpeg] avio_open('" << mp4OutputPath << "') failed: " << ffErr(ret) << std::endl;
                return false;
            }
        }

        // ---- 8. Write MP4 header with +faststart ----
        {
            AVDictionary* muxOpts = nullptr;
            av_dict_set(&muxOpts, "movflags", "+faststart", 0);
            ret = avformat_write_header(ff.fmt_ctx, &muxOpts);
            av_dict_free(&muxOpts);
            if (ret < 0) {
                std::cerr << "[FFmpeg] avformat_write_header failed: " << ffErr(ret) << std::endl;
                return false;
            }
        }

        // ---- 9. Allocate AVFrame (YUV420P) + AVPacket ----
        ff.frame = av_frame_alloc();
        ff.pkt = av_packet_alloc();
        if (!ff.frame || !ff.pkt) {
            std::cerr << "[FFmpeg] av_frame_alloc / av_packet_alloc failed" << std::endl;
            return false;
        }
        ff.frame->format = AV_PIX_FMT_YUV420P;
        ff.frame->width = videoWidth;
        ff.frame->height = videoHeight;
        ret = av_frame_get_buffer(ff.frame, 0);
        if (ret < 0) {
            std::cerr << "[FFmpeg] av_frame_get_buffer failed: " << ffErr(ret) << std::endl;
            return false;
        }

        // ---- 10. BGR24 -> YUV420P converter ----
        // Colour-space conversion only (no scaling): source and destination
        // dimensions are identical; frames are resized with OpenCV first.
        ff.sws = sws_getContext(
            videoWidth, videoHeight, AV_PIX_FMT_BGR24,
            videoWidth, videoHeight, AV_PIX_FMT_YUV420P,
            SWS_BILINEAR, nullptr, nullptr, nullptr);
        if (!ff.sws) {
            std::cerr << "[FFmpeg] sws_getContext failed" << std::endl;
            return false;
        }

        // ---- Helper: drain any packets the encoder has ready ----
        // Pulls packets until the encoder reports EAGAIN (needs more input)
        // or EOF (flushed), rescaling timestamps from codec to stream
        // time_base and interleave-writing each one.
        auto drainPackets = [&]() -> bool {
            for (;;) {
                int r = avcodec_receive_packet(ff.codec_ctx, ff.pkt);
                if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) return true;
                if (r < 0) {
                    std::cerr << "[FFmpeg] avcodec_receive_packet failed: " << ffErr(r) << std::endl;
                    return false;
                }
                av_packet_rescale_ts(ff.pkt, ff.codec_ctx->time_base, stream->time_base);
                ff.pkt->stream_index = stream->index;
                r = av_interleaved_write_frame(ff.fmt_ctx, ff.pkt);
                av_packet_unref(ff.pkt);
                if (r < 0) {
                    std::cerr << "[FFmpeg] av_interleaved_write_frame failed: " << ffErr(r) << std::endl;
                    return false;
                }
            }
        };

        // ---- 11. Encoding loop ----
        cv::Mat img;
        cv::Mat resizedImg;
        int64_t framesEncoded = 0;  // also serves as the PTS (1 tick/frame)

        for (int i = 0; i < numImages; ++i) {
            img = cv::imread(imageFiles[i], cv::IMREAD_COLOR);
            if (img.empty()) {
                std::cerr << "Warning: Could not read: " << imageFiles[i] << std::endl;
                continue;
            }

            // Resize any frame that doesn't match the video dimensions.
            cv::Mat* src = &img;
            if (needsResize || img.cols != videoWidth || img.rows != videoHeight) {
                cv::resize(img, resizedImg, cv::Size(videoWidth, videoHeight),
                    0, 0, cv::INTER_AREA);
                src = &resizedImg;
            }

            // The encoder may still hold references to the frame buffer;
            // make it writable (copy-on-write) before overwriting.
            ret = av_frame_make_writable(ff.frame);
            if (ret < 0) {
                std::cerr << "[FFmpeg] av_frame_make_writable failed: " << ffErr(ret) << std::endl;
                return false;
            }

            // Single packed BGR plane in; sws_scale fills the three YUV planes.
            const uint8_t* srcSlices[4] = { src->data, nullptr, nullptr, nullptr };
            int srcStride[4] = { static_cast<int>(src->step[0]), 0, 0, 0 };
            sws_scale(ff.sws, srcSlices, srcStride, 0, videoHeight,
                ff.frame->data, ff.frame->linesize);

            ff.frame->pts = framesEncoded;

            ret = avcodec_send_frame(ff.codec_ctx, ff.frame);
            if (ret < 0) {
                std::cerr << "[FFmpeg] avcodec_send_frame failed: " << ffErr(ret) << std::endl;
                return false;
            }
            if (!drainPackets()) return false;

            framesEncoded++;
            img.release();
        }

        // ---- 12. Flush encoder ----
        // nullptr frame signals end-of-stream; drain the delayed packets.
        ret = avcodec_send_frame(ff.codec_ctx, nullptr);
        if (ret < 0 && ret != AVERROR_EOF) {
            std::cerr << "[FFmpeg] flush send_frame failed: " << ffErr(ret) << std::endl;
            return false;
        }
        if (!drainPackets()) return false;

        // ---- 13. Write trailer (finalises moov; with +faststart,
        //          FFmpeg rewrites the file to move moov to the front) ----
        ret = av_write_trailer(ff.fmt_ctx);
        if (ret < 0) {
            std::cerr << "[FFmpeg] av_write_trailer failed: " << ffErr(ret) << std::endl;
            return false;
        }

        std::cout << "[FFmpeg] Video created: " << mp4OutputPath
            << " (" << framesEncoded << " frames, "
            << fps << " FPS, ~" << (framesEncoded / fps) << "s)"
            << " via " << chosen->display << std::endl;

        return true;
    }
    catch (const cv::Exception& e) {
        std::cerr << "[FFmpeg] OpenCV exception: " << e.what() << std::endl;
        return false;
    }
    catch (const std::exception& e) {
        std::cerr << "[FFmpeg] Exception: " << e.what() << std::endl;
        return false;
    }
}
|
||
|
||
// ================================================================
|
||
// ImagesToMP4HW — Hardware-accelerated FFmpeg encoder pipeline
|
||
// ================================================================
|
||
// Preference order: NVIDIA NVENC > Intel QSV > AMD AMF, HEVC first,
|
||
// then H.264 at each vendor, then software (libx265/libx264/mpeg4).
|
||
// Each encoder is probed by attempting a real avcodec_open2(); the
|
||
// first one to succeed is used. This avoids guessing based on GPU
|
||
// presence — we just try and let FFmpeg tell us what works.
|
||
//
|
||
// Per-encoder quality targets:
|
||
// NVENC: rc=vbr, cq=28 (HEVC) / cq=24 (H.264), preset=p5, tune=hq
|
||
// QSV: global_quality=28 (HEVC) / 24 (H.264), preset=slower
|
||
// AMF: quality=quality, rc=cqp, qp ~24/26/28 (HEVC) / 22/24/26 (H.264)
|
||
// libx265: crf=28, preset=slow
|
||
// libx264: crf=26, preset=slow, tune=stillimage
|
||
// mpeg4: bit_rate=1.5 Mbps (no CRF support)
|
||
//
|
||
// Pixel format: NV12 for QSV/AMF (native), YUV420P everywhere else.
|
||
// ================================================================
|
||
bool ANSOPENCV::ImagesToMP4HW(const std::string& imageFolder,
|
||
const std::string& outputVideoPath,
|
||
int maxWidth, int fps) {
|
||
|
||
// ---- Per-output-file mutex (independent of the other two) ----
|
||
static std::mutex mapMutexHW;
|
||
static std::map<std::string, std::unique_ptr<std::timed_mutex>> fileMutexesHW;
|
||
|
||
std::shared_ptr<std::timed_mutex> fileMutex;
|
||
{
|
||
std::lock_guard<std::mutex> lock(mapMutexHW);
|
||
std::string canonicalPath = std::filesystem::canonical(
|
||
std::filesystem::path(outputVideoPath).parent_path()).string() +
|
||
"/" + std::filesystem::path(outputVideoPath).filename().string();
|
||
|
||
if (fileMutexesHW.find(canonicalPath) == fileMutexesHW.end()) {
|
||
fileMutexesHW[canonicalPath] = std::make_unique<std::timed_mutex>();
|
||
}
|
||
fileMutex = std::shared_ptr<std::timed_mutex>(
|
||
fileMutexesHW[canonicalPath].get(), [](std::timed_mutex*) {});
|
||
}
|
||
|
||
std::unique_lock<std::timed_mutex> lock(*fileMutex, std::defer_lock);
|
||
if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
|
||
std::cerr << "Error: Another thread is writing to " << outputVideoPath << std::endl;
|
||
return false;
|
||
}
|
||
|
||
// ---- RAII bag for FFmpeg resources ----
|
||
struct FFState {
|
||
AVFormatContext* fmt_ctx = nullptr;
|
||
AVCodecContext* codec_ctx = nullptr;
|
||
AVFrame* frame = nullptr;
|
||
AVPacket* pkt = nullptr;
|
||
SwsContext* sws = nullptr;
|
||
~FFState() {
|
||
if (sws) { sws_freeContext(sws); sws = nullptr; }
|
||
if (frame) { av_frame_free(&frame); }
|
||
if (pkt) { av_packet_free(&pkt); }
|
||
if (codec_ctx) { avcodec_free_context(&codec_ctx); }
|
||
if (fmt_ctx) {
|
||
if (fmt_ctx->pb && !(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||
avio_closep(&fmt_ctx->pb);
|
||
}
|
||
avformat_free_context(fmt_ctx);
|
||
fmt_ctx = nullptr;
|
||
}
|
||
}
|
||
} ff;
|
||
|
||
auto ffErr = [](int err) -> std::string {
|
||
char buf[AV_ERROR_MAX_STRING_SIZE] = {0};
|
||
av_strerror(err, buf, sizeof(buf));
|
||
return std::string(buf);
|
||
};
|
||
|
||
try {
|
||
// Clamp FPS to [1, 60]
|
||
fps = max(1, min(60, fps));
|
||
|
||
// ---- Collect image files ----
|
||
std::vector<cv::String> imageFiles;
|
||
const std::vector<std::string> extensions = { "*.jpg", "*.jpeg", "*.png", "*.bmp" };
|
||
for (const auto& ext : extensions) {
|
||
std::vector<cv::String> temp;
|
||
cv::glob(imageFolder + "/" + ext, temp, false);
|
||
imageFiles.insert(imageFiles.end(),
|
||
std::make_move_iterator(temp.begin()),
|
||
std::make_move_iterator(temp.end()));
|
||
}
|
||
if (imageFiles.empty()) {
|
||
std::cerr << "Error: No images found in folder: " << imageFolder << std::endl;
|
||
return false;
|
||
}
|
||
std::sort(imageFiles.begin(), imageFiles.end());
|
||
|
||
const int maxFrames = fps * 300;
|
||
if (static_cast<int>(imageFiles.size()) > maxFrames) {
|
||
std::cout << "Warning: Truncating from " << imageFiles.size()
|
||
<< " to " << maxFrames << " images (5-minute limit at "
|
||
<< fps << " FPS)" << std::endl;
|
||
imageFiles.resize(maxFrames);
|
||
}
|
||
const int numImages = static_cast<int>(imageFiles.size());
|
||
|
||
// ---- First image -> dimensions ----
|
||
cv::Mat firstImage = cv::imread(imageFiles[0], cv::IMREAD_COLOR);
|
||
if (firstImage.empty()) {
|
||
std::cerr << "Error: Could not read first image: " << imageFiles[0] << std::endl;
|
||
return false;
|
||
}
|
||
|
||
int videoWidth = firstImage.cols;
|
||
int videoHeight = firstImage.rows;
|
||
bool needsResize = false;
|
||
if (maxWidth > 0 && firstImage.cols > maxWidth) {
|
||
double scale = static_cast<double>(maxWidth) / firstImage.cols;
|
||
videoWidth = static_cast<int>(std::round(firstImage.cols * scale));
|
||
videoHeight = static_cast<int>(std::round(firstImage.rows * scale));
|
||
needsResize = true;
|
||
}
|
||
videoWidth = (videoWidth / 2) * 2;
|
||
videoHeight = (videoHeight / 2) * 2;
|
||
if (videoWidth < 2 || videoHeight < 2) {
|
||
std::cerr << "Error: Resulting video dimensions too small: "
|
||
<< videoWidth << "x" << videoHeight << std::endl;
|
||
return false;
|
||
}
|
||
|
||
std::cout << "[FF-HW Thread " << std::this_thread::get_id() << "] "
|
||
<< "Image: " << firstImage.cols << "x" << firstImage.rows
|
||
<< " -> Video: " << videoWidth << "x" << videoHeight
|
||
<< " | " << numImages << " frames @ " << fps << " FPS"
|
||
<< " (~" << (numImages / fps) << "s)" << std::endl;
|
||
firstImage.release();
|
||
|
||
// Ensure .mp4 extension
|
||
std::string mp4OutputPath = outputVideoPath;
|
||
if (mp4OutputPath.size() < 4 ||
|
||
mp4OutputPath.substr(mp4OutputPath.size() - 4) != ".mp4") {
|
||
mp4OutputPath += ".mp4";
|
||
}
|
||
|
||
int ret = 0;
|
||
|
||
// ---- Allocate output format context (needed before codec-open probe) ----
|
||
ret = avformat_alloc_output_context2(&ff.fmt_ctx, nullptr, "mp4", mp4OutputPath.c_str());
|
||
if (ret < 0 || !ff.fmt_ctx) {
|
||
std::cerr << "[FF-HW] avformat_alloc_output_context2 failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
|
||
// ---- Encoder preference list ----
|
||
struct KV { const char* k; const char* v; };
|
||
struct EncChoice {
|
||
const char* name; // avcodec encoder name
|
||
const char* display; // human-readable
|
||
bool isHEVC; // affects codec_tag for MP4
|
||
AVPixelFormat pixFmt; // NV12 for QSV/AMF, YUV420P else
|
||
int maxBFrames; // 0 for hardware encoders that don't like B-frames
|
||
std::vector<KV> opts;
|
||
};
|
||
|
||
const std::vector<EncChoice> encoders = {
|
||
// ---- HEVC hardware ----
|
||
// NOTE: using LEGACY NVENC preset names (slow/medium/fast/hq/...) not
|
||
// the newer p1..p7 naming, because the latter requires FFmpeg >= 4.4
|
||
// + NVIDIA Video Codec SDK 10.0. Legacy names work on both old and
|
||
// new builds. The newer 'tune' option doesn't exist in older NVENC
|
||
// wrappers either, so we omit it and rely on the preset for quality.
|
||
{ "hevc_nvenc", "NVIDIA HEVC (NVENC)", true, AV_PIX_FMT_YUV420P, 0, {
|
||
{"preset", "slow"}, {"rc", "vbr"}, {"cq", "28"}
|
||
}},
|
||
{ "hevc_qsv", "Intel HEVC (QSV)", true, AV_PIX_FMT_NV12, 0, {
|
||
{"global_quality", "28"}, {"preset", "slower"}
|
||
}},
|
||
{ "hevc_amf", "AMD HEVC (AMF)", true, AV_PIX_FMT_NV12, 0, {
|
||
{"quality", "quality"}, {"rc", "cqp"},
|
||
{"qp_i", "24"}, {"qp_p", "26"}, {"qp_b", "28"}
|
||
}},
|
||
// ---- H.264 hardware ----
|
||
{ "h264_nvenc", "NVIDIA H.264 (NVENC)", false, AV_PIX_FMT_YUV420P, 0, {
|
||
{"preset", "slow"}, {"rc", "vbr"}, {"cq", "24"}
|
||
}},
|
||
{ "h264_qsv", "Intel H.264 (QSV)", false, AV_PIX_FMT_NV12, 0, {
|
||
{"global_quality", "24"}, {"preset", "slower"}
|
||
}},
|
||
{ "h264_amf", "AMD H.264 (AMF)", false, AV_PIX_FMT_NV12, 0, {
|
||
{"quality", "quality"}, {"rc", "cqp"},
|
||
{"qp_i", "22"}, {"qp_p", "24"}, {"qp_b", "26"}
|
||
}},
|
||
// ---- Software fallbacks ----
|
||
{ "libx265", "HEVC/H.265 (libx265)", true, AV_PIX_FMT_YUV420P, 2, {
|
||
{"crf", "28"}, {"preset", "slow"}
|
||
}},
|
||
{ "libx264", "H.264 (libx264)", false, AV_PIX_FMT_YUV420P, 2, {
|
||
{"crf", "26"}, {"preset", "slow"}, {"tune", "stillimage"}
|
||
}},
|
||
{ "mpeg4", "MPEG-4 Part 2 (native FFmpeg)", false, AV_PIX_FMT_YUV420P, 2, {} },
|
||
};
|
||
|
||
const AVCodec* codec = nullptr;
|
||
const EncChoice* chosen = nullptr;
|
||
|
||
// ---- Probe loop: try each encoder until one opens successfully ----
|
||
for (const auto& e : encoders) {
|
||
const AVCodec* c = avcodec_find_encoder_by_name(e.name);
|
||
if (!c) {
|
||
std::cout << "[FF-HW] skip " << e.display
|
||
<< " (not compiled into FFmpeg)" << std::endl;
|
||
continue;
|
||
}
|
||
|
||
AVCodecContext* ctx = avcodec_alloc_context3(c);
|
||
if (!ctx) continue;
|
||
|
||
ctx->width = videoWidth;
|
||
ctx->height = videoHeight;
|
||
ctx->pix_fmt = e.pixFmt;
|
||
ctx->time_base = AVRational{ 1, fps };
|
||
ctx->framerate = AVRational{ fps, 1 };
|
||
ctx->gop_size = fps * 2;
|
||
ctx->max_b_frames = e.maxBFrames;
|
||
|
||
if (e.isHEVC) {
|
||
ctx->codec_tag = MKTAG('h', 'v', 'c', '1');
|
||
}
|
||
if (ff.fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
|
||
ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||
}
|
||
if (std::strcmp(e.name, "mpeg4") == 0) {
|
||
ctx->bit_rate = 1500000; // ~1.5 Mbps fallback
|
||
}
|
||
|
||
AVDictionary* opts = nullptr;
|
||
for (const auto& kv : e.opts) {
|
||
av_dict_set(&opts, kv.k, kv.v, 0);
|
||
}
|
||
|
||
int r = avcodec_open2(ctx, c, &opts);
|
||
av_dict_free(&opts);
|
||
|
||
if (r < 0) {
|
||
std::cout << "[FF-HW] skip " << e.display
|
||
<< " (open failed: " << ffErr(r) << ")" << std::endl;
|
||
avcodec_free_context(&ctx);
|
||
continue;
|
||
}
|
||
|
||
// Success — commit this context to the RAII bag
|
||
ff.codec_ctx = ctx;
|
||
codec = c;
|
||
chosen = &e;
|
||
std::cout << "[FF-HW] Using encoder: " << e.display;
|
||
for (const auto& kv : e.opts) std::cout << " " << kv.k << "=" << kv.v;
|
||
std::cout << std::endl;
|
||
break;
|
||
}
|
||
|
||
if (!ff.codec_ctx) {
|
||
std::cerr << "[FF-HW] Error: no encoder could be opened. "
|
||
"Bundled FFmpeg has neither hardware (NVENC/QSV/AMF) nor "
|
||
"software (libx265/libx264/mpeg4) encoders available." << std::endl;
|
||
return false;
|
||
}
|
||
|
||
// ---- Create output stream, copy codec params ----
|
||
AVStream* stream = avformat_new_stream(ff.fmt_ctx, nullptr);
|
||
if (!stream) {
|
||
std::cerr << "[FF-HW] avformat_new_stream failed" << std::endl;
|
||
return false;
|
||
}
|
||
ret = avcodec_parameters_from_context(stream->codecpar, ff.codec_ctx);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] avcodec_parameters_from_context failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
stream->time_base = ff.codec_ctx->time_base;
|
||
|
||
// ---- Open output file ----
|
||
if (!(ff.fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||
ret = avio_open(&ff.fmt_ctx->pb, mp4OutputPath.c_str(), AVIO_FLAG_WRITE);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] avio_open('" << mp4OutputPath << "') failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
}
|
||
|
||
// ---- Write MP4 header with +faststart ----
|
||
{
|
||
AVDictionary* muxOpts = nullptr;
|
||
av_dict_set(&muxOpts, "movflags", "+faststart", 0);
|
||
ret = avformat_write_header(ff.fmt_ctx, &muxOpts);
|
||
av_dict_free(&muxOpts);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] avformat_write_header failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
}
|
||
|
||
// ---- Allocate AVFrame with chosen pix_fmt + AVPacket ----
|
||
ff.frame = av_frame_alloc();
|
||
ff.pkt = av_packet_alloc();
|
||
if (!ff.frame || !ff.pkt) {
|
||
std::cerr << "[FF-HW] av_frame_alloc / av_packet_alloc failed" << std::endl;
|
||
return false;
|
||
}
|
||
ff.frame->format = chosen->pixFmt;
|
||
ff.frame->width = videoWidth;
|
||
ff.frame->height = videoHeight;
|
||
ret = av_frame_get_buffer(ff.frame, 0);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] av_frame_get_buffer failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
|
||
// ---- BGR24 -> chosen pix_fmt converter ----
|
||
ff.sws = sws_getContext(
|
||
videoWidth, videoHeight, AV_PIX_FMT_BGR24,
|
||
videoWidth, videoHeight, chosen->pixFmt,
|
||
SWS_BILINEAR, nullptr, nullptr, nullptr);
|
||
if (!ff.sws) {
|
||
std::cerr << "[FF-HW] sws_getContext failed" << std::endl;
|
||
return false;
|
||
}
|
||
|
||
// ---- Drain helper ----
|
||
auto drainPackets = [&]() -> bool {
|
||
for (;;) {
|
||
int r = avcodec_receive_packet(ff.codec_ctx, ff.pkt);
|
||
if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) return true;
|
||
if (r < 0) {
|
||
std::cerr << "[FF-HW] avcodec_receive_packet failed: " << ffErr(r) << std::endl;
|
||
return false;
|
||
}
|
||
av_packet_rescale_ts(ff.pkt, ff.codec_ctx->time_base, stream->time_base);
|
||
ff.pkt->stream_index = stream->index;
|
||
r = av_interleaved_write_frame(ff.fmt_ctx, ff.pkt);
|
||
av_packet_unref(ff.pkt);
|
||
if (r < 0) {
|
||
std::cerr << "[FF-HW] av_interleaved_write_frame failed: " << ffErr(r) << std::endl;
|
||
return false;
|
||
}
|
||
}
|
||
};
|
||
|
||
// ---- Encoding loop ----
|
||
cv::Mat img;
|
||
cv::Mat resizedImg;
|
||
int64_t framesEncoded = 0;
|
||
|
||
for (int i = 0; i < numImages; ++i) {
|
||
img = cv::imread(imageFiles[i], cv::IMREAD_COLOR);
|
||
if (img.empty()) {
|
||
std::cerr << "Warning: Could not read: " << imageFiles[i] << std::endl;
|
||
continue;
|
||
}
|
||
|
||
cv::Mat* src = &img;
|
||
if (needsResize || img.cols != videoWidth || img.rows != videoHeight) {
|
||
cv::resize(img, resizedImg, cv::Size(videoWidth, videoHeight),
|
||
0, 0, cv::INTER_AREA);
|
||
src = &resizedImg;
|
||
}
|
||
|
||
ret = av_frame_make_writable(ff.frame);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] av_frame_make_writable failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
|
||
const uint8_t* srcSlices[4] = { src->data, nullptr, nullptr, nullptr };
|
||
int srcStride[4] = { static_cast<int>(src->step[0]), 0, 0, 0 };
|
||
sws_scale(ff.sws, srcSlices, srcStride, 0, videoHeight,
|
||
ff.frame->data, ff.frame->linesize);
|
||
|
||
ff.frame->pts = framesEncoded;
|
||
|
||
ret = avcodec_send_frame(ff.codec_ctx, ff.frame);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] avcodec_send_frame failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
if (!drainPackets()) return false;
|
||
|
||
framesEncoded++;
|
||
img.release();
|
||
}
|
||
|
||
// ---- Flush encoder ----
|
||
ret = avcodec_send_frame(ff.codec_ctx, nullptr);
|
||
if (ret < 0 && ret != AVERROR_EOF) {
|
||
std::cerr << "[FF-HW] flush send_frame failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
if (!drainPackets()) return false;
|
||
|
||
// ---- Write trailer ----
|
||
ret = av_write_trailer(ff.fmt_ctx);
|
||
if (ret < 0) {
|
||
std::cerr << "[FF-HW] av_write_trailer failed: " << ffErr(ret) << std::endl;
|
||
return false;
|
||
}
|
||
|
||
std::cout << "[FF-HW] Video created: " << mp4OutputPath
|
||
<< " (" << framesEncoded << " frames, "
|
||
<< fps << " FPS, ~" << (framesEncoded / fps) << "s)"
|
||
<< " via " << chosen->display << std::endl;
|
||
|
||
return true;
|
||
}
|
||
catch (const cv::Exception& e) {
|
||
std::cerr << "[FF-HW] OpenCV exception: " << e.what() << std::endl;
|
||
return false;
|
||
}
|
||
catch (const std::exception& e) {
|
||
std::cerr << "[FF-HW] Exception: " << e.what() << std::endl;
|
||
return false;
|
||
}
|
||
}
|
||
|
||
//bool ANSOPENCV::ImagesToMP4(const std::string& imageFolder, const std::string& outputVideoPath, int targetDurationSec) {
|
||
// std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
|
||
// if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
|
||
// std::cerr << "Error: Mutex timeout in ImagesToMP4!" << std::endl;
|
||
// return -6;
|
||
// }
|
||
// cv::VideoWriter videoWriter;
|
||
|
||
// try {
|
||
// // Collect all image files efficiently
|
||
// std::vector<cv::String> imageFiles;
|
||
// const std::vector<std::string> extensions = { "*.jpg", "*.jpeg", "*.png", "*.bmp" };
|
||
|
||
// for (const auto& ext : extensions) {
|
||
// std::vector<cv::String> temp;
|
||
// cv::glob(imageFolder + "/" + ext, temp, false);
|
||
// imageFiles.insert(imageFiles.end(), temp.begin(), temp.end());
|
||
// }
|
||
|
||
// if (imageFiles.empty()) {
|
||
// std::cerr << "Error: No images found in folder: " << imageFolder << std::endl;
|
||
// return false;
|
||
// }
|
||
|
||
// // Sort for consistent ordering
|
||
// std::sort(imageFiles.begin(), imageFiles.end());
|
||
|
||
// // Read first image to determine dimensions
|
||
// cv::Mat firstImage = cv::imread(imageFiles[0]);
|
||
// if (firstImage.empty()) {
|
||
// std::cerr << "Error: Could not read first image: " << imageFiles[0] << std::endl;
|
||
// return false;
|
||
// }
|
||
|
||
// // Target video dimensions (1920x1080)
|
||
// const int targetWidth = 1920;
|
||
// const int targetHeight = 1080;
|
||
|
||
// // Calculate scaling to fit within bounds while maintaining aspect ratio
|
||
// const double scaleX = static_cast<double>(targetWidth) / firstImage.cols;
|
||
// const double scaleY = static_cast<double>(targetHeight) / firstImage.rows;
|
||
// const double scale = min(scaleX, scaleY);
|
||
|
||
// // Calculate scaled dimensions (ensure even for H.264)
|
||
// int scaledWidth = static_cast<int>(std::round(firstImage.cols * scale));
|
||
// int scaledHeight = static_cast<int>(std::round(firstImage.rows * scale));
|
||
|
||
// // Make dimensions even (required for H.264)
|
||
// scaledWidth = (scaledWidth / 2) * 2;
|
||
// scaledHeight = (scaledHeight / 2) * 2;
|
||
|
||
// // Calculate centering padding
|
||
// const int padLeft = (targetWidth - scaledWidth) / 2;
|
||
// const int padTop = (targetHeight - scaledHeight) / 2;
|
||
|
||
// std::cout << "Original image size: " << firstImage.cols << "x" << firstImage.rows << std::endl;
|
||
// std::cout << "Scaled image size: " << scaledWidth << "x" << scaledHeight << std::endl;
|
||
// std::cout << "Final video size: " << targetWidth << "x" << targetHeight << std::endl;
|
||
// std::cout << "Padding: left=" << padLeft << ", top=" << padTop << std::endl;
|
||
|
||
// // Release firstImage to free memory
|
||
// firstImage.release();
|
||
|
||
// // Video parameters
|
||
// const int targetFPS = 25;
|
||
// const int videoDurationSec = max(3, targetDurationSec);
|
||
// const int totalFrames = videoDurationSec * targetFPS;
|
||
|
||
// // Ensure .mp4 extension
|
||
// std::string mp4OutputPath = outputVideoPath;
|
||
// if (mp4OutputPath.size() < 4 ||
|
||
// mp4OutputPath.substr(mp4OutputPath.size() - 4) != ".mp4") {
|
||
// mp4OutputPath += ".mp4";
|
||
// }
|
||
|
||
// // Try codecs in order of preference
|
||
// const std::vector<std::tuple<std::string, int>> codecs = {
|
||
// {"x264 (best for web)", cv::VideoWriter::fourcc('x', '2', '6', '4')},
|
||
// {"H264 (good for web)", cv::VideoWriter::fourcc('H', '2', '6', '4')},
|
||
// {"MP4V (web compatible)", cv::VideoWriter::fourcc('M', 'P', '4', 'V')},
|
||
// {"MJPG (large files)", cv::VideoWriter::fourcc('M', 'J', 'P', 'G')}
|
||
// };
|
||
|
||
// bool codecFound = false;
|
||
// for (const auto& [name, fourcc] : codecs) {
|
||
// videoWriter.open(mp4OutputPath, fourcc, targetFPS,
|
||
// cv::Size(targetWidth, targetHeight), true);
|
||
// if (videoWriter.isOpened()) {
|
||
// std::cout << "Using codec: " << name << std::endl;
|
||
// codecFound = true;
|
||
// break;
|
||
// }
|
||
// }
|
||
|
||
// if (!codecFound) {
|
||
// videoWriter.release(); // Ensure cleanup
|
||
// std::cerr << "Error: Could not open video writer with any codec!" << std::endl;
|
||
// std::cerr << "Install OpenCV with FFMPEG support for H.264 encoding." << std::endl;
|
||
// return false;
|
||
// }
|
||
|
||
// const int numImages = static_cast<int>(imageFiles.size());
|
||
|
||
// // Pre-create black canvas and ROI (reuse for all frames)
|
||
// cv::Mat canvas = cv::Mat::zeros(targetHeight, targetWidth, CV_8UC3);
|
||
// const cv::Rect roi(padLeft, padTop, scaledWidth, scaledHeight);
|
||
|
||
// // Pre-allocate matrices for reuse (reduces allocations)
|
||
// cv::Mat img;
|
||
// cv::Mat resizedImg;
|
||
|
||
// if (numImages <= totalFrames) {
|
||
// // Case 1: Few images - each shown for multiple frames
|
||
// const int framesPerImage = totalFrames / numImages;
|
||
// const int remainingFrames = totalFrames % numImages;
|
||
|
||
// for (int i = 0; i < numImages; ++i) {
|
||
// img = cv::imread(imageFiles[i]);
|
||
// if (img.empty()) {
|
||
// std::cerr << "Warning: Could not read image: " << imageFiles[i] << std::endl;
|
||
// continue;
|
||
// }
|
||
|
||
// // Reset canvas to black
|
||
// canvas.setTo(cv::Scalar(0, 0, 0));
|
||
|
||
// // Resize and place on canvas
|
||
// cv::resize(img, resizedImg, cv::Size(scaledWidth, scaledHeight),
|
||
// 0, 0, cv::INTER_AREA);
|
||
// resizedImg.copyTo(canvas(roi));
|
||
|
||
// // Release img to free memory immediately
|
||
// img.release();
|
||
|
||
// // Calculate frames for this image
|
||
// int framesToWrite = framesPerImage + (i < remainingFrames ? 1 : 0);
|
||
|
||
// // Write frames
|
||
// for (int j = 0; j < framesToWrite; ++j) {
|
||
// videoWriter.write(canvas);
|
||
// }
|
||
// }
|
||
// }
|
||
// else {
|
||
// // Case 2: Many images - sample to fit total frames
|
||
// for (int frame = 0; frame < totalFrames; ++frame) {
|
||
// // Calculate which image to use
|
||
// const double imageIndex = (static_cast<double>(frame) * (numImages - 1)) /
|
||
// (totalFrames - 1);
|
||
// const int imageIdx = static_cast<int>(std::round(imageIndex));
|
||
|
||
// img = cv::imread(imageFiles[imageIdx]);
|
||
// if (img.empty()) {
|
||
// std::cerr << "Warning: Could not read image: " << imageFiles[imageIdx] << std::endl;
|
||
// continue;
|
||
// }
|
||
|
||
// // Reset canvas to black
|
||
// canvas.setTo(cv::Scalar(0, 0, 0));
|
||
|
||
// // Resize and place on canvas
|
||
// cv::resize(img, resizedImg, cv::Size(scaledWidth, scaledHeight),
|
||
// 0, 0, cv::INTER_AREA);
|
||
// resizedImg.copyTo(canvas(roi));
|
||
|
||
// // Release img to free memory immediately
|
||
// img.release();
|
||
|
||
// videoWriter.write(canvas);
|
||
// }
|
||
// }
|
||
|
||
// // Explicit cleanup
|
||
// canvas.release();
|
||
// resizedImg.release();
|
||
// videoWriter.release();
|
||
|
||
// std::cout << "Video created successfully: " << mp4OutputPath << std::endl;
|
||
// std::cout << "Dimensions: " << targetWidth << "x" << targetHeight << std::endl;
|
||
// std::cout << "Frame rate: " << targetFPS << " fps" << std::endl;
|
||
// std::cout << "Duration: " << videoDurationSec << " seconds" << std::endl;
|
||
// std::cout << "Total frames: " << totalFrames << std::endl;
|
||
// std::cout << "Images used: " << numImages << std::endl;
|
||
|
||
// return true;
|
||
// }
|
||
// catch (const cv::Exception& e) {
|
||
// if (videoWriter.isOpened()) {
|
||
// videoWriter.release();
|
||
// }
|
||
// std::cerr << "OpenCV exception in ImagesToMP4: " << e.what() << std::endl;
|
||
// return false;
|
||
// }
|
||
// catch (const std::exception& e) {
|
||
// if (videoWriter.isOpened()) {
|
||
// videoWriter.release();
|
||
// }
|
||
// std::cerr << "Exception in ImagesToMP4: " << e.what() << std::endl;
|
||
// return false;
|
||
// }
|
||
//}
|
||
// bool ANSOPENCV::ImagesToMP4(const std::string& imageFolder, const std::string& outputVideoPath, int targetDurationSec) {
|
||
// try {
|
||
// // Collect all image files efficiently
|
||
// std::vector<cv::String> imageFiles;
|
||
// const std::vector<std::string> extensions = { "*.jpg", "*.jpeg", "*.png", "*.bmp" };
|
||
//
|
||
// for (const auto& ext : extensions) {
|
||
// std::vector<cv::String> temp;
|
||
// cv::glob(imageFolder + "/" + ext, temp, false);
|
||
// imageFiles.insert(imageFiles.end(), temp.begin(), temp.end());
|
||
// }
|
||
//
|
||
// if (imageFiles.empty()) {
|
||
// std::cerr << "Error: No images found in folder: " << imageFolder << std::endl;
|
||
// return false;
|
||
// }
|
||
//
|
||
// // Sort for consistent ordering
|
||
// std::sort(imageFiles.begin(), imageFiles.end());
|
||
//
|
||
// // Read first image to determine dimensions
|
||
// cv::Mat firstImage = cv::imread(imageFiles[0]);
|
||
// if (firstImage.empty()) {
|
||
// std::cerr << "Error: Could not read first image: " << imageFiles[0] << std::endl;
|
||
// return false;
|
||
// }
|
||
//
|
||
// // Target video dimensions (1920x1080)
|
||
// const int targetWidth = 1920;
|
||
// const int targetHeight = 1080;
|
||
//
|
||
// // Calculate scaling to fit within bounds while maintaining aspect ratio
|
||
// const double scaleX = static_cast<double>(targetWidth) / firstImage.cols;
|
||
// const double scaleY = static_cast<double>(targetHeight) / firstImage.rows;
|
||
// const double scale = min(scaleX, scaleY);
|
||
//
|
||
// // Calculate scaled dimensions (ensure even for H.264)
|
||
// int scaledWidth = static_cast<int>(std::round(firstImage.cols * scale));
|
||
// int scaledHeight = static_cast<int>(std::round(firstImage.rows * scale));
|
||
//
|
||
// // Make dimensions even (required for H.264)
|
||
// scaledWidth = (scaledWidth / 2) * 2;
|
||
// scaledHeight = (scaledHeight / 2) * 2;
|
||
//
|
||
// // Calculate centering padding
|
||
// const int padLeft = (targetWidth - scaledWidth) / 2;
|
||
// const int padTop = (targetHeight - scaledHeight) / 2;
|
||
//
|
||
// std::cout << "Original image size: " << firstImage.cols << "x" << firstImage.rows << std::endl;
|
||
// std::cout << "Scaled image size: " << scaledWidth << "x" << scaledHeight << std::endl;
|
||
// std::cout << "Final video size: " << targetWidth << "x" << targetHeight << std::endl;
|
||
// std::cout << "Padding: left=" << padLeft << ", top=" << padTop << std::endl;
|
||
//
|
||
// // Video parameters
|
||
// const int targetFPS = 25;
|
||
// const int videoDurationSec = max(3, targetDurationSec);
|
||
// const int totalFrames = videoDurationSec * targetFPS;
|
||
//
|
||
// // Ensure .mp4 extension
|
||
// std::string mp4OutputPath = outputVideoPath;
|
||
// if (mp4OutputPath.size() < 4 ||
|
||
// mp4OutputPath.substr(mp4OutputPath.size() - 4) != ".mp4") {
|
||
// mp4OutputPath += ".mp4";
|
||
// }
|
||
//
|
||
// // Try codecs in order of preference
|
||
// cv::VideoWriter videoWriter;
|
||
// const std::vector<std::tuple<std::string, int>> codecs = {
|
||
// {"x264 (best for web)", cv::VideoWriter::fourcc('x', '2', '6', '4')},
|
||
// {"H264 (good for web)", cv::VideoWriter::fourcc('H', '2', '6', '4')},
|
||
// {"MP4V (web compatible)", cv::VideoWriter::fourcc('M', 'P', '4', 'V')},
|
||
// {"MJPG (large files)", cv::VideoWriter::fourcc('M', 'J', 'P', 'G')}
|
||
// };
|
||
//
|
||
// bool codecFound = false;
|
||
// for (const auto& [name, fourcc] : codecs) {
|
||
// videoWriter.open(mp4OutputPath, fourcc, targetFPS,
|
||
// cv::Size(targetWidth, targetHeight), true);
|
||
// if (videoWriter.isOpened()) {
|
||
// std::cout << "Using codec: " << name << std::endl;
|
||
// codecFound = true;
|
||
// break;
|
||
// }
|
||
// }
|
||
//
|
||
// if (!codecFound) {
|
||
// std::cerr << "Error: Could not open video writer with any codec!" << std::endl;
|
||
// std::cerr << "Install OpenCV with FFMPEG support for H.264 encoding." << std::endl;
|
||
// return false;
|
||
// }
|
||
//
|
||
// const int numImages = static_cast<int>(imageFiles.size());
|
||
//
|
||
// // Pre-create black canvas and ROI (reuse for all frames)
|
||
// cv::Mat canvas = cv::Mat::zeros(targetHeight, targetWidth, CV_8UC3);
|
||
// const cv::Rect roi(padLeft, padTop, scaledWidth, scaledHeight);
|
||
//
|
||
// if (numImages <= totalFrames) {
|
||
// // Case 1: Few images - each shown for multiple frames
|
||
// const int framesPerImage = totalFrames / numImages;
|
||
// const int remainingFrames = totalFrames % numImages;
|
||
//
|
||
// for (int i = 0; i < numImages; ++i) {
|
||
// cv::Mat img = cv::imread(imageFiles[i]);
|
||
// if (img.empty()) {
|
||
// std::cerr << "Warning: Could not read image: " << imageFiles[i] << std::endl;
|
||
// continue;
|
||
// }
|
||
//
|
||
// // Reset canvas to black
|
||
// canvas.setTo(cv::Scalar(0, 0, 0));
|
||
//
|
||
// // Resize and place on canvas
|
||
// cv::Mat resizedImg;
|
||
// cv::resize(img, resizedImg, cv::Size(scaledWidth, scaledHeight),
|
||
// 0, 0, cv::INTER_AREA);
|
||
// resizedImg.copyTo(canvas(roi));
|
||
//
|
||
// // Calculate frames for this image
|
||
// int framesToWrite = framesPerImage + (i < remainingFrames ? 1 : 0);
|
||
//
|
||
// // Write frames
|
||
// for (int j = 0; j < framesToWrite; ++j) {
|
||
// videoWriter.write(canvas);
|
||
// }
|
||
// }
|
||
// }
|
||
// else {
|
||
// // Case 2: Many images - sample to fit total frames
|
||
// for (int frame = 0; frame < totalFrames; ++frame) {
|
||
// // Calculate which image to use
|
||
// const double imageIndex = (static_cast<double>(frame) * (numImages - 1)) /
|
||
// (totalFrames - 1);
|
||
// const int imageIdx = static_cast<int>(std::round(imageIndex));
|
||
//
|
||
// cv::Mat img = cv::imread(imageFiles[imageIdx]);
|
||
// if (img.empty()) {
|
||
// std::cerr << "Warning: Could not read image: " << imageFiles[imageIdx] << std::endl;
|
||
// continue;
|
||
// }
|
||
//
|
||
// // Reset canvas to black
|
||
// canvas.setTo(cv::Scalar(0, 0, 0));
|
||
//
|
||
// // Resize and place on canvas
|
||
// cv::Mat resizedImg;
|
||
// cv::resize(img, resizedImg, cv::Size(scaledWidth, scaledHeight),
|
||
// 0, 0, cv::INTER_AREA);
|
||
// resizedImg.copyTo(canvas(roi));
|
||
//
|
||
// videoWriter.write(canvas);
|
||
// }
|
||
// }
|
||
//
|
||
// videoWriter.release();
|
||
//
|
||
// std::cout << "Video created successfully: " << mp4OutputPath << std::endl;
|
||
// std::cout << "Dimensions: " << targetWidth << "x" << targetHeight << std::endl;
|
||
// std::cout << "Frame rate: " << targetFPS << " fps" << std::endl;
|
||
// std::cout << "Duration: " << videoDurationSec << " seconds" << std::endl;
|
||
// std::cout << "Total frames: " << totalFrames << std::endl;
|
||
// std::cout << "Images used: " << numImages << std::endl;
|
||
//
|
||
// return true;
|
||
// }
|
||
// catch (const cv::Exception& e) {
|
||
// std::cerr << "OpenCV exception in ImagesToMP4: " << e.what() << std::endl;
|
||
// return false;
|
||
// }
|
||
// catch (const std::exception& e) {
|
||
// std::cerr << "Exception in ImagesToMP4: " << e.what() << std::endl;
|
||
// return false;
|
||
// }
|
||
//}
|
||
}
|
||
extern "C" __declspec(dllexport) int CreateANSCVHandle(ANSCENTER::ANSOPENCV** Handle, const char* licenseKey) {
    // Factory entry point for C/LabVIEW callers: allocates an ANSOPENCV
    // instance, initialises it with the supplied license key, and transfers
    // ownership to the caller through *Handle.
    // Returns 1 on success, 0 when Init() rejects the key, -1 on bad
    // arguments or an unexpected exception.
    if (Handle == nullptr || licenseKey == nullptr) {
        return -1;
    }
    try {
        auto instance = std::make_unique<ANSCENTER::ANSOPENCV>();
        if (!instance->Init(licenseKey)) {
            // Initialisation failed: make sure the caller sees a null handle.
            *Handle = nullptr;
            return 0;
        }
        // Hand the raw pointer to the caller; ReleaseANSCVHandle reclaims it.
        *Handle = instance.release();
        return 1;
    }
    catch (...) {
        return -1;
    }
}
|
||
extern "C" __declspec(dllexport) int ReleaseANSCVHandle(ANSCENTER::ANSOPENCV** Handle) {
    // Destroys an instance previously produced by CreateANSCVHandle and
    // nulls the caller's pointer so double-release is harmless.
    // Returns 0 on success, -1 when the handle is already null or the
    // destructor throws.
    if (!Handle || !*Handle) {
        return -1;
    }
    try {
        // Adopt the raw pointer into a unique_ptr so the destructor runs
        // exactly once, even if an exception escapes below.
        std::unique_ptr<ANSCENTER::ANSOPENCV> owner{*Handle};
        *Handle = nullptr;
        return 0;
    }
    catch (...) {
        if (Handle) {
            *Handle = nullptr;
        }
        return -1;
    }
}
|
||
extern "C" __declspec(dllexport) int ANSCV_ImageResize(ANSCENTER::ANSOPENCV** Handle,unsigned char* inputImage,unsigned int bufferLength,int width,int height,LStrHandle outputImage)
{
    // C ABI wrapper around ANSOPENCV::ImageResize for LabVIEW callers.
    // Decodes the compressed input buffer, resizes it to width x height,
    // re-encodes via MatToBinaryData, and copies the bytes into the
    // LabVIEW string handle. Returns 1 on success, 0 on any failure.
    if (!Handle || !*Handle) {
        std::cerr << "Error: Invalid Handle in ANSCV_ImageResize" << std::endl;
        return 0;
    }
    if (!inputImage || bufferLength == 0) {
        std::cerr << "Error: Invalid input image in ANSCV_ImageResize" << std::endl;
        return 0;
    }
    if (!outputImage) {
        std::cerr << "Error: Invalid output handle in ANSCV_ImageResize" << std::endl;
        return 0;
    }
    if (width <= 0 || height <= 0) {
        std::cerr << "Error: Invalid dimensions in ANSCV_ImageResize" << std::endl;
        return 0;
    }

    try {
        // Wrap the caller's buffer without copying; imdecode produces its
        // own decoded Mat, so the wrapper may die immediately afterwards.
        const cv::Mat encoded(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
        if (decoded.empty()) {
            std::cerr << "Error: Failed to decode image in ANSCV_ImageResize" << std::endl;
            return 0;
        }

        // Delegate the actual resize to the handle's implementation.
        cv::Mat resized;
        (*Handle)->ImageResize(decoded, width, height, resized);
        if (resized.empty()) {
            std::cerr << "Error: ImageResize failed in ANSCV_ImageResize" << std::endl;
            return 0;
        }

        // Serialize the resized frame; Mats clean themselves up via RAII.
        const std::string payload = (*Handle)->MatToBinaryData(resized);
        const int byteCount = static_cast<int>(payload.length());
        if (byteCount <= 0) {
            std::cerr << "Error: MatToBinaryData returned empty in ANSCV_ImageResize" << std::endl;
            return 0;
        }

        // Grow the LabVIEW handle to hold the length prefix plus payload.
        const MgErr status = DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar));
        if (status != noErr) {
            std::cerr << "Error: DSSetHandleSize failed in ANSCV_ImageResize (error code: "
                << status << ")" << std::endl;
            return 0;
        }

        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, payload.c_str(), byteCount);
        return 1; // Success
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageResize: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageResize: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageResize" << std::endl;
        return 0;
    }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_ImageResizeWithRatio(ANSCENTER::ANSOPENCV** Handle,unsigned char* inputImage,unsigned int bufferLength,int width,LStrHandle outputImage)
{
    // C ABI wrapper around ANSOPENCV::ImageResizeWithRatio for LabVIEW
    // callers. Decodes the compressed input buffer, resizes it to the given
    // width while the implementation preserves the aspect ratio, and copies
    // the re-encoded bytes into the LabVIEW string handle.
    // Returns 1 on success, 0 on any failure.
    if (!Handle || !*Handle) {
        std::cerr << "Error: Invalid Handle in ANSCV_ImageResizeWithRatio" << std::endl;
        return 0;
    }
    if (!inputImage || bufferLength == 0) {
        std::cerr << "Error: Invalid input image in ANSCV_ImageResizeWithRatio" << std::endl;
        return 0;
    }
    if (!outputImage) {
        std::cerr << "Error: Invalid output handle in ANSCV_ImageResizeWithRatio" << std::endl;
        return 0;
    }
    if (width <= 0) {
        std::cerr << "Error: Invalid width in ANSCV_ImageResizeWithRatio" << std::endl;
        return 0;
    }

    try {
        // Wrap the caller's buffer without copying; imdecode produces its
        // own decoded Mat.
        const cv::Mat encoded(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
        if (decoded.empty()) {
            std::cerr << "Error: Failed to decode image in ANSCV_ImageResizeWithRatio" << std::endl;
            return 0;
        }

        // Delegate the aspect-preserving resize to the handle's implementation.
        cv::Mat resized;
        (*Handle)->ImageResizeWithRatio(decoded, width, resized);
        if (resized.empty()) {
            std::cerr << "Error: ImageResizeWithRatio failed" << std::endl;
            return 0;
        }

        // Serialize the resized frame; Mats clean themselves up via RAII.
        const std::string payload = (*Handle)->MatToBinaryData(resized);
        const int byteCount = static_cast<int>(payload.length());
        if (byteCount <= 0) {
            std::cerr << "Error: MatToBinaryData returned empty" << std::endl;
            return 0;
        }

        // Grow the LabVIEW handle to hold the length prefix plus payload.
        const MgErr status = DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar));
        if (status != noErr) {
            std::cerr << "Error: DSSetHandleSize failed (error code: " << status << ")" << std::endl;
            return 0;
        }

        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, payload.c_str(), byteCount);
        return 1; // Success
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageResizeWithRatio: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageResizeWithRatio: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageResizeWithRatio" << std::endl;
        return 0;
    }
}
|
||
// Decode the compressed buffer and return its Base64 representation through
// the LabVIEW string handle. Returns 1 on success, 0 on any failure.
extern "C" __declspec(dllexport) int ANSCV_ImageToBase64(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage)
{
    // Guard clauses: reject bad arguments before touching any of them.
    if (!Handle || !*Handle) {
        std::cerr << "Error: Invalid Handle in ANSCV_ImageToBase64" << std::endl;
        return 0;
    }
    if (!inputImage || bufferLength == 0) {
        std::cerr << "Error: Invalid input image in ANSCV_ImageToBase64" << std::endl;
        return 0;
    }
    if (!outputImage) {
        std::cerr << "Error: Invalid output handle in ANSCV_ImageToBase64" << std::endl;
        return 0;
    }

    try {
        // Wrap the caller's buffer without copying and decode it as BGR.
        const cv::Mat wrapped(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(wrapped, cv::IMREAD_COLOR);
        if (decoded.empty()) {
            std::cerr << "Error: Failed to decode image in ANSCV_ImageToBase64" << std::endl;
            return 0;
        }

        // Base64-encode via the ANSOPENCV instance.
        const std::string encoded = (*Handle)->MatToBase64(decoded);
        const int byteCount = static_cast<int>(encoded.length());
        if (byteCount <= 0) {
            std::cerr << "Error: MatToBase64 returned empty string" << std::endl;
            return 0;
        }

        // Grow the LabVIEW string handle to fit the text, then copy it in.
        const MgErr err = DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar));
        if (err != noErr) {
            std::cerr << "Error: DSSetHandleSize failed in ANSCV_ImageToBase64 (error code: "
                << err << ")" << std::endl;
            return 0;
        }
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, encoded.c_str(), byteCount);
        return 1; // Success
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageToBase64: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageToBase64: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageToBase64" << std::endl;
        return 0;
    }
}
|
||
// Decodes the compressed buffer, converts it to grayscale, and returns the
// re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageToGray(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage)
{
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked
    // (UB in the cv::Mat wrapper and in DSSetHandleSize).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ToGray(inputFrame);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        // (Manual Mat::release() calls removed: cv::Mat is RAII.)
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, denoises it via ANSOPENCV::ImageDenoise,
// and returns the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageDenoise(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ImageDenoise(inputFrame);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, repairs it via ANSOPENCV::ImageRepair,
// and returns the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageRepair(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ImageRepair(inputFrame);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, applies automatic white balance, and returns
// the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageAutoWhiteBalance(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ImageWhiteBalance(inputFrame);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, brightens it via ImageDarkEnhancement with
// the given scale factor, and returns the re-encoded result through the
// LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageBrightEnhance(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, double brightnessScaleFactor, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ImageDarkEnhancement(inputFrame, brightnessScaleFactor);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, enhances contrast, and returns the
// re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageContrastEnhance(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ImageContrastEnhancement(inputFrame);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, blurs the regions listed in `strBboxes`
// (parsed by GetBoundingBoxes), and returns the re-encoded result through
// the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_BlurObjects(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, const char* strBboxes, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously null inputImage/strBboxes/outputImage were used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || strBboxes == nullptr || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        std::vector<cv::Rect> objects = (*Handle)->GetBoundingBoxes(strBboxes);
        cv::Mat outputFrame = (*Handle)->BlurObjects(inputFrame, objects);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, blurs everything outside the regions listed
// in `strBboxes`, and returns the re-encoded result through the LabVIEW
// string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_BlurBackground(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, const char* strBboxes, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously null inputImage/strBboxes/outputImage were used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || strBboxes == nullptr || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        std::vector<cv::Rect> objects = (*Handle)->GetBoundingBoxes(strBboxes);
        cv::Mat outputFrame = (*Handle)->BlurBackground(inputFrame, objects);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Grow the LabVIEW handle to fit the payload, then copy it in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer and runs QR decoding on it; the decoded text
// is returned through the LabVIEW string handle.
// Returns 1 on success, 0 on failure or when no QR text was found,
// -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_QRDecoder(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle detectedQRText) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/detectedQRText was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || detectedQRText == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        std::string st = (*Handle)->QRDecoder(inputFrame);
        const int size = static_cast<int>(st.length());
        // An empty result (no QR found) is reported as failure (0), as before.
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(detectedQRText, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectedQRText)->cnt = size;
        memcpy((*detectedQRText)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Runs QR decoding directly on an in-memory cv::Mat and writes the result
// into `detectedQRText`.
// NOTE: despite the extern "C" linkage, this signature uses C++ reference
// types, so it is only callable from C++ code with a matching ABI.
// Returns 1 on success, 0 when the image is empty, -1 on bad handle/exception.
extern "C" __declspec(dllexport) int ANSCV_QRDecoderCV(ANSCENTER::ANSOPENCV** Handle, const cv::Mat& image, std::string& detectedQRText) {
    if (!Handle || !*Handle) return -1;
    if (image.empty()) return 0; // nothing to decode
    try {
        detectedQRText = (*Handle)->QRDecoder(image);
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error in ANSCV_QRDecoderCV: " << e.what() << std::endl;
        return -1;
    }
    catch (...) {
        return -1;
    }
}
|
||
// Decodes the compressed buffer, loads a template image from disk, runs
// pattern matching at the given threshold, and returns the serialized match
// locations through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_PatternMatchs(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, const char* templateFilePath, double threshold, LStrHandle detectedMatchedLocations) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously null arguments were used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || templateFilePath == nullptr || detectedMatchedLocations == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat templateImage = cv::imread(templateFilePath, cv::IMREAD_COLOR);
        // Fix: a missing/unreadable template file yields an empty Mat; fail
        // explicitly instead of feeding it to the matcher.
        if (templateImage.empty()) {
            std::cerr << "Error: Failed to load template image: " << templateFilePath << std::endl;
            return 0;
        }
        std::string st = (*Handle)->PatternMatches(inputFrame, templateImage, threshold);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(detectedMatchedLocations, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectedMatchedLocations)->cnt = size;
        memcpy((*detectedMatchedLocations)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, crops it to the rectangle (x, y, width,
// height) via ANSOPENCV::ImageCrop, and returns the re-encoded result
// through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ImageCrop(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, int x, int y, int width, int height, LStrHandle outputImage)
{
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB),
    // and non-positive crop dimensions were not rejected.
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    if (width <= 0 || height <= 0) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        // Out-of-bounds ROI handling is delegated to the project's ImageCrop
        // implementation, as before.
        cv::Rect roi(x, y, width, height);
        cv::Mat outputFrame = (*Handle)->ImageCrop(inputFrame, roi);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Reports the decoded image dimensions as "WxH" (e.g. "640x480") through the
// LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_GetImageSize(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, LStrHandle imageSize) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/imageSize was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || imageSize == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // An undecodable buffer yields an empty Mat and is reported as "0x0",
        // matching ANSCV_GetImageSizeFromImageFile's failure behaviour.
        const std::string st = std::to_string(inputFrame.cols) + "x" + std::to_string(inputFrame.rows);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(imageSize, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*imageSize)->cnt = size;
        memcpy((*imageSize)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Reports the frame dimensions of a file on disk as "WxH" through the
// LabVIEW string handle; "0x0" when the file cannot be opened.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_GetImageSizeFromImageFile(ANSCENTER::ANSOPENCV** Handle, const char* imageFilePath, LStrHandle imageSize) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null path/handle was passed straight to cv::VideoCapture
    // and DSSetHandleSize.
    if (imageFilePath == nullptr || imageSize == nullptr) return 0;
    try {
        // NOTE(review): VideoCapture is used to probe dimensions; cv::imread
        // would be the conventional choice for still images — confirm callers
        // also pass video files before changing this.
        cv::VideoCapture cap(imageFilePath);
        int width = 0;
        int height = 0;
        if (!cap.isOpened()) {
            std::cerr << "Error opening file: " << imageFilePath << std::endl;
        }
        else {
            width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
            height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
        }
        cap.release();
        const std::string st = std::to_string(width) + "x" + std::to_string(height);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(imageSize, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*imageSize)->cnt = size;
        memcpy((*imageSize)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, rotates it by `angle` (degrees, per
// ANSOPENCV::RotateImage), and returns the re-encoded result through the
// LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_RotateImage(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, double angle, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->RotateImage(inputFrame, angle);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, flips it (flipCode semantics per
// ANSOPENCV::FlipImage), and returns the re-encoded result through the
// LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_FlipImage(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, int flipCode, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->FlipImage(inputFrame, flipCode);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, translates it by (shiftX, shiftY) pixels,
// and returns the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_ShiftImage(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, int shiftX, int shiftY, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->ShiftImage(inputFrame, shiftX, shiftY);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, adds Gaussian noise (mean/stddev), and
// returns the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_AddGaussianNoise(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, double mean, double stddev, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->AddGaussianNoise(inputFrame, mean, stddev);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, adds salt-and-pepper noise (`amount`
// semantics per ANSOPENCV::AddSaltAndPepperNoise), and returns the
// re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_AddSaltAndPepperNoise(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, double amount, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->AddSaltAndPepperNoise(inputFrame, amount);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
// Decodes the compressed buffer, adds speckle noise with the given stddev,
// and returns the re-encoded result through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -1 on null handle or exception.
extern "C" __declspec(dllexport) int ANSCV_AddSpeckleNoise(ANSCENTER::ANSOPENCV** Handle, unsigned char* inputImage, unsigned int bufferLength, double stddev, LStrHandle outputImage) {
    if (Handle == nullptr || *Handle == nullptr) return -1;
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = (*Handle)->AddSpeckleNoise(inputFrame, stddev);
        std::string st = (*Handle)->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
|
||
// Initializes the project's camera network layer.
// Best-effort: failures are deliberately swallowed (no return value).
extern "C" __declspec(dllexport) void ANSCV_InitCameraResource() {
    try {
        // Serialize (de)initialization against other image operations.
        std::unique_lock<std::mutex> guard(imageMutex);
        ANSCENTER::ANSOPENCV::InitCameraNetwork();
    }
    catch (...) {
        // Intentionally ignored — initialization is best-effort.
    }
}
|
||
// Tears down the project's camera network layer.
// Best-effort: failures are deliberately swallowed (no return value).
extern "C" __declspec(dllexport) void ANSCV_FreeCameraResource() {
    try {
        // Serialize (de)initialization against other image operations.
        std::unique_lock<std::mutex> guard(imageMutex);
        ANSCENTER::ANSOPENCV::DeinitCameraNetwork();
    }
    catch (...) {
        // Intentionally ignored — teardown is best-effort.
    }
}
|
||
|
||
// Decodes the compressed buffer, resizes it to fit (width, height) via
// resizeImageToFit (actual output size reported through newWidth/newHeight),
// JPEG-encodes the result, and returns it through the LabVIEW string handle.
// Returns 1 on success, 0 on failure, -6 on mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ResizeImage_Static(unsigned char* inputImage, unsigned int bufferLength, int width, int height, int& newWidth, int& newHeight, LStrHandle outputImage) {
    // Fix: previously a null inputImage/outputImage was used unchecked (UB).
    if (inputImage == nullptr || bufferLength == 0 || outputImage == nullptr) return 0;
    std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
    if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
        std::cerr << "Error: Mutex timeout in ANSCV_ResizeImage_Static!" << std::endl;
        return -6;
    }
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // buffer was not a decodable image
        cv::Mat outputFrame = ANSCENTER::ANSOPENCV::resizeImageToFit(inputFrame, width, height, newWidth, newHeight);
        if (outputFrame.empty()) return 0; // resize produced nothing
        std::vector<uchar> imageData;
        if (!cv::imencode(".jpg", outputFrame, imageData)) return 0;
        // Copy the JPEG bytes directly from the vector (the previous
        // vector -> std::string round trip was a needless extra copy).
        const int size = static_cast<int>(imageData.size());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, imageData.data(), size);
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ResizeImage_Static: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        // Fix: previously a non-std exception escaped this extern "C"
        // boundary, which would terminate the host process.
        return 0;
    }
}
|
||
|
||
|
||
// Image Reference Management
|
||
|
||
// Clones *imageIn into a newly registered Mat (*imageOut) and links the clone
// to the same NV12 GPU frame (reference count incremented).
// Returns 1 on success, -1 on allocation failure, -2 on invalid arguments,
// -3 on other exceptions, -6 on mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_CloneImage_S(cv::Mat** imageIn, cv::Mat** imageOut) {
    try {
        std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
        if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
            std::cerr << "Error: Mutex timeout in ANSCV_CloneImage_S!" << std::endl;
            return -6;
        }
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_CloneImage_S!" << std::endl;
            return -2;
        }
        // Fix: imageOut was previously dereferenced without a null check (UB).
        if (!imageOut) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_CloneImage_S!" << std::endl;
            return -2;
        }
        *imageOut = anscv_mat_new(**imageIn);
        // Link clone to same NV12 frame data (refcount++)
        gpu_frame_addref(*imageIn, *imageOut);
        return 1; // Success
        // Fix: the identical nested try/catch pair was removed — the handlers
        // below already cover everything the inner ones did.
    }
    catch (const std::bad_alloc& e) {
        std::cerr << "Memory allocation failed in ANSCV_CloneImage_S: " << e.what() << std::endl;
        return -1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_CloneImage_S: " << e.what() << std::endl;
        return -3;
    }
}
|
||
// Releases a registered Mat pointer.
// Returns 1 when freed, -2 on a null argument, -4 when the pointer was no
// longer registered, -3 on exception.
extern "C" __declspec(dllexport) int ANSCV_ReleaseImage_S(cv::Mat** imageIn) {
    try {
        if (imageIn == nullptr || *imageIn == nullptr) return -2;
        // anscv_mat_delete is thread-safe: it consults the registry and frees
        // the Mat only if the pointer is still registered (i.e. not already
        // freed by a stream source).
        const bool removed = anscv_mat_delete(imageIn);
        if (removed) return 1;
        return -4;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ReleaseImage_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception in ANSCV_ReleaseImage_S." << std::endl;
        return -3;
    }
}
|
||
// Crops *imageIn in place to the rectangle (x, y, width, height), clamped to
// the image bounds. The heavy crop runs outside the lock; the lock is held
// only while copying the source and while swapping in the result.
// Returns 1 on success, 0 when the crop yields an empty image, -1 on
// allocation failure, -2 on invalid arguments, -3 on exceptions, -5 when the
// ANSOPENCV instance cannot be initialized, -6 on mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_CropImage_S(cv::Mat** imageIn, int x, int y, int width, int height, int originalImageSize) {
    // The pixel data is about to be replaced, so drop any linked GPU frame.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        // Step 1: Validate and copy input image safely.
        // Fix: all error messages below previously named ANSCV_CloneImage_S
        // (copy-paste from the clone function).
        cv::Mat localImage;
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_CropImage_S!" << std::endl;
                return -6;
            }
            try {
                if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                    std::cerr << "Error: Invalid or empty input image in ANSCV_CropImage_S!" << std::endl;
                    return -2;
                }
                // clone() is a deep copy (the old comment claiming "shallow"
                // was wrong), so the crop below can safely run unlocked.
                localImage = (**imageIn).clone();
            }
            catch (const std::bad_alloc& e) {
                std::cerr << "Memory allocation failed in ANSCV_CropImage_S: " << e.what() << std::endl;
                return -1;
            }
            catch (const std::exception& e) {
                std::cerr << "Error: Exception occurred in ANSCV_CropImage_S: " << e.what() << std::endl;
                return -3;
            }
        }

        // Step 2: Validate cropping parameters.
        if (width <= 0 || height <= 0) {
            std::cerr << "Error: Invalid width or height for cropping image in ANSCV_CropImage_S!" << std::endl;
            return -2;
        }

        // Clamp the ROI to the image bounds.
        int originalWidth = localImage.cols;
        int originalHeight = localImage.rows;
        x = max(0, x);
        y = max(0, y);
        width = min(width, originalWidth - x);
        height = min(height, originalHeight - y);
        // Fix: with x/y beyond the image the clamped extent went negative and
        // an invalid cv::Rect reached ImageCrop; reject it explicitly.
        if (width <= 0 || height <= 0) {
            std::cerr << "Error: Invalid width or height for cropping image in ANSCV_CropImage_S!" << std::endl;
            return -2;
        }
        cv::Rect roi(x, y, width, height);

        // Step 3: Process crop outside lock.
        ANSCENTER::ANSOPENCV ansCVInstance;
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }
        cv::Mat croppedImage = ansCVInstance.ImageCrop(localImage, roi, originalImageSize);

        // Step 4: Replace original image safely.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_CropImage_S!" << std::endl;
                return -6;
            }
            if (croppedImage.empty()) {
                std::cerr << "Error: Failed to crop image in ANSCV_CropImage_S!" << std::endl;
                return 0;
            }
            // Re-validate: the target may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_CropImage_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(croppedImage);
        }

        return 1; // Success
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_CropImage_S: " << e.what() << std::endl;
        return -3;
    }
}
|
||
|
||
// Returns a JPEG-compressed copy of *imageIn through `outputImage`, resized
// to `width` (longest side) when `width` is smaller than the current image,
// with the resulting dimensions reported via newWidth/newHeight.
// Return codes: 1 success, -2 invalid image, -4 unknown exception,
// -5 ANSOPENCV init failure, -6 mutex timeout, -8 empty processed image,
// -9 JPEG compression failure.
// NOTE(review): the lock is released between the dimension check and the
// resize/clone step, so *imageIn could change in between — the stale
// originalWidth/Height only affect whether the resize branch is taken.
extern "C" __declspec(dllexport) int ANSCV_GetImage_CPP(cv::Mat** imageIn, int width, int quality,
    int& newWidth, int& newHeight, std::string& outputImage) {
    try {
        // Unsynchronized pre-check to fail fast on obviously bad input.
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image!" << std::endl;
            return -2;
        }

        cv::Mat* img = *imageIn;
        int originalWidth, originalHeight;

        // Quick dimension check under lock (1 s timeout; note this differs
        // from the MUTEX_TIMEOUT_MS used by the sibling entry points).
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::chrono::milliseconds(1000));
            if (!lock.owns_lock()) {
                std::cerr << "Error: Mutex timeout!" << std::endl;
                return -6;
            }

            // Re-validate now that we hold the lock.
            if (img->empty() || !img->data) {
                std::cerr << "Error: Invalid image!" << std::endl;
                return -2;
            }

            originalWidth = img->cols;
            originalHeight = img->rows;
        } // Release lock early

        // `max` is used unqualified (presumably the Windows macro or a
        // using-declaration elsewhere in this file) — TODO confirm.
        int imageMaxSize = max(originalWidth, originalHeight);
        cv::Mat processedImage;

        if (width > 0 && width < imageMaxSize) {
            // Only lock when we need to resize.
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::chrono::milliseconds(1000));
            if (!lock.owns_lock()) return -6;

            ANSCENTER::ANSOPENCV ansCVInstance;
            if (!ansCVInstance.Init("")) return -5;

            processedImage = ansCVInstance.ImageResizeV2(*img, width);
        }
        else {
            // No resize needed - just copy quickly (deep copy so the source
            // can be mutated after the lock is dropped).
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::chrono::milliseconds(1000));
            if (!lock.owns_lock()) return -6;

            processedImage = img->clone(); // Slightly faster than copyTo
        }

        if (processedImage.empty()) return -8;

        newWidth = processedImage.cols;
        newHeight = processedImage.rows;

        // JPEG-compress outside the lock; empty result signals failure.
        outputImage = ANSCENTER::CompressJpegToString(processedImage, quality);
        return outputImage.empty() ? -9 : 1;

    }
    catch (...) {
        return -4;
    }
}
|
||
//extern "C" __declspec(dllexport) int ANSCV_GetImage_S(cv::Mat** imageIn, int width, int quality, int& newWidth, int& newHeight, LStrHandle outputImage) {
|
||
// try {
|
||
// // Initial validation
|
||
// if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
|
||
// std::cerr << "Error: Invalid or empty input image in ANSCV_GetImage_S!" << std::endl;
|
||
// return -2;
|
||
// }
|
||
// if (!outputImage) {
|
||
// std::cerr << "Error: Output image handle is null!" << std::endl;
|
||
// return -2;
|
||
// }
|
||
//
|
||
// cv::Mat imgCopy;
|
||
//
|
||
// // Critical section: Only lock during image copy
|
||
// {
|
||
// auto lockStartTime = std::chrono::steady_clock::now();
|
||
// std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
|
||
//
|
||
// // Increased timeout to 30 seconds
|
||
// if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
|
||
// auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||
// std::chrono::steady_clock::now() - lockStartTime).count();
|
||
// std::cerr << "Error: Mutex timeout after " << elapsed << "ms in ANSCV_GetImage_S!" << std::endl;
|
||
// return -6;
|
||
// }
|
||
//
|
||
// auto lockAcquiredTime = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||
// std::chrono::steady_clock::now() - lockStartTime).count();
|
||
// if (lockAcquiredTime > 1000) {
|
||
// std::cerr << "Warning: Lock acquisition took " << lockAcquiredTime << "ms" << std::endl;
|
||
// }
|
||
//
|
||
// // Re-validate after acquiring lock
|
||
// if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
|
||
// std::cerr << "Error: Image became invalid after mutex acquisition!" << std::endl;
|
||
// return -2;
|
||
// }
|
||
//
|
||
// // Copy the image while holding the lock
|
||
// auto copyStartTime = std::chrono::steady_clock::now();
|
||
// try {
|
||
// (*imageIn)->copyTo(imgCopy);
|
||
// }
|
||
// catch (const cv::Exception& e) {
|
||
// std::cerr << "Error: OpenCV exception during image copy: " << e.what() << std::endl;
|
||
// return -7;
|
||
// }
|
||
//
|
||
// auto copyElapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||
// std::chrono::steady_clock::now() - copyStartTime).count();
|
||
// if (copyElapsed > 500) {
|
||
// std::cerr << "Warning: Image copy took " << copyElapsed << "ms (size: "
|
||
// << (*imageIn)->cols << "x" << (*imageIn)->rows << ")" << std::endl;
|
||
// }
|
||
// } // Lock released here - all subsequent operations run without holding the mutex
|
||
//
|
||
// // Validate copied image
|
||
// if (imgCopy.empty() || !imgCopy.data) {
|
||
// std::cerr << "Error: Copied image is invalid in ANSCV_GetImage_S!" << std::endl;
|
||
// return -2;
|
||
// }
|
||
//
|
||
// // Store original dimensions
|
||
// int originalWidth = imgCopy.cols;
|
||
// int originalHeight = imgCopy.rows;
|
||
// int imageMaxSize = max(originalWidth, originalHeight);
|
||
//
|
||
// // Resize if requested
|
||
// if (width > 0 && width < imageMaxSize) {
|
||
// auto resizeStartTime = std::chrono::steady_clock::now();
|
||
//
|
||
// ANSCENTER::ANSOPENCV ansCVInstance;
|
||
// if (!ansCVInstance.Init("")) {
|
||
// std::cerr << "Error: Failed to initialize ANSOPENCV instance!" << std::endl;
|
||
// return -5;
|
||
// }
|
||
//
|
||
// cv::Mat resized = ansCVInstance.ImageResizeV2(imgCopy, width);
|
||
// if (resized.empty()) {
|
||
// std::cerr << "Error: Resizing failed!" << std::endl;
|
||
// return -8;
|
||
// }
|
||
//
|
||
// imgCopy = std::move(resized);
|
||
//
|
||
// auto resizeElapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||
// std::chrono::steady_clock::now() - resizeStartTime).count();
|
||
// if (resizeElapsed > 500) {
|
||
// std::cerr << "Warning: Image resize took " << resizeElapsed << "ms (from "
|
||
// << originalWidth << "x" << originalHeight << " to "
|
||
// << imgCopy.cols << "x" << imgCopy.rows << ")" << std::endl;
|
||
// }
|
||
// }
|
||
//
|
||
// // Update output dimensions
|
||
// newWidth = imgCopy.cols;
|
||
// newHeight = imgCopy.rows;
|
||
//
|
||
// // Compress to JPEG
|
||
// auto compressStartTime = std::chrono::steady_clock::now();
|
||
// std::string jpegString = ANSCENTER::CompressJpegToString(imgCopy, quality);
|
||
// if (jpegString.empty()) {
|
||
// std::cerr << "Error: JPEG compression failed!" << std::endl;
|
||
// return -9;
|
||
// }
|
||
//
|
||
// auto compressElapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||
// std::chrono::steady_clock::now() - compressStartTime).count();
|
||
// if (compressElapsed > 500) {
|
||
// std::cerr << "Warning: JPEG compression took " << compressElapsed << "ms (quality: "
|
||
// << quality << ", size: " << jpegString.size() << " bytes)" << std::endl;
|
||
// }
|
||
//
|
||
// // Validate compressed size
|
||
// int32_t size = static_cast<int32_t>(jpegString.size());
|
||
// if (size > 50 * 1024 * 1024) {
|
||
// std::cerr << "Error: Compressed image size too large: " << size << " bytes" << std::endl;
|
||
// return -10;
|
||
// }
|
||
//
|
||
// // Allocate memory for output
|
||
// MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size);
|
||
// if (error != noErr) {
|
||
// std::cerr << "Error: Failed to allocate memory for output image! Error code: " << error << std::endl;
|
||
// return -10;
|
||
// }
|
||
//
|
||
// // Copy data to output handle
|
||
// (*outputImage)->cnt = size;
|
||
// memcpy((*outputImage)->str, jpegString.data(), size);
|
||
//
|
||
// return 1;
|
||
// }
|
||
// catch (const std::exception& e) {
|
||
// std::cerr << "Exception in ANSCV_GetImage_S: " << e.what() << std::endl;
|
||
// return -3;
|
||
// }
|
||
// catch (...) {
|
||
// std::cerr << "Unknown exception in ANSCV_GetImage_S!" << std::endl;
|
||
// return -4;
|
||
// }
|
||
//}
|
||
|
||
// Snapshot the shared frame, optionally downscale it to `width`, JPEG-encode
// the result and copy the bytes into the LabVIEW string handle `outputImage`.
// The shared mutex is held only for the deep copy; resize/compression run on
// a private copy. Returns 1 on success; negative codes on failure.
extern "C" __declspec(dllexport) int ANSCV_GetImage_S(cv::Mat** imageIn, int width, int quality, int& newWidth, int& newHeight, LStrHandle outputImage) {
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_GetImage_S!" << std::endl;
            return -2;
        }
        if (!outputImage) {
            std::cerr << "Error: Output image handle is null!" << std::endl;
            return -2;
        }

        // Deep-copy the shared frame while holding the timed mutex.
        cv::Mat frame;
        {
            std::unique_lock<std::timed_mutex> guard(timeImageMutex, std::defer_lock);
            if (!guard.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_GetImage_S!" << std::endl;
                return -6;
            }
            // The frame may have been released while we waited for the lock.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Image became invalid after mutex acquisition!" << std::endl;
                return -2;
            }
            try {
                (*imageIn)->copyTo(frame);
            }
            catch (const cv::Exception& e) {
                std::cerr << "Error: OpenCV exception during image copy: " << e.what() << std::endl;
                return -7;
            }
        } // mutex released; everything below works on the private copy

        if (frame.empty() || !frame.data) {
            std::cerr << "Error: Copied image is invalid in ANSCV_GetImage_S!" << std::endl;
            return -2;
        }

        // Downscale only when the requested width is below the longest edge.
        const int longestEdge = max(frame.cols, frame.rows);
        if (width > 0 && width < longestEdge) {
            ANSCENTER::ANSOPENCV cvHelper;
            if (!cvHelper.Init("")) {
                std::cerr << "Error: Failed to initialize ANSOPENCV instance!" << std::endl;
                return -5;
            }
            cv::Mat shrunk = cvHelper.ImageResizeV2(frame, width);
            if (shrunk.empty()) {
                std::cerr << "Error: Resizing failed!" << std::endl;
                return -8;
            }
            frame = std::move(shrunk);
        }

        // Report the final dimensions back to the caller.
        newWidth = frame.cols;
        newHeight = frame.rows;

        std::string encoded = ANSCENTER::CompressJpegToString(frame, quality);
        if (encoded.empty()) {
            std::cerr << "Error: JPEG compression failed!" << std::endl;
            return -9;
        }

        const int32_t byteCount = static_cast<int32_t>(encoded.size());
        if (byteCount > 50 * 1024 * 1024) { // sanity cap: 50 MB
            std::cerr << "Error: Compressed image size too large: " << byteCount << " bytes" << std::endl;
            return -10;
        }

        // Resize the LabVIEW handle, then copy the JPEG bytes into it.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + byteCount);
        if (error != noErr) {
            std::cerr << "Error: Failed to allocate memory for output image! Error code: " << error << std::endl;
            return -10;
        }
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, encoded.data(), byteCount);
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_GetImage_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_GetImage_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Resizes the shared image *imageIn in place to `width` columns, preserving
// aspect ratio. If `originalImageSize` (>0) differs from the current width,
// `width` is treated as relative to that original size and rescaled. Never
// upscales. Returns: 1 success, 0 resize produced an empty image, -1 bad_alloc,
// -2 invalid input/size, -3 exception, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ReSizeImage_S(cv::Mat** imageIn, int width, int originalImageSize) {
    // Drop any cached GPU copy before the CPU-side pixels are replaced.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        cv::Mat localImage;
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ReSizeImage_S!" << std::endl;
                return -6;
            }
            try {
                if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                    // BUGFIX: message previously named ANSCV_CloneImage_S.
                    std::cerr << "Error: Invalid or empty input image in ANSCV_ReSizeImage_S!" << std::endl;
                    return -2;
                }
                localImage = **imageIn; // shallow copy (ref-counted)
            }
            catch (const std::bad_alloc& e) {
                std::cerr << "Memory allocation failed in ANSCV_ReSizeImage_S: " << e.what() << std::endl;
                return -1;
            }
            catch (const std::exception& e) {
                std::cerr << "Error: Exception occurred in ANSCV_ReSizeImage_S: " << e.what() << std::endl;
                return -3;
            }
        } // lock released; resize runs on the ref-counted local copy

        if (width <= 0) {
            std::cerr << "Error: Invalid target width in ANSCV_ReSizeImage_S!" << std::endl;
            return -2;
        }
        const int originalWidth = localImage.cols;
        const int originalHeight = localImage.rows;
        int targetWidth = width;

        // Scale the requested width when it refers to a differently-sized original.
        if (originalImageSize > 0 && originalImageSize != originalWidth) {
            double scale = static_cast<double>(originalWidth) / originalImageSize;
            targetWidth = static_cast<int>(width * scale);
        }
        // BUGFIX: the scaled width can truncate to 0, which would make
        // cv::resize throw; reject it explicitly.
        if (targetWidth <= 0) {
            std::cerr << "Error: Computed width is invalid!" << std::endl;
            return -2;
        }

        cv::Mat resizedImage;
        // Skip resizing if the target size is greater than or equal to the original.
        if (targetWidth < originalWidth) {
            // Maintain aspect ratio.
            int targetHeight = static_cast<int>(std::round(targetWidth * static_cast<double>(originalHeight) / originalWidth));
            if (targetHeight <= 0) {
                std::cerr << "Error: Computed height is invalid!" << std::endl;
                return -2;
            }
            cv::resize(localImage, resizedImage, cv::Size(targetWidth, targetHeight), 0, 0, cv::INTER_LANCZOS4);
        }
        else {
            resizedImage = localImage.clone(); // No resizing needed
        }

        // Publish the result back into the shared image under the mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ReSizeImage_S!" << std::endl;
                return -6;
            }
            if (resizedImage.empty()) {
                std::cerr << "Error: Resizing failed!" << std::endl;
                return 0;
            }
            // Re-validate: the shared image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ReSizeImage_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(resizedImage);
        }
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ReSizeImage_S: " << e.what() << std::endl;
        return -3;
    }
}
|
||
|
||
// Copies the shared image, deletes the shared reference via the registry,
// then optionally downscales and JPEG-encodes the copy into `outputImage`.
// Returns: 1 success, 0 processing failure, -2 invalid input/handle,
// -3 exception, -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_GetImageAndRemoveImgRef_S(cv::Mat** imageIn, int width, int quality, int& newWidth, int& newHeight, LStrHandle outputImage) {
    bool cleanupRequired = true; // true until the shared ref has been deleted
    try {
        // BUGFIX: validate the output handle before any work; it was
        // previously dereferenced unchecked.
        if (!outputImage) {
            std::cerr << "Error: Output image handle is null in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
            return -2;
        }

        cv::Mat imgCopy;
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
                return -6;
            }
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
                return -2;
            }
            (*imageIn)->copyTo(imgCopy);
            anscv_mat_delete(imageIn); // Safe delete + null
            cleanupRequired = false;   // Already deleted inside the lock
        }

        if (imgCopy.empty() || !imgCopy.data) {
            std::cerr << "Error: Copied image is invalid in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
            return 0;
        }

        // Downscale only when the requested width is below the longest edge.
        int originalWidth = imgCopy.cols;
        int originalHeight = imgCopy.rows;
        int imageMaxSize = max(originalWidth, originalHeight);
        if (width > 0 && width < imageMaxSize) {
            ANSCENTER::ANSOPENCV ansCVInstance;
            if (!ansCVInstance.Init("")) {
                std::cerr << "Error: Failed to initialize ANSOPENCV instance!" << std::endl;
                return -5;
            }
            cv::Mat resized = ansCVInstance.ImageResizeV2(imgCopy, width);
            if (resized.empty()) {
                std::cerr << "Error: Resizing failed!" << std::endl;
                return 0;
            }
            imgCopy = std::move(resized);
        }

        newWidth = imgCopy.cols;
        newHeight = imgCopy.rows;

        std::string jpegString = ANSCENTER::CompressJpegToString(imgCopy, quality);
        if (jpegString.empty()) {
            std::cerr << "Error: JPEG compression failed in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
            return 0;
        }

        // Copy the JPEG bytes into the LabVIEW string handle.
        int32_t size = static_cast<int32_t>(jpegString.size());
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size);
        if (error != noErr) {
            std::cerr << "Error: Failed to allocate memory for output image!" << std::endl;
            return 0;
        }
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, jpegString.data(), size);
        return 1; // Success
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_GetImageAndRemoveImgRef_S: " << e.what() << std::endl;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
    }

    // Exception path: still delete the shared reference if we never got to it.
    if (cleanupRequired) {
        std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
        if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
            std::cerr << "Error: Mutex timeout in ANSCV_GetImageAndRemoveImgRef_S!" << std::endl;
            return -6;
        }
        anscv_mat_delete(imageIn);
    }
    return -3;
}
|
||
|
||
// Reports the dimensions of the shared image *imageIn via width/height.
// Returns: 1 success, 0 invalid snapshot, -1 bad_alloc, -2 invalid input,
// -3 std::exception, -4 unknown exception, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_GetImageInfo_S(cv::Mat** imageIn, int& width, int& height) {
    try {
        cv::Mat imgCopy;
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_GetImageInfo_S!" << std::endl;
                return -6;
            }
            try {
                if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                    // BUGFIX: message previously named ANSCV_CloneImage_S.
                    std::cerr << "Error: Invalid or empty input image in ANSCV_GetImageInfo_S!" << std::endl;
                    return -2;
                }
                imgCopy = **imageIn; // shallow copy (ref-counted)
            }
            catch (const std::bad_alloc& e) {
                std::cerr << "Memory allocation failed in ANSCV_GetImageInfo_S: " << e.what() << std::endl;
                return -1;
            }
            catch (const std::exception& e) {
                std::cerr << "Error: Exception occurred in ANSCV_GetImageInfo_S: " << e.what() << std::endl;
                return -3;
            }
        }
        if (imgCopy.empty() || !imgCopy.data) {
            std::cerr << "Error: Dereferenced image is invalid in ANSCV_GetImageInfo_S!" << std::endl;
            return 0;
        }

        // Assign the output dimensions once the image is validated.
        width = imgCopy.cols;
        height = imgCopy.rows;
        return 1; // Success
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_GetImageInfo_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_GetImageInfo_S!" << std::endl;
        return -4;
    }
}
|
||
// Decodes a JPEG byte buffer into a new registry-managed cv::Mat (*image).
// Returns: 1 success, 0 decode failed, -1 bad_alloc, -2 invalid parameters,
// -3 std::exception, -4 unknown exception, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_CreateImageFromJpegString_S(
    unsigned char* inputImage,
    unsigned int bufferLength,
    cv::Mat** image)
{
    // Validate input parameters
    if (!inputImage || bufferLength == 0 || image == nullptr) {
        std::cerr << "Error: Invalid input parameters in ANSCV_CreateImageFromJpegString_S!" << std::endl;
        return -2;
    }

    try {
        // Copy the input buffer and decode it outside the lock.
        std::vector<uchar> buffer(inputImage, inputImage + bufferLength);
        cv::Mat decodedImage = cv::imdecode(buffer, cv::IMREAD_COLOR);

        // Allocate the output image under the shared mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_CreateImageFromJpegString_S!" << std::endl;
                return -6;
            }
            if (decodedImage.empty()) {
                std::cerr << "Error: Failed to decode JPEG image in ANSCV_CreateImageFromJpegString_S!" << std::endl;
                return 0;
            }
            *image = anscv_mat_new(decodedImage); // registry-managed lifetime
            return 1; // Success
        }
    }
    catch (const std::bad_alloc& e) {
        std::cerr << "Memory allocation failed in ANSCV_CreateImageFromJpegString_S: " << e.what() << std::endl;
        return -1;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_CreateImageFromJpegString_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_CreateImageFromJpegString_S!" << std::endl;
        return -4;
    }
}
|
||
// Loads an image file into a new registry-managed cv::Mat (*image).
// Returns: 1 success, 0 load failed, -1 null pointer argument, -2 empty path,
// -3 exception, -4 file missing/inaccessible, -5 unknown error, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_CreateImageFromFile_S(const char* imageFilePath, cv::Mat** image)
{
    try {
        if (!imageFilePath) {
            std::cerr << "Error: Null input parameter in ANSCV_CreateImageFromFile_S!" << std::endl;
            return -1; // Null pointer input
        }
        // BUGFIX: the output pointer must be supplied by the caller. The old
        // code allocated a fresh cv::Mat** into the *local* parameter when
        // `image` was null, leaking memory and never reaching the caller.
        if (!image) {
            std::cerr << "Error: Null output image pointer in ANSCV_CreateImageFromFile_S!" << std::endl;
            return -1;
        }

        std::string stImageFileName(imageFilePath);
        if (stImageFileName.empty()) {
            std::cerr << "Error: Empty file path in ANSCV_CreateImageFromFile_S!" << std::endl;
            return -2; // Empty path
        }

        // Check that the file exists and is readable.
        std::ifstream fileCheck(stImageFileName);
        if (!fileCheck.good()) {
            std::cerr << "Error: File does not exist or is inaccessible: " << stImageFileName << std::endl;
            return -4; // File does not exist
        }

        // Load the image using OpenCV (outside the lock).
        cv::Mat loadedImage = cv::imread(stImageFileName, cv::ImreadModes::IMREAD_COLOR);
        {
            // Publish the result under the shared mutex.
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_CreateImageFromFile_S!" << std::endl;
                return -6;
            }
            if (loadedImage.empty()) {
                std::cerr << "Error: Failed to load image from file in ANSCV_CreateImageFromFile_S!" << std::endl;
                return 0; // Load failed
            }
            *image = anscv_mat_new(loadedImage); // Use registry for safe lifecycle
            return 1; // Success
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_CreateImageFromFile_S: " << e.what() << std::endl;
        return -3; // Exception
    }
    catch (...) {
        std::cerr << "Unknown error occurred in ANSCV_CreateImageFromFile_S!" << std::endl;
        return -5; // Unknown error
    }
}
|
||
|
||
// Image Preprocessing
|
||
// Applies automatic white balance to the shared image *imageIn in place.
// Returns: 1 success, 0 processing produced an empty image, -2 invalid input,
// -3 std::exception, -4 unknown exception, -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageAutoWhiteBalance_S(cv::Mat** imageIn) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
        // BUGFIX: message previously named ANSCV_CloneImage_S.
        std::cerr << "Error: Invalid or empty input image in ANSCV_ImageAutoWhiteBalance_S!" << std::endl;
        return -2;
    }
    try {
        ANSCENTER::ANSOPENCV ansCVInstance;
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        cv::Mat imOut = ansCVInstance.ImageWhiteBalance(**imageIn);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageAutoWhiteBalance_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: White balance processing failed in ANSCV_ImageAutoWhiteBalance_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageAutoWhiteBalance_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageAutoWhiteBalance_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageAutoWhiteBalance_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Brightens dark regions of the shared image *imageIn in place, scaled by
// `brightnessScaleFactor`. Returns: 1 success, 0 empty result, -2 invalid
// input, -3/-4 exceptions, -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageBrightEnhance_S(cv::Mat** imageIn, double brightnessScaleFactor) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBrightEnhance_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        cv::Mat imOut = ansCVInstance.ImageDarkEnhancement(**imageIn, brightnessScaleFactor);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageBrightEnhance_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: Brightness enhancement failed in ANSCV_ImageBrightEnhance_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBrightEnhance_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageBrightEnhance_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageBrightEnhance_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
|
||
// Enhances the contrast of the shared image *imageIn in place.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageContrastEnhance_S(cv::Mat** imageIn) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
        // BUGFIX: message previously named ANSCV_CloneImage_S.
        std::cerr << "Error: Invalid or empty input image in ANSCV_ImageContrastEnhance_S!" << std::endl;
        return -2;
    }
    try {
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        cv::Mat imOut = ansCVInstance.ImageContrastEnhancement(**imageIn);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageContrastEnhance_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                // BUGFIX: message previously said "White balance".
                std::cerr << "Error: Contrast enhancement failed in ANSCV_ImageContrastEnhance_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageContrastEnhance_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1; // Success
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageContrastEnhance_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageContrastEnhance_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Denoises the shared image *imageIn in place.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageDenoise_S(cv::Mat** imageIn) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageDenoise_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        // Perform denoising into a private result image.
        cv::Mat imOut = ansCVInstance.ImageDenoise(**imageIn);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageDenoise_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: Denoising processing failed in ANSCV_ImageDenoise_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageDenoise_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1; // Success
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageDenoise_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageDenoise_S!" << std::endl;
        return -4;
    }
}
|
||
// Repairs (inpaints/restores) the shared image *imageIn in place.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageRepair_S(cv::Mat** imageIn) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageRepair_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        // Perform image repair into a private result image.
        cv::Mat imOut = ansCVInstance.ImageRepair(**imageIn);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageRepair_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: Image repair processing failed in ANSCV_ImageRepair_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageRepair_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1; // Success
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageRepair_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageRepair_S!" << std::endl;
        return -4;
    }
}
|
||
// Converts the shared image *imageIn to grayscale in place.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageToGray_S(cv::Mat** imageIn) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageToGray_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        // Convert to grayscale into a private result image.
        cv::Mat imOut = ansCVInstance.ToGray(**imageIn);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageToGray_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                // BUGFIX: message previously said "White balance".
                std::cerr << "Error: Grayscale conversion failed in ANSCV_ImageToGray_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageToGray_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageToGray_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageToGray_S!" << std::endl;
        return -4;
    }
}
|
||
// Rotates the shared image *imageIn in place by `angle` degrees.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageRotate_S(cv::Mat** imageIn, double angle) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageRotate_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        // Rotate into a private result image.
        cv::Mat imOut = ansCVInstance.RotateImage(**imageIn, angle);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageRotate_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                // BUGFIX: message previously said "White balance".
                std::cerr << "Error: Rotation failed in ANSCV_ImageRotate_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageRotate_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageRotate_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageRotate_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Flips the shared image *imageIn in place according to `flipCode`.
// Returns: 1 success, 0 empty result, -2 invalid input, -3/-4 exceptions,
// -5 init failure, -6 mutex timeout.
extern "C" __declspec(dllexport) int ANSCV_ImageFlip_S(cv::Mat** imageIn, int flipCode) {
    // Drop any cached GPU copy of this frame before the CPU pixels change.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            // BUGFIX: message previously named ANSCV_CloneImage_S.
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageFlip_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        // BUGFIX: the Init() result was previously ignored; fail fast like the
        // sibling wrappers do.
        if (!ansCVInstance.Init("")) {
            std::cerr << "Error: Failed to initialize ANSCV instance!" << std::endl;
            return -5;
        }

        // Flip into a private result image.
        cv::Mat imOut = ansCVInstance.FlipImage(**imageIn, flipCode);

        // Publish the processed frame under the shared image mutex.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                // BUGFIX: message previously named ANSCV_ReSizeImage_S.
                std::cerr << "Error: Mutex timeout in ANSCV_ImageFlip_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                // BUGFIX: message previously said "White balance".
                std::cerr << "Error: Flip failed in ANSCV_ImageFlip_S!" << std::endl;
                return 0;
            }
            // Re-validate: the image may have been released while unlocked.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageFlip_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageFlip_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageFlip_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Post processing
|
||
// Post processing
// Blurs the regions given by strBboxes (JSON bounding boxes) inside *imageIn,
// in place. Returns 1 on success, 0 if processing produced an empty image,
// -2 on invalid input, -3/-4 on exceptions, -6 on mutex timeout.
// Fix: error messages previously named unrelated functions
// (ANSCV_CloneImage_S / ANSCV_ReSizeImage_S / "White balance").
extern "C" __declspec(dllexport) int ANSCV_ImageBlurObjects_S(cv::Mat** imageIn, const char* strBboxes) {
    // Drop any cached GPU copy of this frame before mutating it on the CPU.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBlurObjects_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        ansCVInstance.Init(""); // Initialize ANSCV instance

        // Parse the bounding boxes, then blur those regions of the frame.
        std::vector<cv::Rect> objects = ansCVInstance.GetBoundingBoxes(strBboxes);
        cv::Mat imOut = ansCVInstance.BlurObjects(**imageIn, objects);

        // Assign processed image back to the input pointer under the timed lock.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ImageBlurObjects_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: Blur processing failed in ANSCV_ImageBlurObjects_S!" << std::endl;
                return 0;
            }
            // Re-validate under the lock: the destination Mat may have been
            // released by another thread while we waited for the mutex.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBlurObjects_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageBlurObjects_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageBlurObjects_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Blurs everything OUTSIDE the regions given by strBboxes (JSON bounding
// boxes) inside *imageIn, in place. Returns 1 on success, 0 if processing
// produced an empty image, -2 on invalid input, -3/-4 on exceptions,
// -6 on mutex timeout.
// Fix: error messages previously named unrelated functions
// (ANSCV_CloneImage_S / ANSCV_ReSizeImage_S / "White balance").
extern "C" __declspec(dllexport) int ANSCV_ImageBlurBackground_S(cv::Mat** imageIn, const char* strBboxes) {
    // Drop any cached GPU copy of this frame before mutating it on the CPU.
    gpu_frame_invalidate(imageIn ? *imageIn : nullptr);
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBlurBackground_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        ansCVInstance.Init(""); // Initialize ANSCV instance

        // Parse the bounding boxes, then blur the background around them.
        std::vector<cv::Rect> objects = ansCVInstance.GetBoundingBoxes(strBboxes);
        cv::Mat imOut = ansCVInstance.BlurBackground(**imageIn, objects);

        // Assign processed image back to the input pointer under the timed lock.
        {
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ImageBlurBackground_S!" << std::endl;
                return -6;
            }
            if (imOut.empty()) {
                std::cerr << "Error: Blur processing failed in ANSCV_ImageBlurBackground_S!" << std::endl;
                return 0;
            }
            // Re-validate under the lock: the destination Mat may have been
            // released by another thread while we waited for the mutex.
            if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
                std::cerr << "Error: Invalid or empty input image in ANSCV_ImageBlurBackground_S!" << std::endl;
                return -2;
            }
            **imageIn = std::move(imOut);
            return 1;
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageBlurBackground_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageBlurBackground_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Decodes a QR code from *imageIn, restricted to the regions in strBboxes
// (JSON bounding boxes), and writes the decoded text into the LabVIEW
// string handle detectedQRText.
// Returns 1 on success, 0 if no QR code was found or the handle could not be
// resized, -2 on invalid input, -3/-4 on exceptions, -6 on mutex timeout.
// Fix: error messages previously named unrelated functions
// (ANSCV_CloneImage_S / ANSCV_ReSizeImage_S).
extern "C" __declspec(dllexport) int ANSCV_ImageQRDecoder_S(cv::Mat** imageIn, int maxImageWidth, const char* strBboxes, LStrHandle detectedQRText) {
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImageQRDecoder_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        ansCVInstance.Init(""); // Initialize ANSCV instance
        std::vector<cv::Rect> Bboxes = ansCVInstance.GetBoundingBoxes(strBboxes);

        // Decode the QR code (image may be downscaled to maxImageWidth first).
        std::string qrText = ansCVInstance.QRDecoderWithBBox(**imageIn, maxImageWidth, Bboxes);
        {
            // Writing into the LabVIEW handle is guarded by the timed mutex.
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ImageQRDecoder_S!" << std::endl;
                return -6;
            }
            if (qrText.empty()) {
                std::cerr << "Error: QR decoding failed in ANSCV_ImageQRDecoder_S!" << std::endl;
                return 0;
            }
            int size = qrText.length();
            if (size > 0) {
                // Resize the LabVIEW handle to hold the decoded bytes,
                // then copy them in (LStr is length-prefixed, not NUL-terminated).
                MgErr error;
                error = DSSetHandleSize(detectedQRText, sizeof(int32) + size * sizeof(uChar));
                if (error == noErr) {
                    (*detectedQRText)->cnt = size;
                    memcpy((*detectedQRText)->str, qrText.c_str(), size);
                    return 1; // Success
                }
                else {
                    return 0; // Error setting handle size
                }
            }
            else {
                return 0; // No QR code found
            }
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImageQRDecoder_S: " << e.what() << std::endl;
        return -3;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImageQRDecoder_S!" << std::endl;
        return -4;
    }
}
|
||
|
||
// Runs template matching of the image at templateFilePath against *imageIn
// and writes the serialized match locations into the LabVIEW string handle
// detectedMatchedLocations.
// Returns 1 on success, 0 if no matches were found or the handle could not be
// resized, -2 on invalid input or unloadable template, -3/-4 on exceptions,
// -6 on mutex timeout.
// Fix: error messages previously named unrelated functions
// (ANSCV_CloneImage_S / ANSCV_ReSizeImage_S).
extern "C" __declspec(dllexport) int ANSCV_ImagePatternMatchs_S(cv::Mat** imageIn, const char* templateFilePath, double threshold, LStrHandle detectedMatchedLocations) {
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !(*imageIn)->data) {
            std::cerr << "Error: Invalid or empty input image in ANSCV_ImagePatternMatchs_S!" << std::endl;
            return -2;
        }
        ANSCENTER::ANSOPENCV ansCVInstance;
        ansCVInstance.Init(""); // Initialize ANSCV instance

        // Load template image
        cv::Mat templateImage = cv::imread(templateFilePath, cv::IMREAD_COLOR);
        if (templateImage.empty()) {
            std::cerr << "Error: Failed to load template image from " << templateFilePath << std::endl;
            return -2; // Return error if template cannot be loaded
        }

        // Perform pattern matching
        std::string strMatchedLocations = ansCVInstance.PatternMatches(**imageIn, templateImage, threshold);
        {
            // Writing into the LabVIEW handle is guarded by the timed mutex.
            std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
            if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
                std::cerr << "Error: Mutex timeout in ANSCV_ImagePatternMatchs_S!" << std::endl;
                return -6;
            }
            int size = strMatchedLocations.length();
            if (size > 0) {
                // Resize the LabVIEW handle and copy the serialized locations
                // (LStr is length-prefixed, not NUL-terminated).
                MgErr error;
                error = DSSetHandleSize(detectedMatchedLocations, sizeof(int32) + size * sizeof(uChar));
                if (error == noErr) {
                    (*detectedMatchedLocations)->cnt = size;
                    memcpy((*detectedMatchedLocations)->str, strMatchedLocations.c_str(), size);
                    return 1; // Success
                }
                else {
                    std::cerr << "Error: Failed to set handle size for detectedMatchedLocations!" << std::endl;
                    return 0; // Error setting handle size
                }
            }
            else {
                return 0; // No matches found
            }
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception occurred in ANSCV_ImagePatternMatchs_S: " << e.what() << std::endl;
        return -3; // Exception occurred
    }
    catch (...) {
        std::cerr << "Error: Unknown exception occurred in ANSCV_ImagePatternMatchs_S!" << std::endl;
        return -4; // Unknown exception
    }
}
|
||
|
||
//extern "C" __declspec(dllexport) int ANSCV_ImagesToMP4_S(const char* imageFolder, const char* outputVideoPath, int targetDurationSec) {
|
||
// try {
|
||
// std::unique_lock<std::timed_mutex> lock(timeImageMutex, std::defer_lock);
|
||
// if (!lock.try_lock_for(std::chrono::milliseconds(MUTEX_TIMEOUT_MS))) {
|
||
// std::cerr << "Error: Mutex timeout in ANSCV_ReSizeImage_S!" << std::endl;
|
||
// return -6;
|
||
// }
|
||
// if (!imageFolder || strlen(imageFolder) == 0) {
|
||
// std::cerr << "Error: Invalid image folder path in ANSCV_ImagesToMP4_S!" << std::endl;
|
||
// return -1; // Invalid input
|
||
// }
|
||
//
|
||
// // Create MP4 video from images in the specified folder
|
||
// bool success = ANSCENTER::ANSOPENCV::ImagesToMP4(imageFolder, outputVideoPath, targetDurationSec);
|
||
// if (!success) {
|
||
// std::cerr << "Error: Failed to create MP4 video from images in folder: " << imageFolder << std::endl;
|
||
// return 0; // Failure
|
||
// }
|
||
//
|
||
// return 1; // Success
|
||
// }
|
||
// catch (const std::exception& e) {
|
||
// std::cerr << "Error: Exception occurred in ANSCV_ImagesToMP4_S: " << e.what() << std::endl;
|
||
// return -2; // Exception occurred
|
||
// }
|
||
// catch (...) {
|
||
// std::cerr << "Error: Unknown exception occurred in ANSCV_ImagesToMP4_S!" << std::endl;
|
||
// return -3; // Unknown exception
|
||
// }
|
||
//
|
||
//}
|
||
// Builds an MP4 video (via OpenCV) from all images found in imageFolder.
// maxWidth caps the output width; the fps argument is ignored and forced
// to 10 (kept for ABI compatibility with existing callers).
// Returns 1 on success, 0 on encode failure, -1 on bad paths,
// -2/-3 on exceptions.
extern "C" __declspec(dllexport) int ANSCV_ImagesToMP4_S(
    const char* imageFolder,
    const char* outputVideoPath,
    int maxWidth, int fps) {

    try {
        // Reject null or empty paths before touching the filesystem.
        const bool folderValid = (imageFolder != nullptr) && (imageFolder[0] != '\0');
        if (!folderValid) {
            std::cerr << "Error: Invalid image folder path!" << std::endl;
            return -1;
        }

        const bool outputValid = (outputVideoPath != nullptr) && (outputVideoPath[0] != '\0');
        if (!outputValid) {
            std::cerr << "Error: Invalid output video path!" << std::endl;
            return -1;
        }

        // Frame rate is fixed at 10 fps regardless of the caller's value.
        fps = 10;

        const bool encoded = ANSCENTER::ANSOPENCV::ImagesToMP4(
            imageFolder, outputVideoPath, maxWidth, fps);
        if (!encoded) {
            std::cerr << "Error: Failed to create MP4 from: "
                << imageFolder << std::endl;
            return 0;
        }
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception in ANSCV_ImagesToMP4_S: "
            << e.what() << std::endl;
        return -2;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception!" << std::endl;
        return -3;
    }
}
|
||
|
||
// ----------------------------------------------------------------------------
|
||
// Direct-FFmpeg variant — routes to ANSCENTER::ANSOPENCV::ImagesToMP4FF which
|
||
// encodes with libx265 (preferred) / libx264 / mpeg4 through the libav* API.
|
||
// Same fps=10 hardcoding as ANSCV_ImagesToMP4_S for consistency.
|
||
// ----------------------------------------------------------------------------
|
||
// Direct-FFmpeg variant of ANSCV_ImagesToMP4_S: encodes the images in
// imageFolder through ANSCENTER::ANSOPENCV::ImagesToMP4FF (libx265 /
// libx264 / mpeg4 via the libav* API). The fps argument is ignored and
// forced to 10 for consistency with the OpenCV variant.
// Returns 1 on success, 0 on encode failure, -1 on bad paths,
// -2/-3 on exceptions.
extern "C" __declspec(dllexport) int ANSCV_ImagesToMP4FF_S(
    const char* imageFolder,
    const char* outputVideoPath,
    int maxWidth, int fps) {

    try {
        // Reject null or empty paths before touching the filesystem.
        const bool folderValid = (imageFolder != nullptr) && (imageFolder[0] != '\0');
        if (!folderValid) {
            std::cerr << "Error: Invalid image folder path!" << std::endl;
            return -1;
        }

        const bool outputValid = (outputVideoPath != nullptr) && (outputVideoPath[0] != '\0');
        if (!outputValid) {
            std::cerr << "Error: Invalid output video path!" << std::endl;
            return -1;
        }

        // Frame rate is fixed at 10 fps regardless of the caller's value.
        fps = 10;

        const bool encoded = ANSCENTER::ANSOPENCV::ImagesToMP4FF(
            imageFolder, outputVideoPath, maxWidth, fps);
        if (!encoded) {
            std::cerr << "Error: Failed to create MP4 (FFmpeg) from: "
                << imageFolder << std::endl;
            return 0;
        }
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception in ANSCV_ImagesToMP4FF_S: "
            << e.what() << std::endl;
        return -2;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception in ANSCV_ImagesToMP4FF_S!" << std::endl;
        return -3;
    }
}
|
||
|
||
// ----------------------------------------------------------------------------
|
||
// Prints the copyright license of the FFmpeg libraries actually linked into
|
||
// ANSCV.dll. The FFmpeg symbols are resolved here (inside the DLL) where
|
||
// libavcodec / libavformat / libavutil are linked, so callers that don't link
|
||
// FFmpeg themselves (e.g. ANSCV-UnitTest) can still get the info.
|
||
//
|
||
// LGPL v2.1+ → commercial/closed-source distribution OK (subject to LGPL
|
||
// requirements like allowing relinking with a modified FFmpeg).
|
||
// GPL v2+ → ANSCV.dll is a derivative work and must be GPL-compatible.
|
||
// ----------------------------------------------------------------------------
|
||
// Prints the license strings of the FFmpeg libraries actually linked into
// this DLL (avutil / avcodec / avformat / swscale), so callers that do not
// link FFmpeg themselves can still inspect the licensing.
extern "C" __declspec(dllexport) void ANSCV_PrintFFmpegLicense_S() {
    // Gather each library's label and reported license, then emit them.
    const std::pair<const char*, const char*> licenses[] = {
        { "[FFmpeg] avutil license: ",   avutil_license()   },
        { "[FFmpeg] avcodec license: ",  avcodec_license()  },
        { "[FFmpeg] avformat license: ", avformat_license() },
        { "[FFmpeg] swscale license: ",  swscale_license()  },
    };
    for (const auto& entry : licenses) {
        std::cout << entry.first << entry.second << std::endl;
    }
}
|
||
|
||
// ----------------------------------------------------------------------------
|
||
// Hardware-accelerated variant — routes to ANSCENTER::ANSOPENCV::ImagesToMP4HW
|
||
// which probes NVIDIA NVENC, Intel QSV, and AMD AMF HEVC/H.264 encoders in
|
||
// order, then falls back to software (libx265/libx264/mpeg4) if none work.
|
||
// Same fps=10 hardcoding as the other two variants.
|
||
// ----------------------------------------------------------------------------
|
||
// Hardware-accelerated variant of ANSCV_ImagesToMP4_S: probes NVENC, QSV,
// and AMF encoders through ANSCENTER::ANSOPENCV::ImagesToMP4HW, falling
// back to software codecs when none are available. The fps argument is
// ignored and forced to 10 for consistency with the other variants.
// Returns 1 on success, 0 on encode failure, -1 on bad paths,
// -2/-3 on exceptions.
extern "C" __declspec(dllexport) int ANSCV_ImagesToMP4HW_S(
    const char* imageFolder,
    const char* outputVideoPath,
    int maxWidth, int fps) {

    try {
        // Reject null or empty paths before touching the filesystem.
        const bool folderValid = (imageFolder != nullptr) && (imageFolder[0] != '\0');
        if (!folderValid) {
            std::cerr << "Error: Invalid image folder path!" << std::endl;
            return -1;
        }

        const bool outputValid = (outputVideoPath != nullptr) && (outputVideoPath[0] != '\0');
        if (!outputValid) {
            std::cerr << "Error: Invalid output video path!" << std::endl;
            return -1;
        }

        // Frame rate is fixed at 10 fps regardless of the caller's value.
        fps = 10;

        const bool encoded = ANSCENTER::ANSOPENCV::ImagesToMP4HW(
            imageFolder, outputVideoPath, maxWidth, fps);
        if (!encoded) {
            std::cerr << "Error: Failed to create MP4 (FFmpeg-HW) from: "
                << imageFolder << std::endl;
            return 0;
        }
        return 1;
    }
    catch (const std::exception& e) {
        std::cerr << "Error: Exception in ANSCV_ImagesToMP4HW_S: "
            << e.what() << std::endl;
        return -2;
    }
    catch (...) {
        std::cerr << "Error: Unknown exception in ANSCV_ImagesToMP4HW_S!" << std::endl;
        return -3;
    }
}
|
||
|
||
// ============================================================================
|
||
// V2 functions: accept uint64_t handleVal by value instead of ANSOPENCV**
|
||
// This eliminates the LabVIEW buffer reuse bug with double-pointer handles.
|
||
// ============================================================================
|
||
|
||
// V2 entry point: resizes an encoded image buffer to width x height and
// writes the re-encoded bytes into the LabVIEW string handle outputImage.
// handleVal is an ANSOPENCV* passed by value (avoids the LabVIEW
// double-pointer buffer-reuse bug). Returns 1 on success, 0 on any
// validation/processing failure, -1 on a null handle.
extern "C" __declspec(dllexport) int ANSCV_ImageResize_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, int width, int height, LStrHandle outputImage)
{
    auto* instance = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!instance) return -1;

    // All argument problems map to the same "soft failure" code 0.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    if (width <= 0 || height <= 0) return 0;

    try {
        // Decode the compressed buffer into a BGR frame.
        const cv::Mat encoded(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        cv::Mat resized;
        instance->ImageResize(decoded, width, height, resized);
        if (resized.empty()) return 0;

        // Re-encode and hand the bytes back through the LabVIEW handle.
        const std::string payload = instance->MatToBinaryData(resized);
        const int byteCount = static_cast<int>(payload.length());
        if (byteCount <= 0) return 0;

        if (DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, payload.c_str(), byteCount);
        return 1;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageResize_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageResize_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageResize_V2" << std::endl;
        return 0;
    }
}
|
||
|
||
// V2 entry point: resizes an encoded image buffer to the given width while
// preserving its aspect ratio, writing the re-encoded bytes into the
// LabVIEW string handle outputImage. Returns 1 on success, 0 on any
// validation/processing failure, -1 on a null handle.
extern "C" __declspec(dllexport) int ANSCV_ImageResizeWithRatio_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, int width, LStrHandle outputImage)
{
    auto* instance = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!instance) return -1;

    // All argument problems map to the same "soft failure" code 0.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    if (width <= 0) return 0;

    try {
        // Decode the compressed buffer into a BGR frame.
        const cv::Mat encoded(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        cv::Mat resized;
        instance->ImageResizeWithRatio(decoded, width, resized);
        if (resized.empty()) return 0;

        // Re-encode and hand the bytes back through the LabVIEW handle.
        const std::string payload = instance->MatToBinaryData(resized);
        const int byteCount = static_cast<int>(payload.length());
        if (byteCount <= 0) return 0;

        if (DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, payload.c_str(), byteCount);
        return 1;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageResizeWithRatio_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageResizeWithRatio_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageResizeWithRatio_V2" << std::endl;
        return 0;
    }
}
|
||
|
||
// V2 entry point: decodes an encoded image buffer and writes its Base64
// representation into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on any validation/processing failure,
// -1 on a null handle.
extern "C" __declspec(dllexport) int ANSCV_ImageToBase64_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage)
{
    auto* instance = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!instance) return -1;

    // All argument problems map to the same "soft failure" code 0.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;

    try {
        // Decode the compressed buffer into a BGR frame.
        const cv::Mat encoded(1, bufferLength, CV_8UC1, inputImage);
        cv::Mat decoded = cv::imdecode(encoded, cv::IMREAD_COLOR);
        if (decoded.empty()) return 0;

        // Convert to Base64 and copy into the LabVIEW handle.
        const std::string payload = instance->MatToBase64(decoded);
        const int byteCount = static_cast<int>(payload.length());
        if (byteCount <= 0) return 0;

        if (DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar)) != noErr) return 0;
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, payload.c_str(), byteCount);
        return 1;
    }
    catch (const cv::Exception& e) {
        std::cerr << "OpenCV exception in ANSCV_ImageToBase64_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in ANSCV_ImageToBase64_V2: " << e.what() << std::endl;
        return 0;
    }
    catch (...) {
        std::cerr << "Unknown exception in ANSCV_ImageToBase64_V2" << std::endl;
        return 0;
    }
}
|
||
|
||
// V2 entry point: converts an encoded image buffer to grayscale and writes
// the re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers (previously a null buffer or failed decode reached imdecode /
// the helper unchecked).
extern "C" __declspec(dllexport) int ANSCV_ImageToGray_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage)
{
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ToGray(inputFrame);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: denoises an encoded image buffer and writes the
// re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_ImageDenoise_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ImageDenoise(inputFrame);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: runs ImageRepair on an encoded image buffer and writes
// the re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_ImageRepair_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ImageRepair(inputFrame);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: applies automatic white balance to an encoded image
// buffer and writes the re-encoded bytes into the LabVIEW string handle
// outputImage. Returns 1 on success, 0 on failure, -1 on a null handle
// or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_ImageAutoWhiteBalance_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ImageWhiteBalance(inputFrame);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: brightens an encoded image buffer (via
// ImageDarkEnhancement — NOTE(review): the helper name suggests
// dark-region enhancement; confirm brightnessScaleFactor semantics against
// its implementation) and writes the re-encoded bytes into the LabVIEW
// string handle outputImage. Returns 1 on success, 0 on failure,
// -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_ImageBrightEnhance_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, double brightnessScaleFactor, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ImageDarkEnhancement(inputFrame, brightnessScaleFactor);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: enhances the contrast of an encoded image buffer and
// writes the re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_ImageContrastEnhance_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->ImageContrastEnhancement(inputFrame);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: crops the rectangle (x, y, width, height) out of an
// encoded image buffer and writes the re-encoded crop into the LabVIEW
// string handle outputImage. Returns 1 on success, 0 on failure,
// -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers, plus a basic ROI sanity check.
extern "C" __declspec(dllexport) int ANSCV_ImageCrop_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, int x, int y, int width, int height, LStrHandle outputImage)
{
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    if (width <= 0 || height <= 0) return 0; // empty ROI cannot yield a crop
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Rect roi(x, y, width, height);
        cv::Mat outputFrame = h->ImageCrop(inputFrame, roi);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: decodes an encoded image buffer and writes its
// dimensions as "WxH" into the LabVIEW string handle imageSize.
// A buffer that fails to decode reports "0x0" (kept for compatibility
// with ANSCV_GetImageSizeFromImageFile_V2's fallback behavior).
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added null-buffer/handle guards (a null inputImage was previously
// handed straight to imdecode).
extern "C" __declspec(dllexport) int ANSCV_GetImageSize_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle imageSize) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!imageSize) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // An empty decode yields cols == rows == 0, i.e. the "0x0" report.
        int width = inputFrame.cols;
        int height = inputFrame.rows;
        std::string st = std::to_string(width) + "x" + std::to_string(height);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the size string in.
        MgErr error = DSSetHandleSize(imageSize, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*imageSize)->cnt = size;
        memcpy((*imageSize)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: reads the dimensions of the media file at imageFilePath
// (opened through cv::VideoCapture, which also handles still images) and
// writes them as "WxH" into the LabVIEW string handle imageSize.
// An unopenable file reports "0x0" with return 1 (existing behavior, kept).
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added null-path/handle guards present in the other V2 wrappers.
extern "C" __declspec(dllexport) int ANSCV_GetImageSizeFromImageFile_V2(uint64_t handleVal, const char* imageFilePath, LStrHandle imageSize) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!imageFilePath || !imageSize) return 0;
    try {
        cv::VideoCapture cap(imageFilePath);
        int width = 0;
        int height = 0;
        if (!cap.isOpened()) {
            // Keep the "0x0" fallback rather than failing hard.
            std::cerr << "Error opening file: " << imageFilePath << std::endl;
        }
        else {
            width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
            height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
        }
        cap.release();
        std::string st = std::to_string(width) + "x" + std::to_string(height);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the size string in.
        MgErr error = DSSetHandleSize(imageSize, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*imageSize)->cnt = size;
        memcpy((*imageSize)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: blurs the regions given by strBboxes (JSON bounding
// boxes) inside an encoded image buffer and writes the re-encoded bytes
// into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_BlurObjects_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, const char* strBboxes, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        std::vector<cv::Rect> objects = h->GetBoundingBoxes(strBboxes);
        cv::Mat outputFrame = h->BlurObjects(inputFrame, objects);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: blurs everything outside the regions given by strBboxes
// (JSON bounding boxes) inside an encoded image buffer and writes the
// re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_BlurBackground_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, const char* strBboxes, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        std::vector<cv::Rect> objects = h->GetBoundingBoxes(strBboxes);
        cv::Mat outputFrame = h->BlurBackground(inputFrame, objects);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: decodes a QR code from an encoded image buffer and
// writes the decoded text into the LabVIEW string handle detectedQRText.
// Returns 1 on success, 0 on failure or when no QR code is found,
// -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_QRDecoder_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, LStrHandle detectedQRText) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!detectedQRText) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        std::string st = h->QRDecoder(inputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0; // no QR code found
        // Resize the LabVIEW handle and copy the decoded text in.
        MgErr error = DSSetHandleSize(detectedQRText, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectedQRText)->cnt = size;
        memcpy((*detectedQRText)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: template-matches the image at templateFilePath against
// an encoded image buffer and writes the serialized match locations into
// the LabVIEW string handle detectedMatchedLocations.
// Returns 1 on success, 0 on failure or when no matches are found,
// -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers, plus an empty-template check (previously an unloadable
// template was passed straight to PatternMatches).
extern "C" __declspec(dllexport) int ANSCV_PatternMatchs_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, const char* templateFilePath, double threshold, LStrHandle detectedMatchedLocations) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!templateFilePath || !detectedMatchedLocations) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat templateImage = cv::imread(templateFilePath, cv::IMREAD_COLOR);
        if (templateImage.empty()) return 0; // template could not be loaded
        std::string st = h->PatternMatches(inputFrame, templateImage, threshold);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0; // no matches found
        // Resize the LabVIEW handle and copy the serialized locations in.
        MgErr error = DSSetHandleSize(detectedMatchedLocations, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*detectedMatchedLocations)->cnt = size;
        memcpy((*detectedMatchedLocations)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// V2 entry point: rotates an encoded image buffer by `angle` degrees and
// writes the re-encoded bytes into the LabVIEW string handle outputImage.
// Returns 1 on success, 0 on failure, -1 on a null handle or exception.
// Fix: added the input/handle/decode validation present in the other V2
// wrappers.
extern "C" __declspec(dllexport) int ANSCV_RotateImage_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, double angle, LStrHandle outputImage) {
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard arguments up front, consistent with ANSCV_ImageResize_V2.
    if (!inputImage || bufferLength == 0) return 0;
    if (!outputImage) return 0;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        if (inputFrame.empty()) return 0; // decode failed
        cv::Mat outputFrame = h->RotateImage(inputFrame, angle);
        if (outputFrame.empty()) return 0;
        std::string st = h->MatToBinaryData(outputFrame);
        const int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        // Resize the LabVIEW handle and copy the encoded bytes in.
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_FlipImage_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, int flipCode, LStrHandle outputImage) {
    // Decodes an encoded frame, flips it (flipCode follows cv::flip semantics:
    // 0 = vertical, >0 = horizontal, <0 = both), and writes the re-encoded
    // bytes into the LabVIEW string handle outputImage.
    // Returns 1 on success, 0 when there is no output (or the handle could
    // not be resized), -1 on invalid input or internal error.
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard against a null/empty input buffer before handing it to OpenCV.
    if (!inputImage || bufferLength == 0 || !outputImage) return -1;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // imdecode returns an empty Mat on malformed data; fail early instead
        // of passing a degenerate frame downstream.
        if (inputFrame.empty()) return -1;
        cv::Mat outputFrame = h->FlipImage(inputFrame, flipCode);
        std::string st = h->MatToBinaryData(outputFrame);
        int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_ShiftImage_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, int shiftX, int shiftY, LStrHandle outputImage) {
    // Decodes an encoded frame, translates it by (shiftX, shiftY) pixels via
    // h->ShiftImage, and writes the re-encoded bytes into outputImage.
    // Returns 1 on success, 0 when there is no output (or the handle could
    // not be resized), -1 on invalid input or internal error.
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard against a null/empty input buffer before handing it to OpenCV.
    if (!inputImage || bufferLength == 0 || !outputImage) return -1;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // imdecode returns an empty Mat on malformed data; fail early instead
        // of passing a degenerate frame downstream.
        if (inputFrame.empty()) return -1;
        cv::Mat outputFrame = h->ShiftImage(inputFrame, shiftX, shiftY);
        std::string st = h->MatToBinaryData(outputFrame);
        int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_AddGaussianNoise_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, double mean, double stddev, LStrHandle outputImage) {
    // Decodes an encoded frame, adds Gaussian noise (mean/stddev) via
    // h->AddGaussianNoise, and writes the re-encoded bytes into outputImage.
    // Returns 1 on success, 0 when there is no output (or the handle could
    // not be resized), -1 on invalid input or internal error.
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard against a null/empty input buffer before handing it to OpenCV.
    if (!inputImage || bufferLength == 0 || !outputImage) return -1;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // imdecode returns an empty Mat on malformed data; fail early instead
        // of passing a degenerate frame downstream.
        if (inputFrame.empty()) return -1;
        cv::Mat outputFrame = h->AddGaussianNoise(inputFrame, mean, stddev);
        std::string st = h->MatToBinaryData(outputFrame);
        int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_AddSaltAndPepperNoise_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, double amount, LStrHandle outputImage) {
    // Decodes an encoded frame, adds salt-and-pepper noise (`amount` fraction)
    // via h->AddSaltAndPepperNoise, and writes the re-encoded bytes into
    // outputImage.
    // Returns 1 on success, 0 when there is no output (or the handle could
    // not be resized), -1 on invalid input or internal error.
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard against a null/empty input buffer before handing it to OpenCV.
    if (!inputImage || bufferLength == 0 || !outputImage) return -1;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // imdecode returns an empty Mat on malformed data; fail early instead
        // of passing a degenerate frame downstream.
        if (inputFrame.empty()) return -1;
        cv::Mat outputFrame = h->AddSaltAndPepperNoise(inputFrame, amount);
        std::string st = h->MatToBinaryData(outputFrame);
        int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_AddSpeckleNoise_V2(uint64_t handleVal, unsigned char* inputImage, unsigned int bufferLength, double stddev, LStrHandle outputImage) {
    // Decodes an encoded frame, adds speckle noise (stddev) via
    // h->AddSpeckleNoise, and writes the re-encoded bytes into outputImage.
    // Returns 1 on success, 0 when there is no output (or the handle could
    // not be resized), -1 on invalid input or internal error.
    auto* h = reinterpret_cast<ANSCENTER::ANSOPENCV*>(handleVal);
    if (!h) return -1;
    // Guard against a null/empty input buffer before handing it to OpenCV.
    if (!inputImage || bufferLength == 0 || !outputImage) return -1;
    try {
        cv::Mat inputFrame = cv::imdecode(cv::Mat(1, bufferLength, CV_8UC1, inputImage), cv::IMREAD_COLOR);
        // imdecode returns an empty Mat on malformed data; fail early instead
        // of passing a degenerate frame downstream.
        if (inputFrame.empty()) return -1;
        cv::Mat outputFrame = h->AddSpeckleNoise(inputFrame, stddev);
        std::string st = h->MatToBinaryData(outputFrame);
        int size = static_cast<int>(st.length());
        if (size <= 0) return 0;
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) return 0;
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, st.c_str(), size);
        return 1;
    }
    catch (...) { return -1; }
}
|
||
|
||
// ── IMAQ <-> cv::Mat conversion ──────────────────────────────────
|
||
|
||
// Converts an NI-IMAQ image handle into a newly allocated cv::Mat registered
// via anscv_mat_new, writing the pointer to *imageOut.
// The incoming void* may be either an Image* (direct pointer) or an Image**
// (LabVIEW handle indirection); both are probed in that order.
// Returns  1 on success,
//         -2 on null arguments or a degenerate image,
//         -4 when the handle cannot be resolved either way,
//         -5 for an unsupported IMAQ pixel type,
//         -3 / -1 on std::exception / unknown exception.
extern "C" __declspec(dllexport) int ANSCV_IMAQ2Image(void* imaqHandle, cv::Mat** imageOut) {
    try {
        if (!imaqHandle || !imageOut) {
            ANS_DBG("ANSCV", "IMAQ2Image: null pointer - imaqHandle=%p imageOut=%p", imaqHandle, (void*)imageOut);
            return -2;
        }

        // Try as Image* first (direct pointer)
        Image* imaqImage = static_cast<Image*>(imaqHandle);
        ImageInfo info = {};
        if (!imaqGetImageInfo(imaqImage, &info)) {
            // Try as Image** (pointer-to-pointer, LabVIEW handle indirection).
            // Note: imaqGetLastError() is read immediately so the Image*
            // failure code is logged before the second probe overwrites it.
            ANS_DBG("ANSCV", "IMAQ2Image: Image* failed (err=%d), trying Image**", imaqGetLastError());
            Image** imaqImagePtr = static_cast<Image**>(imaqHandle);
            imaqImage = *imaqImagePtr;
            if (!imaqImage || !imaqGetImageInfo(imaqImage, &info)) {
                int errCode = imaqGetLastError();
                const char* errFunc = imaqGetLastErrorFunc();
                ANS_DBG("ANSCV", "IMAQ2Image: FAILED both Image* and Image** - err=%d func=%s",
                    errCode, errFunc ? errFunc : "unknown");
                return -4;
            }
            ANS_DBG("ANSCV", "IMAQ2Image: resolved as Image** OK");
        } else {
            ANS_DBG("ANSCV", "IMAQ2Image: resolved as Image* OK");
        }

        // Even a resolvable handle can describe an empty/uninitialized image.
        if (!info.imageStart || info.xRes <= 0 || info.yRes <= 0) {
            ANS_DBG("ANSCV", "IMAQ2Image: invalid image - start=%p xRes=%d yRes=%d",
                info.imageStart, info.xRes, info.yRes);
            return -2;
        }

        int width = info.xRes;
        int height = info.yRes;
        // pixelsPerLine is the row pitch in PIXELS (it can exceed xRes when
        // IMAQ pads rows); it is scaled by the per-pixel size below.
        int stride = info.pixelsPerLine;

        ANS_DBG("ANSCV", "IMAQ2Image: %dx%d stride=%d type=%d", width, height, stride, info.imageType);

        cv::Mat result;
        switch (info.imageType) {
            case IMAQ_IMAGE_U8: {
                // Wrap the IMAQ buffer without copying, then clone so the
                // returned Mat owns its memory independently of the handle.
                cv::Mat wrapper(height, width, CV_8UC1, info.imageStart, stride * sizeof(unsigned char));
                result = wrapper.clone();
                break;
            }
            case IMAQ_IMAGE_U16: {
                cv::Mat wrapper(height, width, CV_16UC1, info.imageStart, stride * sizeof(unsigned short));
                result = wrapper.clone();
                break;
            }
            case IMAQ_IMAGE_RGB: {
                // IMAQ RGB is stored as 4-byte {B,G,R,alpha} (RGBValue); drop
                // the alpha plane. cvtColor allocates, so no clone is needed.
                cv::Mat bgra(height, width, CV_8UC4, info.imageStart, stride * sizeof(RGBValue));
                cv::cvtColor(bgra, result, cv::COLOR_BGRA2BGR);
                break;
            }
            case IMAQ_IMAGE_SGL: {
                cv::Mat wrapper(height, width, CV_32FC1, info.imageStart, stride * sizeof(float));
                result = wrapper.clone();
                break;
            }
            default:
                ANS_DBG("ANSCV", "IMAQ2Image: unsupported IMAQ type %d", info.imageType);
                return -5;
        }

        *imageOut = anscv_mat_new(result);
        ANS_DBG("ANSCV", "IMAQ2Image: SUCCESS - Mat=%p %dx%d type=%d",
            (void*)*imageOut, result.cols, result.rows, result.type());
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "IMAQ2Image: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "IMAQ2Image: UNKNOWN EXCEPTION");
        return -1;
    }
}
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_Image2IMAQ(cv::Mat** imageIn, LStrHandle outputImage) {
    // Serializes the cv::Mat referenced by imageIn into a lossless PNG and
    // stores the encoded bytes in the LabVIEW string handle outputImage.
    // Returns 1 on success, -2 for invalid arguments, -4 when encoding or
    // handle resizing fails, -3 / -1 on std::exception / unknown exception.
    try {
        const bool badInput = !imageIn || !(*imageIn) || (*imageIn)->empty() || !outputImage;
        if (badInput) {
            ANS_DBG("ANSCV", "Image2IMAQ: null input - imageIn=%p outputImage=%p",
                    (void*)imageIn, (void*)outputImage);
            return -2;
        }

        const cv::Mat& src = **imageIn;
        ANS_DBG("ANSCV", "Image2IMAQ: Mat=%p (%dx%d type=%d)",
                (void*)*imageIn, src.cols, src.rows, src.type());

        // Lossless PNG; compression level 1 favours speed over file size.
        std::vector<int> encodeOpts{cv::IMWRITE_PNG_COMPRESSION, 1};
        std::vector<unsigned char> pngBytes;
        const bool encoded = cv::imencode(".png", src, pngBytes, encodeOpts);
        if (!encoded) {
            ANS_DBG("ANSCV", "Image2IMAQ: imencode PNG failed");
            return -4;
        }

        const int byteCount = static_cast<int>(pngBytes.size());
        const MgErr resizeErr = DSSetHandleSize(outputImage, sizeof(int32) + byteCount * sizeof(uChar));
        if (resizeErr != noErr) {
            ANS_DBG("ANSCV", "Image2IMAQ: DSSetHandleSize failed - err=%d", resizeErr);
            return -4;
        }
        (*outputImage)->cnt = byteCount;
        memcpy((*outputImage)->str, pngBytes.data(), byteCount);

        ANS_DBG("ANSCV", "Image2IMAQ: SUCCESS - %d bytes PNG (%dx%d)", byteCount, src.cols, src.rows);
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "Image2IMAQ: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "Image2IMAQ: UNKNOWN EXCEPTION");
        return -1;
    }
}
|
||
|
||
// ── cv::Mat -> LabVIEW 2D U32 array for IMAQ ArrayToColorImage VI ───
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_ImageToArray(cv::Mat** imageIn, LVArray2D_U32Hdl arrayOut) {
    // Fills a LabVIEW 2D U32 array (height x width) with packed XRGB pixels,
    // suitable for the "Image Pixels (U32)" input of IMAQ ArrayToColorImage.
    // Returns 1 on success, -2 invalid args, -4 array-resize failure,
    // -5 unsupported Mat type, -3 / -1 on exceptions.
    try {
        if (!imageIn || !(*imageIn) || (*imageIn)->empty() || !arrayOut) {
            ANS_DBG("ANSCV", "ImageToArray: invalid input");
            return -2;
        }

        const cv::Mat& src = **imageIn;
        const int h = src.rows;
        const int w = src.cols;
        ANS_DBG("ANSCV", "ImageToArray: Mat=%p (%dx%d type=%d)",
                (void*)*imageIn, w, h, src.type());

        // IMAQ expects alpha == 0 in every pixel; cvtColor to BGRA can set 255.
        auto clearAlpha = [](cv::Mat& img) {
            std::vector<cv::Mat> planes;
            cv::split(img, planes);
            planes[3].setTo(0);
            cv::merge(planes, img);
        };

        cv::Mat packed;
        const int matType = src.type();
        if (matType == CV_8UC3) {
            cv::cvtColor(src, packed, cv::COLOR_BGR2BGRA);
            clearAlpha(packed);
        } else if (matType == CV_8UC4) {
            packed = src.clone();
            clearAlpha(packed);
        } else if (matType == CV_8UC1) {
            // GRAY2BGRA already produces alpha == 0, which IMAQ wants.
            cv::cvtColor(src, packed, cv::COLOR_GRAY2BGRA);
        } else {
            ANS_DBG("ANSCV", "ImageToArray: unsupported type %d", matType);
            return -5;
        }

        const int pixelCount = h * w;
        MgErr resizeErr = NumericArrayResize(uL, 2, reinterpret_cast<UHandle*>(&arrayOut), pixelCount);
        if (resizeErr != noErr) {
            ANS_DBG("ANSCV", "ImageToArray: NumericArrayResize failed - err=%d", resizeErr);
            return -4;
        }
        (*arrayOut)->dimSizes[0] = h;
        (*arrayOut)->dimSizes[1] = w;

        // BGRA byte order matches IMAQ's RGBValue {B,G,R,alpha} layout, so a
        // single memcpy maps every 4 pixel bytes onto one U32 element.
        memcpy((*arrayOut)->elt, packed.data, pixelCount * sizeof(uInt32));

        ANS_DBG("ANSCV", "ImageToArray: SUCCESS - %dx%d (%d pixels)", w, h, pixelCount);
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "ImageToArray: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "ImageToArray: UNKNOWN EXCEPTION");
        return -1;
    }
}
|
||
|
||
// ── cv::Mat -> 1D U8 flat array (for .NET MemoryStream / Picture control) ───
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_ImageToFlatArray(
    cv::Mat** imageIn, LVArray1D_U8Hdl arrayOut, int* width, int* height, int* channels) {
    // Copies the raw pixel bytes of *imageIn (row-major, no row padding) into
    // a flat LabVIEW 1D U8 array; width/height/channels receive the layout so
    // callers (.NET MemoryStream / Picture control) can reinterpret the data.
    // Returns 1 on success, -2 invalid args, -4 array-resize failure,
    // -3 / -1 on exceptions.
    try {
        const bool badInput = !imageIn || !(*imageIn) || (*imageIn)->empty()
                           || !arrayOut || !width || !height || !channels;
        if (badInput) {
            ANS_DBG("ANSCV", "ImageToFlatArray: invalid input");
            return -2;
        }

        const cv::Mat& src = **imageIn;
        *width = src.cols;
        *height = src.rows;
        *channels = src.channels();

        ANS_DBG("ANSCV", "ImageToFlatArray: Mat=%p (%dx%d ch=%d type=%d)",
                (void*)*imageIn, src.cols, src.rows, src.channels(), src.type());

        // A non-continuous Mat (e.g. a ROI view) is cloned so one memcpy works.
        cv::Mat flat = src.isContinuous() ? src : src.clone();
        const int byteCount = flat.rows * flat.cols * flat.channels();

        MgErr resizeErr = NumericArrayResize(uB, 1, reinterpret_cast<UHandle*>(&arrayOut), byteCount);
        if (resizeErr != noErr) {
            ANS_DBG("ANSCV", "ImageToFlatArray: NumericArrayResize failed - err=%d", resizeErr);
            return -4;
        }
        (*arrayOut)->dimSizes[0] = byteCount;
        memcpy((*arrayOut)->elt, flat.data, byteCount);

        ANS_DBG("ANSCV", "ImageToFlatArray: SUCCESS - %d bytes (%dx%dx%d)",
                byteCount, *width, *height, *channels);
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "ImageToFlatArray: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "ImageToFlatArray: UNKNOWN EXCEPTION");
        return -1;
    }
}
|
||
|
||
// ── 1D U8 raw pixel array -> JPEG string ───
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_FlatArrayToJpeg(
    LVArray1D_U8Hdl arrayIn, int width, int height, int channels, int quality, LStrHandle outputImage) {
    // Wraps a flat U8 pixel array (row-major, no padding) as a cv::Mat without
    // copying and JPEG-encodes it into the LabVIEW string handle outputImage.
    // width/height/channels describe the layout; quality outside (0,100] is
    // clamped to 95. Returns 1 on success, -2 invalid input, -4 encode/alloc
    // failure, -3 / -1 on exceptions.
    try {
        if (!arrayIn || !outputImage || width <= 0 || height <= 0 || channels <= 0) {
            ANS_DBG("ANSCV", "FlatArrayToJpeg: invalid input - w=%d h=%d ch=%d", width, height, channels);
            return -2;
        }

        // BUG FIX: previously any channel count other than 1 or 3 was mapped
        // to CV_8UC4 — e.g. channels==2 made the Mat claim 4 bytes/pixel while
        // the array only holds 2, causing an out-of-bounds read during encode.
        if (channels != 1 && channels != 3 && channels != 4) {
            ANS_DBG("ANSCV", "FlatArrayToJpeg: invalid input - w=%d h=%d ch=%d", width, height, channels);
            return -2;
        }

        // 64-bit math so large dimensions cannot overflow the size check.
        int64_t expectedSize = static_cast<int64_t>(width) * height * channels;
        int64_t actualSize = (*arrayIn)->dimSizes[0];
        if (actualSize < expectedSize) {
            ANS_DBG("ANSCV", "FlatArrayToJpeg: array too small - expected=%d actual=%d",
                    static_cast<int>(expectedSize), static_cast<int>(actualSize));
            return -2;
        }

        if (quality <= 0 || quality > 100) quality = 95;

        // Wrap raw pixel data as cv::Mat (no copy)
        int cvType = (channels == 1) ? CV_8UC1 : (channels == 3) ? CV_8UC3 : CV_8UC4;
        cv::Mat mat(height, width, cvType, (*arrayIn)->elt);

        ANS_DBG("ANSCV", "FlatArrayToJpeg: %dx%d ch=%d quality=%d", width, height, channels, quality);

        // Encode to JPEG
        std::vector<unsigned char> buf;
        std::vector<int> params = {cv::IMWRITE_JPEG_QUALITY, quality};
        if (!cv::imencode(".jpg", mat, buf, params)) {
            ANS_DBG("ANSCV", "FlatArrayToJpeg: imencode failed");
            return -4;
        }

        int size = static_cast<int>(buf.size());
        MgErr error = DSSetHandleSize(outputImage, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) {
            ANS_DBG("ANSCV", "FlatArrayToJpeg: DSSetHandleSize failed - err=%d", error);
            return -4;
        }
        (*outputImage)->cnt = size;
        memcpy((*outputImage)->str, buf.data(), size);

        ANS_DBG("ANSCV", "FlatArrayToJpeg: SUCCESS - %d bytes JPEG", size);
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "FlatArrayToJpeg: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "FlatArrayToJpeg: UNKNOWN EXCEPTION");
        return -1;
    }
}
|
||
|
||
// ── cv::Mat -> BMP binary (no compression, zero-cost encode for .NET) ───
|
||
|
||
// In-memory mirrors of the Windows BITMAPFILEHEADER / BITMAPINFOHEADER used
// by ANSCV_ImageToBmp (writer) and ANSCV_BmpToJpeg (parser). Packing to 1
// byte is required: the file header is 14 bytes, so without it the compiler
// would insert padding and break the on-disk layout.
#pragma pack(push, 1)
struct BmpFileHeader {
    uint16_t type{0x4D42}; // "BM"
    uint32_t fileSize{0};
    uint16_t reserved1{0};
    uint16_t reserved2{0};
    uint32_t offsetData{0}; // byte offset from file start to the pixel data
};
struct BmpInfoHeader {
    uint32_t size{40};     // BITMAPINFOHEADER is always 40 bytes
    int32_t width{0};
    int32_t height{0}; // negative = top-down (no flip needed)
    uint16_t planes{1};
    uint16_t bitCount{0};  // bits per pixel (24 for the writer in this file)
    uint32_t compression{0}; // 0 = BI_RGB, i.e. uncompressed
    uint32_t sizeImage{0};
    int32_t xPPM{0};
    int32_t yPPM{0};
    uint32_t colorsUsed{0};
    uint32_t colorsImportant{0};
};
#pragma pack(pop)
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_ImageToBmp(cv::Mat** imageIn, int maxWidth, int& newWidth, int& newHeight, LStrHandle outputImage) {
    // Despite the name, this entry point now delegates to ANSCV_GetImage_S,
    // which resizes to maxWidth, reports the final dimensions through
    // newWidth/newHeight, and fills outputImage (the hard-coded 80 is
    // presumably a JPEG quality — confirm against ANSCV_GetImage_S).
    //
    // BUG FIX: the original body kept the legacy in-memory 24-bit BMP writer
    // (~120 lines) AFTER this unconditional return, making it unreachable
    // dead code. The dead code has been removed; behavior is unchanged.
    return ANSCV_GetImage_S(imageIn, maxWidth, 80, newWidth, newHeight, outputImage);
}
|
||
|
||
// ── BMP string -> JPEG string ───
|
||
|
||
extern "C" __declspec(dllexport) int ANSCV_BmpToJpeg(LStrHandle bmpInput, int quality, LStrHandle jpegOutput) {
    // Transcodes an image byte string (LabVIEW handle) to JPEG.
    // Three input shapes are handled:
    //   * already-JPEG data (FF D8 FF magic)      -> copied through unchanged;
    //   * 24-bit uncompressed BMP ("BM" magic)    -> zero-copy fast path;
    //   * anything else OpenCV can decode         -> imdecode fallback.
    // quality outside (0,100] is clamped to 85.
    // Returns 1 on success, -2 invalid input, -4 decode/encode/alloc failure,
    // -3 / -1 on exceptions.
    try {
        if (!bmpInput || !jpegOutput || (*bmpInput)->cnt <= 0) {
            ANS_DBG("ANSCV", "BmpToJpeg: invalid input");
            return -2;
        }

        if (quality <= 0 || quality > 100) quality = 85;

        int bmpSize = (*bmpInput)->cnt;
        unsigned char* raw = reinterpret_cast<unsigned char*>((*bmpInput)->str);

        // ── Passthrough: input is already JPEG (starts with FF D8 FF) ──
        if (bmpSize >= 3 && raw[0] == 0xFF && raw[1] == 0xD8 && raw[2] == 0xFF) {
            MgErr error = DSSetHandleSize(jpegOutput, sizeof(int32) + bmpSize * sizeof(uChar));
            if (error != noErr) {
                ANS_DBG("ANSCV", "BmpToJpeg: DSSetHandleSize failed (passthrough) - err=%d", error);
                return -4;
            }
            (*jpegOutput)->cnt = bmpSize;
            memcpy((*jpegOutput)->str, raw, bmpSize);
            ANS_DBG("ANSCV", "BmpToJpeg: PASSTHROUGH - input is already JPEG (%d bytes)", bmpSize);
            return 1;
        }

        // ── Fast path: parse BMP header directly, zero-copy ──
        // Minimum BMP = file header (14) + info header (40) + some pixels
        constexpr int kMinBmpSize = sizeof(BmpFileHeader) + sizeof(BmpInfoHeader) + 1;
        if (bmpSize >= kMinBmpSize && raw[0] == 'B' && raw[1] == 'M') {

            const auto& fh = *reinterpret_cast<const BmpFileHeader*>(raw);
            const auto& ih = *reinterpret_cast<const BmpInfoHeader*>(raw + sizeof(BmpFileHeader));

            int width = ih.width;
            int height = ih.height; // negative = top-down
            bool topDown = (height < 0);
            if (height < 0) height = -height;

            // Only handle 24-bit uncompressed (the format ImageToBmp produces)
            if (ih.bitCount == 24 && ih.compression == 0 && width > 0 && height > 0) {
                // 64-bit math: header width/height are attacker-controlled, so
                // width*3 must not overflow int before the bounds check.
                int64_t rowBytes64 = static_cast<int64_t>(width) * 3;
                int64_t stride64 = (rowBytes64 + 3) & ~int64_t{3}; // 4-byte aligned rows

                // BUG FIX: offsetData comes from untrusted data. The old code
                // cast it to int unchecked, so an offset below the header size
                // (or wrapping negative) produced an out-of-bounds pixel
                // pointer that could still pass the buffer-size test.
                int64_t pixelOffset = static_cast<int64_t>(fh.offsetData);
                const int64_t headerBytes =
                    static_cast<int64_t>(sizeof(BmpFileHeader) + sizeof(BmpInfoHeader));
                int64_t neededSize = pixelOffset + stride64 * height;
                if (pixelOffset >= headerBytes && pixelOffset < bmpSize && bmpSize >= neededSize) {
                    unsigned char* pixels = raw + pixelOffset;
                    int rowBytes = static_cast<int>(rowBytes64);
                    int stride = static_cast<int>(stride64);

                    cv::Mat mat;
                    if (topDown) {
                        // Top-down BMP: rows are already in correct order
                        if (stride == rowBytes) {
                            mat = cv::Mat(height, width, CV_8UC3, pixels);
                        } else {
                            mat = cv::Mat(height, width, CV_8UC3, pixels, stride);
                        }
                    } else {
                        // Bottom-up BMP: OpenCV has no negative step, so flip
                        // the wrapped rows into top-down order for encoding.
                        cv::Mat bottomUp(height, width, CV_8UC3, pixels, stride);
                        cv::flip(bottomUp, mat, 0);
                    }

                    ANS_DBG("ANSCV", "BmpToJpeg: fast-path %dx%d, encoding JPEG q=%d", width, height, quality);

                    std::string jpegStr = ANSCENTER::CompressJpegToString(mat, quality);
                    if (!jpegStr.empty()) {
                        int size = static_cast<int>(jpegStr.size());
                        MgErr error = DSSetHandleSize(jpegOutput, sizeof(int32) + size * sizeof(uChar));
                        if (error != noErr) {
                            ANS_DBG("ANSCV", "BmpToJpeg: DSSetHandleSize failed - err=%d", error);
                            return -4;
                        }
                        (*jpegOutput)->cnt = size;
                        memcpy((*jpegOutput)->str, jpegStr.data(), size);
                        ANS_DBG("ANSCV", "BmpToJpeg: SUCCESS (fast) - %d bytes BMP -> %d bytes JPEG", bmpSize, size);
                        return 1;
                    }
                    // If fast-path encode failed, fall through to imdecode path
                }
            }
        }

        // ── Fallback: use imdecode for non-standard BMP formats ──
        ANS_DBG("ANSCV", "BmpToJpeg: using imdecode fallback for %d bytes", bmpSize);
        std::vector<unsigned char> bmpData(raw, raw + bmpSize);
        cv::Mat mat = cv::imdecode(bmpData, cv::IMREAD_COLOR);
        if (mat.empty()) {
            ANS_DBG("ANSCV", "BmpToJpeg: imdecode failed - %d bytes input", bmpSize);
            return -4;
        }

        ANS_DBG("ANSCV", "BmpToJpeg: decoded %dx%d, encoding JPEG q=%d", mat.cols, mat.rows, quality);

        std::string jpegStr = ANSCENTER::CompressJpegToString(mat, quality);
        if (jpegStr.empty()) {
            ANS_DBG("ANSCV", "BmpToJpeg: JPEG encode failed");
            return -4;
        }

        int size = static_cast<int>(jpegStr.size());
        MgErr error = DSSetHandleSize(jpegOutput, sizeof(int32) + size * sizeof(uChar));
        if (error != noErr) {
            ANS_DBG("ANSCV", "BmpToJpeg: DSSetHandleSize failed - err=%d", error);
            return -4;
        }
        (*jpegOutput)->cnt = size;
        memcpy((*jpegOutput)->str, jpegStr.data(), size);

        ANS_DBG("ANSCV", "BmpToJpeg: SUCCESS (fallback) - %d bytes BMP -> %d bytes JPEG", bmpSize, size);
        return 1;
    }
    catch (const std::exception& e) {
        ANS_DBG("ANSCV", "BmpToJpeg: EXCEPTION - %s", e.what());
        return -3;
    }
    catch (...) {
        ANS_DBG("ANSCV", "BmpToJpeg: UNKNOWN EXCEPTION");
        return -1;
    }
}