author    Paul Zander <negril.nx+gentoo@gmail.com>  2024-10-21 13:27:04 +0200
committer Sam James <sam@gentoo.org>                2024-11-05 02:02:35 +0000
commit 4c9a19202381c1d51d0fb54fe24eb446e2bc067e (patch)
tree   ea78c49aab9d9c4e492dfcef0d4906710b7b7222 /media-libs
parent media-libs/opencv: allow CUDA_PATH override (diff)
media-libs/opencv: backport cuda-12.6 support
Signed-off-by: Paul Zander <negril.nx+gentoo@gmail.com>
Signed-off-by: Sam James <sam@gentoo.org>
Diffstat (limited to 'media-libs')
-rw-r--r--  media-libs/opencv/Manifest                                                |   1
-rw-r--r--  media-libs/opencv/files/opencv-4.10.0-cuda-fp16.patch                     | 226
-rw-r--r--  media-libs/opencv/files/opencv-4.10.0-cudnn-9.patch                       |  32
-rw-r--r--  media-libs/opencv/files/opencv-4.10.0-tbb-detection.patch                 |  28
-rw-r--r--  media-libs/opencv/files/opencv_contrib-4.10.0-CUDA-12.6-tuple_size.patch |  53
-rw-r--r--  media-libs/opencv/opencv-4.10.0.ebuild                                    |  18
6 files changed, 353 insertions(+), 5 deletions(-)
diff --git a/media-libs/opencv/Manifest b/media-libs/opencv/Manifest
index bd9f8567bf7f..75d70689375b 100644
--- a/media-libs/opencv/Manifest
+++ b/media-libs/opencv/Manifest
@@ -7,6 +7,7 @@ DIST opencv_3rdparty-8afa57abc8229d611c4937165d20e2a2d9fc5a12.tar.gz 63301261 BL
DIST opencv_3rdparty-a8b69ccc738421293254aec5ddb38bd523503252.tar.gz 920403 BLAKE2B 5f834e8ccef9d88d0650f8ae5d215a2d07d0a356aaad4de1622068e0b2a7def88fccc84bc4397f0182732e836fcd6e363ae25a2cdeaa4d458a7d8ef9afc81da5 SHA512 5611b9db40477759213ab35320570c198f1f224f92ca801ca3accd1f7620faf3d44f306e44abefd10be118acee9b56da643ca82abaa7c4d2102fe92e5b298a6e
DIST opencv_3rdparty-b2bfc75f6aea5b1f834ff0f0b865a7c18ff1459f.tar.gz 10036903 BLAKE2B c13559569808db24105049e2081bc466c0e8b6dab58bcc69001e49ff82e03ec4701e25648b5e542b7df2c8e522abfbd82c9825e66c37d6a673847b41bf6effae SHA512 3fc44f12802aa1dad0b12372d53242ae2134a2d74ca0b6d8e4639551623426106781a41d77ebfce79ac625a49aec26b0d7425e0d31f09bab6a15e3d43caef8bc
DIST opencv_3rdparty-fccf7cd6a4b12079f73bbfb21745f9babcd4eb1d.tar.gz 1470898 BLAKE2B f1794cc8f8684501f670db3d720c02f35c57ebe91f276cc21dea1f1c1426740d03569143fec74380e941703b5086db5f013ca913fb296eda99559115fd53ca30 SHA512 07118b9d688bf0ff34a6e4ca1548006124d80d11e7b48f08c9e1910b64249b1dad0ace460f50adda0ecb69b90871fc59cc25f451570c63f7c6d6ba2b92d8801c
+DIST opencv_contrib-4.10.0-3607.patch 29870 BLAKE2B cf43afbd24d0b32817e908c55ac3bf0d5da8b82b2c459bc04ef31414e16285eefe334c46eeee730bd3cad2bbec062f5bc212a82bd7f2ac83baca231d711545d4 SHA512 3fdd08cee6d7da8edf36411f30c8d6230ebd86a1c896a08f1dc86d3a0982e1f1f99797066722bc51ce4b1a60a2db55853c348441b3e6edc0d318fcb7bd5cf944
DIST opencv_contrib-4.10.0.tar.gz 55387316 BLAKE2B 5d6f884817b6d45b86833fcab1e31cd0fd7be19885698e0aefe300fa570f93c446d7f564567cc08099d559a98a65e9c3dd0fd35ceeca7e0e9a1e56edc74a0fe9 SHA512 480df862250692a97ce6431cba00dbecb70332307a19c1c04aa9d7444e6e74ab4f8c798548dce76d2319a9877624b82e361fb22a71df14b996087ade448be501
DIST opencv_contrib-4.9.0.tar.gz 59803362 BLAKE2B e6b90e9bd462f96ef010490d45105d7125622d6c49912653c0d387568ae7109b1cbcac4cc0f52c1f997a45ed75923caea90bf4ad9336e689a56742b029789038 SHA512 ebaee3b88bd7ae246727e65a98d9fbc1d9772a4181a1926f3af742410b78dc87d2386bcd96ac67d7fb1a3020c3717a2cdebdcf9304d6dfd9ea494004791cf043
DIST opencv_extra-4.10.0.tar.gz 504349887 BLAKE2B 6e76860beb95cf00698980a19cfd2894e5bfe0d477d94352cbb2ff98688acbc1c404953cbc3baa95453588e7b9063175a067f24006c5d2719a6c75d15f2ef891 SHA512 1a581dad61bc1e5075af5afed2dd3d67793232a9e1219408448d08c518ae714cf0c4d5fec2c25791351a49a0d265fe051ea142b2170f82843e19e82f65abc8c6
diff --git a/media-libs/opencv/files/opencv-4.10.0-cuda-fp16.patch b/media-libs/opencv/files/opencv-4.10.0-cuda-fp16.patch
new file mode 100644
index 000000000000..ed0f128b99c5
--- /dev/null
+++ b/media-libs/opencv/files/opencv-4.10.0-cuda-fp16.patch
@@ -0,0 +1,226 @@
+https://github.com/opencv/opencv/issues/25711
+https://github.com/opencv/opencv/pull/25880
+
+From 5115dc62f8af616c6e75e4b3df3eb8f201298432 Mon Sep 17 00:00:00 2001
+From: Aliaksei Urbanski <aliaksei.urbanski@gmail.com>
+Date: Tue, 9 Jul 2024 01:46:12 +0300
+Subject: [PATCH 1/3] 🐛 Fix CUDA for old GPUs without FP16 support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+--- a/modules/dnn/src/cuda4dnn/init.hpp
++++ b/modules/dnn/src/cuda4dnn/init.hpp
+@@ -15,7 +15,7 @@
+
+ namespace cv { namespace dnn { namespace cuda4dnn {
+
+- void checkVersions()
++ inline void checkVersions()
+ {
+ // https://docs.nvidia.com/deeplearning/cudnn/developer-guide/index.html#programming-model
+ // cuDNN API Compatibility
+@@ -44,19 +44,19 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ }
+ }
+
+- int getDeviceCount()
++ inline int getDeviceCount()
+ {
+ return cuda::getCudaEnabledDeviceCount();
+ }
+
+- int getDevice()
++ inline int getDevice()
+ {
+ int device_id = -1;
+ CUDA4DNN_CHECK_CUDA(cudaGetDevice(&device_id));
+ return device_id;
+ }
+
+- bool isDeviceCompatible()
++ inline bool isDeviceCompatible()
+ {
+ int device_id = getDevice();
+ if (device_id < 0)
+@@ -76,7 +76,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ return false;
+ }
+
+- bool doesDeviceSupportFP16()
++ inline bool doesDeviceSupportFP16()
+ {
+ int device_id = getDevice();
+ if (device_id < 0)
+--- a/modules/dnn/src/registry.cpp
++++ b/modules/dnn/src/registry.cpp
+@@ -18,6 +18,10 @@
+ #include "backend.hpp"
+ #include "factory.hpp"
+
++#ifdef HAVE_CUDA
++#include "cuda4dnn/init.hpp"
++#endif
++
+ namespace cv {
+ namespace dnn {
+ CV__DNN_INLINE_NS_BEGIN
+@@ -121,7 +125,8 @@ class BackendRegistry
+ if (haveCUDA())
+ {
+ backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA));
+- backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16));
++ if (cuda4dnn::doesDeviceSupportFP16())
++ backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16));
+ }
+ #endif
+
+
+From cfb2bc34acd7699707110523f067a7452a404206 Mon Sep 17 00:00:00 2001
+From: Alexander Smorkalov <alexander.smorkalov@xperience.ai>
+Date: Tue, 9 Jul 2024 11:21:58 +0300
+Subject: [PATCH 2/3] Added CUDA FP16 availability check for target management.
+
+--- a/modules/dnn/src/cuda4dnn/init.hpp
++++ b/modules/dnn/src/cuda4dnn/init.hpp
+@@ -56,9 +56,11 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ return device_id;
+ }
+
+- inline bool isDeviceCompatible()
++ inline bool isDeviceCompatible(int device_id = -1)
+ {
+- int device_id = getDevice();
++ if (device_id < 0)
++ device_id = getDevice();
++
+ if (device_id < 0)
+ return false;
+
+@@ -76,9 +78,11 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ return false;
+ }
+
+- inline bool doesDeviceSupportFP16()
++ inline bool doesDeviceSupportFP16(int device_id = -1)
+ {
+- int device_id = getDevice();
++ if (device_id < 0)
++ device_id = getDevice();
++
+ if (device_id < 0)
+ return false;
+
+@@ -87,9 +91,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ CUDA4DNN_CHECK_CUDA(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, device_id));
+
+ int version = major * 10 + minor;
+- if (version < 53)
+- return false;
+- return true;
++ return (version >= 53);
+ }
+
+ }}} /* namespace cv::dnn::cuda4dnn */
+--- a/modules/dnn/src/net_impl_backend.cpp
++++ b/modules/dnn/src/net_impl_backend.cpp
+@@ -10,6 +10,10 @@
+ #include "backend.hpp"
+ #include "factory.hpp"
+
++#ifdef HAVE_CUDA
++#include "cuda4dnn/init.hpp"
++#endif
++
+ namespace cv {
+ namespace dnn {
+ CV__DNN_INLINE_NS_BEGIN
+@@ -242,6 +246,16 @@ void Net::Impl::setPreferableTarget(int targetId)
+ #endif
+ }
+
++ if (IS_DNN_CUDA_TARGET(targetId))
++ {
++ preferableTarget = DNN_TARGET_CPU;
++#ifdef HAVE_CUDA
++ if (cuda4dnn::doesDeviceSupportFP16() && targetId == DNN_TARGET_CUDA_FP16)
++ preferableTarget = DNN_TARGET_CUDA_FP16;
++ else
++ preferableTarget = DNN_TARGET_CUDA;
++#endif
++ }
+ #if !defined(__arm64__) || !__arm64__
+ if (targetId == DNN_TARGET_CPU_FP16)
+ {
+--- a/modules/dnn/src/registry.cpp
++++ b/modules/dnn/src/registry.cpp
+@@ -122,10 +122,24 @@ class BackendRegistry
+ #endif
+
+ #ifdef HAVE_CUDA
+- if (haveCUDA())
++ cuda4dnn::checkVersions();
++
++ bool hasCudaCompatible = false;
++ bool hasCudaFP16 = false;
++ for (int i = 0; i < cuda4dnn::getDeviceCount(); i++)
++ {
++ if (cuda4dnn::isDeviceCompatible(i))
++ {
++ hasCudaCompatible = true;
++ if (cuda4dnn::doesDeviceSupportFP16(i))
++ hasCudaFP16 = true;
++ }
++ }
++
++ if (hasCudaCompatible)
+ {
+ backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA));
+- if (cuda4dnn::doesDeviceSupportFP16())
++ if (hasCudaFP16)
+ backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16));
+ }
+ #endif
+--- a/modules/dnn/test/test_common.hpp
++++ b/modules/dnn/test/test_common.hpp
+@@ -211,7 +211,7 @@ class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
+ if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
+ {
+ hasFallbacks = true;
+- std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to has backend implementation" << endl;
++ std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to have backend implementation" << endl;
+ }
+ }
+ if (hasFallbacks && raiseError)
+--- a/modules/dnn/test/test_onnx_conformance.cpp
++++ b/modules/dnn/test/test_onnx_conformance.cpp
+@@ -1008,7 +1008,7 @@ class Test_ONNX_conformance : public TestWithParam<ONNXConfParams>
+ if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
+ {
+ hasFallbacks = true;
+- std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to has backend implementation" << endl;
++ std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to have backend implementation" << endl;
+ }
+ }
+ return hasFallbacks;
+
+From cc9178903daff229bc396db718bf347c4eafd33b Mon Sep 17 00:00:00 2001
+From: Alexander Smorkalov <2536374+asmorkalov@users.noreply.github.com>
+Date: Wed, 10 Jul 2024 09:06:09 +0300
+Subject: [PATCH 3/3] Update modules/dnn/src/registry.cpp
+
+Co-authored-by: Aliaksei Urbanski <aliaksei.urbanski@gmail.com>
+--- a/modules/dnn/src/registry.cpp
++++ b/modules/dnn/src/registry.cpp
+@@ -132,7 +132,10 @@ class BackendRegistry
+ {
+ hasCudaCompatible = true;
+ if (cuda4dnn::doesDeviceSupportFP16(i))
++ {
+ hasCudaFP16 = true;
++ break; // we already have all we need here
++ }
+ }
+ }
+
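
The FP16 gate this patch introduces boils down to a compute-capability check: half-precision (__half) arithmetic needs SM 5.3 or newer, hence the major * 10 + minor >= 53 test in doesDeviceSupportFP16(). Below is a minimal standalone sketch of the same probe against the plain CUDA runtime API; the helper name deviceSupportsFP16 is illustrative, not OpenCV's.

    // Sketch: per-device FP16 capability probe, mirroring the patched logic.
    #include <cuda_runtime.h>
    #include <cstdio>

    static bool deviceSupportsFP16(int device_id)
    {
        int major = 0, minor = 0;
        if (cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, device_id) != cudaSuccess ||
            cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, device_id) != cudaSuccess)
            return false;
        // Same rule as the patch: __half arithmetic needs SM >= 5.3.
        return major * 10 + minor >= 53;
    }

    int main()
    {
        int count = 0;
        if (cudaGetDeviceCount(&count) != cudaSuccess)
            return 1;
        for (int i = 0; i < count; ++i)
            std::printf("device %d FP16: %s\n", i, deviceSupportsFP16(i) ? "yes" : "no");
        return 0;
    }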
diff --git a/media-libs/opencv/files/opencv-4.10.0-cudnn-9.patch b/media-libs/opencv/files/opencv-4.10.0-cudnn-9.patch
new file mode 100644
index 000000000000..78ac162bef2d
--- /dev/null
+++ b/media-libs/opencv/files/opencv-4.10.0-cudnn-9.patch
@@ -0,0 +1,32 @@
+https://github.com/opencv/opencv/issues/25711
+https://github.com/opencv/opencv/pull/25841
+
+From 3d74d646d8c4c48e400e650fef9463f174414b96 Mon Sep 17 00:00:00 2001
+From: Alexander Smorkalov <alexander.smorkalov@xperience.ai>
+Date: Mon, 1 Jul 2024 17:33:24 +0300
+Subject: [PATCH] Fixed CuDNN runtime version check for CuDNN 9+.
+
+--- a/modules/dnn/src/cuda4dnn/init.hpp
++++ b/modules/dnn/src/cuda4dnn/init.hpp
+@@ -23,8 +23,19 @@ namespace cv { namespace dnn { namespace cuda4dnn {
+ // Any patch release x.y.z is forward or backward-compatible with applications built against another cuDNN patch release x.y.w (meaning, of the same major and minor version number, but having w!=z).
+ // cuDNN minor releases beginning with cuDNN 7 are binary backward-compatible with applications built against the same or earlier patch release (meaning, an application built against cuDNN 7.x is binary compatible with cuDNN library 7.y, where y>=x).
+ // Applications compiled with a cuDNN version 7.y are not guaranteed to work with 7.x release when y > x.
+- auto cudnn_bversion = cudnnGetVersion();
+- auto cudnn_major_bversion = cudnn_bversion / 1000, cudnn_minor_bversion = cudnn_bversion % 1000 / 100;
++ int cudnn_bversion = cudnnGetVersion();
++ int cudnn_major_bversion = 0, cudnn_minor_bversion = 0;
++ // CuDNN changed major version multiplier in 9.0
++ if (cudnn_bversion >= 9*10000)
++ {
++ cudnn_major_bversion = cudnn_bversion / 10000;
++ cudnn_minor_bversion = cudnn_bversion % 10000 / 100;
++ }
++ else
++ {
++ cudnn_major_bversion = cudnn_bversion / 1000;
++ cudnn_minor_bversion = cudnn_bversion % 1000 / 100;
++ }
+ if (cudnn_major_bversion != CUDNN_MAJOR || cudnn_minor_bversion < CUDNN_MINOR)
+ {
+ std::ostringstream oss;
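
The crux of this cuDNN fix is a version-encoding change: cudnnGetVersion() reports MAJOR*1000 + MINOR*100 + PATCH before cuDNN 9, but MAJOR*10000 + MINOR*100 + PATCH from 9.0 on, so 9.x values start at 90000. A minimal sketch of the decoding the patch introduces, with illustrative sample values (8907 for cuDNN 8.9.7, 90100 for 9.1.0):

    // Sketch: decode cudnnGetVersion() under both encodings handled above.
    #include <cstdio>

    struct CudnnVersion { int major, minor; };

    static CudnnVersion decode(long v)
    {
        // cuDNN 9 switched from MAJOR*1000 + MINOR*100 + PATCH
        // to MAJOR*10000 + MINOR*100 + PATCH.
        if (v >= 9 * 10000)
            return { int(v / 10000), int(v % 10000 / 100) };
        return { int(v / 1000), int(v % 1000 / 100) };
    }

    int main()
    {
        const long samples[] = { 8907, 90100 }; // cuDNN 8.9.7 and 9.1.0
        for (long v : samples) {
            const CudnnVersion ver = decode(v);
            std::printf("%ld -> %d.%d\n", v, ver.major, ver.minor);
        }
        return 0;
    }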
diff --git a/media-libs/opencv/files/opencv-4.10.0-tbb-detection.patch b/media-libs/opencv/files/opencv-4.10.0-tbb-detection.patch
new file mode 100644
index 000000000000..e808aec04547
--- /dev/null
+++ b/media-libs/opencv/files/opencv-4.10.0-tbb-detection.patch
@@ -0,0 +1,28 @@
+From 460bf0927fcc09e8c7a21d5bb48044fc923a8ee7 Mon Sep 17 00:00:00 2001
+From: Paul Zander <negril.nx+gentoo@gmail.com>
+Date: Tue, 8 Oct 2024 00:31:52 +0200
+Subject: [PATCH] Fix tbb detection
+
+Signed-off-by: Paul Zander <negril.nx+gentoo@gmail.com>
+
+diff --git a/cmake/OpenCVDetectTBB.cmake b/cmake/OpenCVDetectTBB.cmake
+index c9ecc02..3db3ae0 100644
+--- a/cmake/OpenCVDetectTBB.cmake
++++ b/cmake/OpenCVDetectTBB.cmake
+@@ -25,7 +25,12 @@ function(ocv_tbb_cmake_guess _found)
+ message(WARNING "No TBB::tbb target found!")
+ return()
+ endif()
+- get_target_property(_lib TBB::tbb IMPORTED_LOCATION_RELEASE)
++
++ # Get installed configuration of tbb
++ get_target_property (TARGET_TBB_IMPORT_CONFS TBB::tbb IMPORTED_CONFIGURATIONS)
++ list (GET TARGET_TBB_IMPORT_CONFS 0 CHOSEN_IMPORT_CONF)
++
++ get_target_property(_lib TBB::tbb IMPORTED_LOCATION_${CHOSEN_IMPORT_CONF})
+ message(STATUS "Found TBB (cmake): ${_lib}")
+ get_target_property(_inc TBB::tbb INTERFACE_INCLUDE_DIRECTORIES)
+ add_library(tbb INTERFACE IMPORTED)
+--
+2.46.2
+
diff --git a/media-libs/opencv/files/opencv_contrib-4.10.0-CUDA-12.6-tuple_size.patch b/media-libs/opencv/files/opencv_contrib-4.10.0-CUDA-12.6-tuple_size.patch
new file mode 100644
index 000000000000..252ecdee6cb6
--- /dev/null
+++ b/media-libs/opencv/files/opencv_contrib-4.10.0-CUDA-12.6-tuple_size.patch
@@ -0,0 +1,53 @@
+https://github.com/opencv/opencv_contrib/pull/3785
+From 09eb618804588f77026924096d848800630c8d6f Mon Sep 17 00:00:00 2001
+From: Alexander Smorkalov <alexander.smorkalov@xperience.ai>
+Date: Tue, 3 Sep 2024 13:47:48 +0300
+Subject: [PATCH] Workaround for CUDA 12.6 tuple_size issue #3773.
+
+---
+ modules/cudaarithm/src/cuda/polar_cart.cu | 20 +++++---------------
+ 1 file changed, 5 insertions(+), 15 deletions(-)
+
+diff --git a/modules/cudaarithm/src/cuda/polar_cart.cu b/modules/cudaarithm/src/cuda/polar_cart.cu
+index 725f5741d8..c65b894bf6 100644
+--- a/modules/cudaarithm/src/cuda/polar_cart.cu
++++ b/modules/cudaarithm/src/cuda/polar_cart.cu
+@@ -159,25 +159,15 @@ void cv::cuda::cartToPolar(InputArray _xy, OutputArray _mag, OutputArray _angle,
+ GpuMat_<float> magc(mag);
+ GpuMat_<float> anglec(angle);
+
++ gridTransformUnary(globPtr<float2>(xy), globPtr<float>(magc), magnitude_interleaved_func<float2>(), stream);
++
+ if (angleInDegrees)
+ {
+- auto f1 = magnitude_interleaved_func<float2>();
+- auto f2 = direction_interleaved_func<float2, true>();
+- cv::cudev::tuple<decltype(f1), decltype(f2)> f12 = cv::cudev::make_tuple(f1, f2);
+- gridTransformTuple(globPtr<float2>(xy),
+- tie(magc, anglec),
+- f12,
+- stream);
++ gridTransformUnary(globPtr<float2>(xy), globPtr<float>(anglec), direction_interleaved_func<float2, true>(), stream);
+ }
+ else
+ {
+- auto f1 = magnitude_interleaved_func<float2>();
+- auto f2 = direction_interleaved_func<float2, false>();
+- cv::cudev::tuple<decltype(f1), decltype(f2)> f12 = cv::cudev::make_tuple(f1, f2);
+- gridTransformTuple(globPtr<float2>(xy),
+- tie(magc, anglec),
+- f12,
+- stream);
++ gridTransformUnary(globPtr<float2>(xy), globPtr<float>(anglec), direction_interleaved_func<float2, false>(), stream);
+ }
+
+ syncOutput(mag, _mag, stream);
+@@ -191,7 +181,7 @@ void cv::cuda::cartToPolar(InputArray _xy, OutputArray _magAngle, bool angleInDe
+ CV_Assert( xy.type() == CV_32FC2 );
+
+ GpuMat magAngle = getOutputMat(_magAngle, xy.size(), CV_32FC2, stream);
+-
++
+ if (angleInDegrees)
+ {
+ gridTransformUnary(globPtr<float2>(xy),
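
What this workaround does: the original code fused magnitude and angle into one gridTransformTuple pass, and CUDA 12.6 chokes on the cv::cudev tuple machinery behind it (the tuple_size issue in #3773). Splitting the work into two gridTransformUnary passes avoids that machinery at the cost of reading the input twice. A CPU-side analogy of the trade-off, a sketch only, with std::hypot and std::atan2 standing in for the cudev functors:

    // Sketch: one fused pass with two outputs vs. two single-output passes.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    struct Vec2 { float x, y; };

    int main()
    {
        std::vector<Vec2> xy = { {3.f, 4.f}, {1.f, 1.f} };
        std::vector<float> mag(xy.size()), ang(xy.size());

        // Fused (what gridTransformTuple did): one loop, two outputs per element.
        for (size_t i = 0; i < xy.size(); ++i) {
            mag[i] = std::hypot(xy[i].x, xy[i].y);
            ang[i] = std::atan2(xy[i].y, xy[i].x);
        }

        // Split (what the workaround does): two loops, one output each.
        // Reads xy twice but sidesteps the tuple machinery entirely.
        for (size_t i = 0; i < xy.size(); ++i)
            mag[i] = std::hypot(xy[i].x, xy[i].y);
        for (size_t i = 0; i < xy.size(); ++i)
            ang[i] = std::atan2(xy[i].y, xy[i].x);

        std::printf("mag[0]=%.1f ang[0]=%.2f\n", mag[0], ang[0]);
        return 0;
    }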
diff --git a/media-libs/opencv/opencv-4.10.0.ebuild b/media-libs/opencv/opencv-4.10.0.ebuild
index dc2b31fa8321..27c7ce81d7cb 100644
--- a/media-libs/opencv/opencv-4.10.0.ebuild
+++ b/media-libs/opencv/opencv-4.10.0.ebuild
@@ -51,6 +51,8 @@ else
https://github.com/NVIDIA/NVIDIAOpticalFlowSDK/archive/${NVIDIA_OPTICAL_FLOW_COMMIT}.tar.gz
-> NVIDIAOpticalFlowSDK-${NVIDIA_OPTICAL_FLOW_COMMIT}.tar.gz
)
+ https://github.com/${PN}/${PN}_contrib/commit/667a66ee0e99f3f3263c1ef2de1b90d9244b7bd4.patch
+ -> ${PN}_contrib-4.10.0-3607.patch
)
test? (
https://github.com/${PN}/${PN}_extra/archive/refs/tags/${PV}.tar.gz -> ${PN}_extra-${PV}.tar.gz
@@ -177,7 +179,7 @@ COMMON_DEPEND="
app-arch/bzip2[${MULTILIB_USEDEP}]
dev-libs/protobuf:=[${MULTILIB_USEDEP}]
sys-libs/zlib[${MULTILIB_USEDEP}]
- cuda? ( <dev-util/nvidia-cuda-toolkit-12.4:0= )
+ cuda? ( dev-util/nvidia-cuda-toolkit:= )
cudnn? (
dev-cpp/abseil-cpp:=
dev-libs/cudnn:=
@@ -282,7 +284,7 @@ RDEPEND="
"
BDEPEND="
virtual/pkgconfig
- cuda? ( dev-util/nvidia-cuda-toolkit:0= )
+ cuda? ( dev-util/nvidia-cuda-toolkit:= )
doc? (
app-text/doxygen[dot]
python? (
@@ -306,10 +308,15 @@ PATCHES=(
"${FILESDIR}/${PN}-4.9.0-cmake-cleanup.patch"
"${FILESDIR}/${PN}-4.10.0-dnn-explicitly-include-abseil-cpp.patch"
+ "${FILESDIR}/${PN}-4.10.0-cudnn-9.patch" # 25841
+ "${FILESDIR}/${PN}-4.10.0-cuda-fp16.patch" # 25880
+ "${FILESDIR}/${PN}-4.10.0-tbb-detection.patch"
# TODO applied in src_prepare
# "${FILESDIR}/${PN}_contrib-${PV}-rgbd.patch"
# "${FILESDIR}/${PN}_contrib-4.8.1-NVIDIAOpticalFlowSDK-2.0.tar.gz.patch"
+
+ # "${FILESDIR}/${PN}_contrib-4.10.0-CUDA-12.6-tuple_size.patch" # 3785
)
cuda_get_host_compiler() {
@@ -419,10 +426,11 @@ src_prepare() {
cd "${WORKDIR}/${PN}_contrib-${PV}" || die
eapply "${FILESDIR}/${PN}_contrib-4.8.1-rgbd.patch"
eapply "${FILESDIR}/${PN}_contrib-4.8.1-NVIDIAOpticalFlowSDK-2.0.tar.gz.patch"
- if has_version ">=dev-util/nvidia-cuda-toolkit-12.4" && use cuda; then
- # TODO https://github.com/NVIDIA/cccl/pull/1522
- eapply "${FILESDIR}/${PN}_contrib-4.9.0-cuda-12.4.patch"
+ if ver_test "$(nvcc --version | tail -n 1 | cut -d '_' -f 2- | cut -d '.' -f 1-2)" -ge 12.4; then
+ eapply "${DISTDIR}/${PN}_contrib-4.10.0-3607.patch"
+ eapply "${FILESDIR}/${PN}_contrib-4.10.0-CUDA-12.6-tuple_size.patch" # 3785
fi
+
cd "${S}" || die
! use contribcvv && { rm -R "${WORKDIR}/${PN}_contrib-${PV}/modules/cvv" || die; }