public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2022-06-26  7:47 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2022-06-26  7:47 UTC (permalink / raw)
  To: gentoo-commits

commit:     794186914f817e446782620d16d4f7eda5d8353c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 25 21:40:03 2022 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Sun Jun 26 07:47:23 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=79418691

sci-libs/caffe2: add to tree

Package-Manager: Portage-3.0.30, Repoman-3.0.3
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                         |  1 +
 sci-libs/caffe2/caffe2-1.11.0.ebuild             | 96 ++++++++++++++++++++++++
 sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch | 93 +++++++++++++++++++++++
 sci-libs/caffe2/metadata.xml                     | 11 +++
 4 files changed, 201 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
new file mode 100644
index 000000000000..ae4d5567b7fc
--- /dev/null
+++ b/sci-libs/caffe2/Manifest
@@ -0,0 +1 @@
+DIST pytorch-1.11.0.tar.gz 20719323 BLAKE2B 24e7aaa2c26821d36f8092542de9d8d5ac85a619fb9fffb5131987958842afb1cad395780662d15f3411a7cc6ff83a445871960eca1e469fcbf0b9895d83d6e0 SHA512 2342eb7a1a241f5855a7cf12e11f62bc4baaa78d1d0864e53bfc946e783eb4addd05ca154a814d2376cd602098b5547e61c158d6eddb7cad5a9f3b0c1357adca
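
The Manifest entry above records the distfile size together with BLAKE2B and SHA512 digests. Outside of Portage, a fetched tarball can be checked against those fields with the coreutils hash tools; a manual sketch, assuming the default DISTDIR location:

	b2sum /var/cache/distfiles/pytorch-1.11.0.tar.gz      # compare with the BLAKE2B field
	sha512sum /var/cache/distfiles/pytorch-1.11.0.tar.gz  # compare with the SHA512 field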

diff --git a/sci-libs/caffe2/caffe2-1.11.0.ebuild b/sci-libs/caffe2/caffe2-1.11.0.ebuild
new file mode 100644
index 000000000000..f8e9483bf74a
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-1.11.0.ebuild
@@ -0,0 +1,96 @@
+# Copyright 2022 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{8..10} )
+inherit python-r1 cmake
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+RESTRICT="test"
+REQUIRED_USE="${PYTHON_REQUIRED_USE}"
+
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf
+	dev-libs/sleef
+	sci-libs/lapack
+	sci-libs/onnx
+	sci-libs/foxi
+"
+DEPEND="${RDEPEND}
+	dev-libs/FP16
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=( "${FILESDIR}"/${P}-gentoo.patch )
+
+src_prepare() {
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+}
+
+src_configure() {
+	local mycmakeargs=(
+		-DUSE_KINETO=OFF
+		-DUSE_XNNPACK=OFF
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_BREAKPAD=OFF
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_TENSORPIPE=OFF
+		-DUSE_GLOO=OFF
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_FBGEMM=OFF
+		-DUSE_NNPACK=OFF
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_QNNPACK=OFF
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DUSE_MKLDNN=OFF
+		-DUSE_NUMPY=OFF
+		-DUSE_OPENMP=OFF
+		-DUSE_DISTRIBUTED=OFF
+		-DUSE_CUDA=OFF
+		-DUSE_NCCL=OFF
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR=/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR=/usr/$(get_libdir)
+		-DUSE_CCACHE=OFF
+	)
+	cmake_src_configure
+}
+
+python_install() {
+	python_domodule python/caffe2
+	python_domodule python/torch
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch || die
+	mv "${D}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	cp torch/version.py python/torch/ || die
+	python_foreach_impl python_install
+}
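
For reference, the src_prepare step in the ebuild above regenerates the flatbuffers header used by the JIT serialization code. Run by hand it looks roughly like this, assuming dev-libs/flatbuffers provides the flatc binary and its usual <schema>_generated.h output convention:

	cd torch/csrc/jit/serialization
	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs
	ls mobile_bytecode_generated.h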

diff --git a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
new file mode 100644
index 000000000000..ac74edadab35
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
@@ -0,0 +1,93 @@
+--- a/cmake/Dependencies.cmake	2022-06-17 22:12:34.451841710 +0200
++++ b/cmake/Dependencies.cmake	2022-06-17 22:12:48.302786501 +0200
+@@ -1512,7 +1512,6 @@
+   if(NOT USE_SYSTEM_ONNX)
+     add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx EXCLUDE_FROM_ALL)
+   endif()
+-  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+ 
+   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+   if(NOT USE_SYSTEM_ONNX)
+@@ -1810,7 +1809,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ 
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1819,9 +1817,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ 
+ if(USE_BREAKPAD)
+--- a/c10/CMakeLists.txt	2022-06-17 22:40:53.573306905 +0200
++++ b/c10/CMakeLists.txt	2022-06-17 22:41:16.920219686 +0200
+@@ -59,7 +59,7 @@
+ if(${USE_GLOG})
+     target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+ 
+ find_package(Backtrace)
+ if(Backtrace_FOUND)
+--- a/torch/CMakeLists.txt	2022-06-17 22:42:20.396982644 +0200
++++ b/torch/CMakeLists.txt	2022-06-17 22:43:20.214759326 +0200
+@@ -82,7 +82,6 @@
+ 
+ set(TORCH_PYTHON_LINK_LIBRARIES
+     shm
+-    fmt::fmt-header-only
+     ATEN_CPU_FILES_GEN_LIB)
+ 
+ set(TORCH_PYTHON_COMPILE_DEFINITIONS)
+--- a/CMakeLists.txt	2022-06-18 20:35:01.451116185 +0200
++++ b/CMakeLists.txt	2022-06-18 20:35:08.219023569 +0200
+@@ -750,7 +750,7 @@
+ 
+ # ---[ Build flags
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -fPIC")
+   string(APPEND CMAKE_CXX_FLAGS " -Wno-narrowing")
+   # Eigen fails to build with some versions, so convert this to a warning
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+--- a/cmake/public/utils.cmake	2022-06-18 20:50:39.314263395 +0200
++++ b/cmake/public/utils.cmake	2022-06-18 20:51:49.611291709 +0200
+@@ -512,8 +512,6 @@
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/cmake/Codegen.cmake	2022-06-18 21:33:13.366381817 +0200
++++ b/cmake/Codegen.cmake	2022-06-18 21:33:30.249157610 +0200
+@@ -57,7 +57,7 @@
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/c10/CMakeLists.txt	2022-06-19 09:57:16.776536871 +0200
++++ b/c10/CMakeLists.txt	2022-06-19 09:58:51.663356075 +0200
+@@ -107,7 +107,7 @@
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+         DESTINATION include
+         FILES_MATCHING PATTERN "*.h")

diff --git a/sci-libs/caffe2/metadata.xml b/sci-libs/caffe2/metadata.xml
new file mode 100644
index 000000000000..d12749aa5c21
--- /dev/null
+++ b/sci-libs/caffe2/metadata.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+	<maintainer type="person">
+		<email>tupone@gentoo.org</email>
+		<name>Tupone Alfredo</name>
+	</maintainer>
+	<upstream>
+		<remote-id type="github">pytorch/pytorch</remote-id>
+	</upstream>
+</pkgmetadata>



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2022-07-02 21:12 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2022-07-02 21:12 UTC (permalink / raw)
  To: gentoo-commits

commit:     0f9a010a044de133417cbd56f63c32692451732c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Sat Jul  2 21:12:40 2022 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Sat Jul  2 21:12:40 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=0f9a010a

sci-libs/caffe2: add support for nnpack

Package-Manager: Portage-3.0.30, Repoman-3.0.3
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 .../{caffe2-1.11.0-r1.ebuild => caffe2-1.11.0-r2.ebuild}      |  5 +++--
 sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch              | 11 +++++++++++
 sci-libs/caffe2/metadata.xml                                  |  1 +
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/sci-libs/caffe2/caffe2-1.11.0-r1.ebuild b/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
similarity index 96%
rename from sci-libs/caffe2/caffe2-1.11.0-r1.ebuild
rename to sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
index d48fb9082441..c59a498341cd 100644
--- a/sci-libs/caffe2/caffe2-1.11.0-r1.ebuild
+++ b/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
@@ -18,7 +18,7 @@ LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
 RESTRICT="test"
-IUSE="xnnpack"
+IUSE="nnpack xnnpack"
 REQUIRED_USE="${PYTHON_REQUIRED_USE}"
 
 RDEPEND="
@@ -30,6 +30,7 @@ RDEPEND="
 	sci-libs/lapack
 	sci-libs/onnx
 	sci-libs/foxi
+	nnpack? ( sci-libs/NNPACK )
 	xnnpack? ( sci-libs/XNNPACK )
 "
 DEPEND="${RDEPEND}
@@ -62,7 +63,6 @@ src_configure() {
 		-DUSE_GLOO=OFF
 		-DUSE_SYSTEM_FP16=ON
 		-DUSE_FBGEMM=OFF
-		-DUSE_NNPACK=OFF
 		-DUSE_PYTORCH_QNNPACK=OFF
 		-DUSE_QNNPACK=OFF
 		-DUSE_SYSTEM_CPUINFO=ON
@@ -81,6 +81,7 @@ src_configure() {
 		-DUSE_SYSTEM_FXDIV=ON
 		-DUSE_XNNPACK=$(usex xnnpack ON OFF)
 		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack ON OFF)
+		-DUSE_NNPACK=$(usex nnpack ON OFF)
 	)
 	cmake_src_configure
 }
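
The $(usex ...) calls added here translate USE flags into CMake switches. A minimal approximation of the helper, only to illustrate the expansion (the real implementation ships with Portage and also accepts suffix arguments):

	usex() { use "$1" && echo "${2-yes}" || echo "${3-no}"; }
	# with USE="nnpack" enabled, $(usex nnpack ON OFF) prints ON,
	# so the array gains -DUSE_NNPACK=ON; otherwise -DUSE_NNPACK=OFF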

diff --git a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
index f34216ea0970..b6b35e2329e8 100644
--- a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
+++ b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
@@ -110,3 +110,14 @@
  endif()
  
  # ==========================================================
+--- a/cmake/External/nnpack.cmake	2022-07-02 21:56:54.905696921 +0200
++++ b/cmake/External/nnpack.cmake	2022-07-02 21:57:03.665696739 +0200
+@@ -58,7 +58,7 @@
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()

diff --git a/sci-libs/caffe2/metadata.xml b/sci-libs/caffe2/metadata.xml
index bdd11115f0fe..da10df6e0739 100644
--- a/sci-libs/caffe2/metadata.xml
+++ b/sci-libs/caffe2/metadata.xml
@@ -6,6 +6,7 @@
 		<name>Tupone Alfredo</name>
 	</maintainer>
 	<use>
+		<flag name="nnpack">Use NNPACK</flag>
 		<flag name="xnnpack">Use XNNPACK</flag>
 	</use>
 	<upstream>



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2022-07-03  9:57 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2022-07-03  9:57 UTC (permalink / raw)
  To: gentoo-commits

commit:     7a3d0a4e3007f19ff60d86b8bcfbf40323c610aa
Author:     James Beddek <telans <AT> posteo <DOT> de>
AuthorDate: Sun Jul  3 06:47:31 2022 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Sun Jul  3 09:56:50 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=7a3d0a4e

sci-libs/caffe2: add 1.12.0

add support for QNNPACK, dev-python/numpy, opencv, ffmpeg, openmp, opencl
add patch to fix install dirs

Signed-off-by: James Beddek <telans <AT> posteo.de>
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   1 +
 sci-libs/caffe2/caffe2-1.11.0-r2.ebuild            |  84 ++++++++----
 ...affe2-1.11.0-r2.ebuild => caffe2-1.12.0.ebuild} |  83 ++++++++----
 sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch   |  31 +++--
 .../caffe2/files/caffe2-1.12.0-install-dirs.patch  | 147 +++++++++++++++++++++
 sci-libs/caffe2/metadata.xml                       |   6 +
 6 files changed, 291 insertions(+), 61 deletions(-)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index ae4d5567b7fc..0d28654e641f 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1 +1,2 @@
 DIST pytorch-1.11.0.tar.gz 20719323 BLAKE2B 24e7aaa2c26821d36f8092542de9d8d5ac85a619fb9fffb5131987958842afb1cad395780662d15f3411a7cc6ff83a445871960eca1e469fcbf0b9895d83d6e0 SHA512 2342eb7a1a241f5855a7cf12e11f62bc4baaa78d1d0864e53bfc946e783eb4addd05ca154a814d2376cd602098b5547e61c158d6eddb7cad5a9f3b0c1357adca
+DIST pytorch-1.12.0.tar.gz 106286765 BLAKE2B ff9bafedb35f859f7dccb9b606299cf9c345bdaa0deb87ecfe0c0c30c3c828414d989e1d9a243d9b7cd3f376d56a2f81c241ca2e3c9a8a2b30cddcdeddd3a5c7 SHA512 c9c748a2e0047daaaf199a1ba3198d2d1aee47f664170a9b34ccacd3deeb95f2070e4035eeb900012ef48dc62cf6fb6806f1a1dfe22de8c94892963076e593b7

diff --git a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild b/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
index c59a498341cd..d45e26d90d6d 100644
--- a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
+++ b/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
@@ -17,23 +17,35 @@ SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
+IUSE="ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
 RESTRICT="test"
-IUSE="nnpack xnnpack"
-REQUIRED_USE="${PYTHON_REQUIRED_USE}"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+"
 
 RDEPEND="
 	${PYTHON_DEPS}
 	dev-libs/cpuinfo
 	dev-libs/libfmt
 	dev-libs/protobuf
+	dev-libs/pthreadpool
 	dev-libs/sleef
 	sci-libs/lapack
 	sci-libs/onnx
 	sci-libs/foxi
+	ffmpeg? ( media-video/ffmpeg:= )
 	nnpack? ( sci-libs/NNPACK )
+	numpy? ( dev-python/numpy[${PYTHON_USEDEP}] )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
 	xnnpack? ( sci-libs/XNNPACK )
 "
-DEPEND="${RDEPEND}
+DEPEND="
+	${RDEPEND}
+	dev-cpp/eigen
+	dev-libs/psimd
 	dev-libs/FP16
 	dev-libs/pocketfft
 	dev-libs/flatbuffers
@@ -43,7 +55,10 @@ DEPEND="${RDEPEND}
 
 S="${WORKDIR}"/${MYP}
 
-PATCHES=( "${FILESDIR}"/${P}-gentoo.patch )
+PATCHES=(
+	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
+	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
+)
 
 src_prepare() {
 	cmake_src_prepare
@@ -55,33 +70,52 @@ src_prepare() {
 src_configure() {
 	python_setup
 	local mycmakeargs=(
-		-DUSE_KINETO=OFF
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_BREAKPAD=OFF
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_TENSORPIPE=OFF
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=OFF # TODO
+		-DUSE_CUDNN=OFF # TODO
+		-DUSE_FAST_NVCC=OFF # TODO
+		#-DCUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-Auto}"
+		-DUSE_DISTRIBUTED=OFF
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=OFF # TODO
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=OFF # TODO
+		-DUSE_GLOG=OFF # TODO
 		-DUSE_GLOO=OFF
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_FBGEMM=OFF
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=OFF
+		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
 		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=OFF # TODO
 		-DUSE_SYSTEM_CPUINFO=ON
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DUSE_MKLDNN=OFF
-		-DUSE_NUMPY=OFF
-		-DUSE_OPENMP=OFF
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_CUDA=OFF
-		-DUSE_NCCL=OFF
+		-DUSE_BREAKPAD=OFF # TODO
+		-DUSE_SYSTEM_BIND11=ON
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_SYSTEM_EIGEN_INSTALL=ON
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_TENSORPIPE=OFF
+
 		-Wno-dev
 		-DTORCH_INSTALL_LIB_DIR=/usr/$(get_libdir)
 		-DLIBSHM_INSTALL_LIB_SUBDIR=/usr/$(get_libdir)
-		-DUSE_CCACHE=OFF
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_XNNPACK=$(usex xnnpack ON OFF)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack ON OFF)
-		-DUSE_NNPACK=$(usex nnpack ON OFF)
 	)
 	cmake_src_configure
 }

diff --git a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild b/sci-libs/caffe2/caffe2-1.12.0.ebuild
similarity index 57%
copy from sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
copy to sci-libs/caffe2/caffe2-1.12.0.ebuild
index c59a498341cd..c1e033bbfc7b 100644
--- a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
+++ b/sci-libs/caffe2/caffe2-1.12.0.ebuild
@@ -17,23 +17,35 @@ SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
+IUSE="ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
 RESTRICT="test"
-IUSE="nnpack xnnpack"
-REQUIRED_USE="${PYTHON_REQUIRED_USE}"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+"
 
 RDEPEND="
 	${PYTHON_DEPS}
 	dev-libs/cpuinfo
 	dev-libs/libfmt
 	dev-libs/protobuf
+	dev-libs/pthreadpool
 	dev-libs/sleef
 	sci-libs/lapack
 	sci-libs/onnx
 	sci-libs/foxi
+	ffmpeg? ( media-video/ffmpeg:= )
 	nnpack? ( sci-libs/NNPACK )
+	numpy? ( dev-python/numpy[${PYTHON_USEDEP}] )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
 	xnnpack? ( sci-libs/XNNPACK )
 "
-DEPEND="${RDEPEND}
+DEPEND="
+	${RDEPEND}
+	dev-cpp/eigen
+	dev-libs/psimd
 	dev-libs/FP16
 	dev-libs/pocketfft
 	dev-libs/flatbuffers
@@ -43,7 +55,10 @@ DEPEND="${RDEPEND}
 
 S="${WORKDIR}"/${MYP}
 
-PATCHES=( "${FILESDIR}"/${P}-gentoo.patch )
+PATCHES=(
+	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
+	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
+)
 
 src_prepare() {
 	cmake_src_prepare
@@ -55,33 +70,51 @@ src_prepare() {
 src_configure() {
 	python_setup
 	local mycmakeargs=(
-		-DUSE_KINETO=OFF
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_BREAKPAD=OFF
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_TENSORPIPE=OFF
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=OFF # TODO
+		-DUSE_CUDNN=OFF # TODO
+		-DUSE_FAST_NVCC=OFF # TODO
+		#-DCUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-Auto}"
+		-DUSE_DISTRIBUTED=OFF
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=OFF # TODO
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=OFF # TODO
+		-DUSE_GLOG=OFF # TODO
 		-DUSE_GLOO=OFF
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_FBGEMM=OFF
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=OFF
+		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
 		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=OFF # TODO
 		-DUSE_SYSTEM_CPUINFO=ON
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DUSE_MKLDNN=OFF
-		-DUSE_NUMPY=OFF
-		-DUSE_OPENMP=OFF
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_CUDA=OFF
-		-DUSE_NCCL=OFF
+		-DUSE_SYSTEM_BIND11=ON
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_SYSTEM_EIGEN_INSTALL=ON
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_TENSORPIPE=OFF
+
 		-Wno-dev
 		-DTORCH_INSTALL_LIB_DIR=/usr/$(get_libdir)
 		-DLIBSHM_INSTALL_LIB_SUBDIR=/usr/$(get_libdir)
-		-DUSE_CCACHE=OFF
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_XNNPACK=$(usex xnnpack ON OFF)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack ON OFF)
-		-DUSE_NNPACK=$(usex nnpack ON OFF)
 	)
 	cmake_src_configure
 }

diff --git a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
index b6b35e2329e8..505af5e6695c 100644
--- a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
+++ b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
@@ -80,17 +80,6 @@
      if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
        set(OPT_FLAG " ")
      endif()
---- a/c10/CMakeLists.txt	2022-06-19 09:57:16.776536871 +0200
-+++ b/c10/CMakeLists.txt	2022-06-19 09:58:51.663356075 +0200
-@@ -107,7 +107,7 @@
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
 --- a/caffe2/CMakeLists.txt	2022-07-01 21:56:49.643490049 +0200
 +++ b/caffe2/CMakeLists.txt	2022-07-01 21:57:17.091489479 +0200
 @@ -109,7 +109,7 @@
@@ -121,3 +110,23 @@
      if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
        set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
      endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -481,7 +481,7 @@ endif()
+ list(APPEND Caffe2_DEPENDENCY_LIBS cpuinfo)
+ 
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ 
+   if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -537,7 +537,7 @@ if(USE_QNNPACK)
+ endif()
+ 
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch
new file mode 100644
index 000000000000..d9c403d5882e
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch
@@ -0,0 +1,147 @@
+--- a/c10/CMakeLists.txt	2022-06-19 09:57:16.776536871 +0200
++++ b/c10/CMakeLists.txt	2022-06-19 09:58:51.663356075 +0200
+@@ -107,7 +107,7 @@
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+         DESTINATION include
+         FILES_MATCHING PATTERN "*.h")
+diff --git a/c10/cuda/CMakeLists.txt b/c10/cuda/CMakeLists.txt
+index a95bd278e2..4a33b4e4ed 100644
+--- a/c10/cuda/CMakeLists.txt
++++ b/c10/cuda/CMakeLists.txt
+@@ -63,7 +63,7 @@ add_subdirectory(test)
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ foreach(file ${C10_CUDA_HEADERS})
+   get_filename_component( dir ${file} DIRECTORY )
+   install( FILES ${file} DESTINATION include/c10/cuda/${dir} )
+diff --git a/c10/hip/CMakeLists.txt b/c10/hip/CMakeLists.txt
+index 6a0e0e41a1..7582c73919 100644
+--- a/c10/hip/CMakeLists.txt
++++ b/c10/hip/CMakeLists.txt
+@@ -55,7 +55,7 @@ target_include_directories(
+ add_subdirectory(test)
+ 
+ # ---[ Installation
+-install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+         DESTINATION include
+         FILES_MATCHING PATTERN "*.h")
+diff --git a/modules/detectron/CMakeLists.txt b/modules/detectron/CMakeLists.txt
+index bffc074e39..e1697e65f2 100644
+--- a/modules/detectron/CMakeLists.txt
++++ b/modules/detectron/CMakeLists.txt
+@@ -17,7 +17,7 @@ if(BUILD_CAFFE2_OPS)
+ 
+     torch_set_target_props(caffe2_detectron_ops_gpu)
+     target_link_libraries(caffe2_detectron_ops_gpu torch ${OpenMP_link})
+-    install(TARGETS caffe2_detectron_ops_gpu DESTINATION lib)
++    install(TARGETS caffe2_detectron_ops_gpu DESTINATION ${CMAKE_INSTALL_LIBDIR})
+     if(MSVC)
+       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops_gpu> DESTINATION lib OPTIONAL)
+     endif()
+@@ -31,7 +31,7 @@ if(BUILD_CAFFE2_OPS)
+     torch_set_target_props(caffe2_detectron_ops_hip)
+     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
+     target_link_libraries(caffe2_detectron_ops_hip torch)
+-    install(TARGETS caffe2_detectron_ops_hip DESTINATION lib)
++    install(TARGETS caffe2_detectron_ops_hip DESTINATION ${CMAKE_INSTALL_LIBDIR})
+   elseif(NOT IOS_PLATFORM)
+     add_library(caffe2_detectron_ops SHARED ${Detectron_CPU_SRCS})
+     if(HAVE_SOVERSION)
+@@ -40,7 +40,7 @@ if(BUILD_CAFFE2_OPS)
+     endif()
+     torch_set_target_props(caffe2_detectron_ops)
+     target_link_libraries(caffe2_detectron_ops torch ${OpenMP_link})
+-    install(TARGETS caffe2_detectron_ops DESTINATION lib)
++    install(TARGETS caffe2_detectron_ops DESTINATION ${CMAKE_INSTALL_LIBDIR})
+     if(MSVC)
+       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops> DESTINATION lib OPTIONAL)
+     endif()
+diff --git a/modules/module_test/CMakeLists.txt b/modules/module_test/CMakeLists.txt
+index f72120d535..c293ce025d 100644
+--- a/modules/module_test/CMakeLists.txt
++++ b/modules/module_test/CMakeLists.txt
+@@ -16,7 +16,7 @@ if(BUILD_TEST AND NOT BUILD_LITE_INTERPRETER)
+         VERSION ${TORCH_VERSION} SOVERSION ${TORCH_SOVERSION})
+   endif()
+   target_link_libraries(caffe2_module_test_dynamic torch_library)
+-  install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
++  install(TARGETS caffe2_module_test_dynamic DESTINATION ${CMAKE_INSTALL_LIBDIR})
+   if(MSVC AND BUILD_SHARED_LIBS)
+     install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION lib OPTIONAL)
+   endif()
+diff --git a/modules/observers/CMakeLists.txt b/modules/observers/CMakeLists.txt
+index 050b8a1461..0309a273b7 100644
+--- a/modules/observers/CMakeLists.txt
++++ b/modules/observers/CMakeLists.txt
+@@ -21,7 +21,7 @@ endif()
+ target_link_libraries(caffe2_observers PUBLIC torch_library)
+ target_include_directories(caffe2_observers PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
+ target_compile_options(caffe2_observers PRIVATE "-DCAFFE2_BUILD_OBSERVER_LIB")
+-install(TARGETS caffe2_observers DESTINATION lib)
++install(TARGETS caffe2_observers DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ caffe2_interface_library(caffe2_observers caffe2_observers_library)
+ if(MSVC AND BUILD_SHARED_LIBS)
+   install(FILES $<TARGET_PDB_FILE:caffe2_observers> DESTINATION lib OPTIONAL)
+diff --git a/modules/rocksdb/CMakeLists.txt b/modules/rocksdb/CMakeLists.txt
+index 78651989aa..52bfd149a2 100644
+--- a/modules/rocksdb/CMakeLists.txt
++++ b/modules/rocksdb/CMakeLists.txt
+@@ -59,7 +59,7 @@ add_library(caffe2_rocksdb ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb.cc)
+ target_link_libraries(caffe2_rocksdb PUBLIC torch_library)
+ target_link_libraries(caffe2_rocksdb PRIVATE ${RocksDB_LIBRARIES})
+ target_include_directories(caffe2_rocksdb PRIVATE ${RocksDB_INCLUDE_DIR})
+-install(TARGETS caffe2_rocksdb DESTINATION lib)
++install(TARGETS caffe2_rocksdb DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ 
+ # ---[ Last, Append the library to Caffe2_MODULES, if we are building with
+ # the main repo.
+diff --git a/test/cpp/c10d/CMakeLists.txt b/test/cpp/c10d/CMakeLists.txt
+index bf91460c4b..ebbd476fa9 100644
+--- a/test/cpp/c10d/CMakeLists.txt
++++ b/test/cpp/c10d/CMakeLists.txt
+@@ -51,7 +51,7 @@ if(USE_CUDA)
+     if(INSTALL_TEST)
+       install(TARGETS ProcessGroupNCCLTest DESTINATION bin)
+       install(TARGETS ProcessGroupNCCLErrorsTest DESTINATION bin)
+-      install(TARGETS c10d_cuda_test DESTINATION lib)
++      install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
+     endif()
+   endif()
+ else()
+diff --git a/test/cpp/jit/CMakeLists.txt b/test/cpp/jit/CMakeLists.txt
+index 60b43b81fc..9cf34a1620 100644
+--- a/test/cpp/jit/CMakeLists.txt
++++ b/test/cpp/jit/CMakeLists.txt
+@@ -32,9 +32,9 @@ endif()
+ target_link_libraries(backend_with_compiler torch)
+ 
+ if(INSTALL_TEST)
+-  install(TARGETS torchbind_test DESTINATION lib)
+-  install(TARGETS jitbackend_test DESTINATION lib)
+-  install(TARGETS backend_with_compiler DESTINATION lib)
++  install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
++  install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
++  install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ endif()
+ 
+ # Build the cpp gtest binary containing the cpp-only tests.
+diff --git a/torch/csrc/deploy/CMakeLists.txt b/torch/csrc/deploy/CMakeLists.txt
+index 61fe8c1bb8..28575979dd 100644
+--- a/torch/csrc/deploy/CMakeLists.txt
++++ b/torch/csrc/deploy/CMakeLists.txt
+@@ -80,4 +80,4 @@ if(INSTALL_TEST)
+   install(TARGETS test_deploy_gpu DESTINATION bin)
+ endif()
+ 
+-install(TARGETS torch_deploy DESTINATION lib)
++install(TARGETS torch_deploy DESTINATION ${CMAKE_INSTALL_LIBDIR})

diff --git a/sci-libs/caffe2/metadata.xml b/sci-libs/caffe2/metadata.xml
index da10df6e0739..48cf68b11224 100644
--- a/sci-libs/caffe2/metadata.xml
+++ b/sci-libs/caffe2/metadata.xml
@@ -6,7 +6,13 @@
 		<name>Tupone Alfredo</name>
 	</maintainer>
 	<use>
+		<flag name="ffmpeg">Add support for video processing operators</flag>
 		<flag name="nnpack">Use NNPACK</flag>
+		<flag name="numpy">Add support for math operations through numpy</flag>
+		<flag name="opencl">Use OpenCL</flag>
+		<flag name="opencv">Add support for image processing operators</flag>
+		<flag name="openmp">Use OpenMP for parallel code</flag>
+		<flag name="qnnpack">Use QNNPACK</flag>
 		<flag name="xnnpack">Use XNNPACK</flag>
 	</use>
 	<upstream>
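
With the flag descriptions added to metadata.xml, the set that is active for a local build can be listed with the usual gentoolkit helper (output omitted here):

	equery uses sci-libs/caffe2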



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2022-09-05  6:40 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2022-09-05  6:40 UTC (permalink / raw)
  To: gentoo-commits

commit:     3f34c4d94727a44935b381b06d00367a2755930a
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Mon Sep  5 06:40:29 2022 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Mon Sep  5 06:40:29 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=3f34c4d9

sci-libs/caffe2: support glog-0.5.0

Closes: https://bugs.gentoo.org/865681
Package-Manager: Portage-3.0.30, Repoman-3.0.3
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/caffe2-1.12.0.ebuild               |  3 ++-
 .../caffe2/files/caffe2-1.12.0-glog-0.6.0.patch    | 29 ++++++++++++++++++++++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/sci-libs/caffe2/caffe2-1.12.0.ebuild b/sci-libs/caffe2/caffe2-1.12.0.ebuild
index 08e81ce1c8ab..0a44b6824c3c 100644
--- a/sci-libs/caffe2/caffe2-1.12.0.ebuild
+++ b/sci-libs/caffe2/caffe2-1.12.0.ebuild
@@ -27,7 +27,7 @@ REQUIRED_USE="
 RDEPEND="
 	${PYTHON_DEPS}
 	dev-cpp/gflags:=
-	<dev-cpp/glog-0.5.0
+	>=dev-cpp/glog-0.5.0
 	dev-libs/cpuinfo
 	dev-libs/libfmt
 	dev-libs/protobuf
@@ -66,6 +66,7 @@ S="${WORKDIR}"/${MYP}
 PATCHES=(
 	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
 	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
+	"${FILESDIR}"/${P}-glog-0.6.0.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch b/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
new file mode 100644
index 000000000000..a821ebedf7b7
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
@@ -0,0 +1,29 @@
+https://github.com/pytorch/pytorch/issues/58054
+
+--- /c10/util/Logging.cpp
++++ /c10/util/Logging.cpp
+@@ -192,23 +192,13 @@
+     google::GLOG_WARNING,
+     "The minimum log level that caffe2 will output.");
+ 
+-// Google glog's api does not have an external function that allows one to check
+-// if glog is initialized or not. It does have an internal function - so we are
+-// declaring it here. This is a hack but has been used by a bunch of others too
+-// (e.g. Torch).
+-namespace google {
+-namespace glog_internal_namespace_ {
+-bool IsGoogleLoggingInitialized();
+-} // namespace glog_internal_namespace_
+-} // namespace google
+-
+ namespace c10 {
+ namespace {
+ 
+ void initGoogleLogging(char const* name) {
+ #if !defined(_MSC_VER)
+   // This trick can only be used on UNIX platforms
+-  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
++  if (!::google::IsGoogleLoggingInitialized())
+ #endif
+   {
+     ::google::InitGoogleLogging(name);
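
The ebuild change above relaxes the glog dependency from <dev-cpp/glog-0.5.0 to >=dev-cpp/glog-0.5.0, matching the patch, which drops the internal-namespace forward declaration in favour of the public ::google::IsGoogleLoggingInitialized() call. Whether an installed system already satisfies the new range can be checked with a dry run:

	emerge --pretend --verbose '>=dev-cpp/glog-0.5.0'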



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-01-12 12:08 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-01-12 12:08 UTC (permalink / raw)
  To: gentoo-commits

commit:     05b4eaf3a0b650d61c1ae0adbcc52a1717aa36d4
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 12 12:05:46 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Jan 12 12:08:10 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=05b4eaf3

sci-libs/caffe2: fix build with clang

Closes: https://bugs.gentoo.org/886273
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/caffe2-1.12.0-r1.ebuild         |  2 ++
 sci-libs/caffe2/files/caffe2-1.12.0-clang.patch | 42 +++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild b/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild
index 8f6e1f87386f..3ed8c6670b1d 100644
--- a/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild
+++ b/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild
@@ -71,6 +71,7 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
 	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
 	"${FILESDIR}"/${P}-glog-0.6.0.patch
+	"${FILESDIR}"/${P}-clang.patch
 )
 
 src_prepare() {
@@ -134,6 +135,7 @@ src_configure() {
 		-DUSE_SYSTEM_PTHREADPOOL=ON
 		-DUSE_SYSTEM_FXDIV=ON
 		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
 		-DUSE_SYSTEM_ONNX=ON
 		-DUSE_SYSTEM_SLEEF=ON
 		-DUSE_TENSORPIPE=OFF

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-clang.patch b/sci-libs/caffe2/files/caffe2-1.12.0-clang.patch
new file mode 100644
index 000000000000..dd43b06a97a1
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-1.12.0-clang.patch
@@ -0,0 +1,42 @@
+From bfdc0358dc37c55af6118fe5d8b6ccd898e003fd Mon Sep 17 00:00:00 2001
+From: Kazuki Sakamoto <kaz@meta.com>
+Date: Wed, 21 Dec 2022 11:19:58 +0000
+Subject: [PATCH] Compile fix for Clang + libc++ (#91212)
+
+Summary:
+LLVM 15 has a compile issue with the deprecated __has_trivial_copy. Update the GCC ifdef logic to exclude Clang + libc++.
+
+```
+caffe2/c10/util/Optional.h:536:13: error: builtin __has_trivial_copy is deprecated; use __is_trivially_copyable instead [-Werror,-Wdeprecated-builtins]
+            C10_IS_TRIVIALLY_COPYABLE(T) &&
+            ^
+caffe2/c10/macros/Macros.h:438:38: note: expanded from macro 'C10_IS_TRIVIALLY_COPYABLE'
+#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
+```
+
+Test Plan: CI
+
+Reviewed By: kit1980
+
+Differential Revision: D42180203
+
+Pull Request resolved: https://github.com/pytorch/pytorch/pull/91212
+Approved by: https://github.com/kit1980, https://github.com/soumith
+---
+ c10/macros/Macros.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/c10/macros/Macros.h b/c10/macros/Macros.h
+index 9098a294228f4..09e57ca0a4d6a 100644
+--- a/c10/macros/Macros.h
++++ b/c10/macros/Macros.h
+@@ -434,7 +434,8 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
+ // Warning: __has_trivial_copy for GCC may not always detect the non-POD
+ // correctly. For example, T = std::unique_ptr may evaluate to true and be
+ // treated as POD. This can cause unexpected behavior.
+-#if defined(__GNUG__) && __GNUC__ < 5
++#if defined(__GNUG__) && __GNUC__ < 5 && \
++    !(defined(__clang__) && defined(_LIBCPP_VERSION))
+ #define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
+ #else
+ #define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
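
Bug 886273 referenced above is about building the package with clang. On Gentoo that is usually reproduced per-package through package.env rather than by switching the global toolchain; a minimal sketch (file names are conventional, not mandated):

	# /etc/portage/env/clang.conf
	CC=clang
	CXX=clang++

	# /etc/portage/package.env
	sci-libs/caffe2 clang.conf

	emerge --oneshot sci-libs/caffe2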



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-02-22  8:07 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-02-22  8:07 UTC (permalink / raw)
  To: gentoo-commits

commit:     67094a0ab3f18f4c8bbcf4db3e1838b33527b15c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 22 08:06:43 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Feb 22 08:06:43 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=67094a0a

sci-libs/caffe2: drop 1.12.0-r1, 1.12.1, 1.13.0-r1

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   3 -
 sci-libs/caffe2/caffe2-1.12.0-r1.ebuild            | 165 --------------------
 sci-libs/caffe2/caffe2-1.12.1.ebuild               | 165 --------------------
 sci-libs/caffe2/caffe2-1.13.0-r1.ebuild            | 169 ---------------------
 sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch   | 168 --------------------
 .../caffe2/files/caffe2-1.12.0-install-dirs.patch  | 129 ----------------
 6 files changed, 799 deletions(-)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 7a1c9e4370ae..616b75a49763 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,4 +1 @@
-DIST pytorch-1.12.0.tar.gz 106286765 BLAKE2B ff9bafedb35f859f7dccb9b606299cf9c345bdaa0deb87ecfe0c0c30c3c828414d989e1d9a243d9b7cd3f376d56a2f81c241ca2e3c9a8a2b30cddcdeddd3a5c7 SHA512 c9c748a2e0047daaaf199a1ba3198d2d1aee47f664170a9b34ccacd3deeb95f2070e4035eeb900012ef48dc62cf6fb6806f1a1dfe22de8c94892963076e593b7
-DIST pytorch-1.12.1.tar.gz 106311625 BLAKE2B e8ca19d0e1987449c33ad4c36722a3a467f7f8a9f90be2a7f2de643cbd665038f6802b5ff1f1d3da09b6253d8f29e11549a24295de013d97f73affe538c84c99 SHA512 afeb551904ebd9b5901ae623a98eadbb3045115247cedf8006a940742cfad04e5ce24cfaf363336a9ed88d7ce6a4ac53dbb6a5c690aef6efdf20477c3a22c7ca
-DIST pytorch-1.13.0.tar.gz 108276317 BLAKE2B 8149775dea06d8e4027b741c828169d33f768a96aef58cd2f86daa3bbad5bf36143454e26b683a992aca34e7fb52e6483c46168b698db48ff6978c9605d7a3d2 SHA512 5a0e8c589bdf552ccf682511a8860c754ab6f5844f51e568c5034793f787b97707af4340b338b9b8606dd27a6ced6ef50091f0cc514458b3021a2220409d7f20
 DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62

diff --git a/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild b/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild
deleted file mode 100644
index 1840db7282a4..000000000000
--- a/sci-libs/caffe2/caffe2-1.12.0-r1.ebuild
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2022-2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{9..10} )
-inherit python-single-r1 cmake flag-o-matic
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	ffmpeg? ( opencv )
-" # ?? ( cuda rocm )
-
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/gflags:=
-	>=dev-cpp/glog-0.5.0
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-libs/protobuf:=
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	sci-libs/lapack
-	sci-libs/onnx
-	sci-libs/foxi
-	cuda? (
-		=dev-libs/cudnn-8*
-		dev-libs/cudnn-frontend:0/8
-		dev-util/nvidia-cuda-toolkit:=[profiler]
-	)
-	ffmpeg? ( media-video/ffmpeg:= )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( $(python_gen_cond_dep '
-		dev-python/numpy[${PYTHON_USEDEP}]
-		') )
-	opencl? ( virtual/opencl )
-	opencv? ( media-libs/opencv:= )
-	qnnpack? ( sci-libs/QNNPACK )
-	xnnpack? ( sci-libs/XNNPACK )
-"
-DEPEND="
-	${RDEPEND}
-	dev-cpp/eigen
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	$(python_gen_cond_dep '
-		dev-python/pyyaml[${PYTHON_USEDEP}]
-		dev-python/pybind11[${PYTHON_USEDEP}]
-	')
-"
-
-S="${WORKDIR}"/${MYP}
-
-PATCHES=(
-	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
-	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
-	"${FILESDIR}"/${P}-glog-0.6.0.patch
-	"${FILESDIR}"/${P}-clang.patch
-)
-
-src_prepare() {
-	filter-lto #bug 862672
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
-		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
-		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
-		ewarn ""
-		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
-		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
-	fi
-
-	local mycmakeargs=(
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DBUILD_SHARED_LIBS=ON
-
-		-DUSE_CCACHE=OFF
-		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DUSE_FAST_NVCC=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_FAKELOWP=OFF
-		-DUSE_FBGEMM=OFF # TODO
-		-DUSE_FFMPEG=$(usex ffmpeg)
-		-DUSE_GFLAGS=ON
-		-DUSE_GLOG=ON
-		-DUSE_GLOO=OFF
-		-DUSE_KINETO=OFF # TODO
-		-DUSE_LEVELDB=OFF
-		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
-		-DUSE_MKLDNN=OFF
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
-		-DUSE_NNPACK=$(usex nnpack)
-		-DUSE_QNNPACK=$(usex qnnpack)
-		-DUSE_XNNPACK=$(usex xnnpack)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
-		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_NUMPY=$(usex numpy)
-		-DUSE_OPENCL=$(usex opencl)
-		-DUSE_OPENCV=$(usex opencv)
-		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
-		-DUSE_SYSTEM_CPUINFO=ON
-		-DUSE_SYSTEM_BIND11=ON
-		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
-		-DPYTHON_EXECUTABLE="${PYTHON}"
-		-DUSE_SYSTEM_EIGEN_INSTALL=ON
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_SYSTEM_GLOO=ON
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_TENSORPIPE=OFF
-
-		-Wno-dev
-		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
-		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
-	)
-
-	use cuda && addpredict "/dev/nvidiactl" # bug 867706
-	cmake_src_configure
-}
-
-src_install() {
-	cmake_src_install
-
-	insinto "/var/lib/${PN}"
-	doins "${BUILD_DIR}"/CMakeCache.txt
-
-	rm -rf python
-	mkdir -p python/torch/include || die
-	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-	mv "${ED}"/usr/include/torch python/torch/include || die
-	cp torch/version.py python/torch/ || die
-	python_domodule python/caffe2
-	python_domodule python/torch
-}
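
The ewarn block in the ebuild removed above documents TORCH_CUDA_ARCH_LIST. For a CUDA-enabled build the variable is set in make.conf before re-emerging; the capability value below is purely illustrative, see https://developer.nvidia.com/cuda-gpus for the correct one:

	# /etc/portage/make.conf
	TORCH_CUDA_ARCH_LIST="7.5"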

diff --git a/sci-libs/caffe2/caffe2-1.12.1.ebuild b/sci-libs/caffe2/caffe2-1.12.1.ebuild
deleted file mode 100644
index 218cdb2ce9e7..000000000000
--- a/sci-libs/caffe2/caffe2-1.12.1.ebuild
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2022-2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{9..10} )
-inherit python-single-r1 cmake flag-o-matic
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	ffmpeg? ( opencv )
-" # ?? ( cuda rocm )
-
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/gflags:=
-	>=dev-cpp/glog-0.5.0
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-libs/protobuf:=
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	sci-libs/lapack
-	sci-libs/onnx
-	sci-libs/foxi
-	cuda? (
-		=dev-libs/cudnn-8*
-		dev-libs/cudnn-frontend:0/8
-		dev-util/nvidia-cuda-toolkit:=[profiler]
-	)
-	ffmpeg? ( media-video/ffmpeg:= )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( $(python_gen_cond_dep '
-		dev-python/numpy[${PYTHON_USEDEP}]
-		') )
-	opencl? ( virtual/opencl )
-	opencv? ( media-libs/opencv:= )
-	qnnpack? ( sci-libs/QNNPACK )
-	xnnpack? ( sci-libs/XNNPACK )
-"
-DEPEND="
-	${RDEPEND}
-	dev-cpp/eigen
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	$(python_gen_cond_dep '
-		dev-python/pyyaml[${PYTHON_USEDEP}]
-		dev-python/pybind11[${PYTHON_USEDEP}]
-	')
-"
-
-S="${WORKDIR}"/${MYP}
-
-PATCHES=(
-	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
-	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
-	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-	"${FILESDIR}"/${PN}-1.12.0-clang.patch
-)
-
-src_prepare() {
-	filter-lto #bug 862672
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
-		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
-		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
-		ewarn ""
-		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
-		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
-	fi
-
-	local mycmakeargs=(
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DBUILD_SHARED_LIBS=ON
-
-		-DUSE_CCACHE=OFF
-		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DUSE_FAST_NVCC=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_FAKELOWP=OFF
-		-DUSE_FBGEMM=OFF # TODO
-		-DUSE_FFMPEG=$(usex ffmpeg)
-		-DUSE_GFLAGS=ON
-		-DUSE_GLOG=ON
-		-DUSE_GLOO=OFF
-		-DUSE_KINETO=OFF # TODO
-		-DUSE_LEVELDB=OFF
-		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
-		-DUSE_MKLDNN=OFF
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
-		-DUSE_NNPACK=$(usex nnpack)
-		-DUSE_QNNPACK=$(usex qnnpack)
-		-DUSE_XNNPACK=$(usex xnnpack)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
-		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_NUMPY=$(usex numpy)
-		-DUSE_OPENCL=$(usex opencl)
-		-DUSE_OPENCV=$(usex opencv)
-		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
-		-DUSE_SYSTEM_CPUINFO=ON
-		-DUSE_SYSTEM_BIND11=ON
-		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
-		-DPYTHON_EXECUTABLE="${PYTHON}"
-		-DUSE_SYSTEM_EIGEN_INSTALL=ON
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_SYSTEM_GLOO=ON
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_TENSORPIPE=OFF
-
-		-Wno-dev
-		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
-		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
-	)
-
-	use cuda && addpredict "/dev/nvidiactl" # bug 867706
-	cmake_src_configure
-}
-
-src_install() {
-	cmake_src_install
-
-	insinto "/var/lib/${PN}"
-	doins "${BUILD_DIR}"/CMakeCache.txt
-
-	rm -rf python
-	mkdir -p python/torch/include || die
-	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-	mv "${ED}"/usr/include/torch python/torch/include || die
-	cp torch/version.py python/torch/ || die
-	python_domodule python/caffe2
-	python_domodule python/torch
-}

diff --git a/sci-libs/caffe2/caffe2-1.13.0-r1.ebuild b/sci-libs/caffe2/caffe2-1.13.0-r1.ebuild
deleted file mode 100644
index ee1d1154a978..000000000000
--- a/sci-libs/caffe2/caffe2-1.13.0-r1.ebuild
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright 2022-2023 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{9..10} )
-inherit python-single-r1 cmake flag-o-matic
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	ffmpeg? ( opencv )
-" # ?? ( cuda rocm )
-
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/gflags:=
-	>=dev-cpp/glog-0.5.0
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-libs/protobuf:=
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	sci-libs/lapack
-	>=sci-libs/onnx-1.12.0
-	sci-libs/foxi
-	cuda? (
-		=dev-libs/cudnn-8*
-		dev-libs/cudnn-frontend:0/8
-		dev-util/nvidia-cuda-toolkit:=[profiler]
-	)
-	ffmpeg? ( media-video/ffmpeg:= )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( $(python_gen_cond_dep '
-		dev-python/numpy[${PYTHON_USEDEP}]
-		') )
-	opencl? ( virtual/opencl )
-	opencv? ( media-libs/opencv:= )
-	qnnpack? ( sci-libs/QNNPACK )
-	xnnpack? ( sci-libs/XNNPACK )
-"
-DEPEND="
-	${RDEPEND}
-	dev-cpp/eigen
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	sci-libs/kineto
-	$(python_gen_cond_dep '
-		dev-python/pyyaml[${PYTHON_USEDEP}]
-		dev-python/pybind11[${PYTHON_USEDEP}]
-	')
-"
-
-S="${WORKDIR}"/${MYP}
-
-PATCHES=(
-	"${FILESDIR}"/${P}-gentoo.patch
-	"${FILESDIR}"/${P}-install-dirs.patch
-	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-	"${FILESDIR}"/${PN}-1.12.0-clang.patch
-)
-
-src_prepare() {
-	filter-lto #bug 862672
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
-		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
-		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
-		ewarn ""
-		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
-		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
-	fi
-
-	local mycmakeargs=(
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DBUILD_SHARED_LIBS=ON
-
-		-DUSE_CCACHE=OFF
-		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DUSE_FAST_NVCC=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_FAKELOWP=OFF
-		-DUSE_FBGEMM=OFF # TODO
-		-DUSE_FFMPEG=$(usex ffmpeg)
-		-DUSE_GFLAGS=ON
-		-DUSE_GLOG=ON
-		-DUSE_GLOO=OFF
-		-DUSE_KINETO=OFF # TODO
-		-DUSE_LEVELDB=OFF
-		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
-		-DUSE_MKLDNN=OFF
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
-		-DUSE_NNPACK=$(usex nnpack)
-		-DUSE_QNNPACK=$(usex qnnpack)
-		-DUSE_XNNPACK=$(usex xnnpack)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
-		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_NUMPY=$(usex numpy)
-		-DUSE_OPENCL=$(usex opencl)
-		-DUSE_OPENCV=$(usex opencv)
-		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
-		-DUSE_SYSTEM_CPUINFO=ON
-		-DUSE_SYSTEM_PYBIND11=ON
-		-DUSE_VALGRIND=OFF
-		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
-		-DPYTHON_EXECUTABLE="${PYTHON}"
-		-DUSE_ITT=OFF
-		-DUSE_SYSTEM_EIGEN_INSTALL=ON
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_SYSTEM_GLOO=ON
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_TENSORPIPE=OFF
-
-		-Wno-dev
-		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
-		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
-	)
-
-	use cuda && addpredict "/dev/nvidiactl" # bug 867706
-	cmake_src_configure
-}
-
-src_install() {
-	cmake_src_install
-
-	insinto "/var/lib/${PN}"
-	doins "${BUILD_DIR}"/CMakeCache.txt
-
-	rm -rf python
-	mkdir -p python/torch/include || die
-	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-	mv "${ED}"/usr/include/torch python/torch/include || die
-	cp torch/version.py python/torch/ || die
-	rm -r "${ED}"/var/tmp || die
-	python_domodule python/caffe2
-	python_domodule python/torch
-}

diff --git a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
deleted file mode 100644
index 67d4281c8b95..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.11.0-gentoo.patch
+++ /dev/null
@@ -1,168 +0,0 @@
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1523,7 +1523,6 @@
-   if(NOT USE_SYSTEM_ONNX)
-     add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx EXCLUDE_FROM_ALL)
-   endif()
--  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
- 
-   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
-   if(NOT USE_SYSTEM_ONNX)
-@@ -1821,7 +1820,6 @@
- #
- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
--add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
- 
- # Disable compiler feature checks for `fmt`.
- #
-@@ -1830,9 +1828,7 @@
- # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
- # `fmt` is compatible with a superset of the compilers that PyTorch is, it
- # shouldn't be too bad to just disable the checks.
--set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
- 
--list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
- 
- # ---[ Kineto
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -62,7 +62,7 @@
- if(${USE_GLOG})
-     target_link_libraries(c10 PUBLIC glog::glog)
- endif()
--target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
-+target_link_libraries(c10 PRIVATE fmt)
- 
- find_package(Backtrace)
- if(Backtrace_FOUND)
---- a/torch/CMakeLists.txt
-+++ b/torch/CMakeLists.txt
-@@ -86,7 +86,6 @@
-     python::python
-     pybind::pybind11
-     shm
--    fmt::fmt-header-only
-     ATEN_CPU_FILES_GEN_LIB)
- 
- set(TORCH_PYTHON_COMPILE_DEFINITIONS)
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -777,7 +777,7 @@
- 
- # ---[ Build flags
- if(NOT MSVC)
--  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+  string(APPEND CMAKE_CXX_FLAGS " -fPIC")
-   string(APPEND CMAKE_CXX_FLAGS " -Wno-narrowing")
-   # Eigen fails to build with some versions, so convert this to a warning
-   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
-@@ -783,7 +783,6 @@
-   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
-   string(APPEND CMAKE_CXX_FLAGS " -Wall")
-   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
--  string(APPEND CMAKE_CXX_FLAGS " -Werror=return-type")
-   string(APPEND CMAKE_CXX_FLAGS " -Wno-missing-field-initializers")
-   string(APPEND CMAKE_CXX_FLAGS " -Wno-type-limits")
-   string(APPEND CMAKE_CXX_FLAGS " -Wno-array-bounds")
-@@ -883,11 +882,9 @@
-   string(APPEND CMAKE_CXX_FLAGS " -fno-trapping-math")
-   check_cxx_compiler_flag("-Werror=format" HAS_WERROR_FORMAT)
-   if(HAS_WERROR_FORMAT)
--    string(APPEND CMAKE_CXX_FLAGS " -Werror=format")
-   endif()
-   check_cxx_compiler_flag("-Werror=cast-function-type" HAS_WERROR_CAST_FUNCTION_TYPE)
-   if(HAS_WERROR_CAST_FUNCTION_TYPE)
--    string(APPEND CMAKE_CXX_FLAGS " -Werror=cast-function-type")
-   endif()
-   check_cxx_compiler_flag("-Werror=sign-compare" HAS_WERROR_SIGN_COMPARE)
-   # This doesn't work globally so we use the test on specific
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -510,8 +510,6 @@
-   endif()
- 
-   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
--  target_compile_options(${libname} PRIVATE
--      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
- 
- endfunction()
- 
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -57,7 +57,7 @@
-   if(MSVC)
-     set(OPT_FLAG "/fp:strict ")
-   else(MSVC)
--    set(OPT_FLAG "-O3 ")
-+    set(OPT_FLAG " ")
-     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
-       set(OPT_FLAG " ")
-     endif()
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -111,7 +111,7 @@
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
- 
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
-   if(NOT TARGET fxdiv)
-     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
-     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -557,7 +557,6 @@
-   if(NOT MSVC)
-     set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/tensorexpr/llvm_jit.cpp PROPERTIES COMPILE_FLAGS -Wno-noexcept-type)
-     # Force -Werror on several files
--    set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen/native/mkldnn/Pooling.cpp PROPERTIES COMPILE_FLAGS "-Werror")
-   endif()
-   # Disable certain warnings for GCC-9.X
-   if(CMAKE_COMPILER_IS_GNUCXX AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.0.0))
-@@ -825,7 +824,6 @@
- torch_compile_options(torch_cpu)  # see cmake/public/utils.cmake
- if(HAS_WERROR_SIGN_COMPARE AND WERROR)
-   # target_compile_options(torch_cpu PRIVATE "-Werror=sign-compare")
--  set_property(SOURCE ${ATen_CORE_SRCS} ${ATen_CPU_SRCS} APPEND PROPERTY COMPILE_OPTIONS "-Werror=sign-compare")
- endif()
- 
- set_property(SOURCE ${ATen_CORE_SRCS} APPEND
-@@ -1017,7 +1015,6 @@
- endif()
- 
- if(NOT MSVC AND USE_XNNPACK)
--  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
- 
- # ==========================================================
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -58,7 +58,7 @@
-   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
-   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
- 
--  if(NOT TARGET nnpack)
-+  if(FALSE)
-     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
-       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
-     endif()
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -481,7 +481,7 @@ endif()
- list(APPEND Caffe2_DEPENDENCY_LIBS cpuinfo)
- 
- # ---[ QNNPACK
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- 
-   if(NOT DEFINED QNNPACK_SOURCE_DIR)
-@@ -537,7 +537,7 @@ if(USE_QNNPACK)
- endif()
- 
- # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch
deleted file mode 100644
index 078e33f45020..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.12.0-install-dirs.patch
+++ /dev/null
@@ -1,129 +0,0 @@
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -107,7 +107,7 @@
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/c10/cuda/CMakeLists.txt
-+++ b/c10/cuda/CMakeLists.txt
-@@ -63,7 +63,7 @@ add_subdirectory(test)
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- foreach(file ${C10_CUDA_HEADERS})
-   get_filename_component( dir ${file} DIRECTORY )
-   install( FILES ${file} DESTINATION include/c10/cuda/${dir} )
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -55,7 +55,7 @@ target_include_directories(
- add_subdirectory(test)
- 
- # ---[ Installation
--install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -17,7 +17,7 @@ if(BUILD_CAFFE2_OPS)
- 
-     torch_set_target_props(caffe2_detectron_ops_gpu)
-     target_link_libraries(caffe2_detectron_ops_gpu torch ${OpenMP_link})
--    install(TARGETS caffe2_detectron_ops_gpu DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_gpu DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops_gpu> DESTINATION lib OPTIONAL)
-     endif()
-@@ -31,7 +31,7 @@ if(BUILD_CAFFE2_OPS)
-     torch_set_target_props(caffe2_detectron_ops_hip)
-     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-     target_link_libraries(caffe2_detectron_ops_hip torch)
--    install(TARGETS caffe2_detectron_ops_hip DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_hip DESTINATION ${CMAKE_INSTALL_LIBDIR})
-   elseif(NOT IOS_PLATFORM)
-     add_library(caffe2_detectron_ops SHARED ${Detectron_CPU_SRCS})
-     if(HAVE_SOVERSION)
-@@ -40,7 +40,7 @@ if(BUILD_CAFFE2_OPS)
-     endif()
-     torch_set_target_props(caffe2_detectron_ops)
-     target_link_libraries(caffe2_detectron_ops torch ${OpenMP_link})
--    install(TARGETS caffe2_detectron_ops DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops> DESTINATION lib OPTIONAL)
-     endif()
---- a/modules/module_test/CMakeLists.txt
-+++ b/modules/module_test/CMakeLists.txt
-@@ -16,7 +16,7 @@ if(BUILD_TEST AND NOT BUILD_LITE_INTERPRETER)
-         VERSION ${TORCH_VERSION} SOVERSION ${TORCH_SOVERSION})
-   endif()
-   target_link_libraries(caffe2_module_test_dynamic torch_library)
--  install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
-+  install(TARGETS caffe2_module_test_dynamic DESTINATION ${CMAKE_INSTALL_LIBDIR})
-   if(MSVC AND BUILD_SHARED_LIBS)
-     install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION lib OPTIONAL)
-   endif()
---- a/modules/observers/CMakeLists.txt
-+++ b/modules/observers/CMakeLists.txt
-@@ -21,7 +21,7 @@ endif()
- target_link_libraries(caffe2_observers PUBLIC torch_library)
- target_include_directories(caffe2_observers PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
- target_compile_options(caffe2_observers PRIVATE "-DCAFFE2_BUILD_OBSERVER_LIB")
--install(TARGETS caffe2_observers DESTINATION lib)
-+install(TARGETS caffe2_observers DESTINATION ${CMAKE_INSTALL_LIBDIR})
- caffe2_interface_library(caffe2_observers caffe2_observers_library)
- if(MSVC AND BUILD_SHARED_LIBS)
-   install(FILES $<TARGET_PDB_FILE:caffe2_observers> DESTINATION lib OPTIONAL)
---- a/modules/rocksdb/CMakeLists.txt
-+++ b/modules/rocksdb/CMakeLists.txt
-@@ -59,7 +59,7 @@ add_library(caffe2_rocksdb ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb.cc)
- target_link_libraries(caffe2_rocksdb PUBLIC torch_library)
- target_link_libraries(caffe2_rocksdb PRIVATE ${RocksDB_LIBRARIES})
- target_include_directories(caffe2_rocksdb PRIVATE ${RocksDB_INCLUDE_DIR})
--install(TARGETS caffe2_rocksdb DESTINATION lib)
-+install(TARGETS caffe2_rocksdb DESTINATION ${CMAKE_INSTALL_LIBDIR})
- 
- # ---[ Last, Append the library to Caffe2_MODULES, if we are building with
- # the main repo.
---- a/test/cpp/c10d/CMakeLists.txt
-+++ b/test/cpp/c10d/CMakeLists.txt
-@@ -51,7 +51,7 @@ if(USE_CUDA)
-     if(INSTALL_TEST)
-       install(TARGETS ProcessGroupNCCLTest DESTINATION bin)
-       install(TARGETS ProcessGroupNCCLErrorsTest DESTINATION bin)
--      install(TARGETS c10d_cuda_test DESTINATION lib)
-+      install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     endif()
-   endif()
- else()
---- a/test/cpp/jit/CMakeLists.txt
-+++ b/test/cpp/jit/CMakeLists.txt
-@@ -32,9 +32,9 @@ endif()
- target_link_libraries(backend_with_compiler torch)
- 
- if(INSTALL_TEST)
--  install(TARGETS torchbind_test DESTINATION lib)
--  install(TARGETS jitbackend_test DESTINATION lib)
--  install(TARGETS backend_with_compiler DESTINATION lib)
-+  install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
- 
- # Build the cpp gtest binary containing the cpp-only tests.
---- a/torch/csrc/deploy/CMakeLists.txt
-+++ b/torch/csrc/deploy/CMakeLists.txt
-@@ -80,4 +80,4 @@ if(INSTALL_TEST)
-   install(TARGETS test_deploy_gpu DESTINATION bin)
- endif()
- 
--install(TARGETS torch_deploy DESTINATION lib)
-+install(TARGETS torch_deploy DESTINATION ${CMAKE_INSTALL_LIBDIR})



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-02-28 20:38 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-02-28 20:38 UTC (permalink / raw
  To: gentoo-commits

commit:     725ad515274266020396fdae1a5b949b30fdc127
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 28 20:37:40 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Tue Feb 28 20:38:24 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=725ad515

sci-libs/caffe2: add tensorpipe use flag

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 .../{caffe2-1.13.1-r3.ebuild => caffe2-1.13.1-r4.ebuild}       |  7 +++++--
 sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch           | 10 ++++++++++
 sci-libs/caffe2/metadata.xml                                   |  1 +
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/sci-libs/caffe2/caffe2-1.13.1-r3.ebuild b/sci-libs/caffe2/caffe2-1.13.1-r4.ebuild
similarity index 96%
rename from sci-libs/caffe2/caffe2-1.13.1-r3.ebuild
rename to sci-libs/caffe2/caffe2-1.13.1-r4.ebuild
index 04521a5c26ef..9411531dd1ec 100644
--- a/sci-libs/caffe2/caffe2-1.13.1-r3.ebuild
+++ b/sci-libs/caffe2/caffe2-1.13.1-r4.ebuild
@@ -17,12 +17,13 @@ SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
-IUSE="cuda distributed ffmpeg mpi nnpack +numpy opencl opencv openmp qnnpack xnnpack"
+IUSE="cuda distributed ffmpeg mpi nnpack +numpy opencl opencv openmp qnnpack tensorpipe xnnpack"
 RESTRICT="test"
 REQUIRED_USE="
 	${PYTHON_REQUIRED_USE}
 	ffmpeg? ( opencv )
 	mpi? ( distributed )
+	tensorpipe? ( distributed )
 " # ?? ( cuda rocm )
 
 # CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
@@ -52,6 +53,7 @@ RDEPEND="
 	opencl? ( virtual/opencl )
 	opencv? ( media-libs/opencv:= )
 	qnnpack? ( sci-libs/QNNPACK )
+	tensorpipe? ( sci-libs/tensorpipe )
 	xnnpack? ( sci-libs/XNNPACK )
 "
 DEPEND="
@@ -77,6 +79,7 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
 	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
 	"${FILESDIR}"/${PN}-1.12.0-clang.patch
+	"${FILESDIR}"/${P}-tensorpipe.patch
 )
 
 src_prepare() {
@@ -127,6 +130,7 @@ src_configure() {
 		-DUSE_QNNPACK=$(usex qnnpack)
 		-DUSE_XNNPACK=$(usex xnnpack)
 		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex tensorpipe)
 		-DUSE_PYTORCH_QNNPACK=OFF
 		-DUSE_NUMPY=$(usex numpy)
 		-DUSE_OPENCL=$(usex opencl)
@@ -147,7 +151,6 @@ src_configure() {
 		-DUSE_SYSTEM_GLOO=ON
 		-DUSE_SYSTEM_ONNX=ON
 		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_TENSORPIPE=OFF
 
 		-Wno-dev
 		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)

diff --git a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch b/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
new file mode 100644
index 000000000000..ae0cac9fb947
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
@@ -0,0 +1,10 @@
+--- a/cmake/Dependencies.cmake	2023-02-28 14:14:49.099057348 +0100
++++ b/cmake/Dependencies.cmake	2023-02-28 14:15:05.326790806 +0100
+@@ -1404,7 +1404,6 @@
+ 
+     # Tensorpipe uses cuda_add_library
+     torch_update_find_cuda_flags()
+-    add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
+ 
+     list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
+     if(USE_CUDA)

diff --git a/sci-libs/caffe2/metadata.xml b/sci-libs/caffe2/metadata.xml
index f009baee81bb..531b9cbf5879 100644
--- a/sci-libs/caffe2/metadata.xml
+++ b/sci-libs/caffe2/metadata.xml
@@ -19,6 +19,7 @@
 		<flag name="opencv">Add support for image processing operators</flag>
 		<flag name="openmp">Use OpenMP for parallel code</flag>
 		<flag name="qnnpack">Use QNNPACK</flag>
+		<flag name="tensorpipe">Use tensorpipe</flag>
 		<flag name="xnnpack">Use XNNPACK</flag>
 	</use>
 	<upstream>
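
As a rough usage sketch (not part of the commit itself): the REQUIRED_USE added
above ties the new flag to USE=distributed, so enabling it on a system would look
something like this (the package.use file name is arbitrary):

    # pull in sci-libs/tensorpipe and build with -DUSE_TENSORPIPE=ON
    echo "sci-libs/caffe2 distributed tensorpipe" >> /etc/portage/package.use/caffe2
    emerge --ask sci-libs/caffe2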



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-04-05  9:07 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-04-05  9:07 UTC (permalink / raw
  To: gentoo-commits

commit:     af7595c6e757334814a1d164a6fcaf35cfd0a944
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Apr  5 09:05:54 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Apr  5 09:06:53 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=af7595c6

sci-libs/caffe2: add 2.0.0

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                        |   1 +
 sci-libs/caffe2/caffe2-2.0.0.ebuild             | 184 +++++++++++++++++++++++
 sci-libs/caffe2/files/caffe2-2.0.0-gentoo.patch | 189 ++++++++++++++++++++++++
 3 files changed, 374 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 616b75a49763..1424256938e9 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1 +1,2 @@
 DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62
+DIST pytorch-2.0.0.tar.gz 111327292 BLAKE2B 6d593a975c0ade714f0b189f7e3c4ff704b9a9a2377b5e441a9cefc202fa22779966d08948e63671912c6ea5a0eee124042155f4f57a654db34e19e42f013cc9 SHA512 4dd76160711c0d87f3026c8b7fa3ed149dd86b8ac0ee9ecea0eaf80d2e6ce8c29368392e77b9466d90b60634087b462b782495997a5d33367cc8ca9fe14c8a14

diff --git a/sci-libs/caffe2/caffe2-2.0.0.ebuild b/sci-libs/caffe2/caffe2-2.0.0.ebuild
new file mode 100644
index 000000000000..d06eaa5b375f
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.0.0.ebuild
@@ -0,0 +1,184 @@
+# Copyright 2022-2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{9..11} )
+inherit python-single-r1 cmake cuda flag-o-matic
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed ffmpeg mpi nnpack +numpy opencl opencv openmp qnnpack tensorpipe xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+	mpi? ( distributed )
+	tensorpipe? ( distributed )
+" # ?? ( cuda rocm )
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	sci-libs/lapack
+	>=sci-libs/onnx-1.12.0
+	sci-libs/foxi
+	cuda? (
+		=dev-libs/cudnn-8*
+		dev-libs/cudnn-frontend:0/8
+		<dev-util/nvidia-cuda-toolkit-12:=[profiler]
+	)
+	ffmpeg? ( media-video/ffmpeg:= )
+	mpi? ( sys-cluster/openmpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+		') )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
+	tensorpipe? ( sci-libs/tensorpipe )
+	xnnpack? ( sci-libs/XNNPACK )
+"
+DEPEND="
+	${RDEPEND}
+	dev-cpp/eigen
+	cuda? ( dev-libs/cutlass )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	sci-libs/kineto
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+	')
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=(
+	"${FILESDIR}"/${P}-gentoo.patch
+	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_CUDNN=$(usex cuda)
+		-DUSE_FAST_NVCC=$(usex cuda)
+		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=OFF # TODO
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=OFF
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=OFF
+		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex tensorpipe)
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=OFF # TODO
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_ITT=OFF
+		-DBLAS=Eigen # avoid the use of MKL, if found on the system
+		-DUSE_SYSTEM_EIGEN_INSTALL=ON
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+	)
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+
+		mycmakeargs+=(
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	fi
+	cmake_src_configure
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch/include || die
+	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	mv "${ED}"/usr/include/torch python/torch/include || die
+	cp torch/version.py python/torch/ || die
+	rm -rf "${ED}"/var/tmp || die
+	python_domodule python/caffe2
+	python_domodule python/torch
+}
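
A minimal make.conf sketch for the ewarn above (the capability value is only an
example, not a recommendation; look up the right one for your GPU as the warning
suggests):

    # /etc/portage/make.conf
    TORCH_CUDA_ARCH_LIST="7.5"

    # or query the installed card directly, as the ebuild text hints:
    /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'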

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.0.0-gentoo.patch
new file mode 100644
index 000000000000..021a06e343e4
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.0.0-gentoo.patch
@@ -0,0 +1,189 @@
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1554,7 +1554,6 @@
+       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+     endif()
+   endif()
+-  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+ 
+   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+   if(NOT USE_SYSTEM_ONNX)
+@@ -1819,7 +1818,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ 
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1828,9 +1826,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ 
+ # ---[ Kineto
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -65,7 +65,7 @@
+ if(${USE_GLOG})
+     target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+ 
+ find_package(Backtrace)
+ if(Backtrace_FOUND)
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -66,15 +66,9 @@
+     ${CMAKE_BINARY_DIR}
+     ${CMAKE_BINARY_DIR}/aten/src
+     ${CMAKE_BINARY_DIR}/caffe2/aten/src
+-    ${CMAKE_BINARY_DIR}/third_party
+-    ${CMAKE_BINARY_DIR}/third_party/onnx
+ 
+-    ${TORCH_ROOT}/third_party/valgrind-headers
+ 
+-    ${TORCH_ROOT}/third_party/gloo
+-    ${TORCH_ROOT}/third_party/onnx
+-    ${TORCH_ROOT}/third_party/flatbuffers/include
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/include
++    /usr/include/kineto
+ 
+     ${TORCH_SRC_DIR}/csrc
+     ${TORCH_SRC_DIR}/csrc/api/include
+@@ -87,7 +81,6 @@
+     python::python
+     pybind::pybind11
+     shm
+-    fmt::fmt-header-only
+     ATEN_CPU_FILES_GEN_LIB)
+ 
+ set(TORCH_PYTHON_COMPILE_DEFINITIONS)
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -799,12 +799,11 @@
+ 
+ # ---[ Build flags
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -fPIC")
+   # Eigen fails to build with some versions, so convert this to a warning
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -912,8 +911,6 @@
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
+ endif()
+ 
+ if(USE_ASAN)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -507,8 +507,6 @@
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -106,7 +106,7 @@
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -533,7 +533,6 @@
+ if(NOT MSVC)
+   set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/tensorexpr/llvm_jit.cpp PROPERTIES COMPILE_FLAGS -Wno-noexcept-type)
+   # Force -Werror on several files
+-  set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen/native/mkldnn/Pooling.cpp PROPERTIES COMPILE_FLAGS "-Werror")
+ endif()
+ # Disable certain warnings for GCC-9.X
+ if(CMAKE_COMPILER_IS_GNUCXX AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.0.0))
+@@ -811,7 +812,6 @@
+ torch_compile_options(torch_cpu)  # see cmake/public/utils.cmake
+ if(HAS_WERROR_SIGN_COMPARE AND WERROR)
+   # target_compile_options(torch_cpu PRIVATE "-Werror=sign-compare")
+-  set_property(SOURCE ${ATen_CORE_SRCS} ${ATen_CPU_SRCS} APPEND PROPERTY COMPILE_OPTIONS "-Werror=sign-compare")
+ endif()
+ 
+ set_property(SOURCE ${ATen_CORE_SRCS} APPEND
+@@ -975,7 +977,6 @@
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+@@ -1095,8 +1098,7 @@
+ target_include_directories(torch_cpu PRIVATE
+   ${TORCH_ROOT}/third_party/miniz-2.1.0)
+ 
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+ 
+ if(USE_KINETO)
+   target_include_directories(torch_cpu PRIVATE
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -58,7 +58,7 @@
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -486,7 +486,7 @@ endif()
+ list(APPEND Caffe2_DEPENDENCY_LIBS cpuinfo)
+ 
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ 
+   if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -542,7 +542,7 @@ if(USE_QNNPACK)
+ endif()
+ 
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-04-24  8:36 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-04-24  8:36 UTC (permalink / raw
  To: gentoo-commits

commit:     4d6a404d63ba3914c98344efe64d9b631089e3e1
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 24 08:06:19 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Mon Apr 24 08:35:48 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=4d6a404d

sci-libs/caffe2: fix cudnn include path

Closes: https://bugs.gentoo.org/904882
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/caffe2-2.0.0-r2.ebuild                     |  1 +
 sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch | 12 ++++++++++++
 2 files changed, 13 insertions(+)

diff --git a/sci-libs/caffe2/caffe2-2.0.0-r2.ebuild b/sci-libs/caffe2/caffe2-2.0.0-r2.ebuild
index 38ce9cd3befa..7667c7bbee17 100644
--- a/sci-libs/caffe2/caffe2-2.0.0-r2.ebuild
+++ b/sci-libs/caffe2/caffe2-2.0.0-r2.ebuild
@@ -81,6 +81,7 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
 	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
 	"${FILESDIR}"/${P}-gcc13.patch
+	"${FILESDIR}"/${P}-cudnn_include_fix.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
new file mode 100644
index 000000000000..ff64e4108087
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
@@ -0,0 +1,12 @@
+diff -uar pytorch-2.0.0/cmake/Dependencies.cmake pytorch-2.0.0orig/cmake/Dependencies.cmake
+--- a/cmake/Dependencies.cmake	2023-04-23 09:43:20.767566047 -0400
++++ b/cmake/Dependencies.cmake	2023-03-09 17:42:00.000000000 -0500
+@@ -1235,7 +1235,7 @@
+ 
+ # ---[ cuDNN
+ if(USE_CUDNN)
+-  set(CUDNN_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
++  set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
+   target_include_directories(torch::cudnn INTERFACE ${CUDNN_FRONTEND_INCLUDE_DIR})
+ endif()
+ 
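
A quick, illustrative sanity check for the path hardcoded by this patch (the
cudnn_frontend header naming is an assumption taken from dev-libs/cudnn-frontend,
not something stated in the commit):

    # the frontend headers must be visible under the new include dir
    ls /opt/cuda/include | grep -i cudnn_frontend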



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2023-12-06 11:55 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2023-12-06 11:55 UTC (permalink / raw
  To: gentoo-commits

commit:     783ebfe477601f3a416a1bf7f7f0daf5b0732c5c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Dec  6 11:55:09 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Dec  6 11:55:25 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=783ebfe4

sci-libs/caffe2: add 2.1.1

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   1 +
 sci-libs/caffe2/caffe2-2.1.1.ebuild                | 210 +++++++++++++++++++++
 sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch |  28 +++
 sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch    | 188 ++++++++++++++++++
 4 files changed, 427 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 0ee75499ccbe..d51cccdc3c37 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
 DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62
 DIST pytorch-2.0.0.tar.gz 111327292 BLAKE2B 6d593a975c0ade714f0b189f7e3c4ff704b9a9a2377b5e441a9cefc202fa22779966d08948e63671912c6ea5a0eee124042155f4f57a654db34e19e42f013cc9 SHA512 4dd76160711c0d87f3026c8b7fa3ed149dd86b8ac0ee9ecea0eaf80d2e6ce8c29368392e77b9466d90b60634087b462b782495997a5d33367cc8ca9fe14c8a14
 DIST pytorch-2.0.1.tar.gz 111335778 BLAKE2B 7a10cc2b2d5e2422aef7e060a0c3a62ca5c7460c6e0b9becade9b98939501975c74ed5a175a653731f43ca824d2c9bd31f41d1f633c2b139779ab23d5331e9ce SHA512 2309a22b3be3ccdb36d8d9781a59a7bdcc2fdb8d95ada205702ec77862480f0cbb12cd5d6b8cd3114d01a6e33b7743d0fe9de93debf37138ca5c14403cdb0c43
+DIST pytorch-2.1.1.tar.gz 116317162 BLAKE2B d9819256cba0b9951aabe95d86fb135e97d8bafa2c010d13162cd9b3373ca75f20d218e31279ace41981f3f76308721c522f9e53745a1ff9e6386fa10634f9ad SHA512 31b36e7732ee086ae7565a3811ab2d1b2869e79057bea7a4ffc4a3c95c544757e656a6d2289ee11fe7508828aca144e4220ef1e9ab1878e075e1259cf6ff9ca4

diff --git a/sci-libs/caffe2/caffe2-2.1.1.ebuild b/sci-libs/caffe2/caffe2-2.1.1.ebuild
new file mode 100644
index 000000000000..e5e9a71069cd
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.1.1.ebuild
@@ -0,0 +1,210 @@
+# Copyright 2022-2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{9..11} )
+inherit python-single-r1 cmake cuda flag-o-matic prefix
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg gloo mpi nnpack +numpy opencl opencv openmp qnnpack tensorpipe xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+	mpi? ( distributed )
+	tensorpipe? ( distributed )
+	distributed? ( tensorpipe )
+	gloo? ( distributed )
+" # ?? ( cuda rocm )
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	sci-libs/lapack
+	>=sci-libs/onnx-1.12.0
+	sci-libs/foxi
+	cuda? (
+		=dev-libs/cudnn-8*
+		dev-libs/cudnn-frontend:0/8
+		<dev-util/nvidia-cuda-toolkit-12:=[profiler]
+	)
+	fbgemm? ( dev-libs/FBGEMM )
+	ffmpeg? ( media-video/ffmpeg:= )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+		') )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
+	tensorpipe? ( sci-libs/tensorpipe[cuda?] )
+	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+"
+DEPEND="
+	${RDEPEND}
+	dev-cpp/eigen
+	cuda? ( dev-libs/cutlass )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20231031
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+	')
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=(
+	"${FILESDIR}"/${P}-gentoo.patch
+	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+	"${FILESDIR}"/${PN}-2.0.0-gcc13.patch
+	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
+	"${FILESDIR}"/${P}-cudaExtra.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+	sed -i \
+		-e "/third_party\/gloo/d" \
+		cmake/Dependencies.cmake \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_CUDNN=$(usex cuda)
+		-DUSE_FAST_NVCC=$(usex cuda)
+		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+		-DBUILD_NVFUSER=$(usex cuda)
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=$(usex fbgemm)
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=$(usex gloo)
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=OFF
+		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex tensorpipe)
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=OFF # TODO
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_ITT=OFF
+		-DBLAS=Eigen # avoid the use of MKL, if found on the system
+		-DUSE_SYSTEM_EIGEN_INSTALL=ON
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+	)
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+		addpredict "/dev/char"
+
+		mycmakeargs+=(
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	fi
+	cmake_src_configure
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch/include || die
+	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	mv "${ED}"/usr/include/torch python/torch/include || die
+	cp torch/version.py python/torch/ || die
+	python_domodule python/caffe2
+	python_domodule python/torch
+}
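
For reference, the effect of the new sed in src_prepare() can be verified from the
prepared source tree with something like (illustrative command, run inside ${S}
after the prepare phase):

    # should print nothing once the bundled gloo references are stripped
    grep -n "third_party/gloo" cmake/Dependencies.cmake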

diff --git a/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch b/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch
new file mode 100644
index 000000000000..f12623f2068a
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch
@@ -0,0 +1,28 @@
+--- a/third_party/nvfuser/CMakeLists.txt	2023-11-30 21:42:07.336946970 +0100
++++ b/third_party/nvfuser/CMakeLists.txt	2023-11-30 21:46:35.101749250 +0100
+@@ -18,7 +18,7 @@
+ set(NVFUSER_ROOT ${PROJECT_SOURCE_DIR})
+ set(NVFUSER_SRCS_DIR "${NVFUSER_ROOT}/csrc")
+ set(TORCH_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/../..")
+-set(TORCH_INSTALL_LIB_DIR ${TORCH_ROOT}/torch/lib)
++set(TORCH_INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR})
+ 
+ # --- build nvfuser_codegen library
+ 
+@@ -218,7 +218,7 @@
+     message(STATUS "somehow this is happening")
+     set_target_properties(${NVFUSER} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+   endif()
+-  install(TARGETS ${NVFUSER} EXPORT NvfuserTargets DESTINATION ${TORCH_ROOT}/nvfuser/)
++  install(TARGETS ${NVFUSER} EXPORT NvfuserTargets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ 
+   # install nvfuser python files
+   install(DIRECTORY "${NVFUSER_ROOT}/python/"
+--- a/functorch/CMakeLists.txt	2023-11-30 20:30:45.805209036 +0100
++++ b/functorch/CMakeLists.txt	2023-11-30 20:31:13.284766157 +0100
+@@ -35,4 +35,4 @@
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")

diff --git a/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch
new file mode 100644
index 000000000000..056ac9afe5b3
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch
@@ -0,0 +1,188 @@
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -487,7 +487,7 @@ endif()
+ list(APPEND Caffe2_DEPENDENCY_LIBS cpuinfo)
+ 
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ 
+   if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -543,7 +543,7 @@ if(USE_QNNPACK)
+ endif()
+ 
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
+@@ -803,7 +803,7 @@
+ endif()
+ 
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   if(NOT DEFINED FBGEMM_SOURCE_DIR)
+     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -848,6 +848,7 @@
+ endif()
+ 
+ if(USE_FBGEMM)
++  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+   caffe2_update_option(USE_FBGEMM ON)
+ else()
+   caffe2_update_option(USE_FBGEMM OFF)
+@@ -1552,7 +1553,6 @@
+       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+     endif()
+   endif()
+-  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+ 
+   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+   if(NOT USE_SYSTEM_ONNX)
+@@ -1831,7 +1831,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ 
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1840,9 +1839,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ 
+ # ---[ Kineto
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -87,7 +87,7 @@
+ if(${USE_GLOG})
+     target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+ 
+ find_package(Backtrace)
+ if(Backtrace_FOUND)
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -63,15 +63,9 @@
+     ${CMAKE_BINARY_DIR}
+     ${CMAKE_BINARY_DIR}/aten/src
+     ${CMAKE_BINARY_DIR}/caffe2/aten/src
+-    ${CMAKE_BINARY_DIR}/third_party
+-    ${CMAKE_BINARY_DIR}/third_party/onnx
+ 
+-    ${TORCH_ROOT}/third_party/valgrind-headers
+ 
+-    ${TORCH_ROOT}/third_party/gloo
+-    ${TORCH_ROOT}/third_party/onnx
+-    ${TORCH_ROOT}/third_party/flatbuffers/include
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/include
++    /usr/include/kineto
+ 
+     ${TORCH_SRC_DIR}/csrc
+     ${TORCH_SRC_DIR}/csrc/api/include
+@@ -84,7 +78,6 @@
+     python::python
+     pybind::pybind11
+     shm
+-    fmt::fmt-header-only
+     ATEN_CPU_FILES_GEN_LIB)
+ 
+ if(USE_ASAN AND TARGET Sanitizer::address)
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -812,12 +812,11 @@
+ 
+ # ---[ Build flags
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -fPIC")
+   # Eigen fails to build with some versions, so convert this to a warning
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -917,8 +916,6 @@
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -492,8 +492,6 @@
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -107,7 +107,7 @@
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1023,7 +1025,6 @@
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+@@ -1143,8 +1146,7 @@
+ target_include_directories(torch_cpu PRIVATE
+   ${TORCH_ROOT}/third_party/miniz-2.1.0)
+ 
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+ 
+ if(USE_KINETO)
+   target_include_directories(torch_cpu PRIVATE
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -58,7 +58,7 @@
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-03-08 18:46 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-03-08 18:46 UTC (permalink / raw
  To: gentoo-commits

commit:     087c7302f2f874abf503fad2c0eaf04fde04cb40
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  8 18:40:10 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Fri Mar  8 18:45:29 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=087c7302

sci-libs/caffe2: add 2.2.1

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                        |   1 +
 sci-libs/caffe2/caffe2-2.2.1.ebuild             | 230 ++++++++++++++++++++++++
 sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch | 195 ++++++++++++++++++++
 3 files changed, 426 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index a05e285f98e0..2a63d8018442 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
 DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62
 DIST pytorch-2.0.1.tar.gz 111335778 BLAKE2B 7a10cc2b2d5e2422aef7e060a0c3a62ca5c7460c6e0b9becade9b98939501975c74ed5a175a653731f43ca824d2c9bd31f41d1f633c2b139779ab23d5331e9ce SHA512 2309a22b3be3ccdb36d8d9781a59a7bdcc2fdb8d95ada205702ec77862480f0cbb12cd5d6b8cd3114d01a6e33b7743d0fe9de93debf37138ca5c14403cdb0c43
 DIST pytorch-2.1.2.tar.gz 116316469 BLAKE2B c5a55ee264bc3477d3556ba6376b5591117e992e56e0dd0c9ba93d12526e2727f7840f6f1e0730a38223b6492c9556840c4ebf22ffd220e97225c2abff303747 SHA512 a8961d78ad785b13c959a0612563a60e0de17a7c8bb9822ddea9a24072796354d07e81c47b6cc8761b21a6448845b088cf80e1661d9e889b0ed5474d3dc76756
+DIST pytorch-2.2.1.tar.gz 116370903 BLAKE2B 7d08e80f91bad76fba1751c30a34bebfe7145058b7758c0d47112702263a80666f70687a8860744725c6aa995e854f766a5bfa4644c23e5635e7e08c8d63a6e9 SHA512 f19ebcf59d183c3348946ba7cfcab2bc4ca93785863b8edc39dba5772083a7b0425ccb4f92a8df4dc0d18246c75e8ff812993161467fbf9dc48d7fb28a1e26f1

diff --git a/sci-libs/caffe2/caffe2-2.2.1.ebuild b/sci-libs/caffe2/caffe2-2.2.1.ebuild
new file mode 100644
index 000000000000..68328fb71624
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.2.1.ebuild
@@ -0,0 +1,230 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{9..12} )
+inherit python-single-r1 cmake cuda flag-o-matic prefix
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+	mpi? ( distributed )
+	gloo? ( distributed )
+" # ?? ( cuda rocm )
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	virtual/lapack
+	>=sci-libs/onnx-1.12.0
+	<sci-libs/onnx-1.15.0
+	sci-libs/foxi
+	cuda? (
+		=dev-libs/cudnn-8*
+		>=dev-libs/cudnn-frontend-0.9.2:0/8
+		dev-util/nvidia-cuda-toolkit:=[profiler]
+	)
+	fbgemm? ( >=dev-libs/FBGEMM-2023.11.02 )
+	ffmpeg? ( media-video/ffmpeg:= )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+		') )
+	onednn? ( dev-libs/oneDNN )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
+	distributed? ( sci-libs/tensorpipe[cuda?] )
+	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+	mkl? ( sci-libs/mkl )
+	openblas? ( sci-libs/openblas )
+"
+DEPEND="
+	${RDEPEND}
+	cuda? ( >=dev-libs/cutlass-3.1.0 )
+	onednn? ( sci-libs/ideep )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20231031
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+	')
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=(
+	"${FILESDIR}"/${P}-gentoo.patch
+	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+	"${FILESDIR}"/${PN}-2.0.0-gcc13.patch
+	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+	sed -i \
+		-e "/third_party\/gloo/d" \
+		cmake/Dependencies.cmake \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_CUDNN=$(usex cuda)
+		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+		-DBUILD_NVFUSER=$(usex cuda)
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=$(usex fbgemm)
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=$(usex gloo)
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=$(usex onednn)
+		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex distributed)
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=OFF # TODO
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_ITT=OFF
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_METAL=OFF
+
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+	)
+
+	if use mkl; then
+		mycmakeargs+=(-DBLAS=MKL)
+	elif use openblas; then
+		mycmakeargs+=(-DBLAS=OpenBLAS)
+	else
+		mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+	fi
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+		addpredict "/dev/char"
+
+		mycmakeargs+=(
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	fi
+
+	if use onednn; then
+		mycmakeargs+=(
+			-DUSE_MKLDNN=ON
+			-DMKLDNN_FOUND=ON
+			-DMKLDNN_LIBRARIES=dnnl
+			-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+		)
+	fi
+
+	cmake_src_configure
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch/include || die
+	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	cp torch/version.py python/torch/ || die
+	python_domodule python/caffe2
+	python_domodule python/torch
+	ln -s ../../../../../include/torch \
+		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+}
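
As a side note for users who hit the ewarn above: a minimal make.conf entry could look like the following sketch (the capability values are only examples; use the ones matching your own GPU):

  # /etc/portage/make.conf (illustrative values)
  TORCH_CUDA_ARCH_LIST="7.5"
  # or several capabilities at once:
  #TORCH_CUDA_ARCH_LIST="7.5 3.5"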

diff --git a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
new file mode 100644
index 000000000000..5472a2c41836
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
@@ -0,0 +1,195 @@
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -474,7 +474,7 @@
+ endif()
+ 
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ 
+   if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -530,7 +530,7 @@
+ endif()
+ 
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
+@@ -780,7 +780,7 @@
+ endif()
+ 
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   if(NOT DEFINED FBGEMM_SOURCE_DIR)
+     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -828,6 +828,7 @@
+ endif()
+ 
+ if(USE_FBGEMM)
++  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+   caffe2_update_option(USE_FBGEMM ON)
+ else()
+   caffe2_update_option(USE_FBGEMM OFF)
+@@ -1529,7 +1530,6 @@
+       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+     endif()
+   endif()
+-  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+ 
+   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+   if(NOT USE_SYSTEM_ONNX)
+@@ -1796,7 +1796,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ 
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1805,9 +1804,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ 
+ # ---[ Kineto
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -89,7 +89,7 @@
+ if(C10_USE_GLOG)
+     target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+ 
+ if(C10_USE_NUMA)
+   target_include_directories(c10 PRIVATE ${Numa_INCLUDE_DIR})
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,15 +59,9 @@
+     ${CMAKE_BINARY_DIR}
+     ${CMAKE_BINARY_DIR}/aten/src
+     ${CMAKE_BINARY_DIR}/caffe2/aten/src
+-    ${CMAKE_BINARY_DIR}/third_party
+-    ${CMAKE_BINARY_DIR}/third_party/onnx
+ 
+-    ${TORCH_ROOT}/third_party/valgrind-headers
+ 
+-    ${TORCH_ROOT}/third_party/gloo
+-    ${TORCH_ROOT}/third_party/onnx
+-    ${TORCH_ROOT}/third_party/flatbuffers/include
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/include
++    /usr/include/kineto
+ 
+     ${TORCH_SRC_DIR}/csrc
+     ${TORCH_SRC_DIR}/csrc/api/include
+@@ -80,7 +74,6 @@
+     python::python
+     pybind::pybind11
+     shm
+-    fmt::fmt-header-only
+     ATEN_CPU_FILES_GEN_LIB)
+ 
+ if(USE_ASAN AND TARGET Sanitizer::address)
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -835,12 +835,11 @@
+ # Re-include to override append_cxx_flag_if_supported from third_party/FBGEMM
+ include(cmake/public/utils.cmake)
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -O2")
+   # Eigen fails to build with some versions, so convert this to a warning
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -930,7 +930,6 @@
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -486,8 +486,6 @@
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -107,7 +107,7 @@
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1055,7 +1055,6 @@
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+@@ -1175,8 +1174,7 @@
+ target_include_directories(torch_cpu PRIVATE
+   ${TORCH_ROOT}/third_party/miniz-2.1.0)
+ 
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+ 
+ if(USE_KINETO)
+   target_include_directories(torch_cpu PRIVATE
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()
+--- a/functorch/CMakeLists.txt	2023-11-30 20:30:45.805209036 +0100
++++ b/functorch/CMakeLists.txt	2023-11-30 20:31:13.284766157 +0100
+@@ -35,4 +35,4 @@
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")


* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-03-11 19:28 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-03-11 19:28 UTC (permalink / raw
  To: gentoo-commits

commit:     a5bd494c9be931e7bdcf88f75f37d9f4d8594864
Author:     Sv. Lockal <lockalsash <AT> gmail <DOT> com>
AuthorDate: Mon Mar 11 16:37:02 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Mon Mar 11 19:27:48 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=a5bd494c

sci-libs/caffe2: add USE=rocm flag for AMDGPU support for 2.1.2 and 2.2.1

Closes: https://bugs.gentoo.org/905286
Signed-off-by: Sv. Lockal <lockalsash <AT> gmail.com>
Closes: https://github.com/gentoo/gentoo/pull/35713
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 ...ffe2-2.1.2-r6.ebuild => caffe2-2.1.2-r7.ebuild} | 61 +++++++++++++++----
 ...{caffe2-2.2.1.ebuild => caffe2-2.2.1-r1.ebuild} | 54 ++++++++++++++---
 .../files/caffe2-2.1.2-rocm-fix-std-cpp17.patch    | 68 ++++++++++++++++++++++
 sci-libs/caffe2/metadata.xml                       |  1 +
 4 files changed, 166 insertions(+), 18 deletions(-)

diff --git a/sci-libs/caffe2/caffe2-2.1.2-r6.ebuild b/sci-libs/caffe2/caffe2-2.1.2-r7.ebuild
similarity index 79%
rename from sci-libs/caffe2/caffe2-2.1.2-r6.ebuild
rename to sci-libs/caffe2/caffe2-2.1.2-r7.ebuild
index 969c36754c5c..f57406145c6a 100644
--- a/sci-libs/caffe2/caffe2-2.1.2-r6.ebuild
+++ b/sci-libs/caffe2/caffe2-2.1.2-r7.ebuild
@@ -4,7 +4,8 @@
 EAPI=8
 
 PYTHON_COMPAT=( python3_{9..12} )
-inherit python-single-r1 cmake cuda flag-o-matic prefix
+ROCM_VERSION=5.7
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
 
 MYPN=pytorch
 MYP=${MYPN}-${PV}
@@ -17,7 +18,7 @@ SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
-IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack tensorpipe xnnpack"
+IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack rocm tensorpipe xnnpack"
 RESTRICT="test"
 REQUIRED_USE="
 	${PYTHON_REQUIRED_USE}
@@ -26,7 +27,9 @@ REQUIRED_USE="
 	tensorpipe? ( distributed )
 	distributed? ( tensorpipe )
 	gloo? ( distributed )
-" # ?? ( cuda rocm )
+	?? ( cuda rocm )
+	rocm? ( || ( ${ROCM_REQUIRED_USE} ) )
+"
 
 # CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
 RDEPEND="
@@ -59,6 +62,20 @@ RDEPEND="
 	opencl? ( virtual/opencl )
 	opencv? ( media-libs/opencv:= )
 	qnnpack? ( sci-libs/QNNPACK )
+	rocm? (
+		>=dev-util/hip-5.7
+		>=dev-libs/rccl-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocThrust-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocPRIM-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipBLAS-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipFFT-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSPARSE-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipRAND-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipCUB-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSOLVER-5.7[${ROCM_USEDEP}]
+		>=sci-libs/miopen-5.7[${ROCM_USEDEP}]
+		>=dev-util/roctracer-5.7[${ROCM_USEDEP}]
+	)
 	tensorpipe? ( sci-libs/tensorpipe[cuda?] )
 	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
 	mkl? ( sci-libs/mkl )
@@ -92,6 +109,7 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-2.1.1-cudaExtra.patch
 	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
 	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+	"${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
 )
 
 src_prepare() {
@@ -118,6 +136,18 @@ src_prepare() {
 		cmake/Dependencies.cmake \
 		torch/CMakeLists.txt \
 		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:ROCM_PATH /opt/rocm:ROCM_PATH /usr:" \
+			-e "s:HIP_PATH \${ROCM_PATH}/hip:HIP_PATH /usr:" \
+			-e "s:\${HIP_PATH}/cmake:/usr/$(get_libdir)/cmake/hip:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
 }
 
 src_configure() {
@@ -140,9 +170,6 @@ src_configure() {
 
 		-DUSE_CCACHE=OFF
 		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DBUILD_NVFUSER=$(usex cuda)
 		-DUSE_DISTRIBUTED=$(usex distributed)
 		-DUSE_MPI=$(usex mpi)
 		-DUSE_FAKELOWP=OFF
@@ -155,7 +182,6 @@ src_configure() {
 		-DUSE_LEVELDB=OFF
 		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
 		-DUSE_MKLDNN=$(usex onednn)
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
 		-DUSE_NNPACK=$(usex nnpack)
 		-DUSE_QNNPACK=$(usex qnnpack)
 		-DUSE_XNNPACK=$(usex xnnpack)
@@ -166,7 +192,7 @@ src_configure() {
 		-DUSE_OPENCL=$(usex opencl)
 		-DUSE_OPENCV=$(usex opencv)
 		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
+		-DUSE_ROCM=$(usex rocm)
 		-DUSE_SYSTEM_CPUINFO=ON
 		-DUSE_SYSTEM_PYBIND11=ON
 		-DUSE_UCC=OFF
@@ -200,8 +226,20 @@ src_configure() {
 		addpredict "/dev/char"
 
 		mycmakeargs+=(
+			-DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DBUILD_NVFUSER=ON
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
 			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
 		)
+	elif use rocm; then
+		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+		mycmakeargs+=(
+			-DBUILD_NVFUSER=ON
+			-DUSE_NCCL=ON
+			-DUSE_SYSTEM_NCCL=ON
+		)
 	fi
 
 	if use onednn; then
@@ -214,6 +252,9 @@ src_configure() {
 	fi
 
 	cmake_src_configure
+
+	# do not rerun cmake and the build process in src_install
+	sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
 }
 
 src_install() {
@@ -225,7 +266,7 @@ src_install() {
 	rm -rf python
 	mkdir -p python/torch/include || die
 	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-	if use cuda; then
+	if use cuda || use rocm; then
 		mv "${ED}${S}"/nvfuser python/nvfuser || die
 		mv "${ED}"/usr/$(get_libdir)/nvfuser.so python/nvfuser/_C.so || die
 	fi
@@ -234,7 +275,7 @@ src_install() {
 	python_domodule python/torch
 	ln -s ../../../../../include/torch \
 		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
-	if use cuda; then
+	if use cuda || use rocm; then
 		python_domodule python/nvfuser
 	fi
 	rm -rf "${ED}${WORKDIR}"

diff --git a/sci-libs/caffe2/caffe2-2.2.1.ebuild b/sci-libs/caffe2/caffe2-2.2.1-r1.ebuild
similarity index 80%
rename from sci-libs/caffe2/caffe2-2.2.1.ebuild
rename to sci-libs/caffe2/caffe2-2.2.1-r1.ebuild
index 6f96107154b7..80dc2b500a0f 100644
--- a/sci-libs/caffe2/caffe2-2.2.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.2.1-r1.ebuild
@@ -4,7 +4,8 @@
 EAPI=8
 
 PYTHON_COMPAT=( python3_{9..12} )
-inherit python-single-r1 cmake cuda flag-o-matic prefix
+ROCM_VERSION=5.7
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
 
 MYPN=pytorch
 MYP=${MYPN}-${PV}
@@ -17,14 +18,16 @@ SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 LICENSE="BSD"
 SLOT="0"
 KEYWORDS="~amd64"
-IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack xnnpack"
+IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack rocm xnnpack"
 RESTRICT="test"
 REQUIRED_USE="
 	${PYTHON_REQUIRED_USE}
 	ffmpeg? ( opencv )
 	mpi? ( distributed )
 	gloo? ( distributed )
-" # ?? ( cuda rocm )
+	?? ( cuda rocm )
+	rocm? ( || ( ${ROCM_REQUIRED_USE} ) )
+"
 
 # CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
 RDEPEND="
@@ -57,6 +60,20 @@ RDEPEND="
 	opencl? ( virtual/opencl )
 	opencv? ( media-libs/opencv:= )
 	qnnpack? ( sci-libs/QNNPACK )
+	rocm? (
+		>=dev-util/hip-5.7
+		>=dev-libs/rccl-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocThrust-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocPRIM-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipBLAS-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipFFT-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSPARSE-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipRAND-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipCUB-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSOLVER-5.7[${ROCM_USEDEP}]
+		>=sci-libs/miopen-5.7[${ROCM_USEDEP}]
+		>=dev-util/roctracer-5.7[${ROCM_USEDEP}]
+	)
 	distributed? ( sci-libs/tensorpipe[cuda?] )
 	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
 	mkl? ( sci-libs/mkl )
@@ -89,6 +106,7 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
 	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
 	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+	"${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
 )
 
 src_prepare() {
@@ -115,6 +133,17 @@ src_prepare() {
 		cmake/Dependencies.cmake \
 		torch/CMakeLists.txt \
 		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
 }
 
 src_configure() {
@@ -137,9 +166,6 @@ src_configure() {
 
 		-DUSE_CCACHE=OFF
 		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DBUILD_NVFUSER=$(usex cuda)
 		-DUSE_DISTRIBUTED=$(usex distributed)
 		-DUSE_MPI=$(usex mpi)
 		-DUSE_FAKELOWP=OFF
@@ -152,7 +178,6 @@ src_configure() {
 		-DUSE_LEVELDB=OFF
 		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
 		-DUSE_MKLDNN=$(usex onednn)
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
 		-DUSE_NNPACK=$(usex nnpack)
 		-DUSE_QNNPACK=$(usex qnnpack)
 		-DUSE_XNNPACK=$(usex xnnpack)
@@ -163,7 +188,7 @@ src_configure() {
 		-DUSE_OPENCL=$(usex opencl)
 		-DUSE_OPENCV=$(usex opencv)
 		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
+		-DUSE_ROCM=$(usex rocm)
 		-DUSE_SYSTEM_CPUINFO=ON
 		-DUSE_SYSTEM_PYBIND11=ON
 		-DUSE_UCC=OFF
@@ -197,8 +222,18 @@ src_configure() {
 		addpredict "/dev/char"
 
 		mycmakeargs+=(
+			-DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
 			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
 		)
+	elif use rocm; then
+		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+		mycmakeargs+=(
+			-DUSE_NCCL=ON
+			-DUSE_SYSTEM_NCCL=ON
+		)
 	fi
 
 	if use onednn; then
@@ -211,6 +246,9 @@ src_configure() {
 	fi
 
 	cmake_src_configure
+
+	# do not rerun cmake and the build process in src_install
+	sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
 }
 
 src_install() {

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
new file mode 100644
index 000000000000..cb0fa0c48e80
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
@@ -0,0 +1,68 @@
+Fix for error: invalid argument '-std=c++17' not allowed with 'C'
+https://github.com/pytorch/pytorch/issues/103222
+--- a/c10/hip/CMakeLists.txt
++++ b/c10/hip/CMakeLists.txt
+@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
+ 
+ # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
+ target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
++set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+ 
+ # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
+ # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -1598,6 +1598,7 @@ if(USE_ROCM)
+ 
+   # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
+   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
++  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+   target_link_libraries(torch_hip PUBLIC c10_hip)
+ 
+   if(NOT INTERN_BUILD_MOBILE)
+@@ -1774,6 +1775,7 @@ if(BUILD_TEST)
+       target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
+       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
+       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
++      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
+       if(INSTALL_TEST)
+         install(TARGETS ${test_name} DESTINATION test)
+@@ -1955,6 +1957,7 @@ if(BUILD_PYTHON)
+     endif()
+     if(NOT MSVC)
+       target_compile_options(caffe2_pybind11_state_hip PRIVATE ${HIP_CXX_FLAGS} -fvisibility=hidden)
++      set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+     endif()
+     set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
+     set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1287,7 +1287,6 @@ if(USE_ROCM)
+     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
+     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
+     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
+-    list(APPEND HIP_CXX_FLAGS -std=c++17)
+     add_definitions(-DROCM_VERSION=${ROCM_VERSION_DEV_INT})
+     add_definitions(-DTORCH_HIP_VERSION=${TORCH_HIP_VERSION})
+     message("TORCH_HIP_VERSION=${TORCH_HIP_VERSION} is added as a compiler defines")
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
+   caffe2_binary_target(${target_name_or_src})
+ 
+   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
++  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
+ endfunction()
+ 
+--- a/modules/detectron/CMakeLists.txt
++++ b/modules/detectron/CMakeLists.txt
+@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
+         ${Detectron_CPU_SRCS}
+         ${Detectron_HIP_SRCS})
+     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
++    set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+     if(USE_MKLDNN)
+       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
+     endif()

diff --git a/sci-libs/caffe2/metadata.xml b/sci-libs/caffe2/metadata.xml
index 3fe84b0977fc..ed1f9fa58993 100644
--- a/sci-libs/caffe2/metadata.xml
+++ b/sci-libs/caffe2/metadata.xml
@@ -18,6 +18,7 @@
 		<flag name="opencv">Add support for image processing operators</flag>
 		<flag name="openmp">Use OpenMP for parallel code</flag>
 		<flag name="qnnpack">Use QNNPACK</flag>
+		<flag name="rocm">Enable ROCm gpu computing support</flag>
 		<flag name="tensorpipe">Use tensorpipe</flag>
 		<flag name="xnnpack">Use XNNPACK</flag>
 	</use>


* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-07-27 19:14 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-07-27 19:14 UTC (permalink / raw
  To: gentoo-commits

commit:     ca2e68ab5527af67bf95684068758b4efbd5b5a5
Author:     Sv. Lockal <lockalsash <AT> gmail <DOT> com>
AuthorDate: Thu Jul 25 09:27:07 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Sat Jul 27 19:08:42 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=ca2e68ab

sci-libs/caffe2: update dependencies to fix rocm flag

pytorch 2.3.0 introduced two new direct dependencies: hipBLASLt and aotriton.

pytorch uses hipBLASLt to perform GEMM operations on datacenter AMD Instinct GPUs; for other GPUs pytorch falls back to hipBLAS.
caffe2-2.3.x ebuilds now contain a patch that optionally disables this dependency when none of the AMDGPU_TARGETS="gfx90a gfx940 gfx941 gfx942" values is selected.

pytorch uses aotriton to perform the FlashAttention operation.
caffe2-2.3.x ebuilds now contain a patch that fully disables the aotriton dependency, as there is no such package yet.
Technically aotriton can be compiled (with minor patches), but I suggest waiting for the next releases.
It is a massive burden, as it depends on a forked triton and a forked clang (a merge with upstream is not expected anytime soon).
aotriton is usually distributed as a huge static (!) library (but in the next release the library will be shared).

Minor fixes were added for compatibility with libc++ (used in the experimental llvm Gentoo profile); however, other ebuilds also require minor patches
(in other words: right now the ROCm ecosystem can be compiled with libc++, but only by people with experience in C++).

Closes: https://bugs.gentoo.org/931046
Signed-off-by: Sv. Lockal <lockalsash <AT> gmail.com>
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>
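
For readers skimming the diff, the hipBLASLt gating described above boils down to roughly the following src_configure() fragment (a simplified sketch of the ebuild hunks below; the USE_HIPBLASLT cmake option is introduced by the bundled optional-hipblaslt patch):

	local use_hipblaslt="OFF"
	# enable hipBLASLt only when one of the supported datacenter ISAs is selected
	if use amdgpu_targets_gfx90a || use amdgpu_targets_gfx940 \
		|| use amdgpu_targets_gfx941 || use amdgpu_targets_gfx942; then
		use_hipblaslt="ON"
	fi
	mycmakeargs+=( -DUSE_HIPBLASLT=${use_hipblaslt} )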

 sci-libs/caffe2/caffe2-2.3.0-r3.ebuild             |  45 ++--
 sci-libs/caffe2/caffe2-2.3.1.ebuild                |  46 ++--
 .../files/caffe2-2.3.0-exclude-aotriton.patch      |  35 +++
 .../caffe2-2.3.0-fix-gcc-clang-abi-compat.patch    |  17 ++
 .../caffe2/files/caffe2-2.3.0-fix-libcpp.patch     |  24 +++
 .../files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch  |  18 ++
 .../files/caffe2-2.3.0-optional-hipblaslt.patch    | 235 +++++++++++++++++++++
 7 files changed, 393 insertions(+), 27 deletions(-)

diff --git a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
index c01e904d8eb0..666800d8f4b6 100644
--- a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
@@ -4,7 +4,7 @@
 EAPI=8
 
 PYTHON_COMPAT=( python3_{10..12} )
-ROCM_VERSION=5.7
+ROCM_VERSION=6.1
 inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
 
 MYPN=pytorch
@@ -65,18 +65,23 @@ RDEPEND="
 	opencv? ( media-libs/opencv:= )
 	qnnpack? ( sci-libs/QNNPACK )
 	rocm? (
-		>=dev-util/hip-5.7
-		>=dev-libs/rccl-5.7[${ROCM_USEDEP}]
-		>=sci-libs/rocThrust-5.7[${ROCM_USEDEP}]
-		>=sci-libs/rocPRIM-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipBLAS-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipFFT-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipSPARSE-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipRAND-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipCUB-5.7[${ROCM_USEDEP}]
-		>=sci-libs/hipSOLVER-5.7[${ROCM_USEDEP}]
-		>=sci-libs/miopen-5.7[${ROCM_USEDEP}]
-		>=dev-util/roctracer-5.7[${ROCM_USEDEP}]
+		=dev-util/hip-6.1*
+		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
+		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
+		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
+
+		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
+		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
+		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
+		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
 	)
 	distributed? ( sci-libs/tensorpipe[cuda?] )
 	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
@@ -111,6 +116,11 @@ PATCHES=(
 	"${FILESDIR}"/${P}-rocm-fix-std-cpp17.patch
 	"${FILESDIR}"/${PN}-2.2.2-musl.patch
 	"${FILESDIR}"/${P}-CMakeFix.patch
+	"${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+	"${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {
@@ -235,11 +245,20 @@ src_configure() {
 		)
 	elif use rocm; then
 		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+		local use_hipblaslt="OFF"
+		if use amdgpu_targets_gfx90a || use amdgpu_targets_gfx940 || use amdgpu_targets_gfx941 \
+			|| use amdgpu_targets_gfx942; then
+			use_hipblaslt="ON"
+		fi
 
 		mycmakeargs+=(
 			-DUSE_NCCL=ON
 			-DUSE_SYSTEM_NCCL=ON
+			-DUSE_HIPBLASLT=${use_hipblaslt}
 		)
+
+		# ROCm libraries produce too much warnings
+		append-cxxflags -Wno-deprecated-declarations -Wno-unused-result
 	fi
 
 	if use onednn; then

diff --git a/sci-libs/caffe2/caffe2-2.3.1.ebuild b/sci-libs/caffe2/caffe2-2.3.1.ebuild
index 6355d0083336..ee1da28aa12f 100644
--- a/sci-libs/caffe2/caffe2-2.3.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.1.ebuild
@@ -4,7 +4,7 @@
 EAPI=8
 
 PYTHON_COMPAT=( python3_{10..12} )
-ROCM_VERSION=5.7
+ROCM_VERSION=6.1
 inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
 
 MYPN=pytorch
@@ -65,19 +65,23 @@ RDEPEND="
 	opencv? ( media-libs/opencv:= )
 	qnnpack? ( sci-libs/QNNPACK )
 	rocm? (
-		=dev-util/hip-5.7*
-		=dev-libs/rccl-5.7*[${ROCM_USEDEP}]
-		=sci-libs/rocThrust-5.7*[${ROCM_USEDEP}]
-		=sci-libs/rocPRIM-5.7*[${ROCM_USEDEP}]
-		=sci-libs/hipBLAS-5.7*[${ROCM_USEDEP}]
-		sci-libs/hipBLASLt
-		=sci-libs/hipFFT-5.7*[${ROCM_USEDEP}]
-		=sci-libs/hipSPARSE-5.7*[${ROCM_USEDEP}]
-		=sci-libs/hipRAND-5.7*[${ROCM_USEDEP}]
-		=sci-libs/hipCUB-5.7*[${ROCM_USEDEP}]
-		=sci-libs/hipSOLVER-5.7*[${ROCM_USEDEP}]
-		=sci-libs/miopen-5.7*[${ROCM_USEDEP}]
-		=dev-util/roctracer-5.7*[${ROCM_USEDEP}]
+		=dev-util/hip-6.1*
+		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
+		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
+		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
+
+		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
+		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
+		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
+		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
 	)
 	distributed? ( sci-libs/tensorpipe[cuda?] )
 	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
@@ -112,6 +116,11 @@ PATCHES=(
 	"${FILESDIR}"/${PN}-2.3.0-rocm-fix-std-cpp17.patch
 	"${FILESDIR}"/${PN}-2.2.2-musl.patch
 	"${FILESDIR}"/${PN}-2.3.0-CMakeFix.patch
+	"${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+	"${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
+	"${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {
@@ -236,11 +245,20 @@ src_configure() {
 		)
 	elif use rocm; then
 		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+		local use_hipblaslt="OFF"
+		if use amdgpu_targets_gfx90a || use amdgpu_targets_gfx940 || use amdgpu_targets_gfx941 \
+			|| use amdgpu_targets_gfx942; then
+			use_hipblaslt="ON"
+		fi
 
 		mycmakeargs+=(
 			-DUSE_NCCL=ON
 			-DUSE_SYSTEM_NCCL=ON
+			-DUSE_HIPBLASLT=${use_hipblaslt}
 		)
+
+		# ROCm libraries produce too much warnings
+		append-cxxflags -Wno-deprecated-declarations -Wno-unused-result
 	fi
 
 	if use onednn; then

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
new file mode 100644
index 000000000000..2c65987acd85
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
@@ -0,0 +1,35 @@
+Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
+Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1334,7 +1334,9 @@ if(USE_ROCM)
+       message(STATUS "Disabling Kernel Assert for ROCm")
+     endif()
+ 
+-    include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
++    if(USE_FLASH_ATTENTION)
++      include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
++    endif()
+     if(USE_CUDA)
+       caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
+     endif()
+--- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
++++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
+@@ -21,7 +21,7 @@
+ #include <cmath>
+ #include <functional>
+ 
+-#if USE_ROCM
++#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
+ #include <aotriton/flash.h>
+ #endif
+ 
+@@ -186,7 +186,7 @@ bool check_flash_attention_hardware_support(sdp_params const& params, bool debug
+   // Check that the gpu is capable of running flash attention
+   using sm80 = SMVersion<8, 0>;
+   using sm90 = SMVersion<9, 0>;
+-#if USE_ROCM
++#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
+   auto stream = at::cuda::getCurrentCUDAStream().stream();
+   if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
+       auto dprops = at::cuda::getCurrentDeviceProperties();

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
new file mode 100644
index 000000000000..a6f981b7e054
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
@@ -0,0 +1,17 @@
+
+When gcc builds libtorch_cpu.so and hipcc (clang-18) build libtorch_hip.so,
+resulting binary fails in runtime due to different mangling.
+Related issue in LLVM: https://github.com/llvm/llvm-project/issues/85656
+Fixed in pytorch-2.4.0 in https://github.com/pytorch/pytorch/commit/a89f442f0b103fa6f38103784a2dfedbd147f863
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1314,6 +1314,9 @@ if(USE_ROCM)
+        list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
+     endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ 
++    # needed for compat with newer versions of hip-clang that introduced C++20 mangling rules
++    list(APPEND HIP_HIPCC_FLAGS -fclang-abi-compat=17)
++
+     set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
+     # Ask hcc to generate device code during compilation so we can use
+     # host linker to link.

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
new file mode 100644
index 000000000000..75808fd7ec50
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
@@ -0,0 +1,24 @@
+Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
+"reference to __host__ function 'memcpy' in __device__ function"
+--- a/c10/util/Half.h
++++ b/c10/util/Half.h
+@@ -227,7 +227,7 @@ C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
+   // const float exp_scale = 0x1.0p-112f;
+   constexpr uint32_t scale_bits = (uint32_t)15 << 23;
+   float exp_scale_val = 0;
+-  std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
++  memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
+   const float exp_scale = exp_scale_val;
+   const float normalized_value =
+       fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+@@ -298,8 +298,8 @@ inline uint16_t fp16_ieee_from_fp32_value(float f) {
+   constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
+   constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
+   float scale_to_inf_val = 0, scale_to_zero_val = 0;
+-  std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
+-  std::memcpy(
++  memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
++  memcpy(
+       &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
+   const float scale_to_inf = scale_to_inf_val;
+   const float scale_to_zero = scale_to_zero_val;

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
new file mode 100644
index 000000000000..81ae075c67cc
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
@@ -0,0 +1,18 @@
+Fix hip compilation with gcc-14
+Upstream commit: https://github.com/pytorch/pytorch/commit/8c2c3a03fb87c3568a22362d83b00d82b9fb3db2
+--- a/aten/src/ATen/native/cuda/IndexKernel.cu
++++ b/aten/src/ATen/native/cuda/IndexKernel.cu
+@@ -259,7 +259,13 @@ void index_put_kernel_quantized_cuda(TensorIterator& iter, const IntArrayRef ind
+ 
+     gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* const out_data, const char* const in_data, const int64_t offset) {
+       int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale));
++      // See https://github.com/pytorch/pytorch/issues/127666
++      // hip-clang std::clamp __glibcxx_assert_fail host function when building on Fedora40/gcc14
++#ifndef USE_ROCM
+       qvalue = std::clamp(qvalue, qmin, qmax);
++#else
++      qvalue = (qvalue < qmin) ? qmin : (qmax < qvalue) ? qmax : qvalue;
++#endif
+       *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
+     });
+   });

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch b/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
new file mode 100644
index 000000000000..dc544255c2bd
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
@@ -0,0 +1,235 @@
+Makes hipblaslt optional to simplify build for non-datacenter GPUs.
+Based on https://github.com/pytorch/pytorch/pull/120551 with added USE_HIPBLASLT cmake option.
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -225,6 +225,9 @@ option(USE_FAKELOWP "Use FakeLowp operators" OFF)
+ option(USE_FFMPEG "Use ffmpeg" OFF)
+ option(USE_GFLAGS "Use GFLAGS" OFF)
+ option(USE_GLOG "Use GLOG" OFF)
++cmake_dependent_option(
++    USE_HIPBLASLT "Use hipBLASLt" ON
++    "USE_ROCM" OFF)
+ option(USE_LEVELDB "Use LEVELDB" OFF)
+ option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
+ option(USE_LMDB "Use LMDB" OFF)
+--- a/aten/src/ATen/cuda/CUDABlas.cpp
++++ b/aten/src/ATen/cuda/CUDABlas.cpp
+@@ -14,7 +14,7 @@
+ #include <c10/util/irange.h>
+ 
+ #ifdef USE_ROCM
+-#if ROCM_VERSION >= 60000
++#ifdef USE_HIPBLASLT
+ #include <hipblaslt/hipblaslt-ext.hpp>
+ #endif
+ // until hipblas has an API to accept flags, we must use rocblas here
+@@ -781,7 +781,7 @@ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) {
+   }
+ }
+ 
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ 
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000
+ // only for rocm 5.7 where we first supported hipblaslt, it was difficult
+@@ -912,6 +912,7 @@ class CuBlasLtMatmulPreference : public CuBlasLtDescriptor<
+ };
+ } // namespace
+ 
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ template <typename Dtype>
+ void gemm_and_bias(
+     bool transpose_mat1,
+@@ -1124,7 +1125,7 @@ template void gemm_and_bias(
+     at::BFloat16* result_ptr,
+     int64_t result_ld,
+     GEMMAndBiasActivationEpilogue activation);
+-
++#endif
+ void scaled_gemm(
+     char transa,
+     char transb,
+--- a/aten/src/ATen/cuda/CUDABlas.h
++++ b/aten/src/ATen/cuda/CUDABlas.h
+@@ -82,7 +82,7 @@ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
+ template <>
+ void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
+ 
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ enum GEMMAndBiasActivationEpilogue {
+   None,
+   RELU,
+--- a/aten/src/ATen/cuda/CUDAContextLight.h
++++ b/aten/src/ATen/cuda/CUDAContextLight.h
+@@ -9,7 +9,7 @@
+ 
+ // cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also
+ // added bf16 support
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ #include <cublasLt.h>
+ #endif
+ 
+@@ -82,7 +82,7 @@ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
+ /* Handles */
+ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
+ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
+ #endif
+ 
+--- a/aten/src/ATen/cuda/CublasHandlePool.cpp
++++ b/aten/src/ATen/cuda/CublasHandlePool.cpp
+@@ -29,7 +29,7 @@ namespace at::cuda {
+ 
+ namespace {
+ 
+-#if defined(USE_ROCM) && ROCM_VERSION >= 50700
++#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
+ void createCublasLtHandle(cublasLtHandle_t *handle) {
+   TORCH_CUDABLAS_CHECK(cublasLtCreate(handle));
+ }
+@@ -190,7 +190,7 @@ cublasHandle_t getCurrentCUDABlasHandle() {
+   return handle;
+ }
+ 
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ cublasLtHandle_t getCurrentCUDABlasLtHandle() {
+ #ifdef USE_ROCM
+   c10::DeviceIndex device = 0;
+--- a/aten/src/ATen/cuda/tunable/TunableGemm.h
++++ b/aten/src/ATen/cuda/tunable/TunableGemm.h
+@@ -11,7 +11,7 @@
+ 
+ #include <ATen/cuda/tunable/GemmCommon.h>
+ #ifdef USE_ROCM
+-#if ROCM_VERSION >= 50700
++#ifdef USE_HIPBLASLT
+ #include <ATen/cuda/tunable/GemmHipblaslt.h>
+ #endif
+ #include <ATen/cuda/tunable/GemmRocblas.h>
+@@ -166,7 +166,7 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> {
+     }
+ #endif
+ 
+-#if defined(USE_ROCM) && ROCM_VERSION >= 50700
++#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
+     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
+     if (env == nullptr || strcmp(env, "1") == 0) {
+       // disallow tuning of hipblaslt with c10::complex
+@@ -240,7 +240,7 @@ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T>
+     }
+ #endif
+ 
+-#if defined(USE_ROCM) && ROCM_VERSION >= 50700
++#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
+     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
+     if (env == nullptr || strcmp(env, "1") == 0) {
+       // disallow tuning of hipblaslt with c10::complex
+--- a/aten/src/ATen/native/cuda/Blas.cpp
++++ b/aten/src/ATen/native/cuda/Blas.cpp
+@@ -155,7 +155,7 @@ enum class Activation {
+   GELU,
+ };
+ 
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+ cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activation a) {
+   switch (a) {
+     case Activation::None:
+@@ -193,6 +193,7 @@ static bool getDisableAddmmCudaLt() {
+ 
+ #ifdef USE_ROCM
+ static bool isSupportedHipLtROCmArch(int index) {
++#if defined(USE_HIPBLASLT)
+     hipDeviceProp_t* prop = at::cuda::getDeviceProperties(index);
+     std::string device_arch = prop->gcnArchName;
+     static const std::vector<std::string> archs = {"gfx90a", "gfx940", "gfx941", "gfx942"};
+@@ -203,6 +204,7 @@ static bool isSupportedHipLtROCmArch(int index) {
+         }
+     }
+     TORCH_CHECK(false, "Attempting to use hipBLASLt on a unsupported architecture!");
++#endif
+     return false;
+ }
+ #endif
+@@ -228,7 +230,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
+   at::ScalarType scalar_type = self.scalar_type();
+   c10::MaybeOwned<Tensor> self_;
+   if (&result != &self) {
+-#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && ROCM_VERSION >= 50700
++#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && defined(USE_HIPBLASLT)
+     // Strangely, if mat2 has only 1 row or column, we get
+     // CUBLAS_STATUS_INVALID_VALUE error from cublasLtMatmulAlgoGetHeuristic.
+     // self.dim() == 1 && result.dim() == 2 && self.sizes()[0] == mat2_sizes[1]
+@@ -271,7 +273,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
+     }
+     self__sizes = self_->sizes();
+   } else {
+-#if defined(USE_ROCM) && ROCM_VERSION >= 50700
++#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
+     useLtInterface = !disable_addmm_cuda_lt &&
+         result.dim() == 2 && result.is_contiguous() &&
+         isSupportedHipLtROCmArch(self.device().index()) &&
+@@ -322,7 +324,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
+ 
+   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!args.result->is_conj());
+ 
+-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
++#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+   if (useLtInterface) {
+     AT_DISPATCH_FLOATING_TYPES_AND2(
+         at::ScalarType::Half,
+@@ -876,7 +878,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
+   at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
+   at::native::resize_output(amax, {});
+ 
+-#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && ROCM_VERSION >= 60000)
++#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
+   cublasCommonArgs args(mat1, mat2, out);
+   const auto out_dtype_ = args.result->scalar_type();
+   TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt");
+@@ -906,7 +908,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
+   TORCH_CHECK(false, "_scaled_mm_out_cuda is not compiled for this platform.");
+ #endif
+ 
+-#if defined(USE_ROCM) && ROCM_VERSION >= 60000
++#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
+   // rocm's hipblaslt does not yet support amax, so calculate separately
+   auto out_float32 = out.to(kFloat);
+   out_float32.abs_();
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1282,6 +1282,9 @@ if(USE_ROCM)
+     if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
+       list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
+     endif()
++    if(hipblast_FOUND)
++      list(APPEND HIP_CXX_FLAGS -DHIPBLASLT)
++    endif()
+     if(HIPBLASLT_CUSTOM_DATA_TYPE)
+       list(APPEND HIP_CXX_FLAGS -DHIPBLASLT_CUSTOM_DATA_TYPE)
+     endif()
+--- a/cmake/public/LoadHIP.cmake
++++ b/cmake/public/LoadHIP.cmake
+@@ -155,7 +155,7 @@ if(HIP_FOUND)
+   find_package_and_print_version(hiprand REQUIRED)
+   find_package_and_print_version(rocblas REQUIRED)
+   find_package_and_print_version(hipblas REQUIRED)
+-  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
++  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0" AND USE_HIPBLASLT)
+     find_package_and_print_version(hipblaslt REQUIRED)
+   endif()
+   find_package_and_print_version(miopen REQUIRED)
+@@ -191,7 +191,7 @@ if(HIP_FOUND)
+   # roctx is part of roctracer
+   find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib)
+ 
+-  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
++  if(hipblastlt_FOUND)
+     # check whether hipblaslt is using its own datatype
+     set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_data_type.cc")
+     file(WRITE ${file} ""


* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-08-09 16:17 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-08-09 16:17 UTC (permalink / raw
  To: gentoo-commits

commit:     e3b7cde9d825c40eba0c1ee43a71d9455aa77c2c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Fri Aug  9 16:12:10 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Fri Aug  9 16:13:52 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=e3b7cde9

sci-libs/caffe2: update SRC_URI

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   2 +-
 sci-libs/caffe2/caffe2-2.2.2-r1.ebuild             |  23 +-
 sci-libs/caffe2/caffe2-2.3.0-r3.ebuild             |  33 +--
 sci-libs/caffe2/caffe2-2.3.1.ebuild                |  33 +--
 sci-libs/caffe2/caffe2-2.4.0.ebuild                |   2 +-
 .../caffe2/files/caffe2-1.12.0-glog-0.6.0.patch    |  29 ---
 .../caffe2/files/caffe2-1.13.0-install-dirs.patch  | 121 -----------
 .../caffe2/files/caffe2-1.13.1-tensorpipe.patch    |  10 -
 .../files/caffe2-2.0.0-cudnn_include_fix.patch     |  12 --
 sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch     |  41 ----
 .../files/caffe2-2.1.2-fix-openmp-link.patch       |  15 --
 sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch |  12 --
 .../files/caffe2-2.1.2-rocm-fix-std-cpp17.patch    |  68 ------
 sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch    | 195 -----------------
 sci-libs/caffe2/files/caffe2-2.2.2-musl.patch      |  13 --
 sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch  |  11 -
 .../files/caffe2-2.3.0-cudnn_include_fix.patch     |  11 -
 .../files/caffe2-2.3.0-exclude-aotriton.patch      |  35 ---
 .../caffe2-2.3.0-fix-gcc-clang-abi-compat.patch    |  17 --
 .../caffe2/files/caffe2-2.3.0-fix-libcpp.patch     |  24 ---
 .../files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch  |  18 --
 .../files/caffe2-2.3.0-optional-hipblaslt.patch    | 235 ---------------------
 .../files/caffe2-2.3.0-rocm-fix-std-cpp17.patch    |  68 ------
 23 files changed, 48 insertions(+), 980 deletions(-)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 0ab8e6d1b824..d190ba229493 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,4 +1,4 @@
-DIST caffe2-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
+DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
 DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
 DIST pytorch-2.3.0.tar.gz 117029829 BLAKE2B 8f9c0d71ee0a9219b495eddccdcc65107f7ad537c43c68100b229f3d27b0e6c01ccb1659c7fffc356a48d80f2adc0a10361305dc8f1df20446de837d380f89f6 SHA512 67f7e9a096c3ffb952206ebf9105bedebb68c24ad82456083adf1d1d210437fcaa9dd52b68484cfc97d408c9eebc9541c76868c34a7c9982494dc3f424cfb07c
 DIST pytorch-2.3.1.tar.gz 117035696 BLAKE2B d419d7fa1342f1fb317ffce09ec9dc1447414627cc83d36578fe60f68c283c620b2b4d49f414cd206d537b90b16432a06cd1941662720db05d5e2b6c493325f5 SHA512 e1bcae44f9939fc7ccb1360a9b1970d92426f25e5de73e36964df3dd15ad5d8d9f5bd2f9a7dda6b8f64e2bba3674005bd869f542489cc442ad0125a02676f587

diff --git a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
index 6649975ddf2d..773808bc4f76 100644
--- a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
+	-> ${MYP}.tar.gz
+	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -97,16 +98,16 @@ DEPEND="
 "
 
 PATCHES=(
-	"${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-	"${FILESDIR}"/${PN}-2.0.0-gcc13.patch
-	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-	"${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
-	"${FILESDIR}"/${P}-musl.patch
+	../patches/${PN}-2.2.1-gentoo.patch
+	../patches/${PN}-1.13.0-install-dirs.patch
+	../patches/${PN}-1.12.0-glog-0.6.0.patch
+	../patches/${PN}-1.13.1-tensorpipe.patch
+	../patches/${PN}-2.0.0-gcc13.patch
+	../patches/${PN}-2.0.0-cudnn_include_fix.patch
+	../patches/${PN}-2.1.2-fix-rpath.patch
+	../patches/${PN}-2.1.2-fix-openmp-link.patch
+	../patches/${PN}-2.1.2-rocm-fix-std-cpp17.patch
+	../patches/${P}-musl.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
index 666800d8f4b6..7fe4818311cb 100644
--- a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
+	-> ${MYP}.tar.gz
+	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -106,21 +107,21 @@ DEPEND="
 "
 
 PATCHES=(
-	"${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-	"${FILESDIR}"/${P}-cudnn_include_fix.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-	"${FILESDIR}"/${P}-rocm-fix-std-cpp17.patch
-	"${FILESDIR}"/${PN}-2.2.2-musl.patch
-	"${FILESDIR}"/${P}-CMakeFix.patch
-	"${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-	"${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+	../patches/${PN}-2.2.1-gentoo.patch
+	../patches/${PN}-1.13.0-install-dirs.patch
+	../patches/${PN}-1.12.0-glog-0.6.0.patch
+	../patches/${PN}-1.13.1-tensorpipe.patch
+	../patches/${P}-cudnn_include_fix.patch
+	../patches/${PN}-2.1.2-fix-rpath.patch
+	../patches/${PN}-2.1.2-fix-openmp-link.patch
+	../patches/${P}-rocm-fix-std-cpp17.patch
+	../patches/${PN}-2.2.2-musl.patch
+	../patches/${P}-CMakeFix.patch
+	../patches/${PN}-2.3.0-exclude-aotriton.patch
+	../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+	../patches/${PN}-2.3.0-optional-hipblaslt.patch
+	../patches/${PN}-2.3.0-fix-libcpp.patch
+	../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.3.1.ebuild b/sci-libs/caffe2/caffe2-2.3.1.ebuild
index ee1da28aa12f..ff2a9caebd59 100644
--- a/sci-libs/caffe2/caffe2-2.3.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
+	-> ${MYP}.tar.gz
+	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -106,21 +107,21 @@ DEPEND="
 "
 
 PATCHES=(
-	"${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-	"${FILESDIR}"/${PN}-2.3.0-cudnn_include_fix.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-	"${FILESDIR}"/${PN}-2.3.0-rocm-fix-std-cpp17.patch
-	"${FILESDIR}"/${PN}-2.2.2-musl.patch
-	"${FILESDIR}"/${PN}-2.3.0-CMakeFix.patch
-	"${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-	"${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
-	"${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+	../patches/${PN}-2.2.1-gentoo.patch
+	../patches/${PN}-1.13.0-install-dirs.patch
+	../patches/${PN}-1.12.0-glog-0.6.0.patch
+	../patches/${PN}-1.13.1-tensorpipe.patch
+	../patches/${PN}-2.3.0-cudnn_include_fix.patch
+	../patches/${PN}-2.1.2-fix-rpath.patch
+	../patches/${PN}-2.1.2-fix-openmp-link.patch
+	../patches/${PN}-2.3.0-rocm-fix-std-cpp17.patch
+	../patches/${PN}-2.2.2-musl.patch
+	../patches/${PN}-2.3.0-CMakeFix.patch
+	../patches/${PN}-2.3.0-exclude-aotriton.patch
+	../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+	../patches/${PN}-2.3.0-optional-hipblaslt.patch
+	../patches/${PN}-2.3.0-fix-libcpp.patch
+	../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0.ebuild
index 730ea4d365f2..524dafcaacef 100644
--- a/sci-libs/caffe2/caffe2-2.4.0.ebuild
+++ b/sci-libs/caffe2/caffe2-2.4.0.ebuild
@@ -14,7 +14,7 @@ DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
 	-> ${MYP}.tar.gz
-	https://dev.gentoo.org/~tupone/distfiles/caffe2-20240809.tar.gz"
+	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch b/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
deleted file mode 100644
index 6c06d2cca654..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-https://github.com/pytorch/pytorch/issues/58054
-
---- a/c10/util/Logging.cpp
-+++ b/c10/util/Logging.cpp
-@@ -192,23 +192,13 @@
-     google::GLOG_WARNING,
-     "The minimum log level that caffe2 will output.");
- 
--// Google glog's api does not have an external function that allows one to check
--// if glog is initialized or not. It does have an internal function - so we are
--// declaring it here. This is a hack but has been used by a bunch of others too
--// (e.g. Torch).
--namespace google {
--namespace glog_internal_namespace_ {
--bool IsGoogleLoggingInitialized();
--} // namespace glog_internal_namespace_
--} // namespace google
--
- namespace c10 {
- namespace {
- 
- void initGoogleLogging(char const* name) {
- #if !defined(_MSC_VER)
-   // This trick can only be used on UNIX platforms
--  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
-+  if (!::google::IsGoogleLoggingInitialized())
- #endif
-   {
-     ::google::InitGoogleLogging(name);

diff --git a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
deleted file mode 100644
index 299c9f88a173..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
+++ /dev/null
@@ -1,121 +0,0 @@
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -112,7 +112,7 @@
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/c10/cuda/CMakeLists.txt
-+++ b/c10/cuda/CMakeLists.txt
-@@ -64,7 +64,7 @@ add_subdirectory(test)
- # Note: for now, we will put all export path into one single Caffe2Targets group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still self-contained.
--install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- foreach(file ${C10_CUDA_HEADERS})
-   get_filename_component( dir ${file} DIRECTORY )
-   install( FILES ${file} DESTINATION include/c10/cuda/${dir} )
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -55,7 +55,7 @@ target_include_directories(
- add_subdirectory(test)
- 
- # ---[ Installation
--install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -20,7 +20,7 @@
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_gpu PRIVATE caffe2::mkldnn)
-     endif()
--    install(TARGETS caffe2_detectron_ops_gpu DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_gpu DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops_gpu> DESTINATION lib OPTIONAL)
-     endif()
-@@ -37,7 +37,7 @@
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()
-     target_link_libraries(caffe2_detectron_ops_hip PRIVATE torch)
--    install(TARGETS caffe2_detectron_ops_hip DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_hip DESTINATION ${CMAKE_INSTALL_LIBDIR})
-   elseif(NOT IOS_PLATFORM)
-     add_library(caffe2_detectron_ops SHARED ${Detectron_CPU_SRCS})
-     if(HAVE_SOVERSION)
-@@ -49,7 +49,7 @@
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops PRIVATE caffe2::mkldnn)
-     endif()
--    install(TARGETS caffe2_detectron_ops DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops> DESTINATION lib OPTIONAL)
-     endif()
---- a/modules/module_test/CMakeLists.txt
-+++ b/modules/module_test/CMakeLists.txt
-@@ -16,7 +16,7 @@ if(BUILD_TEST AND NOT BUILD_LITE_INTERPRETER)
-         VERSION ${TORCH_VERSION} SOVERSION ${TORCH_SOVERSION})
-   endif()
-   target_link_libraries(caffe2_module_test_dynamic torch_library)
--  install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
-+  install(TARGETS caffe2_module_test_dynamic DESTINATION ${CMAKE_INSTALL_LIBDIR})
-   if(MSVC AND BUILD_SHARED_LIBS)
-     install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION lib OPTIONAL)
-   endif()
---- a/modules/observers/CMakeLists.txt
-+++ b/modules/observers/CMakeLists.txt
-@@ -21,7 +21,7 @@ endif()
- target_link_libraries(caffe2_observers PUBLIC torch_library)
- target_include_directories(caffe2_observers PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
- target_compile_options(caffe2_observers PRIVATE "-DCAFFE2_BUILD_OBSERVER_LIB")
--install(TARGETS caffe2_observers DESTINATION lib)
-+install(TARGETS caffe2_observers DESTINATION ${CMAKE_INSTALL_LIBDIR})
- caffe2_interface_library(caffe2_observers caffe2_observers_library)
- if(MSVC AND BUILD_SHARED_LIBS)
-   install(FILES $<TARGET_PDB_FILE:caffe2_observers> DESTINATION lib OPTIONAL)
---- a/modules/rocksdb/CMakeLists.txt
-+++ b/modules/rocksdb/CMakeLists.txt
-@@ -63,7 +63,7 @@ add_library(caffe2_rocksdb ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb.cc)
- target_link_libraries(caffe2_rocksdb PUBLIC torch_library)
- target_link_libraries(caffe2_rocksdb PRIVATE ${RocksDB_LIBRARIES})
- target_include_directories(caffe2_rocksdb PRIVATE ${RocksDB_INCLUDE_DIR})
--install(TARGETS caffe2_rocksdb DESTINATION lib)
-+install(TARGETS caffe2_rocksdb DESTINATION ${CMAKE_INSTALL_LIBDIR})
- 
- # ---[ Last, Append the library to Caffe2_MODULES, if we are building with
- # the main repo.
---- a/test/cpp/c10d/CMakeLists.txt
-+++ b/test/cpp/c10d/CMakeLists.txt
-@@ -51,7 +51,7 @@ if(USE_CUDA)
-     if(INSTALL_TEST)
-       install(TARGETS ProcessGroupNCCLTest DESTINATION bin)
-       install(TARGETS ProcessGroupNCCLErrorsTest DESTINATION bin)
--      install(TARGETS c10d_cuda_test DESTINATION lib)
-+      install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     endif()
-   endif()
-   if(USE_UCC AND USE_C10D_UCC)
---- a/test/cpp/jit/CMakeLists.txt
-+++ b/test/cpp/jit/CMakeLists.txt
-@@ -32,9 +32,9 @@ endif()
- target_link_libraries(backend_with_compiler torch)
- 
- if(INSTALL_TEST)
--  install(TARGETS torchbind_test DESTINATION lib)
--  install(TARGETS jitbackend_test DESTINATION lib)
--  install(TARGETS backend_with_compiler DESTINATION lib)
-+  install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
- 
- # Build the cpp gtest binary containing the cpp-only tests.

diff --git a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch b/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
deleted file mode 100644
index ae0cac9fb947..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/cmake/Dependencies.cmake	2023-02-28 14:14:49.099057348 +0100
-+++ b/cmake/Dependencies.cmake	2023-02-28 14:15:05.326790806 +0100
-@@ -1404,7 +1404,6 @@
- 
-     # Tensorpipe uses cuda_add_library
-     torch_update_find_cuda_flags()
--    add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
- 
-     list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
-     if(USE_CUDA)

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
deleted file mode 100644
index ff64e4108087..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -uar pytorch-2.0.0/cmake/Dependencies.cmake pytorch-2.0.0orig/cmake/Dependencies.cmake
---- a/cmake/Dependencies.cmake	2023-04-23 09:43:20.767566047 -0400
-+++ b/cmake/Dependencies.cmake	2023-03-09 17:42:00.000000000 -0500
-@@ -1235,7 +1235,7 @@
- 
- # ---[ cuDNN
- if(USE_CUDNN)
--  set(CUDNN_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+  set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
-   target_include_directories(torch::cudnn INTERFACE ${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch b/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
deleted file mode 100644
index acbcebad0a5d..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
+++ /dev/null
@@ -1,41 +0,0 @@
---- a/c10/util/Registry.h   2023-03-09 17:42:00.000000000 -0500
-+++ b/c10/util/Registry.h       2023-04-09 20:38:33.108135511 -0400
-@@ -16,6 +16,7 @@
- #include <memory>
- #include <mutex>
- #include <string>
-+#include <stdexcept>
- #include <unordered_map>
- #include <vector>
- 
---- a/torch/csrc/jit/passes/quantization/quantization_type.h    2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/passes/quantization/quantization_type.h    2023-04-09 20:43:43.124806308 -0400
-@@ -1,5 +1,6 @@
- #pragma once
- #include <ostream>
-+#include <cstdint>
- 
- namespace torch {
- namespace jit {
-
---- a/torch/csrc/jit/runtime/logging.cpp    2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/runtime/logging.cpp    2023-04-09 20:47:49.758142941 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/jit/runtime/logging.h>
- 
- #include <atomic>
-+#include <stdexcept>
- #include <mutex>
- #include <unordered_map>
- 
-
---- a/torch/csrc/lazy/core/multi_wait.cpp   2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/lazy/core/multi_wait.cpp   2023-04-09 20:50:36.608145172 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/lazy/core/multi_wait.h>
- 
- #include <chrono>
-+#include <stdexcept>
- #include <exception>
- 
- namespace torch {

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
deleted file mode 100644
index 3f2d0ae3c30a..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-Fix "undefined symbol: omp_get_max_active_levels" in mkl + <nothing else> builds
-https://github.com/pytorch/pytorch/issues/116576
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1575,6 +1575,10 @@ if(BUILD_SHARED_LIBS)
-     target_link_libraries(torch_global_deps TBB::tbb)
-   endif()
- 
-+  if(USE_OPENMP)
-+    target_link_libraries(torch_global_deps OpenMP::OpenMP_CXX)
-+  endif()
-+
-   install(TARGETS torch_global_deps DESTINATION "${TORCH_INSTALL_LIB_DIR}")
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
deleted file mode 100644
index 731227fa25ee..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Unset rpath to support blas-lapack-switch
-Bug: https://bugs.gentoo.org/921129
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -10,7 +10,6 @@ endif(APPLE)
- set(CMAKE_SKIP_BUILD_RPATH  FALSE)
- # Don't use the install-rpath during the build phase
- set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
--set(CMAKE_INSTALL_RPATH "${_rpath_portable_origin}")
- # Automatically add all linked folders that are NOT in the build directory to
- # the rpath (per library?)
- set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
deleted file mode 100644
index cb0fa0c48e80..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
- 
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- 
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
- # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1598,6 +1598,7 @@ if(USE_ROCM)
- 
-   # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
-   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
-+  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-   target_link_libraries(torch_hip PUBLIC c10_hip)
- 
-   if(NOT INTERN_BUILD_MOBILE)
-@@ -1774,6 +1775,7 @@ if(BUILD_TEST)
-       target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
-       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
-       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
-       if(INSTALL_TEST)
-         install(TARGETS ${test_name} DESTINATION test)
-@@ -1955,6 +1957,7 @@ if(BUILD_PYTHON)
-     endif()
-     if(NOT MSVC)
-       target_compile_options(caffe2_pybind11_state_hip PRIVATE ${HIP_CXX_FLAGS} -fvisibility=hidden)
-+      set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-     endif()
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1287,7 +1287,6 @@ if(USE_ROCM)
-     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
-     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
-     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
--    list(APPEND HIP_CXX_FLAGS -std=c++17)
-     add_definitions(-DROCM_VERSION=${ROCM_VERSION_DEV_INT})
-     add_definitions(-DTORCH_HIP_VERSION=${TORCH_HIP_VERSION})
-     message("TORCH_HIP_VERSION=${TORCH_HIP_VERSION} is added as a compiler defines")
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
-   caffe2_binary_target(${target_name_or_src})
- 
-   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
- 
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
-         ${Detectron_CPU_SRCS}
-         ${Detectron_HIP_SRCS})
-     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+    set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()

diff --git a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
deleted file mode 100644
index 5472a2c41836..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
+++ /dev/null
@@ -1,195 +0,0 @@
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -474,7 +474,7 @@
- endif()
- 
- # ---[ QNNPACK
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- 
-   if(NOT DEFINED QNNPACK_SOURCE_DIR)
-@@ -530,7 +530,7 @@
- endif()
- 
- # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
-@@ -780,7 +780,7 @@
- endif()
- 
- # ---[ FBGEMM
--if(USE_FBGEMM)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-   if(NOT DEFINED FBGEMM_SOURCE_DIR)
-     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
-@@ -828,6 +828,7 @@
- endif()
- 
- if(USE_FBGEMM)
-+  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
-   caffe2_update_option(USE_FBGEMM ON)
- else()
-   caffe2_update_option(USE_FBGEMM OFF)
-@@ -1529,7 +1530,6 @@
-       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
-     endif()
-   endif()
--  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
- 
-   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
-   if(NOT USE_SYSTEM_ONNX)
-@@ -1796,7 +1796,6 @@
- #
- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
--add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
- 
- # Disable compiler feature checks for `fmt`.
- #
-@@ -1805,9 +1804,7 @@
- # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
- # `fmt` is compatible with a superset of the compilers that PyTorch is, it
- # shouldn't be too bad to just disable the checks.
--set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
- 
--list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
- 
- # ---[ Kineto
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -89,7 +89,7 @@
- if(C10_USE_GLOG)
-     target_link_libraries(c10 PUBLIC glog::glog)
- endif()
--target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
-+target_link_libraries(c10 PRIVATE fmt)
- 
- if(C10_USE_NUMA)
-   target_include_directories(c10 PRIVATE ${Numa_INCLUDE_DIR})
---- a/torch/CMakeLists.txt
-+++ b/torch/CMakeLists.txt
-@@ -59,15 +59,9 @@
-     ${CMAKE_BINARY_DIR}
-     ${CMAKE_BINARY_DIR}/aten/src
-     ${CMAKE_BINARY_DIR}/caffe2/aten/src
--    ${CMAKE_BINARY_DIR}/third_party
--    ${CMAKE_BINARY_DIR}/third_party/onnx
- 
--    ${TORCH_ROOT}/third_party/valgrind-headers
- 
--    ${TORCH_ROOT}/third_party/gloo
--    ${TORCH_ROOT}/third_party/onnx
--    ${TORCH_ROOT}/third_party/flatbuffers/include
--    ${TORCH_ROOT}/third_party/kineto/libkineto/include
-+    /usr/include/kineto
- 
-     ${TORCH_SRC_DIR}/csrc
-     ${TORCH_SRC_DIR}/csrc/api/include
-@@ -80,7 +74,6 @@
-     python::python
-     pybind::pybind11
-     shm
--    fmt::fmt-header-only
-     ATEN_CPU_FILES_GEN_LIB)
- 
- if(USE_ASAN AND TARGET Sanitizer::address)
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -835,12 +835,11 @@
- # Re-include to override append_cxx_flag_if_supported from third_party/FBGEMM
- include(cmake/public/utils.cmake)
- if(NOT MSVC)
--  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+  string(APPEND CMAKE_CXX_FLAGS " -O2")
-   # Eigen fails to build with some versions, so convert this to a warning
-   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
-   string(APPEND CMAKE_CXX_FLAGS " -Wall")
-   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
--  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-@@ -930,7 +930,6 @@
-   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
-   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
--  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
- else()
-   # skip unwanted includes from windows.h
-   add_compile_definitions(WIN32_LEAN_AND_MEAN)
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -486,8 +486,6 @@
-   endif()
- 
-   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
--  target_compile_options(${libname} PRIVATE
--      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
- 
- endfunction()
- 
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -57,7 +57,7 @@
-   if(MSVC)
-     set(OPT_FLAG "/fp:strict ")
-   else(MSVC)
--    set(OPT_FLAG "-O3 ")
-+    set(OPT_FLAG " ")
-     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
-       set(OPT_FLAG " ")
-     endif()
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -107,7 +107,7 @@
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
- 
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
-   if(NOT TARGET fxdiv)
-     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
-     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -1055,7 +1055,6 @@
- endif()
- 
- if(NOT MSVC AND USE_XNNPACK)
--  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
- 
- # ==========================================================
-@@ -1175,8 +1174,7 @@
- target_include_directories(torch_cpu PRIVATE
-   ${TORCH_ROOT}/third_party/miniz-2.1.0)
- 
--target_include_directories(torch_cpu PRIVATE
--  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
-+target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
- 
- if(USE_KINETO)
-   target_include_directories(torch_cpu PRIVATE
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -56,7 +56,7 @@
-   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
-   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
- 
--  if(NOT TARGET nnpack)
-+  if(FALSE)
-     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
-       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
-     endif()
---- a/functorch/CMakeLists.txt	2023-11-30 20:30:45.805209036 +0100
-+++ b/functorch/CMakeLists.txt	2023-11-30 20:31:13.284766157 +0100
-@@ -35,4 +35,4 @@
- if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
-   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
- endif()
--install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
-+install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")

diff --git a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch b/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
deleted file mode 100644
index f63e9f1df332..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/torch/csrc/profiler/unwind/unwind.cpp	2024-04-29 12:05:40.895667482 +0200
-+++ b/torch/csrc/profiler/unwind/unwind.cpp	2024-04-29 12:05:53.099524760 +0200
-@@ -112,8 +112,8 @@
- }
- 
- struct Version {
--  uint64_t adds_ = LONG_LONG_MAX;
--  uint64_t subs_ = LONG_LONG_MAX;
-+  uint64_t adds_ = LLONG_MAX;
-+  uint64_t subs_ = LLONG_MAX;
- };
- 
- struct UnwindCache {

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
deleted file mode 100644
index eba37d933cac..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/CMakeLists.txt	2024-04-29 20:32:26.259716769 +0200
-+++ b/CMakeLists.txt	2024-04-29 20:32:35.886384618 +0200
-@@ -50,7 +50,7 @@
- 
- # This define is needed to preserve behavior given anticpated changes to cccl/thrust
- # https://nvidia.github.io/libcudacxx/standard_api/numerics_library/complex.html
--string(APPEND CMAKE_CUDA_FLAGS "-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
-+string(APPEND CMAKE_CUDA_FLAGS " -DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
- 
- if(LINUX)
-   include(cmake/CheckAbi.cmake)

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
deleted file mode 100644
index 77905dbd1ac8..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/cmake/Dependencies.cmake	2024-04-29 18:37:34.005639858 +0200
-+++ b/cmake/Dependencies.cmake	2024-04-29 18:39:29.126587738 +0200
-@@ -1235,7 +1235,7 @@
-   if(CUDNN_VERSION VERSION_LESS 8.5)
-     message(FATAL_ERROR "PyTorch needs CuDNN-8.5 or above, but found ${CUDNN_VERSION}. Builds are still possible with `USE_CUDNN=0`")
-   endif()
--  set(CUDNN_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+  set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
-   target_include_directories(torch::cudnn INTERFACE ${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
deleted file mode 100644
index 2c65987acd85..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
-Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1334,7 +1334,9 @@ if(USE_ROCM)
-       message(STATUS "Disabling Kernel Assert for ROCm")
-     endif()
- 
--    include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+    if(USE_FLASH_ATTENTION)
-+      include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+    endif()
-     if(USE_CUDA)
-       caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
-     endif()
---- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-+++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-@@ -21,7 +21,7 @@
- #include <cmath>
- #include <functional>
- 
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
- #include <aotriton/flash.h>
- #endif
- 
-@@ -186,7 +186,7 @@ bool check_flash_attention_hardware_support(sdp_params const& params, bool debug
-   // Check that the gpu is capable of running flash attention
-   using sm80 = SMVersion<8, 0>;
-   using sm90 = SMVersion<9, 0>;
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
-   auto stream = at::cuda::getCurrentCUDAStream().stream();
-   if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
-       auto dprops = at::cuda::getCurrentDeviceProperties();

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
deleted file mode 100644
index a6f981b7e054..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-
-When gcc builds libtorch_cpu.so and hipcc (clang-18) build libtorch_hip.so,
-resulting binary fails in runtime due to different mangling.
-Related issue in LLVM: https://github.com/llvm/llvm-project/issues/85656
-Fixed in pytorch-2.4.0 in https://github.com/pytorch/pytorch/commit/a89f442f0b103fa6f38103784a2dfedbd147f863
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1314,6 +1314,9 @@ if(USE_ROCM)
-        list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
-     endif(CMAKE_BUILD_TYPE MATCHES Debug)
- 
-+    # needed for compat with newer versions of hip-clang that introduced C++20 mangling rules
-+    list(APPEND HIP_HIPCC_FLAGS -fclang-abi-compat=17)
-+
-     set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
-     # Ask hcc to generate device code during compilation so we can use
-     # host linker to link.

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
deleted file mode 100644
index 75808fd7ec50..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
-"reference to __host__ function 'memcpy' in __device__ function"
---- a/c10/util/Half.h
-+++ b/c10/util/Half.h
-@@ -227,7 +227,7 @@ C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
-   // const float exp_scale = 0x1.0p-112f;
-   constexpr uint32_t scale_bits = (uint32_t)15 << 23;
-   float exp_scale_val = 0;
--  std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
-+  memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
-   const float exp_scale = exp_scale_val;
-   const float normalized_value =
-       fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
-@@ -298,8 +298,8 @@ inline uint16_t fp16_ieee_from_fp32_value(float f) {
-   constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
-   constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
-   float scale_to_inf_val = 0, scale_to_zero_val = 0;
--  std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
--  std::memcpy(
-+  memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
-+  memcpy(
-       &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
-   const float scale_to_inf = scale_to_inf_val;
-   const float scale_to_zero = scale_to_zero_val;

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
deleted file mode 100644
index 81ae075c67cc..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Fix hip compilation with gcc-14
-Upstream commit: https://github.com/pytorch/pytorch/commit/8c2c3a03fb87c3568a22362d83b00d82b9fb3db2
---- a/aten/src/ATen/native/cuda/IndexKernel.cu
-+++ b/aten/src/ATen/native/cuda/IndexKernel.cu
-@@ -259,7 +259,13 @@ void index_put_kernel_quantized_cuda(TensorIterator& iter, const IntArrayRef ind
- 
-     gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* const out_data, const char* const in_data, const int64_t offset) {
-       int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale));
-+      // See https://github.com/pytorch/pytorch/issues/127666
-+      // hip-clang std::clamp __glibcxx_assert_fail host function when building on Fedora40/gcc14
-+#ifndef USE_ROCM
-       qvalue = std::clamp(qvalue, qmin, qmax);
-+#else
-+      qvalue = (qvalue < qmin) ? qmin : (qmax < qvalue) ? qmax : qvalue;
-+#endif
-       *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
-     });
-   });

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch b/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
deleted file mode 100644
index dc544255c2bd..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
+++ /dev/null
@@ -1,235 +0,0 @@
-Makes hipblaslt optional to simplify build for non-datacenter GPUs.
-Based on https://github.com/pytorch/pytorch/pull/120551 with added USE_HIPBLASLT cmake option.
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -225,6 +225,9 @@ option(USE_FAKELOWP "Use FakeLowp operators" OFF)
- option(USE_FFMPEG "Use ffmpeg" OFF)
- option(USE_GFLAGS "Use GFLAGS" OFF)
- option(USE_GLOG "Use GLOG" OFF)
-+cmake_dependent_option(
-+    USE_HIPBLASLT "Use hipBLASLt" ON
-+    "USE_ROCM" OFF)
- option(USE_LEVELDB "Use LEVELDB" OFF)
- option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
- option(USE_LMDB "Use LMDB" OFF)
---- a/aten/src/ATen/cuda/CUDABlas.cpp
-+++ b/aten/src/ATen/cuda/CUDABlas.cpp
-@@ -14,7 +14,7 @@
- #include <c10/util/irange.h>
- 
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 60000
-+#ifdef USE_HIPBLASLT
- #include <hipblaslt/hipblaslt-ext.hpp>
- #endif
- // until hipblas has an API to accept flags, we must use rocblas here
-@@ -781,7 +781,7 @@ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) {
-   }
- }
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- 
- #if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000
- // only for rocm 5.7 where we first supported hipblaslt, it was difficult
-@@ -912,6 +912,7 @@ class CuBlasLtMatmulPreference : public CuBlasLtDescriptor<
- };
- } // namespace
- 
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- template <typename Dtype>
- void gemm_and_bias(
-     bool transpose_mat1,
-@@ -1124,7 +1125,7 @@ template void gemm_and_bias(
-     at::BFloat16* result_ptr,
-     int64_t result_ld,
-     GEMMAndBiasActivationEpilogue activation);
--
-+#endif
- void scaled_gemm(
-     char transa,
-     char transb,
---- a/aten/src/ATen/cuda/CUDABlas.h
-+++ b/aten/src/ATen/cuda/CUDABlas.h
-@@ -82,7 +82,7 @@ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
- template <>
- void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- enum GEMMAndBiasActivationEpilogue {
-   None,
-   RELU,
---- a/aten/src/ATen/cuda/CUDAContextLight.h
-+++ b/aten/src/ATen/cuda/CUDAContextLight.h
-@@ -9,7 +9,7 @@
- 
- // cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also
- // added bf16 support
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- #include <cublasLt.h>
- #endif
- 
-@@ -82,7 +82,7 @@ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
- /* Handles */
- TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
- TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
- #endif
- 
---- a/aten/src/ATen/cuda/CublasHandlePool.cpp
-+++ b/aten/src/ATen/cuda/CublasHandlePool.cpp
-@@ -29,7 +29,7 @@ namespace at::cuda {
- 
- namespace {
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- void createCublasLtHandle(cublasLtHandle_t *handle) {
-   TORCH_CUDABLAS_CHECK(cublasLtCreate(handle));
- }
-@@ -190,7 +190,7 @@ cublasHandle_t getCurrentCUDABlasHandle() {
-   return handle;
- }
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- cublasLtHandle_t getCurrentCUDABlasLtHandle() {
- #ifdef USE_ROCM
-   c10::DeviceIndex device = 0;
---- a/aten/src/ATen/cuda/tunable/TunableGemm.h
-+++ b/aten/src/ATen/cuda/tunable/TunableGemm.h
-@@ -11,7 +11,7 @@
- 
- #include <ATen/cuda/tunable/GemmCommon.h>
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 50700
-+#ifdef USE_HIPBLASLT
- #include <ATen/cuda/tunable/GemmHipblaslt.h>
- #endif
- #include <ATen/cuda/tunable/GemmRocblas.h>
-@@ -166,7 +166,7 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> {
-     }
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
-     if (env == nullptr || strcmp(env, "1") == 0) {
-       // disallow tuning of hipblaslt with c10::complex
-@@ -240,7 +240,7 @@ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T>
-     }
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
-     if (env == nullptr || strcmp(env, "1") == 0) {
-       // disallow tuning of hipblaslt with c10::complex
---- a/aten/src/ATen/native/cuda/Blas.cpp
-+++ b/aten/src/ATen/native/cuda/Blas.cpp
-@@ -155,7 +155,7 @@ enum class Activation {
-   GELU,
- };
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
- cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activation a) {
-   switch (a) {
-     case Activation::None:
-@@ -193,6 +193,7 @@ static bool getDisableAddmmCudaLt() {
- 
- #ifdef USE_ROCM
- static bool isSupportedHipLtROCmArch(int index) {
-+#if defined(USE_HIPBLASLT)
-     hipDeviceProp_t* prop = at::cuda::getDeviceProperties(index);
-     std::string device_arch = prop->gcnArchName;
-     static const std::vector<std::string> archs = {"gfx90a", "gfx940", "gfx941", "gfx942"};
-@@ -203,6 +204,7 @@ static bool isSupportedHipLtROCmArch(int index) {
-         }
-     }
-     TORCH_CHECK(false, "Attempting to use hipBLASLt on a unsupported architecture!");
-+#endif
-     return false;
- }
- #endif
-@@ -228,7 +230,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
-   at::ScalarType scalar_type = self.scalar_type();
-   c10::MaybeOwned<Tensor> self_;
-   if (&result != &self) {
--#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     // Strangely, if mat2 has only 1 row or column, we get
-     // CUBLAS_STATUS_INVALID_VALUE error from cublasLtMatmulAlgoGetHeuristic.
-     // self.dim() == 1 && result.dim() == 2 && self.sizes()[0] == mat2_sizes[1]
-@@ -271,7 +273,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
-     }
-     self__sizes = self_->sizes();
-   } else {
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     useLtInterface = !disable_addmm_cuda_lt &&
-         result.dim() == 2 && result.is_contiguous() &&
-         isSupportedHipLtROCmArch(self.device().index()) &&
-@@ -322,7 +324,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
- 
-   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!args.result->is_conj());
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
-   if (useLtInterface) {
-     AT_DISPATCH_FLOATING_TYPES_AND2(
-         at::ScalarType::Half,
-@@ -876,7 +878,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
-   at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
-   at::native::resize_output(amax, {});
- 
--#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && ROCM_VERSION >= 60000)
-+#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && defined(USE_HIPBLASLT))
-   cublasCommonArgs args(mat1, mat2, out);
-   const auto out_dtype_ = args.result->scalar_type();
-   TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt");
-@@ -906,7 +908,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
-   TORCH_CHECK(false, "_scaled_mm_out_cuda is not compiled for this platform.");
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 60000
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-   // rocm's hipblaslt does not yet support amax, so calculate separately
-   auto out_float32 = out.to(kFloat);
-   out_float32.abs_();
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1282,6 +1282,9 @@ if(USE_ROCM)
-     if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
-       list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
-     endif()
-+    if(hipblast_FOUND)
-+      list(APPEND HIP_CXX_FLAGS -DHIPBLASLT)
-+    endif()
-     if(HIPBLASLT_CUSTOM_DATA_TYPE)
-       list(APPEND HIP_CXX_FLAGS -DHIPBLASLT_CUSTOM_DATA_TYPE)
-     endif()
---- a/cmake/public/LoadHIP.cmake
-+++ b/cmake/public/LoadHIP.cmake
-@@ -155,7 +155,7 @@ if(HIP_FOUND)
-   find_package_and_print_version(hiprand REQUIRED)
-   find_package_and_print_version(rocblas REQUIRED)
-   find_package_and_print_version(hipblas REQUIRED)
--  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0" AND USE_HIPBLASLT)
-     find_package_and_print_version(hipblaslt REQUIRED)
-   endif()
-   find_package_and_print_version(miopen REQUIRED)
-@@ -191,7 +191,7 @@ if(HIP_FOUND)
-   # roctx is part of roctracer
-   find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib)
- 
--  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+  if(hipblastlt_FOUND)
-     # check whether hipblaslt is using its own datatype
-     set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_data_type.cc")
-     file(WRITE ${file} ""

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
deleted file mode 100644
index 127a31e4b225..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
- 
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- 
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
- # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1712,6 +1712,7 @@ if(USE_ROCM)
- 
-   # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
-   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
-+  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-   target_link_libraries(torch_hip PUBLIC c10_hip)
- 
-   if(NOT INTERN_BUILD_MOBILE)
-@@ -1908,6 +1909,7 @@ if(BUILD_TEST)
-       target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
-       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
-       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
-       if(INSTALL_TEST)
-         install(TARGETS ${test_name} DESTINATION test)
-@@ -2092,6 +2094,7 @@ if(BUILD_PYTHON)
-     endif()
-     if(NOT MSVC)
-       target_compile_options(caffe2_pybind11_state_hip PRIVATE ${HIP_CXX_FLAGS} -fvisibility=hidden)
-+      set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-     endif()
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1278,7 +1278,6 @@
-     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
-     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
-     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
--    list(APPEND HIP_CXX_FLAGS -std=c++17)
-     if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
-       list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
-     endif()
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
-   caffe2_binary_target(${target_name_or_src})
- 
-   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
- 
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
-         ${Detectron_CPU_SRCS}
-         ${Detectron_HIP_SRCS})
-     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+    set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-09-04  5:02 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-09-04  5:02 UTC (permalink / raw
  To: gentoo-commits

commit:     c347f7335b65811fbf73dad70d68b84b6788b764
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Sep  4 04:40:36 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Sep  4 04:58:52 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=c347f733

sci-libs/caffe2: req cpp-httplib

Closes: https://bugs.gentoo.org/937635
Closes: https://bugs.gentoo.org/937788
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 .../caffe2/{caffe2-2.4.0.ebuild => caffe2-2.4.0-r1.ebuild}  |  6 +++++-
 sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch        | 13 +++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
similarity index 98%
rename from sci-libs/caffe2/caffe2-2.4.0.ebuild
rename to sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
index 81a8906ea669..d8e42de52185 100644
--- a/sci-libs/caffe2/caffe2-2.4.0.ebuild
+++ b/sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
@@ -86,7 +86,10 @@ RDEPEND="
 		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
 		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
 	)
-	distributed? ( sci-libs/tensorpipe[cuda?] )
+	distributed? (
+		sci-libs/tensorpipe[cuda?]
+		dev-cpp/cpp-httplib
+	)
 	xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
 	mkl? ( sci-libs/mkl )
 	openblas? ( sci-libs/openblas )
@@ -122,6 +125,7 @@ PATCHES=(
 	../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
 	../patches/${PN}-2.3.0-fix-libcpp.patch
 	"${FILESDIR}"/${PN}-2.4.0-libfmt-11.patch
+	"${FILESDIR}"/${P}-cpp-httplib.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch b/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch
new file mode 100644
index 000000000000..5d684a4a4738
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch
@@ -0,0 +1,13 @@
+--- a/torch/lib/libshm/CMakeLists.txt	2024-09-04 06:09:51.943752841 +0200
++++ b/torch/lib/libshm/CMakeLists.txt	2024-09-04 06:10:52.243821438 +0200
+@@ -24,6 +24,10 @@
+   CXX_STANDARD 17)
+ target_link_libraries(shm PRIVATE ${TORCH_CPU_LIB})
+ 
++if (USE_DISTRIBUTED)
++  target_link_libraries(shm PRIVATE cpp-httplib)
++endif()
++
+ if(UNIX AND NOT APPLE)
+   include(CheckLibraryExists)
+   find_package(Threads REQUIRED)



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
@ 2024-11-14 20:16 Alfredo Tupone
  0 siblings, 0 replies; 16+ messages in thread
From: Alfredo Tupone @ 2024-11-14 20:16 UTC (permalink / raw
  To: gentoo-commits

commit:     ee206a9be4541beb3e60a8f3cbccb00ab0ac9e46
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 14 20:07:11 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Nov 14 20:13:25 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=ee206a9b

sci-libs/caffe2: fix xnnpack use, add deps and other fixes

Closes: https://bugs.gentoo.org/943501
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 ...{caffe2-2.5.1.ebuild => caffe2-2.5.1-r1.ebuild} | 42 +++++++++++++++-------
 .../caffe2/files/caffe2-2.5.1-cpp-httplib.patch    | 13 -------
 2 files changed, 29 insertions(+), 26 deletions(-)

diff --git a/sci-libs/caffe2/caffe2-2.5.1.ebuild b/sci-libs/caffe2/caffe2-2.5.1-r1.ebuild
similarity index 90%
rename from sci-libs/caffe2/caffe2-2.5.1.ebuild
rename to sci-libs/caffe2/caffe2-2.5.1-r1.ebuild
index 81e3a916f3b3..393000c79567 100644
--- a/sci-libs/caffe2/caffe2-2.5.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.5.1-r1.ebuild
@@ -38,9 +38,10 @@ RDEPEND="
 	dev-cpp/abseil-cpp:=
 	dev-cpp/gflags:=
 	>=dev-cpp/glog-0.5.0
+	dev-cpp/nlohmann_json
+	dev-cpp/opentelemetry-cpp
 	dev-libs/cpuinfo
 	dev-libs/libfmt
-	dev-cpp/opentelemetry-cpp
 	dev-libs/protobuf:=
 	dev-libs/pthreadpool
 	dev-libs/sleef[cpu_flags_x86_avx512f(+),cpu_flags_x86_avx(+)]
@@ -95,6 +96,7 @@ RDEPEND="
 	mkl? ( sci-libs/mkl )
 	openblas? ( sci-libs/openblas )
 "
+
 DEPEND="
 	${RDEPEND}
 	cuda? ( >=dev-libs/cutlass-3.4.1 )
@@ -118,7 +120,7 @@ PATCHES=(
 	"${FILESDIR}"/${P}-fix-functorch-install.patch
 	"${FILESDIR}"/${P}-cudnn_include_fix.patch
 	"${FILESDIR}"/${P}-gentoo.patch
-	"${FILESDIR}"/${P}-cpp-httplib.patch
+	"${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
 	"${FILESDIR}"/${P}-glog-0.6.0.patch
 )
 
@@ -132,6 +134,7 @@ src_prepare() {
 		cmake/Dependencies.cmake \
 		torch/CMakeLists.txt \
 		|| die
+
 	# Drop third_party from CMake tree
 	sed -i \
 		-e '/add_subdirectory.*third_party/d' \
@@ -140,10 +143,12 @@ src_prepare() {
 		cmake/ProtoBuf.cmake \
 		aten/src/ATen/CMakeLists.txt \
 		|| die
+
 	cmake_src_prepare
 	pushd torch/csrc/jit/serialization || die
 	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
 	popd
+
 	# prefixify the hardcoded paths, after all patches are applied
 	hprefixify \
 		aten/CMakeLists.txt \
@@ -186,6 +191,7 @@ src_configure() {
 	fi
 
 	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
 		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
 		-DPython_EXECUTABLE="${PYTHON}"
 		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
@@ -213,7 +219,18 @@ src_configure() {
 		-DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
 		-DUSE_PYTORCH_METAL=OFF
 		-DUSE_ROCM=$(usex rocm)
-		-DUSE_SYSTEM_LIBS=ON
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_EIGEN_INSTALL=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_PSIMD=ON
+		-DUSE_SYSTEM_PSIMD=ON
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
 		-DUSE_TENSORPIPE=$(usex distributed)
 		-DUSE_UCC=OFF
 		-DUSE_VALGRIND=OFF
@@ -286,20 +303,19 @@ src_install() {
 	cp torch/version.py python/torch/ || die
 	python_domodule python/torch
 
-	dodir $(python_get_sitedir)/torch/bin
-	dodir $(python_get_sitedir)/torch/lib
-	dodir $(python_get_sitedir)/torch/include
+	mkdir "${D}"$(python_get_sitedir)/torch/bin || die
+	mkdir "${D}"$(python_get_sitedir)/torch/lib || die
+	mkdir "${D}"$(python_get_sitedir)/torch/include || die
 
 	ln -s ../../../../../include/torch \
 		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
 
+	mv "${D}"/usr/bin/torch_shm_manager \
+		"${D}"/$(python_get_sitedir)/torch/bin/ || die
 
-	mv "${ED}"/usr/bin/torch_shm_manager \
-		"${ED}"/$(python_get_sitedir)/torch/bin/ || die
-
-	mv "${ED}"/usr/$(get_libdir)/libtorch_global_deps.so \
-		"${ED}"/$(python_get_sitedir)/torch/lib/ || die
+	mv "${D}"/usr/$(get_libdir)/libtorch_global_deps.so \
+		"${D}"/$(python_get_sitedir)/torch/lib/ || die
 
-	mv "${ED}"/usr/lib/libc10*.so \
-		"${ED}"/usr/$(get_libdir)/ || die
+	mv "${D}"/usr/lib/libc10*.so \
+		"${D}"/usr/$(get_libdir)/ || die
 }

diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch b/sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch
deleted file mode 100644
index 5d684a4a4738..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/torch/lib/libshm/CMakeLists.txt	2024-09-04 06:09:51.943752841 +0200
-+++ b/torch/lib/libshm/CMakeLists.txt	2024-09-04 06:10:52.243821438 +0200
-@@ -24,6 +24,10 @@
-   CXX_STANDARD 17)
- target_link_libraries(shm PRIVATE ${TORCH_CPU_LIB})
- 
-+if (USE_DISTRIBUTED)
-+  target_link_libraries(shm PRIVATE cpp-httplib)
-+endif()
-+
- if(UNIX AND NOT APPLE)
-   include(CheckLibraryExists)
-   find_package(Threads REQUIRED)



