From: "Alfredo Tupone" <tupone@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/
Date: Thu,  4 Apr 2024 09:20:54 +0000 (UTC)
Message-ID: <1712222403.e74afa2c5f42fbc0ff31574a7796c7ee39727e9f.tupone@gentoo>

commit:     e74afa2c5f42fbc0ff31574a7796c7ee39727e9f
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Apr  4 09:19:35 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Apr  4 09:20:03 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=e74afa2c

sci-libs/caffe2: add 2.2.2

Closes: https://bugs.gentoo.org/928339
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest            |   1 +
 sci-libs/caffe2/caffe2-2.2.2.ebuild | 269 ++++++++++++++++++++++++++++++++++++
 2 files changed, 270 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 09e1f64fbe4c..e51168eba07c 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,2 +1,3 @@
 DIST pytorch-2.1.2.tar.gz 116316469 BLAKE2B c5a55ee264bc3477d3556ba6376b5591117e992e56e0dd0c9ba93d12526e2727f7840f6f1e0730a38223b6492c9556840c4ebf22ffd220e97225c2abff303747 SHA512 a8961d78ad785b13c959a0612563a60e0de17a7c8bb9822ddea9a24072796354d07e81c47b6cc8761b21a6448845b088cf80e1661d9e889b0ed5474d3dc76756
 DIST pytorch-2.2.1.tar.gz 116370903 BLAKE2B 7d08e80f91bad76fba1751c30a34bebfe7145058b7758c0d47112702263a80666f70687a8860744725c6aa995e854f766a5bfa4644c23e5635e7e08c8d63a6e9 SHA512 f19ebcf59d183c3348946ba7cfcab2bc4ca93785863b8edc39dba5772083a7b0425ccb4f92a8df4dc0d18246c75e8ff812993161467fbf9dc48d7fb28a1e26f1
+DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0

diff --git a/sci-libs/caffe2/caffe2-2.2.2.ebuild b/sci-libs/caffe2/caffe2-2.2.2.ebuild
new file mode 100644
index 000000000000..fc0b264a9f32
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.2.2.ebuild
@@ -0,0 +1,269 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{10..12} )
+ROCM_VERSION=5.7
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	ffmpeg? ( opencv )
+	mpi? ( distributed )
+	gloo? ( distributed )
+	?? ( cuda rocm )
+	rocm? ( || ( ${ROCM_REQUIRED_USE} ) )
+"
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	virtual/lapack
+	>=sci-libs/onnx-1.12.0
+	<sci-libs/onnx-1.15.0
+	sci-libs/foxi
+	cuda? (
+		=dev-libs/cudnn-8*
+		>=dev-libs/cudnn-frontend-0.9.2:0/8
+		<dev-util/nvidia-cuda-toolkit-12.4.0:=[profiler]
+	)
+	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
+	ffmpeg? ( media-video/ffmpeg:= )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+		') )
+	onednn? ( dev-libs/oneDNN )
+	opencl? ( virtual/opencl )
+	opencv? ( media-libs/opencv:= )
+	qnnpack? ( sci-libs/QNNPACK )
+	rocm? (
+		>=dev-util/hip-5.7
+		>=dev-libs/rccl-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocThrust-5.7[${ROCM_USEDEP}]
+		>=sci-libs/rocPRIM-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipBLAS-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipFFT-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSPARSE-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipRAND-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipCUB-5.7[${ROCM_USEDEP}]
+		>=sci-libs/hipSOLVER-5.7[${ROCM_USEDEP}]
+		>=sci-libs/miopen-5.7[${ROCM_USEDEP}]
+		>=dev-util/roctracer-5.7[${ROCM_USEDEP}]
+	)
+	distributed? ( sci-libs/tensorpipe[cuda?] )
+	xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+	mkl? ( sci-libs/mkl )
+	openblas? ( sci-libs/openblas )
+"
+DEPEND="
+	${RDEPEND}
+	cuda? ( >=dev-libs/cutlass-3.1.0 )
+	onednn? ( sci-libs/ideep )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20231031
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+	')
+"
+
+PATCHES=(
+	"${FILESDIR}"/${PN}-2.2.1-gentoo.patch
+	"${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+	"${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+	"${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+	"${FILESDIR}"/${PN}-2.0.0-gcc13.patch
+	"${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
+	"${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+	"${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
+)
+
+S="${WORKDIR}"/${MYP}
+
+src_prepare() {
+	filter-lto #bug 862672
+	sed -i \
+		-e "/third_party\/gloo/d" \
+		cmake/Dependencies.cmake \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DBUILD_CUSTOM_PROTOBUF=OFF
+		-DBUILD_SHARED_LIBS=ON
+
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=$(usex fbgemm)
+		-DUSE_FFMPEG=$(usex ffmpeg)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=$(usex gloo)
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_LEVELDB=OFF
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MKLDNN=$(usex onednn)
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_QNNPACK=$(usex qnnpack)
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(usex distributed)
+		-DUSE_PYTORCH_QNNPACK=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENCV=$(usex opencv)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_ROCM=$(usex rocm)
+		-DUSE_SYSTEM_CPUINFO=ON
+		-DUSE_SYSTEM_PYBIND11=ON
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+		-DPYTHON_EXECUTABLE="${PYTHON}"
+		-DUSE_ITT=OFF
+		-DUSE_SYSTEM_PTHREADPOOL=ON
+		-DUSE_SYSTEM_FXDIV=ON
+		-DUSE_SYSTEM_FP16=ON
+		-DUSE_SYSTEM_GLOO=ON
+		-DUSE_SYSTEM_ONNX=ON
+		-DUSE_SYSTEM_SLEEF=ON
+		-DUSE_METAL=OFF
+
+		-Wno-dev
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+	)
+
+	if use mkl; then
+		mycmakeargs+=(-DBLAS=MKL)
+	elif use openblas; then
+		mycmakeargs+=(-DBLAS=OpenBLAS)
+	else
+		mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+	fi
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+		addpredict "/dev/char"
+		addpredict "/proc/self/task" # bug 926116
+
+		mycmakeargs+=(
+			-DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	elif use rocm; then
+		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+		mycmakeargs+=(
+			-DUSE_NCCL=ON
+			-DUSE_SYSTEM_NCCL=ON
+		)
+	fi
+
+	if use onednn; then
+		mycmakeargs+=(
+			-DUSE_MKLDNN=ON
+			-DMKLDNN_FOUND=ON
+			-DMKLDNN_LIBRARIES=dnnl
+			-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+		)
+	fi
+
+	cmake_src_configure
+
+	# do not rerun cmake and the build process in src_install
+	sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch/include || die
+	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+	cp torch/version.py python/torch/ || die
+	python_domodule python/caffe2
+	python_domodule python/torch
+	ln -s ../../../../../include/torch \
+		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+}
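Usage note (not part of the commit itself): the ewarn block in src_configure asks users to set TORCH_CUDA_ARCH_LIST before emerging. A minimal, hypothetical sketch of the relevant Portage configuration follows; the compute-capability value and the USE-flag selection are illustrative assumptions, not recommendations from the ebuild, and must be adapted to your hardware.

  # /etc/portage/make.conf -- illustrative value only; look up your GPU's
  # compute capability at https://developer.nvidia.com/cuda-gpus
  TORCH_CUDA_ARCH_LIST="7.5"

  # /etc/portage/package.use/caffe2 -- any flag set must satisfy the ebuild's
  # REQUIRED_USE (gloo and mpi imply distributed; cuda and rocm are exclusive)
  sci-libs/caffe2 cuda gloo distributed numpy openmp

  # then rebuild the package so the new settings take effect
  emerge --ask sci-libs/caffe2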

