From: "Alfredo Tupone" <tupone@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/
Date: Wed, 30 Nov 2022 18:20:10 +0000 (UTC)
Message-ID: <1669832395.3625a2f15847ca2daefbbb7c267840701886507b.tupone@gentoo>

commit:     3625a2f15847ca2daefbbb7c267840701886507b
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 30 18:19:55 2022 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Nov 30 18:19:55 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=3625a2f1

sci-libs/caffe2: drop 1.11.0-r2

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                |   1 -
 sci-libs/caffe2/caffe2-1.11.0-r2.ebuild | 160 --------------------------------
 2 files changed, 161 deletions(-)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 0d28654e641f..013309cd70ce 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,2 +1 @@
-DIST pytorch-1.11.0.tar.gz 20719323 BLAKE2B 24e7aaa2c26821d36f8092542de9d8d5ac85a619fb9fffb5131987958842afb1cad395780662d15f3411a7cc6ff83a445871960eca1e469fcbf0b9895d83d6e0 SHA512 2342eb7a1a241f5855a7cf12e11f62bc4baaa78d1d0864e53bfc946e783eb4addd05ca154a814d2376cd602098b5547e61c158d6eddb7cad5a9f3b0c1357adca
 DIST pytorch-1.12.0.tar.gz 106286765 BLAKE2B ff9bafedb35f859f7dccb9b606299cf9c345bdaa0deb87ecfe0c0c30c3c828414d989e1d9a243d9b7cd3f376d56a2f81c241ca2e3c9a8a2b30cddcdeddd3a5c7 SHA512 c9c748a2e0047daaaf199a1ba3198d2d1aee47f664170a9b34ccacd3deeb95f2070e4035eeb900012ef48dc62cf6fb6806f1a1dfe22de8c94892963076e593b7

diff --git a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild b/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
deleted file mode 100644
index d5d2250da0f5..000000000000
--- a/sci-libs/caffe2/caffe2-1.11.0-r2.ebuild
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2022 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{8..10} )
-inherit python-r1 cmake
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda ffmpeg nnpack +numpy opencl opencv openmp qnnpack xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	ffmpeg? ( opencv )
-" # ?? ( cuda rocm )
-
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/gflags:=
-	<dev-cpp/glog-0.5.0
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-libs/protobuf
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	sci-libs/lapack
-	sci-libs/onnx
-	sci-libs/foxi
-	cuda? (
-		=dev-libs/cudnn-8*
-		dev-libs/cudnn-frontend:0/8
-		dev-util/nvidia-cuda-toolkit:=[profiler]
-	)
-	ffmpeg? ( media-video/ffmpeg:= )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( dev-python/numpy[${PYTHON_USEDEP}] )
-	opencl? ( virtual/opencl )
-	opencv? ( media-libs/opencv:= )
-	qnnpack? ( sci-libs/QNNPACK )
-	xnnpack? ( sci-libs/XNNPACK )
-"
-DEPEND="
-	${RDEPEND}
-	dev-cpp/eigen
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	dev-python/pyyaml[${PYTHON_USEDEP}]
-	dev-python/pybind11[${PYTHON_USEDEP}]
-"
-
-S="${WORKDIR}"/${MYP}
-
-PATCHES=(
-	"${FILESDIR}"/${PN}-1.11.0-gentoo.patch
-	"${FILESDIR}"/${PN}-1.12.0-install-dirs.patch
-)
-
-src_prepare() {
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
-		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5,3.5"
-		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
-		ewarn ""
-		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
-		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
-	fi
-
-	python_setup
-	local mycmakeargs=(
-		-DBUILD_CUSTOM_PROTOBUF=OFF
-		-DBUILD_SHARED_LIBS=ON
-
-		-DUSE_CCACHE=OFF
-		-DUSE_CUDA=$(usex cuda)
-		-DUSE_CUDNN=$(usex cuda)
-		-DUSE_FAST_NVCC=$(usex cuda)
-		-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
-		-DUSE_DISTRIBUTED=OFF
-		-DUSE_FAKELOWP=OFF
-		-DUSE_FBGEMM=OFF # TODO
-		-DUSE_FFMPEG=$(usex ffmpeg)
-		-DUSE_GFLAGS=ON
-		-DUSE_GLOG=ON
-		-DUSE_GLOO=OFF
-		-DUSE_KINETO=OFF # TODO
-		-DUSE_LEVELDB=OFF
-		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
-		-DUSE_MKLDNN=OFF
-		-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
-		-DUSE_NNPACK=$(usex nnpack)
-		-DUSE_QNNPACK=$(usex qnnpack)
-		-DUSE_XNNPACK=$(usex xnnpack)
-		-DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
-		-DUSE_PYTORCH_QNNPACK=OFF
-		-DUSE_NUMPY=$(usex numpy)
-		-DUSE_OPENCL=$(usex opencl)
-		-DUSE_OPENCV=$(usex opencv)
-		-DUSE_OPENMP=$(usex openmp)
-		-DUSE_ROCM=OFF # TODO
-		-DUSE_SYSTEM_CPUINFO=ON
-		-DUSE_BREAKPAD=OFF # TODO
-		-DUSE_SYSTEM_BIND11=ON
-		-DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
-		-DPYTHON_EXECUTABLE="${PYTHON}"
-		-DUSE_SYSTEM_EIGEN_INSTALL=ON
-		-DUSE_SYSTEM_PTHREADPOOL=ON
-		-DUSE_SYSTEM_FXDIV=ON
-		-DUSE_SYSTEM_FP16=ON
-		-DUSE_SYSTEM_ONNX=ON
-		-DUSE_SYSTEM_SLEEF=ON
-		-DUSE_TENSORPIPE=OFF
-
-		-Wno-dev
-		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
-		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
-	)
-	cmake_src_configure
-}
-
-python_install() {
-	python_domodule python/caffe2
-	python_domodule python/torch
-}
-
-src_install() {
-	cmake_src_install
-
-	insinto "/var/lib/${PN}"
-	doins "${BUILD_DIR}"/CMakeCache.txt
-
-	rm -rf python
-	mkdir -p python/torch || die
-	mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-	cp torch/version.py python/torch/ || die
-	python_foreach_impl python_install
-}
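
For reference, the ewarn block in src_configure above tells users to set
TORCH_CUDA_ARCH_LIST in make.conf and re-emerge the package. A minimal
make.conf sketch, assuming a GPU with compute capability 7.5 (the value is
only an example; look up your card at https://developer.nvidia.com/cuda-gpus):

  # /etc/portage/make.conf
  # Build CUDA kernels only for the listed compute capabilities,
  # instead of the ebuild's defaults of 3.5 and 7.0.
  TORCH_CUDA_ARCH_LIST="7.5"

After setting the variable, rebuild the package so the new value takes
effect, for example with: emerge --ask --oneshot sci-libs/caffe2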

