From: "Alfredo Tupone" <tupone@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] repo/gentoo:master commit in: sci-libs/caffe2/files/, sci-libs/caffe2/
Date: Wed, 6 Dec 2023 11:55:48 +0000 (UTC)
Message-ID: <1701863725.783ebfe477601f3a416a1bf7f7f0daf5b0732c5c.tupone@gentoo>
commit: 783ebfe477601f3a416a1bf7f7f0daf5b0732c5c
Author: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 6 11:55:09 2023 +0000
Commit: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Dec 6 11:55:25 2023 +0000
URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=783ebfe4
sci-libs/caffe2: add 2.1.1
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>
 sci-libs/caffe2/Manifest                           |   1 +
 sci-libs/caffe2/caffe2-2.1.1.ebuild                | 210 +++++++++++++++++++++
 sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch |  28 +++
 sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch    | 188 ++++++++++++++++++
4 files changed, 427 insertions(+)
diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 0ee75499ccbe..d51cccdc3c37 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62
DIST pytorch-2.0.0.tar.gz 111327292 BLAKE2B 6d593a975c0ade714f0b189f7e3c4ff704b9a9a2377b5e441a9cefc202fa22779966d08948e63671912c6ea5a0eee124042155f4f57a654db34e19e42f013cc9 SHA512 4dd76160711c0d87f3026c8b7fa3ed149dd86b8ac0ee9ecea0eaf80d2e6ce8c29368392e77b9466d90b60634087b462b782495997a5d33367cc8ca9fe14c8a14
DIST pytorch-2.0.1.tar.gz 111335778 BLAKE2B 7a10cc2b2d5e2422aef7e060a0c3a62ca5c7460c6e0b9becade9b98939501975c74ed5a175a653731f43ca824d2c9bd31f41d1f633c2b139779ab23d5331e9ce SHA512 2309a22b3be3ccdb36d8d9781a59a7bdcc2fdb8d95ada205702ec77862480f0cbb12cd5d6b8cd3114d01a6e33b7743d0fe9de93debf37138ca5c14403cdb0c43
+DIST pytorch-2.1.1.tar.gz 116317162 BLAKE2B d9819256cba0b9951aabe95d86fb135e97d8bafa2c010d13162cd9b3373ca75f20d218e31279ace41981f3f76308721c522f9e53745a1ff9e6386fa10634f9ad SHA512 31b36e7732ee086ae7565a3811ab2d1b2869e79057bea7a4ffc4a3c95c544757e656a6d2289ee11fe7508828aca144e4220ef1e9ab1878e075e1259cf6ff9ca4
diff --git a/sci-libs/caffe2/caffe2-2.1.1.ebuild b/sci-libs/caffe2/caffe2-2.1.1.ebuild
new file mode 100644
index 000000000000..e5e9a71069cd
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.1.1.ebuild
@@ -0,0 +1,210 @@
+# Copyright 2022-2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{9..11} )
+inherit python-single-r1 cmake cuda flag-o-matic prefix
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+ -> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg gloo mpi nnpack +numpy opencl opencv openmp qnnpack tensorpipe xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ ffmpeg? ( opencv )
+ mpi? ( distributed )
+ tensorpipe? ( distributed )
+ distributed? ( tensorpipe )
+ gloo? ( distributed )
+" # ?? ( cuda rocm )
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+ ${PYTHON_DEPS}
+ dev-cpp/gflags:=
+ >=dev-cpp/glog-0.5.0
+ dev-libs/cpuinfo
+ dev-libs/libfmt
+ dev-libs/protobuf:=
+ dev-libs/pthreadpool
+ dev-libs/sleef
+ sci-libs/lapack
+ >=sci-libs/onnx-1.12.0
+ sci-libs/foxi
+ cuda? (
+ =dev-libs/cudnn-8*
+ dev-libs/cudnn-frontend:0/8
+ <dev-util/nvidia-cuda-toolkit-12:=[profiler]
+ )
+ fbgemm? ( dev-libs/FBGEMM )
+ ffmpeg? ( media-video/ffmpeg:= )
+ gloo? ( sci-libs/gloo[cuda?] )
+ mpi? ( virtual/mpi )
+ nnpack? ( sci-libs/NNPACK )
+ numpy? ( $(python_gen_cond_dep '
+ dev-python/numpy[${PYTHON_USEDEP}]
+ ') )
+ opencl? ( virtual/opencl )
+ opencv? ( media-libs/opencv:= )
+ qnnpack? ( sci-libs/QNNPACK )
+ tensorpipe? ( sci-libs/tensorpipe[cuda?] )
+ xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+"
+DEPEND="
+ ${RDEPEND}
+ dev-cpp/eigen
+ cuda? ( dev-libs/cutlass )
+ dev-libs/psimd
+ dev-libs/FP16
+ dev-libs/FXdiv
+ dev-libs/pocketfft
+ dev-libs/flatbuffers
+ >=sci-libs/kineto-0.4.0_p20231031
+ $(python_gen_cond_dep '
+ dev-python/pyyaml[${PYTHON_USEDEP}]
+ dev-python/pybind11[${PYTHON_USEDEP}]
+ ')
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=(
+ "${FILESDIR}"/${P}-gentoo.patch
+ "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+ "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+ "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+ "${FILESDIR}"/${PN}-2.0.0-gcc13.patch
+ "${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
+ "${FILESDIR}"/${P}-cudaExtra.patch
+)
+
+src_prepare() {
+ filter-lto #bug 862672
+ sed -i \
+ -e "/third_party\/gloo/d" \
+ cmake/Dependencies.cmake \
+ || die
+ cmake_src_prepare
+ pushd torch/csrc/jit/serialization || die
+ flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+ popd
+ # prefixify the hardcoded paths, after all patches are applied
+ hprefixify \
+ aten/CMakeLists.txt \
+ caffe2/CMakeLists.txt \
+ cmake/Metal.cmake \
+ cmake/Modules/*.cmake \
+ cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+ cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+ cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+ cmake/public/LoadHIP.cmake \
+ cmake/public/cuda.cmake \
+ cmake/Dependencies.cmake \
+ torch/CMakeLists.txt \
+ CMakeLists.txt
+}
+
+src_configure() {
+ if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+ ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+ ewarn "These may not be optimal for your GPU."
+ ewarn ""
+ ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+ ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+ ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+ ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+ ewarn ""
+ ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+ ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+ fi
+
+ local mycmakeargs=(
+ -DBUILD_CUSTOM_PROTOBUF=OFF
+ -DBUILD_SHARED_LIBS=ON
+
+ -DUSE_CCACHE=OFF
+ -DUSE_CUDA=$(usex cuda)
+ -DUSE_CUDNN=$(usex cuda)
+ -DUSE_FAST_NVCC=$(usex cuda)
+ -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+ -DBUILD_NVFUSER=$(usex cuda)
+ -DUSE_DISTRIBUTED=$(usex distributed)
+ -DUSE_MPI=$(usex mpi)
+ -DUSE_FAKELOWP=OFF
+ -DUSE_FBGEMM=$(usex fbgemm)
+ -DUSE_FFMPEG=$(usex ffmpeg)
+ -DUSE_GFLAGS=ON
+ -DUSE_GLOG=ON
+ -DUSE_GLOO=$(usex gloo)
+ -DUSE_KINETO=OFF # TODO
+ -DUSE_LEVELDB=OFF
+ -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+ -DUSE_MKLDNN=OFF
+ -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+ -DUSE_NNPACK=$(usex nnpack)
+ -DUSE_QNNPACK=$(usex qnnpack)
+ -DUSE_XNNPACK=$(usex xnnpack)
+ -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+ -DUSE_TENSORPIPE=$(usex tensorpipe)
+ -DUSE_PYTORCH_QNNPACK=OFF
+ -DUSE_NUMPY=$(usex numpy)
+ -DUSE_OPENCL=$(usex opencl)
+ -DUSE_OPENCV=$(usex opencv)
+ -DUSE_OPENMP=$(usex openmp)
+ -DUSE_ROCM=OFF # TODO
+ -DUSE_SYSTEM_CPUINFO=ON
+ -DUSE_SYSTEM_PYBIND11=ON
+ -DUSE_UCC=OFF
+ -DUSE_VALGRIND=OFF
+ -DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+ -DPYTHON_EXECUTABLE="${PYTHON}"
+ -DUSE_ITT=OFF
+ -DBLAS=Eigen # avoid the use of MKL, if found on the system
+ -DUSE_SYSTEM_EIGEN_INSTALL=ON
+ -DUSE_SYSTEM_PTHREADPOOL=ON
+ -DUSE_SYSTEM_FXDIV=ON
+ -DUSE_SYSTEM_FP16=ON
+ -DUSE_SYSTEM_GLOO=ON
+ -DUSE_SYSTEM_ONNX=ON
+ -DUSE_SYSTEM_SLEEF=ON
+
+ -Wno-dev
+ -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+ -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+ )
+
+ if use cuda; then
+ addpredict "/dev/nvidiactl" # bug 867706
+ addpredict "/dev/char"
+
+ mycmakeargs+=(
+ -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+ )
+ fi
+ cmake_src_configure
+}
+
+src_install() {
+ cmake_src_install
+
+ insinto "/var/lib/${PN}"
+ doins "${BUILD_DIR}"/CMakeCache.txt
+
+ rm -rf python
+ mkdir -p python/torch/include || die
+ mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+ mv "${ED}"/usr/include/torch python/torch/include || die
+ cp torch/version.py python/torch/ || die
+ python_domodule python/caffe2
+ python_domodule python/torch
+}
diff --git a/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch b/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch
new file mode 100644
index 000000000000..f12623f2068a
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.1.1-cudaExtra.patch
@@ -0,0 +1,28 @@
+--- a/third_party/nvfuser/CMakeLists.txt 2023-11-30 21:42:07.336946970 +0100
++++ b/third_party/nvfuser/CMakeLists.txt 2023-11-30 21:46:35.101749250 +0100
+@@ -18,7 +18,7 @@
+ set(NVFUSER_ROOT ${PROJECT_SOURCE_DIR})
+ set(NVFUSER_SRCS_DIR "${NVFUSER_ROOT}/csrc")
+ set(TORCH_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/../..")
+-set(TORCH_INSTALL_LIB_DIR ${TORCH_ROOT}/torch/lib)
++set(TORCH_INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR})
+
+ # --- build nvfuser_codegen library
+
+@@ -218,7 +218,7 @@
+ message(STATUS "somehow this is happening")
+ set_target_properties(${NVFUSER} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+- install(TARGETS ${NVFUSER} EXPORT NvfuserTargets DESTINATION ${TORCH_ROOT}/nvfuser/)
++ install(TARGETS ${NVFUSER} EXPORT NvfuserTargets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+
+ # install nvfuser python files
+ install(DIRECTORY "${NVFUSER_ROOT}/python/"
+--- a/functorch/CMakeLists.txt 2023-11-30 20:30:45.805209036 +0100
++++ b/functorch/CMakeLists.txt 2023-11-30 20:31:13.284766157 +0100
+@@ -35,4 +35,4 @@
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+ set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")
diff --git a/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch
new file mode 100644
index 000000000000..056ac9afe5b3
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.1.1-gentoo.patch
@@ -0,0 +1,188 @@
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -487,7 +487,7 @@ endif()
+ list(APPEND Caffe2_DEPENDENCY_LIBS cpuinfo)
+
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+
+ if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -543,7 +543,7 @@ if(USE_QNNPACK)
+ endif()
+
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+ include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
+@@ -803,7 +803,7 @@
+ endif()
+
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ if(NOT DEFINED FBGEMM_SOURCE_DIR)
+ set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -848,6 +848,7 @@
+ endif()
+
+ if(USE_FBGEMM)
++ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+ caffe2_update_option(USE_FBGEMM ON)
+ else()
+ caffe2_update_option(USE_FBGEMM OFF)
+@@ -1552,7 +1553,6 @@
+ set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+ endif()
+ endif()
+- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+
+ add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+ if(NOT USE_SYSTEM_ONNX)
+@@ -1831,7 +1831,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1840,9 +1839,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+
+ # ---[ Kineto
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -87,7 +87,7 @@
+ if(${USE_GLOG})
+ target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+
+ find_package(Backtrace)
+ if(Backtrace_FOUND)
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -63,15 +63,9 @@
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_BINARY_DIR}/aten/src
+ ${CMAKE_BINARY_DIR}/caffe2/aten/src
+- ${CMAKE_BINARY_DIR}/third_party
+- ${CMAKE_BINARY_DIR}/third_party/onnx
+
+- ${TORCH_ROOT}/third_party/valgrind-headers
+
+- ${TORCH_ROOT}/third_party/gloo
+- ${TORCH_ROOT}/third_party/onnx
+- ${TORCH_ROOT}/third_party/flatbuffers/include
+- ${TORCH_ROOT}/third_party/kineto/libkineto/include
++ /usr/include/kineto
+
+ ${TORCH_SRC_DIR}/csrc
+ ${TORCH_SRC_DIR}/csrc/api/include
+@@ -84,7 +78,6 @@
+ python::python
+ pybind::pybind11
+ shm
+- fmt::fmt-header-only
+ ATEN_CPU_FILES_GEN_LIB)
+
+ if(USE_ASAN AND TARGET Sanitizer::address)
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -812,12 +812,11 @@
+
+ # ---[ Build flags
+ if(NOT MSVC)
+- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++ string(APPEND CMAKE_CXX_FLAGS " -fPIC")
+ # Eigen fails to build with some versions, so convert this to a warning
+ # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+ string(APPEND CMAKE_CXX_FLAGS " -Wall")
+ string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -917,8 +916,6 @@
+ string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+ append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+- append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
+ else()
+ # skip unwanted includes from windows.h
+ add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -492,8 +492,6 @@
+ endif()
+
+ # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+- target_compile_options(${libname} PRIVATE
+- $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+
+ endfunction()
+
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@
+ if(MSVC)
+ set(OPT_FLAG "/fp:strict ")
+ else(MSVC)
+- set(OPT_FLAG "-O3 ")
++ set(OPT_FLAG " ")
+ if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+ set(OPT_FLAG " ")
+ endif()
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -107,7 +107,7 @@
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+ if(NOT TARGET fxdiv)
+ set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+ set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1023,7 +1025,6 @@
+ endif()
+
+ if(NOT MSVC AND USE_XNNPACK)
+- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+
+ # ==========================================================
+@@ -1143,8 +1146,7 @@
+ target_include_directories(torch_cpu PRIVATE
+ ${TORCH_ROOT}/third_party/miniz-2.1.0)
+
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+
+ if(USE_KINETO)
+ target_include_directories(torch_cpu PRIVATE
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -58,7 +58,7 @@
+ set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+ set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+
+- if(NOT TARGET nnpack)
++ if(FALSE)
+ if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+ set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+ endif()