commit:     26296e4b172b355097bdb4816ebceeaa0755bb74
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  2 06:12:05 2025 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Jul  2 06:12:49 2025 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=26296e4b

sci-ml/caffe2: drop 2.4.1-r7

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-ml/caffe2/Manifest                           |   2 -
 sci-ml/caffe2/caffe2-2.4.1-r7.ebuild             | 305 -----------------------
 sci-ml/caffe2/files/caffe2-2.4.0-cstdint.patch   |  10 -
 sci-ml/caffe2/files/caffe2-2.4.0-libfmt-11.patch |  44 ----
 4 files changed, 361 deletions(-)

diff --git a/sci-ml/caffe2/Manifest b/sci-ml/caffe2/Manifest
index 8d81a58682be..fda2a4a29e01 100644
--- a/sci-ml/caffe2/Manifest
+++ b/sci-ml/caffe2/Manifest
@@ -1,8 +1,6 @@
-DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 
77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c
 SHA512 
74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
 DIST composable_kernel-50ee4267.tar.gz 4194795 BLAKE2B 
b3c97d98a0c9e4620fdae3d30006edf55cc60ffa7f8518f6acb8d808647bc4de362c2e2b7e974686503fa2c7f359b6981cfbda74e40cc1bad4d351c5d2ff92e1
 SHA512 
9fc6f5f15556f020414b4567520329ef762209a82411a246c2bc1240a9fed2669f7fcb982cf773e3e9561bf9a2c557dba82b8b469d2e5844e679e2f5ab7c3e17
 DIST composable_kernel-8086bbe3.tar.gz 4418862 BLAKE2B 
b710e3d4586899443ec01044dad19fd2f992c351e2f65ba526dfcc47cc65c095beaf8ac21a8f71c02a0eb524d364e817b27241a9198884f2bdae9924b51e24e4
 SHA512 
8410b5a1c864d71f3034ef0d9d1245078856d09cc191faec59856c229bf11d89ae291036d735cb5cec4f1d72e6e9e8f6921833147f9619d30cfab8722d3a9f63
 DIST flash-attention-2.7.4.gh.tar.gz 5841323 BLAKE2B 
432999d763f2b3d732580ddfea5d3e01370351db0656546259a5e500a07516dd03c98828bfb55855dabe4adc651033b5d97ea4725ca46158b9970f0fbc662710
 SHA512 
05a4afb09e666f7404d6a3f8b5256e7bed6eba60a6f1bde2b7dbb96d318975f0b458c2521c7a38d88e97b6e4c27f29077cf787849daf82586e33f43a3d9a84b3
-DIST pytorch-2.4.1.tar.gz 115029469 BLAKE2B 
c2909ff27d527bc57cba56b780d3b8cd07a043ab045caa6c6b27857a16f9ad10aaab2116b26226b1e46ee08ffb44007965d914464418e4ae14ca48c3f3f383bb
 SHA512 
7e9b4485e242eaf0d648765c6621d73d95e7107b766646a098175436d1ab2e2b864badd0757a3bab6b7c318233f2120bad9ac07b39bb9e357897919580c87631
 DIST pytorch-2.5.1.tar.gz 116091366 BLAKE2B 
7838b17562b94ffc7d798031348689db607dd5eae2a3c35be365972e2b52a2c1b12067068d5aca5ab00cf0977d9c2c3c9ae5337d69534c864c732e6256cbeef6
 SHA512 
a913a466324a65fa3d79c5e9ad4d605fc7976f0134fda2f81aaa3cea29d56926604999b8a238759646d211e63b47bbb446cdffa86ca8defd8159f11e30301289
 DIST pytorch-2.6.0.tar.gz 119594438 BLAKE2B 
3152eb341cf42295e147e59625beb9c06608aa4b78f9618c1c0024b10c1c767715d07fe8c4be52d029ac47f808cd0d5e65c9530ec90d951a64b993083b4067ad
 SHA512 
a70da80ff09d226085e18228132cf6bb236ad8cc47eed52375d0d2a615f09dd33849da947270b5670c184eab60cb8e2adf11d801babfbda7aa621400501d07b0
 DIST pytorch-2.7.0.tar.gz 50197290 BLAKE2B 
2a317d1e9b0d8876f1593382246cd9f786eff3c1b8602353c5e0010dc8414720c5de61886361843a0c33268830c784963a89b410b361e1b67636e652f6a6a2eb
 SHA512 
63eb0363ea68d23567f5524ee8b51756d9302bbe1cbefa367335ab5ebe652523dba75fa417ea3e7eedfc67aa4bef1434c8b7e3dfde2152061b91b6e489763a55

diff --git a/sci-ml/caffe2/caffe2-2.4.1-r7.ebuild 
b/sci-ml/caffe2/caffe2-2.4.1-r7.ebuild
deleted file mode 100644
index b51bab3da130..000000000000
--- a/sci-ml/caffe2/caffe2-2.4.1-r7.ebuild
+++ /dev/null
@@ -1,305 +0,0 @@
-# Copyright 2022-2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{10..13} )
-ROCM_VERSION=6.1
-inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-       -> ${MYP}.tar.gz
-       https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
-
-S="${WORKDIR}"/${MYP}
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas 
opencl openmp qnnpack rocm xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-       ${PYTHON_REQUIRED_USE}
-       mpi? ( distributed )
-       gloo? ( distributed )
-       ?? ( cuda rocm )
-       rocm? (
-               || ( ${ROCM_REQUIRED_USE} )
-               !flash
-       )
-"
-
-# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
-RDEPEND="
-       ${PYTHON_DEPS}
-       dev-cpp/abseil-cpp:=
-       dev-cpp/gflags:=
-       >=dev-cpp/glog-0.5.0
-       dev-libs/cpuinfo
-       dev-libs/libfmt:=
-       dev-cpp/opentelemetry-cpp
-       dev-libs/protobuf:=
-       dev-libs/pthreadpool
-       dev-libs/sleef[cpu_flags_x86_avx512f(+),cpu_flags_x86_avx(+)]
-       dev-libs/sleef[cpu_flags_x86_sse3(+),cpu_flags_x86_ssse3(+)]
-       dev-libs/sleef[cpu_flags_x86_sse4_1(+),cpu_flags_x86_sse4_2(+)]
-       virtual/lapack
-       sci-ml/onnx
-       sci-ml/foxi
-       cuda? (
-               dev-libs/cudnn
-               >=sci-ml/cudnn-frontend-1.0.3:0/8
-               <dev-util/nvidia-cuda-toolkit-12.5:=[profiler]
-       )
-       fbgemm? ( sci-ml/FBGEMM )
-       gloo? ( sci-ml/gloo[cuda?] )
-       mpi? ( virtual/mpi )
-       nnpack? ( sci-ml/NNPACK )
-       numpy? ( $(python_gen_cond_dep '
-               dev-python/numpy[${PYTHON_USEDEP}]
-               ') )
-       onednn? ( sci-ml/oneDNN )
-       opencl? ( virtual/opencl )
-       qnnpack? (
-               !sci-libs/QNNPACK
-               sci-ml/gemmlowp
-       )
-       rocm? (
-               =dev-util/hip-6.1*
-               =dev-libs/rccl-6.1*
-               =sci-libs/rocThrust-6.1*
-               =sci-libs/rocPRIM-6.1*
-               =sci-libs/hipBLAS-6.1*
-               =sci-libs/hipFFT-6.1*
-               =sci-libs/hipSPARSE-6.1*
-               =sci-libs/hipRAND-6.1*
-               =sci-libs/hipCUB-6.1*
-               =sci-libs/hipSOLVER-6.1*
-               =sci-libs/miopen-6.1*
-               =dev-util/roctracer-6.1*
-
-               =sci-libs/hipBLASLt-6.1*
-               amdgpu_targets_gfx90a? ( 
=sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
-               amdgpu_targets_gfx940? ( 
=sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
-               amdgpu_targets_gfx941? ( 
=sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
-               amdgpu_targets_gfx942? ( 
=sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
-       )
-       distributed? (
-               sci-ml/tensorpipe[cuda?]
-               dev-cpp/cpp-httplib
-       )
-       xnnpack? ( sci-ml/XNNPACK )
-       mkl? ( sci-libs/mkl )
-       openblas? ( sci-libs/openblas )
-"
-DEPEND="
-       ${RDEPEND}
-       dev-libs/clog
-       dev-libs/psimd
-       dev-libs/FXdiv
-       dev-libs/pocketfft
-       dev-libs/flatbuffers
-       sci-ml/FP16
-       sci-ml/kineto
-       $(python_gen_cond_dep '
-               dev-python/pybind11[${PYTHON_USEDEP}]
-               dev-python/pyyaml[${PYTHON_USEDEP}]
-               dev-python/typing-extensions[${PYTHON_USEDEP}]
-       ')
-       cuda? ( <=dev-libs/cutlass-3.4.1 )
-       onednn? ( sci-ml/ideep )
-"
-
-PATCHES=(
-       ../patches/${PN}-2.4.0-gentoo.patch
-       ../patches/${PN}-2.4.0-install-dirs.patch
-       ../patches/${PN}-1.12.0-glog-0.6.0.patch
-       ../patches/${PN}-1.13.1-tensorpipe.patch
-       ../patches/${PN}-2.3.0-cudnn_include_fix.patch
-       ../patches/${PN}-2.1.2-fix-rpath.patch
-       ../patches/${PN}-2.4.0-fix-openmp-link.patch
-       ../patches/${PN}-2.4.0-rocm-fix-std-cpp17.patch
-       ../patches/${PN}-2.2.2-musl.patch
-       ../patches/${PN}-2.4.0-exclude-aotriton.patch
-       ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-       ../patches/${PN}-2.3.0-fix-libcpp.patch
-       "${FILESDIR}"/${PN}-2.4.0-libfmt-11.patch
-       "${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
-       "${FILESDIR}"/${PN}-2.4.0-cstdint.patch
-)
-
-src_prepare() {
-       filter-lto #bug 862672
-       sed -i \
-               -e "/third_party\/gloo/d" \
-               cmake/Dependencies.cmake \
-               || die
-       cmake_src_prepare
-       pushd torch/csrc/jit/serialization || die
-       flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-       popd
-       # prefixify the hardcoded paths, after all patches are applied
-       hprefixify \
-               aten/CMakeLists.txt \
-               caffe2/CMakeLists.txt \
-               cmake/Metal.cmake \
-               cmake/Modules/*.cmake \
-               cmake/Modules_CUDA_fix/FindCUDNN.cmake \
-               cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
-               
cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
-               cmake/public/LoadHIP.cmake \
-               cmake/public/cuda.cmake \
-               cmake/Dependencies.cmake \
-               torch/CMakeLists.txt \
-               CMakeLists.txt
-
-       if use rocm; then
-               sed -e "s:/opt/rocm:/usr:" \
-                       -e "s:lib/cmake:$(get_libdir)/cmake:g" \
-                       -e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
-                       -i cmake/public/LoadHIP.cmake || die
-
-               ebegin "HIPifying cuda sources"
-               ${EPYTHON} tools/amd_build/build_amd.py || die
-               eend $?
-       fi
-}
-
-src_configure() {
-       if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-               ewarn "WARNING: caffe2 is being built with its default CUDA 
compute capabilities: 3.5 and 7.0."
-               ewarn "These may not be optimal for your GPU."
-               ewarn ""
-               ewarn "To configure caffe2 with the CUDA compute capability 
that is optimal for your GPU,"
-               ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and 
re-emerge caffe2."
-               ewarn "For example, to use CUDA capability 7.5 & 3.5, add: 
TORCH_CUDA_ARCH_LIST=7.5 3.5"
-               ewarn "For a Maxwell model GPU, an example value would be: 
TORCH_CUDA_ARCH_LIST=Maxwell"
-               ewarn ""
-               ewarn "You can look up your GPU's CUDA compute capability at 
https://developer.nvidia.com/cuda-gpus"
-               ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | 
grep 'CUDA Capability'"
-       fi
-
-       local mycmakeargs=(
-               -DBUILD_CUSTOM_PROTOBUF=OFF
-               -DBUILD_SHARED_LIBS=ON
-
-               -DUSE_CCACHE=OFF
-               -DUSE_CUDA=$(usex cuda)
-               -DUSE_DISTRIBUTED=$(usex distributed)
-               -DUSE_MPI=$(usex mpi)
-               -DUSE_FAKELOWP=OFF
-               -DUSE_FBGEMM=$(usex fbgemm)
-               -DUSE_FLASH_ATTENTION=$(usex flash)
-               -DUSE_MEM_EFF_ATTENTION=OFF
-               -DUSE_GFLAGS=ON
-               -DUSE_GLOG=ON
-               -DUSE_GLOO=$(usex gloo)
-               -DUSE_KINETO=OFF # TODO
-               -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
-               -DUSE_MKLDNN=$(usex onednn)
-               -DUSE_NNPACK=$(usex nnpack)
-               -DUSE_XNNPACK=$(usex xnnpack)
-               -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
-               -DUSE_TENSORPIPE=$(usex distributed)
-               -DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
-               -DUSE_NUMPY=$(usex numpy)
-               -DUSE_OPENCL=$(usex opencl)
-               -DUSE_OPENMP=$(usex openmp)
-               -DUSE_ROCM=$(usex rocm)
-               -DUSE_SYSTEM_CPUINFO=ON
-               -DUSE_SYSTEM_PYBIND11=ON
-               -DUSE_UCC=OFF
-               -DUSE_VALGRIND=OFF
-               -DPython_EXECUTABLE="${PYTHON}"
-               -DUSE_ITT=OFF
-               -DUSE_SYSTEM_PTHREADPOOL=ON
-               -DUSE_SYSTEM_PSIMD=ON
-               -DUSE_SYSTEM_FXDIV=ON
-               -DUSE_SYSTEM_FP16=ON
-               -DUSE_SYSTEM_GLOO=ON
-               -DUSE_SYSTEM_ONNX=ON
-               -DUSE_SYSTEM_SLEEF=ON
-               -DUSE_PYTORCH_METAL=OFF
-               -DUSE_XPU=OFF
-
-               -Wno-dev
-               -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
-               -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
-       )
-
-       if use mkl; then
-               mycmakeargs+=(-DBLAS=MKL)
-       elif use openblas; then
-               mycmakeargs+=(-DBLAS=OpenBLAS)
-       else
-               mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
-       fi
-
-       if use cuda; then
-               addpredict "/dev/nvidiactl" # bug 867706
-               addpredict "/dev/char"
-               addpredict "/proc/self/task" # bug 926116
-
-               mycmakeargs+=(
-                       -DUSE_CUDNN=ON
-                       -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 
7.0}"
-                       -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication 
Library
-                       -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
-               )
-       elif use rocm; then
-               export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
-
-               mycmakeargs+=(
-                       -DUSE_NCCL=ON
-                       -DUSE_SYSTEM_NCCL=ON
-               )
-
-               # ROCm libraries produce too much warnings
-               append-cxxflags -Wno-deprecated-declarations -Wno-unused-result
-
-               if tc-is-clang; then
-                       # fix mangling in LLVM: 
https://github.com/llvm/llvm-project/issues/85656
-                       append-cxxflags -fclang-abi-compat=17
-               fi
-       fi
-
-       if use onednn; then
-               mycmakeargs+=(
-                       -DUSE_MKLDNN=ON
-                       -DMKLDNN_FOUND=ON
-                       -DMKLDNN_LIBRARIES=dnnl
-                       
-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
-               )
-       fi
-
-       cmake_src_configure
-
-       # do not rerun cmake and the build process in src_install
-       sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
-}
-
-python_install() {
-       python_domodule python/caffe2
-       python_domodule python/torch
-       ln -s ../../../../../include/torch \
-               "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 
923269
-}
-
-src_install() {
-       cmake_src_install
-
-       # Used by pytorch ebuild
-       insinto "/var/lib/${PN}"
-       doins "${BUILD_DIR}"/CMakeCache.txt
-
-       rm -rf python
-       mkdir -p python/torch/include || die
-       mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
-       cp torch/version.py python/torch/ || die
-       python_install
-}

diff --git a/sci-ml/caffe2/files/caffe2-2.4.0-cstdint.patch 
b/sci-ml/caffe2/files/caffe2-2.4.0-cstdint.patch
deleted file mode 100644
index f248ab031eb0..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.4.0-cstdint.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/caffe2/utils/string_utils.cc     2024-09-05 08:29:06.930438069 +0200
-+++ b/caffe2/utils/string_utils.cc     2024-09-05 08:29:28.398137596 +0200
-@@ -3,6 +3,7 @@
- #include <algorithm>
- #include <sstream>
- #include <vector>
-+#include <cstdint>
- 
- namespace caffe2 {
- 

diff --git a/sci-ml/caffe2/files/caffe2-2.4.0-libfmt-11.patch 
b/sci-ml/caffe2/files/caffe2-2.4.0-libfmt-11.patch
deleted file mode 100644
index 9f6740a07f1f..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.4.0-libfmt-11.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-Fix build against libfmt-11
-
-https://github.com/pytorch/pytorch/commit/83eedf66b9e7f52323d9f45c5dfaa64472452595
-https://github.com/pytorch/pytorch/pull/130628
-
-From 83eedf66b9e7f52323d9f45c5dfaa64472452595 Mon Sep 17 00:00:00 2001
-From: Aaron Gokaslan <[email protected]>
-Date: Tue, 16 Jul 2024 06:12:08 +0000
-Subject: [PATCH] Update libfmt submodule to 11.0.1 (#130628)
-
-Update libfmt to 11.0.1 reopen of 
https://github.com/pytorch/pytorch/pull/129962. Requires a kineto update and 
moves fmt::join into a separate include so added it where necessary.
-
-Pull Request resolved: https://github.com/pytorch/pytorch/pull/130628
-Approved by: https://github.com/aaronenyeshi
---- a/torch/csrc/distributed/c10d/socket.cpp
-+++ b/torch/csrc/distributed/c10d/socket.cpp
-@@ -32,6 +32,7 @@ C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wdeprecated")
- #include <fmt/chrono.h>
- C10_DIAGNOSTIC_POP()
- #include <fmt/format.h>
-+#include <fmt/ranges.h>
- 
- #include <torch/csrc/distributed/c10d/error.h>
- #include <torch/csrc/distributed/c10d/exception.h>
---- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp
-+++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
-@@ -10,6 +10,7 @@
- #endif // _WIN32
- 
- #include <fmt/format.h>
-+#include <fmt/ranges.h>
- #include <chrono>
- #include <cmath>
- #include <fstream>
---- a/torch/csrc/profiler/util.cpp
-+++ b/torch/csrc/profiler/util.cpp
-@@ -5,6 +5,7 @@
- #include <c10/util/ArrayRef.h>
- #include <c10/util/irange.h>
- #include <fmt/format.h>
-+#include <fmt/ranges.h>
- 
- #ifdef USE_KINETO
- #include <libkineto.h>

Reply via email to