commit:     7354afd9d872a15c6bdf1ee831db7f7f131fca01
Author:     Sv. Lockal <lockalsash <AT> gmail <DOT> com>
AuthorDate: Thu Aug  8 03:44:50 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Aug  8 16:22:28 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=7354afd9

sci-libs/caffe2: add 2.4.0

Changes:
* This version no longer has any integration with ffmpeg/opencv.
* qnnpack ceased to exist as a separate library; it is now part of the c10
  sources. Splitting it out into sci-libs/QNNPACK would require major effort,
  while it is very unlikely that any non-pytorch project would use it. It is
  therefore bundled now, and sci-libs/QNNPACK is added as a conflicting
  library (to be sunset in the future).
* The sci-libs/hipBLASLt "is optional" patch was dropped, as hipBLASLt is now
  deeply integrated with caffe2. This should not be a problem, since
  sci-libs/hipBLASLt has a "no kernels" mode in which it compiles as a stub
  library (useful for all non-server GPUs); see the sketch after this list.
* sci-libs/XNNPACK bumped; the old xnnpack is not compatible.
* sci-libs/kineto bumped; the old kineto is not compatible.
* No new patches, but 5 old patches require manual conflict resolution.
* The '-fclang-abi-compat=17' patch in the previous release was incomplete;
  the new version should handle the clang+hipcc combination.
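
For illustration, a minimal sketch of how a user might request the ROCm
flavour of this version. The USE flags come from this ebuild's
IUSE/REQUIRED_USE; the gfx90a target is only an example value, so pick
the AMDGPU_TARGETS entry matching your GPU:

    # /etc/portage/make.conf (example target; a "no kernels" hipBLASLt
    # stub build still satisfies the dependency on non-server GPUs)
    AMDGPU_TARGETS="gfx90a"

    # /etc/portage/package.use/caffe2
    # REQUIRED_USE forbids combining flash with rocm
    sci-libs/caffe2 rocm -flash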

Signed-off-by: Sv. Lockal <lockalsash <AT> gmail.com>
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   1 +
 sci-libs/caffe2/caffe2-2.4.0.ebuild                | 289 +++++++++++++++++++++
 .../files/caffe2-2.4.0-exclude-aotriton.patch      |  65 +++++
 .../files/caffe2-2.4.0-fix-openmp-link.patch       |  14 +
 sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch    | 211 +++++++++++++++
 .../caffe2/files/caffe2-2.4.0-install-dirs.patch   |  70 +++++
 .../files/caffe2-2.4.0-rocm-fix-std-cpp17.patch    |  50 ++++
 7 files changed, 700 insertions(+)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 246ab4d21a1a..f34355be2e16 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
 DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
 DIST pytorch-2.3.0.tar.gz 117029829 BLAKE2B 8f9c0d71ee0a9219b495eddccdcc65107f7ad537c43c68100b229f3d27b0e6c01ccb1659c7fffc356a48d80f2adc0a10361305dc8f1df20446de837d380f89f6 SHA512 67f7e9a096c3ffb952206ebf9105bedebb68c24ad82456083adf1d1d210437fcaa9dd52b68484cfc97d408c9eebc9541c76868c34a7c9982494dc3f424cfb07c
 DIST pytorch-2.3.1.tar.gz 117035696 BLAKE2B d419d7fa1342f1fb317ffce09ec9dc1447414627cc83d36578fe60f68c283c620b2b4d49f414cd206d537b90b16432a06cd1941662720db05d5e2b6c493325f5 SHA512 e1bcae44f9939fc7ccb1360a9b1970d92426f25e5de73e36964df3dd15ad5d8d9f5bd2f9a7dda6b8f64e2bba3674005bd869f542489cc442ad0125a02676f587
+DIST pytorch-2.4.0.tar.gz 115031093 BLAKE2B d206477963977011627df284efa01482fbf57e9fcb5f58f51d679c742b8e5dde6aa6affd8745ab817fcd09477d129a81e74e07be576b5d3585eaca1c735b8e01 SHA512 804d25944035f33de6591fd942fbda44d3de037717a4397d38a97474b01775d30eaf93d16dd708a832c0119050d24d73b90990fd3e3773be79d26ada25244d22

diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0.ebuild
new file mode 100644
index 000000000000..b4384eb7df11
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.4.0.ebuild
@@ -0,0 +1,289 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{10..12} )
+ROCM_VERSION=6.1
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+       -> ${MYP}.tar.gz"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+       ${PYTHON_REQUIRED_USE}
+       mpi? ( distributed )
+       gloo? ( distributed )
+       ?? ( cuda rocm )
+       rocm? (
+               || ( ${ROCM_REQUIRED_USE} )
+               !flash
+       )
+"
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+       ${PYTHON_DEPS}
+       dev-cpp/gflags:=
+       >=dev-cpp/glog-0.5.0
+       dev-libs/cpuinfo
+       dev-libs/libfmt
+       dev-cpp/opentelemetry-cpp
+       dev-libs/protobuf:=
+       dev-libs/pthreadpool
+       dev-libs/sleef
+       virtual/lapack
+       sci-libs/onnx
+       sci-libs/foxi
+       cuda? (
+               dev-libs/cudnn
+               >=dev-libs/cudnn-frontend-1.0.3:0/8
+               <dev-util/nvidia-cuda-toolkit-12.4.0:=[profiler]
+       )
+       fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
+       gloo? ( sci-libs/gloo[cuda?] )
+       mpi? ( virtual/mpi )
+       nnpack? ( sci-libs/NNPACK )
+       numpy? ( $(python_gen_cond_dep '
+               dev-python/numpy[${PYTHON_USEDEP}]
+               ') )
+       onednn? ( dev-libs/oneDNN )
+       opencl? ( virtual/opencl )
+       qnnpack? (
+               !sci-libs/QNNPACK
+               dev-cpp/gemmlowp
+       )
+       rocm? (
+               =dev-util/hip-6.1*
+               =dev-libs/rccl-6.1*[${ROCM_USEDEP}]
+               =sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
+               =sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
+               =sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
+               =sci-libs/miopen-6.1*[${ROCM_USEDEP}]
+               =dev-util/roctracer-6.1*[${ROCM_USEDEP}]
+
+               =sci-libs/hipBLASLt-6.1*
+               amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
+               amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
+               amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
+               amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
+       )
+       distributed? ( sci-libs/tensorpipe[cuda?] )
+       xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
+       mkl? ( sci-libs/mkl )
+       openblas? ( sci-libs/openblas )
+"
+DEPEND="
+       ${RDEPEND}
+       cuda? ( >=dev-libs/cutlass-3.4.1 )
+       onednn? ( sci-libs/ideep )
+       dev-libs/psimd
+       dev-libs/FP16
+       dev-libs/FXdiv
+       dev-libs/pocketfft
+       dev-libs/flatbuffers
+       >=sci-libs/kineto-0.4.0_p20240525
+       $(python_gen_cond_dep '
+               dev-python/pyyaml[${PYTHON_USEDEP}]
+               dev-python/pybind11[${PYTHON_USEDEP}]
+               dev-python/typing-extensions[${PYTHON_USEDEP}]
+       ')
+"
+
+PATCHES=(
+       "${FILESDIR}"/${PN}-2.4.0-gentoo.patch
+       "${FILESDIR}"/${PN}-2.4.0-install-dirs.patch
+       "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+       "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+       "${FILESDIR}"/${PN}-2.3.0-cudnn_include_fix.patch
+       "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
+       "${FILESDIR}"/${PN}-2.4.0-fix-openmp-link.patch
+       "${FILESDIR}"/${PN}-2.4.0-rocm-fix-std-cpp17.patch
+       "${FILESDIR}"/${PN}-2.2.2-musl.patch
+       "${FILESDIR}"/${PN}-2.4.0-exclude-aotriton.patch
+       "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+       "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
+)
+
+src_prepare() {
+       filter-lto #bug 862672
+       sed -i \
+               -e "/third_party\/gloo/d" \
+               cmake/Dependencies.cmake \
+               || die
+       cmake_src_prepare
+       pushd torch/csrc/jit/serialization || die
+       flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+       popd
+       # prefixify the hardcoded paths, after all patches are applied
+       hprefixify \
+               aten/CMakeLists.txt \
+               caffe2/CMakeLists.txt \
+               cmake/Metal.cmake \
+               cmake/Modules/*.cmake \
+               cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+               cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+               cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+               cmake/public/LoadHIP.cmake \
+               cmake/public/cuda.cmake \
+               cmake/Dependencies.cmake \
+               torch/CMakeLists.txt \
+               CMakeLists.txt
+
+       if use rocm; then
+               sed -e "s:/opt/rocm:/usr:" \
+                       -e "s:lib/cmake:$(get_libdir)/cmake:g" \
+                       -e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+                       -i cmake/public/LoadHIP.cmake || die
+
+               ebegin "HIPifying cuda sources"
+               ${EPYTHON} tools/amd_build/build_amd.py || die
+               eend $?
+       fi
+}
+
+src_configure() {
+       if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+               ewarn "WARNING: caffe2 is being built with its default CUDA 
compute capabilities: 3.5 and 7.0."
+               ewarn "These may not be optimal for your GPU."
+               ewarn ""
+               ewarn "To configure caffe2 with the CUDA compute capability 
that is optimal for your GPU,"
+               ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and 
re-emerge caffe2."
+               ewarn "For example, to use CUDA capability 7.5 & 3.5, add: 
TORCH_CUDA_ARCH_LIST=7.5 3.5"
+               ewarn "For a Maxwell model GPU, an example value would be: 
TORCH_CUDA_ARCH_LIST=Maxwell"
+               ewarn ""
+               ewarn "You can look up your GPU's CUDA compute capability at 
https://developer.nvidia.com/cuda-gpus";
+               ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | 
grep 'CUDA Capability'"
+       fi
+
+       local mycmakeargs=(
+               -DBUILD_CUSTOM_PROTOBUF=OFF
+               -DBUILD_SHARED_LIBS=ON
+
+               -DUSE_CCACHE=OFF
+               -DUSE_CUDA=$(usex cuda)
+               -DUSE_DISTRIBUTED=$(usex distributed)
+               -DUSE_MPI=$(usex mpi)
+               -DUSE_FAKELOWP=OFF
+               -DUSE_FBGEMM=$(usex fbgemm)
+               -DUSE_FLASH_ATTENTION=$(usex flash)
+               -DUSE_MEM_EFF_ATTENTION=OFF
+               -DUSE_GFLAGS=ON
+               -DUSE_GLOG=ON
+               -DUSE_GLOO=$(usex gloo)
+               -DUSE_KINETO=OFF # TODO
+               -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+               -DUSE_MKLDNN=$(usex onednn)
+               -DUSE_NNPACK=$(usex nnpack)
+               -DUSE_XNNPACK=$(usex xnnpack)
+               -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+               -DUSE_TENSORPIPE=$(usex distributed)
+               -DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
+               -DUSE_NUMPY=$(usex numpy)
+               -DUSE_OPENCL=$(usex opencl)
+               -DUSE_OPENMP=$(usex openmp)
+               -DUSE_ROCM=$(usex rocm)
+               -DUSE_SYSTEM_CPUINFO=ON
+               -DUSE_SYSTEM_PYBIND11=ON
+               -DUSE_UCC=OFF
+               -DUSE_VALGRIND=OFF
+               -DPython_EXECUTABLE="${PYTHON}"
+               -DUSE_ITT=OFF
+               -DUSE_SYSTEM_PTHREADPOOL=ON
+               -DUSE_SYSTEM_PSIMD=ON
+               -DUSE_SYSTEM_FXDIV=ON
+               -DUSE_SYSTEM_FP16=ON
+               -DUSE_SYSTEM_GLOO=ON
+               -DUSE_SYSTEM_ONNX=ON
+               -DUSE_SYSTEM_SLEEF=ON
+               -DUSE_PYTORCH_METAL=OFF
+               -DUSE_XPU=OFF
+
+               -Wno-dev
+               -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+               -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+       )
+
+       if use mkl; then
+               mycmakeargs+=(-DBLAS=MKL)
+       elif use openblas; then
+               mycmakeargs+=(-DBLAS=OpenBLAS)
+       else
+               mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+       fi
+
+       if use cuda; then
+               addpredict "/dev/nvidiactl" # bug 867706
+               addpredict "/dev/char"
+               addpredict "/proc/self/task" # bug 926116
+
+               mycmakeargs+=(
+                       -DUSE_CUDNN=ON
+                       -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+                       -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+                       -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+               )
+       elif use rocm; then
+               export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+               mycmakeargs+=(
+                       -DUSE_NCCL=ON
+                       -DUSE_SYSTEM_NCCL=ON
+               )
+
+               # ROCm libraries produce too many warnings
+               append-cxxflags -Wno-deprecated-declarations -Wno-unused-result
+
+               if tc-is-clang; then
+                       # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656
+                       append-cxxflags -fclang-abi-compat=17
+               fi
+       fi
+
+       if use onednn; then
+               mycmakeargs+=(
+                       -DUSE_MKLDNN=ON
+                       -DMKLDNN_FOUND=ON
+                       -DMKLDNN_LIBRARIES=dnnl
+                       -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+               )
+       fi
+
+       cmake_src_configure
+
+       # do not rerun cmake and the build process in src_install
+       sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die
+}
+
+src_install() {
+       cmake_src_install
+
+       insinto "/var/lib/${PN}"
+       doins "${BUILD_DIR}"/CMakeCache.txt
+
+       rm -rf python
+       mkdir -p python/torch/include || die
+       mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+       cp torch/version.py python/torch/ || die
+       python_domodule python/caffe2
+       python_domodule python/torch
+       ln -s ../../../../../include/torch \
+               "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 
923269
+}

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch
new file mode 100644
index 000000000000..72ab792b2278
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-exclude-aotriton.patch
@@ -0,0 +1,65 @@
+Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
+Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
+--- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
++++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
+@@ -24,7 +24,7 @@
+ #include <c10/core/SymInt.h>
+ #include <c10/util/string_view.h>
+ 
+-#if USE_ROCM
++#if defined(USE_ROCM) && (defined(USE_MEM_EFF_ATTENTION) || defined(USE_FLASH_ATTENTION))
+ #include <aotriton/flash.h>
+ #endif
+ 
+@@ -207,7 +207,7 @@ bool check_flash_attention_hardware_support(sdp_params const& params, bool debug
+   // Check that the gpu is capable of running flash attention
+   using sm80 = SMVersion<8, 0>;
+   using sm90 = SMVersion<9, 0>;
+-#if USE_ROCM
++#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
+   auto stream = at::cuda::getCurrentCUDAStream().stream();
+   if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
+       auto dprops = at::cuda::getCurrentDeviceProperties();
+@@ -238,7 +238,7 @@ bool check_mem_efficient_hardware_support(sdp_params const& params, bool debug)
+   // Mem Efficient attention supports hardware in the range [sm_50, sm_90]
+   using sm50 = SMVersion<5, 0>;
+   using sm90 = SMVersion<9, 0>;
+-#if USE_ROCM
++#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
+   auto stream = at::cuda::getCurrentCUDAStream().stream();
+   if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
+       auto dprops = at::cuda::getCurrentDeviceProperties();
+@@ -623,7 +623,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
+       array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
+   constexpr auto less_than_sm80_mem_efficient_dtypes =
+       array_of<at::ScalarType>(at::kHalf, at::kFloat);
+-#ifdef USE_ROCM
++#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
+   constexpr auto aotriton_mem_efficient_dtypes =
+       array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
+ #endif
+@@ -668,7 +668,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
+     }
+   }
+ 
+-#ifdef USE_ROCM
++#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
+   return check_tensor_dtype(params, aotriton_mem_efficient_dtypes, debug);
+ #else
+   auto dprop = at::cuda::getCurrentDeviceProperties();
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1095,10 +1095,12 @@ if(USE_ROCM)
+       message(STATUS "Disabling Kernel Assert for ROCm")
+     endif()
+ 
+-    include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
+     if(USE_CUDA)
+       caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
+     endif()
++    if(USE_FLASH_ATTENTION OR USE_MEM_EFF_ATTENTION)
++      include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
++    endif()
+   else()
+     caffe2_update_option(USE_ROCM OFF)
+   endif()

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch b/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch
new file mode 100644
index 000000000000..9b0fe0b97c0f
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-fix-openmp-link.patch
@@ -0,0 +1,14 @@
+Fix "undefined symbol: omp_get_max_active_levels" in mkl + <nothing else> 
builds
+https://github.com/pytorch/pytorch/issues/116576
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -1643,6 +1643,9 @@ if(BUILD_SHARED_LIBS)
+   if(CAFFE2_USE_MKL)
+     target_link_libraries(torch_global_deps caffe2::mkl)
+   endif()
++  if(USE_OPENMP)
++    target_link_libraries(torch_global_deps OpenMP::OpenMP_CXX)
++  endif()
+   # The CUDA libraries are linked here for a different reason: in some
+   # cases we load these libraries with ctypes, and if they weren't opened
+   # with RTLD_GLOBAL, we'll do the "normal" search process again (and

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch
new file mode 100644
index 000000000000..d592a346386b
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-gentoo.patch
@@ -0,0 +1,211 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -966,12 +966,11 @@ endif()
+ # third_party/FBGEMM
+ include(cmake/public/utils.cmake)
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -O2")
+   # Eigen fails to build with some versions, so convert this to a warning
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -1074,7 +1073,6 @@ if(NOT MSVC)
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
++++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
+@@ -324,16 +324,8 @@ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/pytorch_q
+ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
+ 
+ # ---[ Configure clog
+-if(NOT TARGET clog)
+-  set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
+-  set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
+-  add_subdirectory(
+-    "${CLOG_SOURCE_DIR}"
+-    "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
+-  # We build static version of clog but a dynamic library may indirectly depend on it
+-  set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
+-endif()
+-target_link_libraries(pytorch_qnnpack PUBLIC clog)
++find_library(CLOG_LIBRARY NAMES clog REQUIRED)
++target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
+ 
+ # ---[ Configure cpuinfo
+ if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -94,7 +94,7 @@ if(NOT BUILD_LIBTORCHLESS)
+   if(C10_USE_GLOG)
+     target_link_libraries(c10 PUBLIC glog::glog)
+   endif()
+-  target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++  target_link_libraries(c10 PRIVATE fmt)
+ 
+   if(C10_USE_NUMA)
+     message(STATUS "NUMA paths:")
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -87,7 +87,7 @@ endif()
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1075,7 +1075,6 @@ if(USE_XPU)
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+@@ -1178,8 +1177,7 @@ target_include_directories(torch_cpu PRIVATE
+ target_include_directories(torch_cpu PRIVATE
+   ${TORCH_ROOT}/third_party/miniz-2.1.0)
+ 
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+ 
+ if(USE_KINETO)
+   target_include_directories(torch_cpu PRIVATE
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@ if(INTERN_BUILD_ATEN_OPS)
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -466,7 +466,9 @@ if(USE_PYTORCH_QNNPACK)
+       set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
+       set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
+       # QNNPACK depends on gemmlowp headers
+-      target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
++      find_package(gemmlowp REQUIRED)
++      get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
++      target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
+ 
+       if(PYTORCH_QNNPACK_CUSTOM_THREADPOOL)
+         target_compile_definitions(
+@@ -705,7 +707,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
+ endif()
+ 
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   if(NOT DEFINED FBGEMM_SOURCE_DIR)
+     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -753,6 +755,7 @@ if(USE_FBGEMM)
+ endif()
+ 
+ if(USE_FBGEMM)
++  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+   caffe2_update_option(USE_FBGEMM ON)
+ else()
+   caffe2_update_option(USE_FBGEMM OFF)
+@@ -1288,7 +1291,6 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
+       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+     endif()
+   endif()
+-  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+ 
+   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+   if(NOT USE_SYSTEM_ONNX)
+@@ -1530,7 +1532,6 @@ endif()
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ 
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1539,9 +1540,7 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ 
+ # ---[ Kineto
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@ if(ANDROID OR IOS OR ${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR ${CMAKE_SYSTEM_NAM
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -483,8 +483,6 @@ function(torch_compile_options libname)
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/functorch/CMakeLists.txt
++++ b/functorch/CMakeLists.txt
+@@ -42,4 +42,4 @@ endif()
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,16 +59,8 @@ set(TORCH_PYTHON_INCLUDE_DIRECTORIES
+     ${CMAKE_BINARY_DIR}
+     ${CMAKE_BINARY_DIR}/aten/src
+     ${CMAKE_BINARY_DIR}/caffe2/aten/src
+-    ${CMAKE_BINARY_DIR}/third_party
+-    ${CMAKE_BINARY_DIR}/third_party/onnx
+-
+-    ${TORCH_ROOT}/third_party/valgrind-headers
+-
+-    ${TORCH_ROOT}/third_party/gloo
+-    ${TORCH_ROOT}/third_party/onnx
+-    ${TORCH_ROOT}/third_party/flatbuffers/include
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/include
+-    ${TORCH_ROOT}/third_party/cpp-httplib
++    
++    /usr/include/kineto
+ 
+     ${TORCH_SRC_DIR}/csrc
+     ${TORCH_SRC_DIR}/csrc/api/include
+@@ -83,7 +75,6 @@ set(TORCH_PYTHON_LINK_LIBRARIES
+     opentelemetry::api
+     httplib
+     shm
+-    fmt::fmt-header-only
+     ATEN_CPU_FILES_GEN_LIB)
+ 
+ if(USE_ASAN AND TARGET Sanitizer::address)

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch
new file mode 100644
index 000000000000..ee6e8fb91562
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-install-dirs.patch
@@ -0,0 +1,70 @@
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -157,7 +157,7 @@ if(NOT BUILD_LIBTORCHLESS)
+   # Note: for now, we will put all export path into one single Caffe2Targets group
+   # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+   # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-  install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
++  install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ endif()
+ 
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+--- a/c10/cuda/CMakeLists.txt
++++ b/c10/cuda/CMakeLists.txt
+@@ -82,7 +82,7 @@ if(NOT BUILD_LIBTORCHLESS)
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ 
+ endif()
+ 
+--- a/c10/hip/CMakeLists.txt
++++ b/c10/hip/CMakeLists.txt
+@@ -57,7 +57,7 @@ if(NOT BUILD_LIBTORCHLESS)
+       $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../..>
+       $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
+       $<INSTALL_INTERFACE:include>)
+-  install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
++  install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+   set(C10_HIP_LIB c10_hip)
+ endif()
+ 
+--- a/c10/xpu/CMakeLists.txt
++++ b/c10/xpu/CMakeLists.txt
+@@ -45,7 +45,7 @@ target_include_directories(
+     $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
+     $<INSTALL_INTERFACE:include>
+     )
+-  install(TARGETS c10_xpu EXPORT Caffe2Targets DESTINATION lib)
++  install(TARGETS c10_xpu EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+   set(C10_XPU_LIB c10_xpu)
+   add_subdirectory(test)
+ endif()
+--- a/test/cpp/c10d/CMakeLists.txt
++++ b/test/cpp/c10d/CMakeLists.txt
+@@ -64,7 +64,7 @@ if(USE_CUDA)
+       torch_cpu c10d_cuda_test gtest_main __caffe2_ucc)
+     if(INSTALL_TEST)
+       install(TARGETS ProcessGroupUCCTest DESTINATION bin)
+-      install(TARGETS c10d_cuda_test DESTINATION lib)
++      install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
+     endif()
+   endif()
+ else()
+--- a/test/cpp/jit/CMakeLists.txt
++++ b/test/cpp/jit/CMakeLists.txt
+@@ -32,9 +32,9 @@ endif()
+ target_link_libraries(backend_with_compiler torch)
+ 
+ if(INSTALL_TEST)
+-  install(TARGETS torchbind_test DESTINATION lib)
+-  install(TARGETS jitbackend_test DESTINATION lib)
+-  install(TARGETS backend_with_compiler DESTINATION lib)
++  install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
++  install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
++  install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ endif()
+ 
+ # Build the cpp gtest binary containing the cpp-only tests.

diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch
new file mode 100644
index 000000000000..3612c3db1a0b
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.4.0-rocm-fix-std-cpp17.patch
@@ -0,0 +1,50 @@
+Fix for error: invalid argument '-std=c++17' not allowed with 'C'
+https://github.com/pytorch/pytorch/issues/103222
+--- a/c10/hip/CMakeLists.txt
++++ b/c10/hip/CMakeLists.txt
+@@ -36,6 +36,7 @@ if(NOT BUILD_LIBTORCHLESS)
+ 
+   # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
+   target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
++  set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+ 
+   # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is supposed to be
+   # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -1670,6 +1670,7 @@ if(USE_ROCM)
+ 
+   # Since PyTorch files contain HIP headers, these flags are required for the necessary definitions to be added.
+   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
++  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+ 
+   target_link_libraries(torch_hip PUBLIC c10_hip)
+ 
+@@ -1867,6 +1868,7 @@ if(BUILD_TEST)
+       target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
+       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} ${Caffe2_HIP_INCLUDE})
+       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
++      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
+       if(INSTALL_TEST)
+         install(TARGETS ${test_name} DESTINATION test)
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1050,7 +1050,6 @@ if(USE_ROCM)
+     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
+     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
+     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
+-    list(APPEND HIP_CXX_FLAGS -std=c++17)
+     list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
+     if(HIP_NEW_TYPE_ENUMS)
+       list(APPEND HIP_CXX_FLAGS -DHIP_NEW_TYPE_ENUMS)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -332,6 +332,7 @@ function(caffe2_hip_binary_target target_name_or_src)
+   caffe2_binary_target(${target_name_or_src})
+ 
+   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
++  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
+   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
+ endfunction()
+ 
