commit:     bd53f5db4fd7a23956623e81ce0058505609928b
Author:     Sergey Alirzaev <l29ah <AT> riseup <DOT> net>
AuthorDate: Thu Jul 10 00:16:05 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Thu Jul 10 00:16:05 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=bd53f5db

sci-misc/llama-cpp: fix hip/rocm, remove old

Signed-off-by: Sergey Alirzaev <l29ah <AT> riseup.net>

 sci-misc/llama-cpp/Manifest                        |  2 +-
 sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild      | 93 ----------------------
 ...-cpp-9999.ebuild => llama-cpp-0_pre5857.ebuild} | 39 ++++-----
 sci-misc/llama-cpp/llama-cpp-9999.ebuild           | 39 ++++-----
 4 files changed, 33 insertions(+), 140 deletions(-)

diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index 5ac03312f7..6f878fb6e4 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -1,6 +1,6 @@
 DIST llama-cpp-0_pre4576.tar.gz 20506059 BLAKE2B 8f011811e4df1f8d0c26b19f96a709980e078dc7e769b33cbbb03a852a29b489f80c8a1e298fecea53997068f6b7897e4536ba5db289aa445a1a6f16f98adce3 SHA512 21150721524283454ab53e370fdaf4e766f89fbb8d4b43072b10657d8c8b686630616cddbae7954147a2ba0360ad20c4643761f3774481e13a7b180812935c4e
-DIST llama-cpp-0_pre4848.tar.gz 20799416 BLAKE2B 6731dd8ab01f66fca24ad385c2611bc4a11df8beda692e88d79e1d6ab931c908d5a5d304d9c423d43c09d89e80c0b8dd1d57be23cb1a3bb522dbeac112ded604 SHA512 4e08cd24a96ad7c96abdc834d4b5d2d74ce01dd8774d81b693c25b890a0982ca9135cfb743b02f886277d2d0fc92c4e4b330e9acf0977fe00b2f1d4df70243d3
 DIST llama-cpp-0_pre5097.tar.gz 21018571 BLAKE2B 001241580964aa6874a3aa4dbfa0a8cda58a144578992f6a6df7c5c7887cda847503f47c7f3be7b19bb3758ab6ce8de60435e29129cac71672160b29b1cab340 SHA512 86543cd001014fa4fee01a37d46e1794c2ffac7c25c7ed328aa4afd3d615b7f42b617ca5d8a0a78b5a41e31cb81184fc6f55f58ffd9433acb3f36cb947a620a5
 DIST llama-cpp-0_pre5332.tar.gz 21140774 BLAKE2B a390d4c1c6902d90d1e779291e1fcbe69ab57eb35a5df0be6fb3d9edc88b086a18bcf48983b3c0b2e88d0cfaaddbfdeee74fb126b8a758547836f5b83dd4bc33 SHA512 c19c3a6b47684f9466e2872aa67d8516add69028c4fdc7d1abb7a0ff7d87b92adfdaf773cda87461be8e891285c6de34a4edca70244936e8efaf10cc02126a8d
 DIST llama-cpp-0_pre5633.tar.gz 24986657 BLAKE2B 6215dbfea54cb23a57419cc5a530be5622ec834c6d005337bcf92c50e152979375592088e215845e8f07c6b3f7eec15132cd15ebf9b0725adabe499951ae4735 SHA512 11a1917eb86c7065ea901cb62bdc7a25d8d7b962358570c2c7ae0c2d7abce6d19ebc6af74512593ebafbb4ee23546128cf8bfee5ba769c4f3cd2e254cdc1a1a4
 DIST llama-cpp-0_pre5821.tar.gz 25019017 BLAKE2B 5bf7e168a690ac02aee17dd72469481db3b7c61db990407596a99f814eef1737e9c83aae18ef27d3cd3cca01159104e702ed114cd28c1291aea03422a0b5c0f2 SHA512 7aed0a1a29bb4096d67f781299bf48718021f5a0916451a9bdaada2ac1181cc84cbaeab43811e12c13a10beb0d23f0897cfb5f2f26929a166dfd50d90d026d37
+DIST llama-cpp-0_pre5857.tar.gz 25037397 BLAKE2B c5b9105ace7b66341b9dff32d3246f38e056097f2024df1919be2f7ac516ba37caa534aa521e5eb7717963b2df8a5fbe72663d829e0e67a0883edcbdb1b124d7 SHA512 1f91c4b11091a3ede785d5df1a0ab22360bafb36a0b7ee19ce70331bc36bae862ea52f2f0a5c8a4494022c37c8f363e850eb98d74ba910276267a7b5b4f927ed

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild
deleted file mode 100644
index b4db64b49d..0000000000
--- a/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-ROCM_VERSION="6.3"
-
-inherit cmake rocm
-
-if [[ "${PV}" != "9999" ]]; then
-       KEYWORDS="~amd64"
-       MY_PV="b${PV#0_pre}"
-       S="${WORKDIR}/llama.cpp-${MY_PV}"
-       SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
-else
-       inherit git-r3
-       EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
-fi
-
-DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
-HOMEPAGE="https://github.com/ggerganov/llama.cpp"
-
-LICENSE="MIT"
-SLOT="0"
-CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas blis hip"
-REQUIRED_USE="?? ( openblas blis )"
-
-AMDGPU_TARGETS_COMPAT=(
-       gfx900
-       gfx90c
-       gfx902
-       gfx1010
-       gfx1011
-       gfx1012
-       gfx1030
-       gfx1031
-       gfx1032
-       gfx1034
-       gfx1035
-       gfx1036
-       gfx1100
-       gfx1101
-       gfx1102
-       gfx1103
-       gfx1150
-       gfx1151
-)
-
-# curl is needed for pulling models from huggingface
-# numpy is used by convert_hf_to_gguf.py
-DEPEND="
-       curl? ( net-misc/curl:= )
-       openblas? ( sci-libs/openblas:= )
-       blis? ( sci-libs/blis:= )
-       hip? (  >=dev-util/hip-6.3:= )
-"
-RDEPEND="${DEPEND}
-       dev-python/numpy
-"
-PATCHES=( "${FILESDIR}/blas-ld.diff" )
-
-src_configure() {
-       local mycmakeargs=(
-               -DLLAMA_BUILD_TESTS=OFF
-               -DLLAMA_BUILD_SERVER=ON
-               -DCMAKE_SKIP_BUILD_RPATH=ON
-               -DGGML_NATIVE=0 # don't set march
-               -DLLAMA_CURL=$(usex curl ON OFF)
-               -DBUILD_NUMBER="1"
-       )
-
-       if use openblas ; then
-               mycmakeargs+=(
-                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
-               )
-       fi
-
-       if use blis ; then
-               mycmakeargs+=(
-                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
-               )
-       fi
-
-       if use hip; then
-               rocm_use_hipcc
-               mycmakeargs+=(
-                       -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
-               )
-       fi
-
-       cmake_src_configure
-}

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre5857.ebuild
similarity index 84%
copy from sci-misc/llama-cpp/llama-cpp-9999.ebuild
copy to sci-misc/llama-cpp/llama-cpp-0_pre5857.ebuild
index 297952fc97..3c299357c9 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre5857.ebuild
@@ -5,7 +5,7 @@ EAPI=8
 
 ROCM_VERSION="6.3"
 
-inherit cmake cuda rocm
+inherit cmake cuda rocm linux-info
 
 if [[ "${PV}" != "9999" ]]; then
        KEYWORDS="~amd64"
@@ -26,34 +26,15 @@ CPU_FLAGS_X86=( avx avx2 f16c )
 IUSE="curl openblas blis hip cuda vulkan"
 REQUIRED_USE="?? ( openblas blis )"
 
-AMDGPU_TARGETS_COMPAT=(
-       gfx900
-       gfx90c
-       gfx902
-       gfx1010
-       gfx1011
-       gfx1012
-       gfx1030
-       gfx1031
-       gfx1032
-       gfx1034
-       gfx1035
-       gfx1036
-       gfx1100
-       gfx1101
-       gfx1102
-       gfx1103
-       gfx1150
-       gfx1151
-)
-
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
 CDEPEND="
        curl? ( net-misc/curl:= )
        openblas? ( sci-libs/openblas:= )
        blis? ( sci-libs/blis:= )
-       hip? (  >=dev-util/hip-6.3:= )
+       hip? ( >=dev-util/hip-6.3:=
+               >=sci-libs/hipBLAS-6.3:=
+       )
        cuda? ( dev-util/nvidia-cuda-toolkit:= )
 "
 DEPEND="${CDEPEND}
@@ -64,6 +45,18 @@ RDEPEND="${CDEPEND}
        vulkan? ( media-libs/vulkan-loader )
 "
 
+pkg_setup() {
+       if use hip; then
+               linux-info_pkg_setup
+               if linux-info_get_any_version && linux_config_exists; then
+                       if ! linux_chkconfig_present HSA_AMD_SVM; then
+                               ewarn "To use ROCm/HIP, you need to have HSA_AMD_SVM option enabled in your kernel."
+                       fi
+               fi
+
+       fi
+}
+
 src_prepare() {
        use cuda && cuda_src_prepare
 

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
index 297952fc97..3c299357c9 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -5,7 +5,7 @@ EAPI=8
 
 ROCM_VERSION="6.3"
 
-inherit cmake cuda rocm
+inherit cmake cuda rocm linux-info
 
 if [[ "${PV}" != "9999" ]]; then
        KEYWORDS="~amd64"
@@ -26,34 +26,15 @@ CPU_FLAGS_X86=( avx avx2 f16c )
 IUSE="curl openblas blis hip cuda vulkan"
 REQUIRED_USE="?? ( openblas blis )"
 
-AMDGPU_TARGETS_COMPAT=(
-       gfx900
-       gfx90c
-       gfx902
-       gfx1010
-       gfx1011
-       gfx1012
-       gfx1030
-       gfx1031
-       gfx1032
-       gfx1034
-       gfx1035
-       gfx1036
-       gfx1100
-       gfx1101
-       gfx1102
-       gfx1103
-       gfx1150
-       gfx1151
-)
-
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
 CDEPEND="
        curl? ( net-misc/curl:= )
        openblas? ( sci-libs/openblas:= )
        blis? ( sci-libs/blis:= )
-       hip? (  >=dev-util/hip-6.3:= )
+       hip? ( >=dev-util/hip-6.3:=
+               >=sci-libs/hipBLAS-6.3:=
+       )
        cuda? ( dev-util/nvidia-cuda-toolkit:= )
 "
 DEPEND="${CDEPEND}
@@ -64,6 +45,18 @@ RDEPEND="${CDEPEND}
        vulkan? ( media-libs/vulkan-loader )
 "
 
+pkg_setup() {
+       if use hip; then
+               linux-info_pkg_setup
+               if linux-info_get_any_version && linux_config_exists; then
+                       if ! linux_chkconfig_present HSA_AMD_SVM; then
+                               ewarn "To use ROCm/HIP, you need to have HSA_AMD_SVM option enabled in your kernel."
+                       fi
+               fi
+
+       fi
+}
+
 src_prepare() {
        use cuda && cuda_src_prepare
 

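[Editor's note, not part of the commit] The new pkg_setup() uses the linux-info eclass to warn when the configured kernel lacks HSA_AMD_SVM, which the amdkfd driver needs for ROCm's shared virtual memory support. A minimal standalone sketch of the same check outside Portage, assuming a kernel config is readable at /proc/config.gz or /usr/src/linux/.config (both paths are assumptions, not taken from the commit):

#!/bin/sh
# Hedged sketch: approximate the pkg_setup() kernel check from a plain shell.
# HSA_AMD_SVM is a boolean kernel option, so we look for "=y".
if zgrep -qs '^CONFIG_HSA_AMD_SVM=y' /proc/config.gz ||
   grep -qs '^CONFIG_HSA_AMD_SVM=y' /usr/src/linux/.config; then
        echo "HSA_AMD_SVM is enabled"
else
        echo "warning: HSA_AMD_SVM not enabled; ROCm/HIP may not work" >&2
fi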