commit:     d233181e96f30991c95ac43d5c70c8a4df145b3b
Author:     Alexey Korepanov <kaikaikai <AT> yandex <DOT> ru>
AuthorDate: Mon Jun 16 13:01:09 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Mon Jun 16 13:01:09 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=d233181e

sci-misc/llama-cpp: support cuda, update upstream url

Signed-off-by: Alexey Korepanov <kaikaikai <AT> yandex.ru>

 ..._pre5633.ebuild => llama-cpp-0_pre5633-r1.ebuild} | 20 ++++++++++++++------
 sci-misc/llama-cpp/llama-cpp-9999.ebuild             | 20 ++++++++++++++------
 sci-misc/llama-cpp/metadata.xml                      |  2 +-
 3 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre5633.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre5633-r1.ebuild
similarity index 76%
rename from sci-misc/llama-cpp/llama-cpp-0_pre5633.ebuild
rename to sci-misc/llama-cpp/llama-cpp-0_pre5633-r1.ebuild
index eb94fdfb0a..e85f09260c 100644
--- a/sci-misc/llama-cpp/llama-cpp-0_pre5633.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre5633-r1.ebuild
@@ -5,25 +5,25 @@ EAPI=8
 
 ROCM_VERSION="6.3"
 
-inherit cmake rocm
+inherit cmake cuda rocm
 
 if [[ "${PV}" != "9999" ]]; then
        KEYWORDS="~amd64"
        MY_PV="b${PV#0_pre}"
        S="${WORKDIR}/llama.cpp-${MY_PV}"
-	SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+	SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
 else
        inherit git-r3
-	EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
+	EGIT_REPO_URI="https://github.com/ggml-org/llama.cpp.git"
 fi
 
 DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
-HOMEPAGE="https://github.com/ggerganov/llama.cpp"
+HOMEPAGE="https://github.com/ggml-org/llama.cpp"
 
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas blis hip"
+IUSE="curl openblas blis hip cuda"
 REQUIRED_USE="?? ( openblas blis )"
 
 AMDGPU_TARGETS_COMPAT=(
@@ -54,11 +54,17 @@ DEPEND="
        openblas? ( sci-libs/openblas:= )
        blis? ( sci-libs/blis:= )
        hip? (  >=dev-util/hip-6.3:= )
+       cuda? ( dev-util/nvidia-cuda-toolkit:= )
 "
 RDEPEND="${DEPEND}
        dev-python/numpy
 "
-PATCHES=( "${FILESDIR}/blas-ld.diff" )
+
+src_prepare() {
+       use cuda && cuda_src_prepare
+
+       cmake_src_prepare
+}
 
 src_configure() {
        local mycmakeargs=(
@@ -69,6 +75,8 @@ src_configure() {
                -DGGML_RPC=ON
                -DLLAMA_CURL=$(usex curl ON OFF)
                -DBUILD_NUMBER="1"
+               -DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
+               -DGGML_CUDA=$(usex cuda ON OFF)
        )
 
        if use openblas ; then

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
index eb94fdfb0a..e85f09260c 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -5,25 +5,25 @@ EAPI=8
 
 ROCM_VERSION="6.3"
 
-inherit cmake rocm
+inherit cmake cuda rocm
 
 if [[ "${PV}" != "9999" ]]; then
        KEYWORDS="~amd64"
        MY_PV="b${PV#0_pre}"
        S="${WORKDIR}/llama.cpp-${MY_PV}"
-	SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+	SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
 else
        inherit git-r3
-	EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
+	EGIT_REPO_URI="https://github.com/ggml-org/llama.cpp.git"
 fi
 
 DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
-HOMEPAGE="https://github.com/ggerganov/llama.cpp"
+HOMEPAGE="https://github.com/ggml-org/llama.cpp"
 
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas blis hip"
+IUSE="curl openblas blis hip cuda"
 REQUIRED_USE="?? ( openblas blis )"
 
 AMDGPU_TARGETS_COMPAT=(
@@ -54,11 +54,17 @@ DEPEND="
        openblas? ( sci-libs/openblas:= )
        blis? ( sci-libs/blis:= )
        hip? (  >=dev-util/hip-6.3:= )
+       cuda? ( dev-util/nvidia-cuda-toolkit:= )
 "
 RDEPEND="${DEPEND}
        dev-python/numpy
 "
-PATCHES=( "${FILESDIR}/blas-ld.diff" )
+
+src_prepare() {
+       use cuda && cuda_src_prepare
+
+       cmake_src_prepare
+}
 
 src_configure() {
        local mycmakeargs=(
@@ -69,6 +75,8 @@ src_configure() {
                -DGGML_RPC=ON
                -DLLAMA_CURL=$(usex curl ON OFF)
                -DBUILD_NUMBER="1"
+               -DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
+               -DGGML_CUDA=$(usex cuda ON OFF)
        )
 
        if use openblas ; then

diff --git a/sci-misc/llama-cpp/metadata.xml b/sci-misc/llama-cpp/metadata.xml
index 70af1186d9..ee98b16235 100644
--- a/sci-misc/llama-cpp/metadata.xml
+++ b/sci-misc/llama-cpp/metadata.xml
@@ -2,7 +2,7 @@
 <!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
 <pkgmetadata>
        <upstream>
-               <remote-id type="github">ggerganov/llama.cpp</remote-id>
+               <remote-id type="github">ggml-org/llama.cpp</remote-id>
        </upstream>
        <use>
                <flag name="blis">Build a BLIS backend</flag>

Reply via email to