commit:     acfe4100ae65b6aeff208b50529bd8b132ded8f3
Author:     Sergey Alirzaev <l29ah <AT> riseup <DOT> net>
AuthorDate: Wed Oct  8 02:10:18 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Wed Oct  8 02:10:18 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=acfe4100

sci-misc/llama-cpp: bump up to 6710, rm old

LFM2-MoE support and fixes

Signed-off-by: Sergey Alirzaev <l29ah <AT> riseup.net>

 sci-misc/llama-cpp/Manifest                   |   2 +-
 sci-misc/llama-cpp/llama-cpp-0_pre5097.ebuild |  93 -------------------
 sci-misc/llama-cpp/llama-cpp-0_pre6710.ebuild | 125 ++++++++++++++++++++++++++
 3 files changed, 126 insertions(+), 94 deletions(-)

diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index dc9de598e4..1947f949f6 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -1,6 +1,6 @@
 DIST llama-cpp-0_pre4576.tar.gz 20506059 BLAKE2B 8f011811e4df1f8d0c26b19f96a709980e078dc7e769b33cbbb03a852a29b489f80c8a1e298fecea53997068f6b7897e4536ba5db289aa445a1a6f16f98adce3 SHA512 21150721524283454ab53e370fdaf4e766f89fbb8d4b43072b10657d8c8b686630616cddbae7954147a2ba0360ad20c4643761f3774481e13a7b180812935c4e
-DIST llama-cpp-0_pre5097.tar.gz 21018571 BLAKE2B 001241580964aa6874a3aa4dbfa0a8cda58a144578992f6a6df7c5c7887cda847503f47c7f3be7b19bb3758ab6ce8de60435e29129cac71672160b29b1cab340 SHA512 86543cd001014fa4fee01a37d46e1794c2ffac7c25c7ed328aa4afd3d615b7f42b617ca5d8a0a78b5a41e31cb81184fc6f55f58ffd9433acb3f36cb947a620a5
 DIST llama-cpp-0_pre5332.tar.gz 21140774 BLAKE2B a390d4c1c6902d90d1e779291e1fcbe69ab57eb35a5df0be6fb3d9edc88b086a18bcf48983b3c0b2e88d0cfaaddbfdeee74fb126b8a758547836f5b83dd4bc33 SHA512 c19c3a6b47684f9466e2872aa67d8516add69028c4fdc7d1abb7a0ff7d87b92adfdaf773cda87461be8e891285c6de34a4edca70244936e8efaf10cc02126a8d
 DIST llama-cpp-0_pre5633.tar.gz 24986657 BLAKE2B 6215dbfea54cb23a57419cc5a530be5622ec834c6d005337bcf92c50e152979375592088e215845e8f07c6b3f7eec15132cd15ebf9b0725adabe499951ae4735 SHA512 11a1917eb86c7065ea901cb62bdc7a25d8d7b962358570c2c7ae0c2d7abce6d19ebc6af74512593ebafbb4ee23546128cf8bfee5ba769c4f3cd2e254cdc1a1a4
 DIST llama-cpp-0_pre6140.tar.gz 25487270 BLAKE2B 48b809c860437a8eb8b886e417299c2135dcd3c74aa2782b0380b0785b4e8c383c6061bd7ac43e9eddb1e31769ccceb48811a5433d45ac41adb00ece0e6af93d SHA512 d8e904ddd6b935f84230ee163414922f572e71e2dd05d639a5b1a4a36c6841ac476f81be8e39f087712766e88564d8a499cdff4d717a5f910caf9f799a2b998d
 DIST llama-cpp-0_pre6318.tar.gz 25626090 BLAKE2B b95826a5fd4ab27927d390cdc091648d1ffe281d5d9946fdfa4e6c8c59fb7461dd1e2b83751c86c575b4f00207bbd0cfbe467a0ae9dfdb3b192356bc77e0f808 SHA512 f3b5655123919a76fa27f1be05ffb2a7f681d7793d4d9e24106739a21846a2918ffdf9ef326ac99a55f6b4943059e4f76de754da894ff6fdd7e2d56a41edc56b
+DIST llama-cpp-0_pre6710.tar.gz 25894417 BLAKE2B 147f30d76fd49bf18fa0ab9e3e75d0ad337dcd87a73f1dbce43f180488ea06b40b1a2a93b4686a88b5a442dd4dd6a8e45bf848ceb549bdc0ad0078427336c56e SHA512 75c5918713256cb11f704b94d6e249a9f3ac2dde1107a6f4506134ba9c772e1c42d991915b571887207003f4b0679a183cd0787ffd742a08d2283fdfb86695eb

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre5097.ebuild 
b/sci-misc/llama-cpp/llama-cpp-0_pre5097.ebuild
deleted file mode 100644
index b4db64b49d..0000000000
--- a/sci-misc/llama-cpp/llama-cpp-0_pre5097.ebuild
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-ROCM_VERSION="6.3"
-
-inherit cmake rocm
-
-if [[ "${PV}" != "9999" ]]; then
-       KEYWORDS="~amd64"
-       MY_PV="b${PV#0_pre}"
-       S="${WORKDIR}/llama.cpp-${MY_PV}"
-       SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
-else
-       inherit git-r3
-       EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
-fi
-
-DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
-HOMEPAGE="https://github.com/ggerganov/llama.cpp"
-
-LICENSE="MIT"
-SLOT="0"
-CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas blis hip"
-REQUIRED_USE="?? ( openblas blis )"
-
-AMDGPU_TARGETS_COMPAT=(
-       gfx900
-       gfx90c
-       gfx902
-       gfx1010
-       gfx1011
-       gfx1012
-       gfx1030
-       gfx1031
-       gfx1032
-       gfx1034
-       gfx1035
-       gfx1036
-       gfx1100
-       gfx1101
-       gfx1102
-       gfx1103
-       gfx1150
-       gfx1151
-)
-
-# curl is needed for pulling models from huggingface
-# numpy is used by convert_hf_to_gguf.py
-DEPEND="
-       curl? ( net-misc/curl:= )
-       openblas? ( sci-libs/openblas:= )
-       blis? ( sci-libs/blis:= )
-       hip? (  >=dev-util/hip-6.3:= )
-"
-RDEPEND="${DEPEND}
-       dev-python/numpy
-"
-PATCHES=( "${FILESDIR}/blas-ld.diff" )
-
-src_configure() {
-       local mycmakeargs=(
-               -DLLAMA_BUILD_TESTS=OFF
-               -DLLAMA_BUILD_SERVER=ON
-               -DCMAKE_SKIP_BUILD_RPATH=ON
-               -DGGML_NATIVE=0 # don't set march
-               -DLLAMA_CURL=$(usex curl ON OFF)
-               -DBUILD_NUMBER="1"
-       )
-
-       if use openblas ; then
-               mycmakeargs+=(
-                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
-               )
-       fi
-
-       if use blis ; then
-               mycmakeargs+=(
-                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
-               )
-       fi
-
-       if use hip; then
-               rocm_use_hipcc
-               mycmakeargs+=(
-                       -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
-               )
-       fi
-
-       cmake_src_configure
-}

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre6710.ebuild 
b/sci-misc/llama-cpp/llama-cpp-0_pre6710.ebuild
new file mode 100644
index 0000000000..acbdfc0735
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre6710.ebuild
@@ -0,0 +1,125 @@
+# Copyright 2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION="6.3"
+
+inherit cmake cuda rocm linux-info
+
+if [[ "${PV}" != "9999" ]]; then
+       KEYWORDS="~amd64"
+       MY_PV="b${PV#0_pre}"
+       S="${WORKDIR}/llama.cpp-${MY_PV}"
+       SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+else
+       inherit git-r3
+       EGIT_REPO_URI="https://github.com/ggml-org/llama.cpp.git"
+fi
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggml-org/llama.cpp"
+
+LICENSE="MIT"
+SLOT="0"
+CPU_FLAGS_X86=( avx avx2 f16c )
+IUSE="curl openblas +openmp blis hip cuda opencl vulkan"
+REQUIRED_USE="?? ( openblas blis )"
+
+# curl is needed for pulling models from huggingface
+# numpy is used by convert_hf_to_gguf.py
+CDEPEND="
+       curl? ( net-misc/curl:= )
+       openblas? ( sci-libs/openblas:= )
+       openmp? ( llvm-runtimes/openmp:= )
+       blis? ( sci-libs/blis:= )
+       hip? ( >=dev-util/hip-6.3:=
+               >=sci-libs/hipBLAS-6.3:=
+       )
+       cuda? ( dev-util/nvidia-cuda-toolkit:= )
+"
+DEPEND="${CDEPEND}
+       opencl? ( dev-util/opencl-headers )
+       vulkan? ( dev-util/vulkan-headers )
+"
+RDEPEND="${CDEPEND}
+       dev-python/numpy
+       opencl? ( dev-libs/opencl-icd-loader )
+       vulkan? ( media-libs/vulkan-loader )
+"
+BDEPEND="media-libs/shaderc"
+
+pkg_setup() {
+       if use hip; then
+               linux-info_pkg_setup
+               if linux-info_get_any_version && linux_config_exists; then
+                       if ! linux_chkconfig_present HSA_AMD_SVM; then
+                               ewarn "To use ROCm/HIP, you need to have HSA_AMD_SVM option enabled in your kernel."
+                       fi
+               fi
+
+       fi
+}
+
+src_prepare() {
+       use cuda && cuda_src_prepare
+
+       cmake_src_prepare
+}
+
+src_configure() {
+       local mycmakeargs=(
+               -DLLAMA_BUILD_TESTS=OFF
+               -DLLAMA_BUILD_SERVER=ON
+               -DCMAKE_SKIP_BUILD_RPATH=ON
+               -DGGML_NATIVE=0 # don't set march
+               -DGGML_RPC=ON
+               -DLLAMA_CURL=$(usex curl ON OFF)
+               -DBUILD_NUMBER="1"
+               -DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
+               -DGGML_CUDA=$(usex cuda ON OFF)
+               -DGGML_OPENCL=$(usex opencl ON OFF)
+               -DGGML_OPENMP=$(usex openmp ON OFF)
+               -DGGML_VULKAN=$(usex vulkan ON OFF)
+
+               # avoid clashing with whisper.cpp
+               -DCMAKE_INSTALL_LIBDIR="${EPREFIX}/usr/$(get_libdir)/llama.cpp"
+               -DCMAKE_INSTALL_RPATH="${EPREFIX}/usr/$(get_libdir)/llama.cpp"
+       )
+
+       if use openblas ; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+               )
+       fi
+
+       if use blis ; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+               )
+       fi
+
+       if use cuda; then
+               local -x CUDAHOSTCXX="$(cuda_gccdir)"
+               # tries to recreate dev symlinks
+               cuda_add_sandbox
+               addpredict "/dev/char/"
+       fi
+
+       if use hip; then
+               rocm_use_hipcc
+               mycmakeargs+=(
+                       -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
+               )
+       fi
+
+       cmake_src_configure
+}
+
+src_install() {
+       cmake_src_install
+       dobin "${BUILD_DIR}/bin/rpc-server"
+
+       # avoid clashing with whisper.cpp
+       rm -rf "${ED}/usr/include"
+}
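For anyone testing the bump: the new revision adds openmp, cuda, opencl and vulkan USE flags alongside the existing openblas/blis options (openblas and blis stay mutually exclusive via REQUIRED_USE). A minimal sketch of enabling a backend and pulling the update from GURU; the flag selection below is only an illustrative example, not part of this commit:

    # /etc/portage/package.use/llama-cpp  (example: pick your backend)
    sci-misc/llama-cpp curl openmp vulkan

    # rebuild from the GURU overlay
    emerge --ask '>=sci-misc/llama-cpp-0_pre6710'

The ebuild also configures with -DGGML_RPC=ON and installs the resulting rpc-server binary via dobin in src_install().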
