public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] repo/proj/guru:dev commit in: sci-misc/llama-cpp/, sci-misc/llama-cpp/files/
@ 2025-02-09 11:55 Alexey Korepanov
  0 siblings, 0 replies; only message in thread
From: Alexey Korepanov @ 2025-02-09 11:55 UTC (permalink / raw)
  To: gentoo-commits

commit:     a48cf7afbbf35240bfe3cf0736098a8f7ec66c92
Author:     Alexey Korepanov <kaikaikai <AT> yandex <DOT> ru>
AuthorDate: Sun Feb  9 11:51:53 2025 +0000
Commit:     Alexey Korepanov <kaikaikai <AT> yandex <DOT> ru>
CommitDate: Sun Feb  9 11:55:04 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=a48cf7af

sci-misc/llama-cpp: support OpenBLAS or BLIS backends

A significant speed up in some workflows when running on a CPU

Signed-off-by: Alexey Korepanov <kaikaikai <AT> yandex.ru>

 sci-misc/llama-cpp/files/blas-ld.diff         | 16 ++++++++++++++++
 sci-misc/llama-cpp/llama-cpp-0_pre4576.ebuild | 25 ++++++++++++++++++++++---
 sci-misc/llama-cpp/llama-cpp-9999.ebuild      | 25 ++++++++++++++++++++++---
 sci-misc/llama-cpp/metadata.xml               |  4 ++++
 4 files changed, 64 insertions(+), 6 deletions(-)

diff --git a/sci-misc/llama-cpp/files/blas-ld.diff b/sci-misc/llama-cpp/files/blas-ld.diff
new file mode 100644
index 000000000..884f218ed
--- /dev/null
+++ b/sci-misc/llama-cpp/files/blas-ld.diff
@@ -0,0 +1,16 @@
+cmake does not properly configure linking against OpenBLAS or BLIS
+https://github.com/ggerganov/llama.cpp/pull/11741
+
+diff --git a/ggml/src/ggml-blas/CMakeLists.txt b/ggml/src/ggml-blas/CMakeLists.txt
+index 0bf3c05d93a89..117416b988665 100644
+--- a/ggml/src/ggml-blas/CMakeLists.txt
++++ b/ggml/src/ggml-blas/CMakeLists.txt
+@@ -54,6 +54,8 @@ if (BLAS_FOUND)
+         endif()
+         if (DepBLAS_FOUND)
+             set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS})
++            set(BLAS_LIBRARIES ${DepBLAS_LIBRARIES})
++            set(BLAS_LINKER_FLAGS ${DepBLAS_LINKER_FLAGS})
+         else()
+             message(WARNING "BLAS_INCLUDE_DIRS neither been provided nor been automatically"
+             " detected by pkgconfig, trying to find cblas.h from possible paths...")

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre4576.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre4576.ebuild
index 8b9c1438d..1e6b82c0d 100644
--- a/sci-misc/llama-cpp/llama-cpp-0_pre4576.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre4576.ebuild
@@ -1,4 +1,4 @@
-# Copyright 2023 Gentoo Authors
+# Copyright 2025 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 EAPI=8
@@ -21,14 +21,20 @@ HOMEPAGE="https://github.com/ggerganov/llama.cpp"
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl"
+IUSE="curl openblas blis"
+REQUIRED_USE="?? ( openblas blis )"
 
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
-DEPEND="curl? ( net-misc/curl:= )"
+DEPEND="
+	curl? ( net-misc/curl:= )
+	openblas? ( sci-libs/openblas:= )
+	blis? ( sci-libs/blis:= )
+"
 RDEPEND="${DEPEND}
 	dev-python/numpy
 "
+PATCHES=( "${FILESDIR}/blas-ld.diff" )
 
 src_configure() {
 	local mycmakeargs=(
@@ -39,5 +45,18 @@ src_configure() {
 		-DLLAMA_CURL=$(usex curl ON OFF)
 		-DBUILD_NUMBER="1"
 	)
+
+	if use openblas ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+		)
+	fi
+
+	if use blis ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+		)
+	fi
+
 	cmake_src_configure
 }

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
index 8b9c1438d..1e6b82c0d 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -1,4 +1,4 @@
-# Copyright 2023 Gentoo Authors
+# Copyright 2025 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 EAPI=8
@@ -21,14 +21,20 @@ HOMEPAGE="https://github.com/ggerganov/llama.cpp"
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl"
+IUSE="curl openblas blis"
+REQUIRED_USE="?? ( openblas blis )"
 
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
-DEPEND="curl? ( net-misc/curl:= )"
+DEPEND="
+	curl? ( net-misc/curl:= )
+	openblas? ( sci-libs/openblas:= )
+	blis? ( sci-libs/blis:= )
+"
 RDEPEND="${DEPEND}
 	dev-python/numpy
 "
+PATCHES=( "${FILESDIR}/blas-ld.diff" )
 
 src_configure() {
 	local mycmakeargs=(
@@ -39,5 +45,18 @@ src_configure() {
 		-DLLAMA_CURL=$(usex curl ON OFF)
 		-DBUILD_NUMBER="1"
 	)
+
+	if use openblas ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+		)
+	fi
+
+	if use blis ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+		)
+	fi
+
 	cmake_src_configure
 }

diff --git a/sci-misc/llama-cpp/metadata.xml b/sci-misc/llama-cpp/metadata.xml
index 53f72119c..c93fd3afb 100644
--- a/sci-misc/llama-cpp/metadata.xml
+++ b/sci-misc/llama-cpp/metadata.xml
@@ -4,6 +4,10 @@
 	<upstream>
 		<remote-id type="github">ggerganov/llama.cpp</remote-id>
 	</upstream>
+	<use>
+		<flag name="blis">Build a BLIS backend</flag>
+		<flag name="openblas">Build an OpenBLAS backend</flag>
+	</use>
 	<maintainer type="person">
 		<email>zl29ah@gmail.com</email>
 		<name>Sergey Alirzaev</name>


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2025-02-09 11:55 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-02-09 11:55 [gentoo-commits] repo/proj/guru:dev commit in: sci-misc/llama-cpp/, sci-misc/llama-cpp/files/ Alexey Korepanov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox