From: "Paul Zander" <negril.nx+gentoo@gmail.com>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] repo/proj/guru:dev commit in: sci-ml/ollama/
Date: Wed, 16 Apr 2025 12:24:28 +0000 (UTC) [thread overview]
Message-ID: <1744805744.bf4efd75347fbd1c1fd123823d24ac77864d4054.negril.nx+gentoo@gentoo> (raw)
commit: bf4efd75347fbd1c1fd123823d24ac77864d4054
Author: Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
AuthorDate: Wed Apr 16 12:15:44 2025 +0000
Commit: Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
CommitDate: Wed Apr 16 12:15:44 2025 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=bf4efd75
sci-ml/ollama: fix accelerated library lookup path
The Go binary discarded libraries that were not located in `lib/ollama`. This
only became visible when running with `OLLAMA_DEBUG=1`.
Signed-off-by: Paul Zander <negril.nx+gentoo <AT> gmail.com>
...{ollama-0.6.5.ebuild => ollama-0.6.5-r1.ebuild} | 61 ++++++++--------------
sci-ml/ollama/ollama-9999.ebuild | 61 ++++++++--------------
2 files changed, 42 insertions(+), 80 deletions(-)
diff --git a/sci-ml/ollama/ollama-0.6.5.ebuild b/sci-ml/ollama/ollama-0.6.5-r1.ebuild
similarity index 85%
rename from sci-ml/ollama/ollama-0.6.5.ebuild
rename to sci-ml/ollama/ollama-0.6.5-r1.ebuild
index f9f8e9cb2..d5fda71a5 100644
--- a/sci-ml/ollama/ollama-0.6.5.ebuild
+++ b/sci-ml/ollama/ollama-0.6.5-r1.ebuild
@@ -68,7 +68,7 @@ DEPEND="
RDEPEND="
${COMMON_DEPEND}
acct-group/${PN}
- acct-user/${PN}[cuda?]
+ >=acct-user/${PN}-3[cuda?]
"
PATCHES=(
@@ -97,6 +97,19 @@ src_prepare() {
-e "s/-O3/$(get-flag O)/g" \
-i ml/backend/ggml/ggml/src/ggml-cpu/cpu.go || die sed
+ # fix library location
+ sed \
+ -e "s#lib/ollama#$(get_libdir)/ollama#g" \
+ -i CMakeLists.txt || die sed
+
+ sed \
+ -e "s/\"..\", \"lib\"/\"..\", \"$(get_libdir)\"/" \
+ -e "s#\"lib/ollama\"#\"$(get_libdir)/ollama\"#" \
+ -i \
+ ml/backend/ggml/ggml/src/ggml.go \
+ discover/path.go \
+ || die
+
if use amd64; then
if ! use cpu_flags_x86_avx; then
sed -e "/ggml_add_cpu_backend_variant(sandybridge/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
@@ -218,12 +231,11 @@ src_configure() {
if use rocm; then
mycmakeargs+=(
+ -DCMAKE_HIP_ARCHITECTURES="$(get_amdgpu_flags)"
-DCMAKE_HIP_PLATFORM="amd"
)
- local -x HIP_ARCHS HIP_PATH
- HIP_ARCHS="$(get_amdgpu_flags)"
- HIP_PATH="${ESYSROOT}/usr"
+ local -x HIP_PATH="${ESYSROOT}/usr"
check_amdgpu
else
@@ -233,48 +245,12 @@ src_configure() {
fi
cmake_src_configure
-
- # if ! use cuda && ! use rocm; then
- # # to configure and build only CPU variants
- # set -- cmake --preset Default "${mycmakeargs[@]}"
- # fi
-
- # if use cuda; then
- # # to configure and build only CUDA
- # set -- cmake --preset CUDA "${mycmakeargs[@]}"
- # fi
-
- # if use rocm; then
- # # to configure and build only ROCm
- # set -- cmake --preset ROCm "${mycmakeargs[@]}"
- # fi
-
- # echo "$@" >&2
- # "$@" || die -n "${*} failed"
}
src_compile() {
ego build
cmake_src_compile
-
- # if ! use cuda && ! use rocm; then
- # # to configure and build only CPU variants
- # set -- cmake --build --preset Default -j16
- # fi
-
- # if use cuda; then
- # # to configure and build only CUDA
- # set -- cmake --build --preset CUDA -j16
- # fi
-
- # if use rocm; then
- # # to configure and build only ROCm
- # set -- cmake --build --preset ROCm -j16
- # fi
-
- # echo "$@" >&2
- # "$@" || die -n "${*} failed"
}
src_install() {
@@ -302,4 +278,9 @@ pkg_postinst() {
einfo
einfo "See available models at https://ollama.com/library"
fi
+
+ if use cuda ; then
+ einfo "When using cuda the user running ${PN} has to be in the video group or it won't detect devices."
+ einfo "The ebuild ensures this for user ${PN} via acct-user/${PN}[cuda]"
+ fi
}
diff --git a/sci-ml/ollama/ollama-9999.ebuild b/sci-ml/ollama/ollama-9999.ebuild
index d8944477e..085c656fa 100644
--- a/sci-ml/ollama/ollama-9999.ebuild
+++ b/sci-ml/ollama/ollama-9999.ebuild
@@ -68,7 +68,7 @@ DEPEND="
RDEPEND="
${COMMON_DEPEND}
acct-group/${PN}
- acct-user/${PN}[cuda?]
+ >=acct-user/${PN}-3[cuda?]
"
PATCHES=(
@@ -97,6 +97,19 @@ src_prepare() {
-e "s/-O3/$(get-flag O)/g" \
-i ml/backend/ggml/ggml/src/ggml-cpu/cpu.go || die sed
+ # fix library location
+ sed \
+ -e "s#lib/ollama#$(get_libdir)/ollama#g" \
+ -i CMakeLists.txt || die sed
+
+ sed \
+ -e "s/\"..\", \"lib\"/\"..\", \"$(get_libdir)\"/" \
+ -e "s#\"lib/ollama\"#\"$(get_libdir)/ollama\"#" \
+ -i \
+ ml/backend/ggml/ggml/src/ggml.go \
+ discover/path.go \
+ || die
+
if use amd64; then
if ! use cpu_flags_x86_avx; then
sed -e "/ggml_add_cpu_backend_variant(sandybridge/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
@@ -217,12 +230,11 @@ src_configure() {
if use rocm; then
mycmakeargs+=(
+ -DCMAKE_HIP_ARCHITECTURES="$(get_amdgpu_flags)"
-DCMAKE_HIP_PLATFORM="amd"
)
- local -x HIP_ARCHS HIP_PATH
- HIP_ARCHS="$(get_amdgpu_flags)"
- HIP_PATH="${ESYSROOT}/usr"
+ local -x HIP_PATH="${ESYSROOT}/usr"
check_amdgpu
else
@@ -232,48 +244,12 @@ src_configure() {
fi
cmake_src_configure
-
- # if ! use cuda && ! use rocm; then
- # # to configure and build only CPU variants
- # set -- cmake --preset Default "${mycmakeargs[@]}"
- # fi
-
- # if use cuda; then
- # # to configure and build only CUDA
- # set -- cmake --preset CUDA "${mycmakeargs[@]}"
- # fi
-
- # if use rocm; then
- # # to configure and build only ROCm
- # set -- cmake --preset ROCm "${mycmakeargs[@]}"
- # fi
-
- # echo "$@" >&2
- # "$@" || die -n "${*} failed"
}
src_compile() {
ego build
cmake_src_compile
-
- # if ! use cuda && ! use rocm; then
- # # to configure and build only CPU variants
- # set -- cmake --build --preset Default -j16
- # fi
-
- # if use cuda; then
- # # to configure and build only CUDA
- # set -- cmake --build --preset CUDA -j16
- # fi
-
- # if use rocm; then
- # # to configure and build only ROCm
- # set -- cmake --build --preset ROCm -j16
- # fi
-
- # echo "$@" >&2
- # "$@" || die -n "${*} failed"
}
src_install() {
@@ -301,4 +277,9 @@ pkg_postinst() {
einfo
einfo "See available models at https://ollama.com/library"
fi
+
+ if use cuda ; then
+ einfo "When using cuda the user running ${PN} has to be in the video group or it won't detect devices."
+ einfo "The ebuild ensures this for user ${PN} via acct-user/${PN}[cuda]"
+ fi
}
next reply other threads:[~2025-04-16 12:24 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-04-16 12:24 Paul Zander [this message]
-- strict thread matches above, loose matches on Subject: below --
2025-04-28 18:52 [gentoo-commits] repo/proj/guru:dev commit in: sci-ml/ollama/ Paul Zander
2025-04-16 12:24 Paul Zander
2025-04-16 12:24 Paul Zander
2025-04-10 21:48 Paul Zander
2025-04-07 18:22 Paul Zander
2025-04-07 18:22 Paul Zander
2025-04-07 18:22 Paul Zander
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1744805744.bf4efd75347fbd1c1fd123823d24ac77864d4054.negril.nx+gentoo@gentoo \
--to=negril.nx+gentoo@gmail.com \
--cc=gentoo-commits@lists.gentoo.org \
--cc=gentoo-dev@lists.gentoo.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox