* [gentoo-commits] repo/proj/guru:dev commit in: app-misc/ollama/, app-misc/ollama/files/
@ 2025-02-08 21:46 Paul Zander
From: Paul Zander @ 2025-02-08 21:46 UTC (permalink / raw)
To: gentoo-commits
commit: 0e23d436589a5ed76dd10ff0e35964f691b809c7
Author: Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
AuthorDate: Sat Feb 8 19:29:18 2025 +0000
Commit: Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
CommitDate: Sat Feb 8 21:42:29 2025 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=0e23d436
app-misc/ollama: update to 0.5.7-r1
Bug: https://bugs.gentoo.org/948424
Bug: https://bugs.gentoo.org/935842
Closes: https://bugs.gentoo.org/920301
Signed-off-by: Paul Zander <negril.nx+gentoo <AT> gmail.com>
.../files/ollama-0.5.7-include-cstdint.patch | 24 +++
app-misc/ollama/files/{ollama => ollama.init} | 2 +
app-misc/ollama/metadata.xml | 1 +
app-misc/ollama/ollama-0.5.7-r1.ebuild | 176 +++++++++++++++++++++
app-misc/ollama/ollama-0.5.7.ebuild | 115 --------------
5 files changed, 203 insertions(+), 115 deletions(-)
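For reference, a minimal sketch of enabling the OpenRC service installed by this revision (assuming the init script lands as /etc/init.d/ollama and the default runlevel is used):

    # add the service to the default runlevel, then start it immediately
    rc-update add ollama default
    rc-service ollama start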
diff --git a/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch b/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch
new file mode 100644
index 000000000..14975ca42
--- /dev/null
+++ b/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch
@@ -0,0 +1,24 @@
+From d711567ba482e80520b5cc36026c80f55f721319 Mon Sep 17 00:00:00 2001
+From: Paul Zander <negril.nx@gmail.com>
+Date: Sat, 25 Jan 2025 19:00:31 +0100
+Subject: [PATCH] include cstdint
+
+---
+ llama/llama-mmap.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llama/llama-mmap.h b/llama/llama-mmap.h
+index ebd7dc16..4c8e3929 100644
+--- a/llama/llama-mmap.h
++++ b/llama/llama-mmap.h
+@@ -26,6 +26,7 @@
+
+ #pragma once
+
++#include <cstdint>
+ #include <memory>
+ #include <vector>
+
+--
+2.48.0
+
diff --git a/app-misc/ollama/files/ollama b/app-misc/ollama/files/ollama.init
similarity index 89%
rename from app-misc/ollama/files/ollama
rename to app-misc/ollama/files/ollama.init
index 9359f48a1..17e632899 100644
--- a/app-misc/ollama/files/ollama
+++ b/app-misc/ollama/files/ollama.init
@@ -1,4 +1,6 @@
#!/sbin/openrc-run
+# Copyright 1999-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
description="Ollama Service"
command="/usr/bin/ollama"
diff --git a/app-misc/ollama/metadata.xml b/app-misc/ollama/metadata.xml
index 4ebf01228..cb4ddec8a 100644
--- a/app-misc/ollama/metadata.xml
+++ b/app-misc/ollama/metadata.xml
@@ -8,6 +8,7 @@
</maintainer>
<use>
<flag name="cuda">Enable NVIDIA CUDA support</flag>
+ <flag name="rocm">Enable ROCm GPU computing support</flag>
</use>
<upstream>
<remote-id type="github">ollama/ollama</remote-id>
diff --git a/app-misc/ollama/ollama-0.5.7-r1.ebuild b/app-misc/ollama/ollama-0.5.7-r1.ebuild
new file mode 100644
index 000000000..e95741936
--- /dev/null
+++ b/app-misc/ollama/ollama-0.5.7-r1.ebuild
@@ -0,0 +1,176 @@
+# Copyright 2024-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION=6.1
+inherit cuda rocm
+inherit go-module
+
+DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
+HOMEPAGE="https://ollama.com"
+
+if [[ ${PV} == *9999* ]]; then
+ inherit git-r3
+ EGIT_REPO_URI="https://github.com/ollama/ollama.git"
+else
+ SRC_URI="
+ https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz
+ https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz
+ "
+ KEYWORDS="~amd64"
+fi
+
+LICENSE="MIT"
+SLOT="0"
+
+X86_CPU_FLAGS=(
+ avx
+ avx2
+ avx512f
+ avx512vbmi
+ avx512_vnni
+ avx512_bf16
+)
+CPU_FLAGS=( "${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}" )
+IUSE="${CPU_FLAGS[*]} cuda rocm"
+
+REQUIRED_USE="
+ cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
+ cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
+ cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
+ cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
+ cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
+"
+
+DEPEND="
+ >=dev-lang/go-1.23.4
+ cuda? (
+ dev-util/nvidia-cuda-toolkit:=
+ )
+ rocm? (
+ >=sci-libs/hipBLAS-${ROCM_VERSION}:=[${ROCM_USEDEP}]
+ )
+"
+
+RDEPEND="
+ acct-group/${PN}
+ acct-user/${PN}
+"
+
+PATCHES=(
+ "${FILESDIR}/${PN}-0.5.7-include-cstdint.patch"
+)
+
+pkg_pretend() {
+ if use rocm; then
+ ewarn "WARNING: AMD support in this ebuild is experimental"
+ einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime,"
+ einfo "you may try the Docker image here: https://github.com/ROCm/ROCm-docker"
+ einfo "and follow the instructions here:"
+ einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
+ fi
+}
+
+src_prepare() {
+ default
+
+ sed \
+ -e "s/(CFLAGS)/(NVCCFLAGS)/g" \
+ -e "s/(CXXFLAGS)/(NVCCFLAGS)/g" \
+ -i make/cuda.make || die
+
+ if use rocm; then
+ # --hip-version gets appended to the compile flags which isn't a known flag.
+ # This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
+ # Use nuclear option to fix this.
+ # Disable -Werror's from go modules.
+ find "${S}" -name "*.go" -exec sed -i "s/ -Werror / /g" {} + || die
+ fi
+}
+
+src_configure() {
+ local CUSTOM_CPU_FLAGS=()
+ use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+=( "avx" )
+ use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=( "avx2" )
+ use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=( "avx512" )
+ use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=( "avx512vbmi" )
+ use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=( "avx512vnni" )
+ use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=( "avx512bf16" )
+
+ # Build basic ollama executable with cpu features built in
+ emakeargs=(
+ # CCACHE=""
+ "CUSTOM_CPU_FLAGS=$( IFS=','; echo "${CUSTOM_CPU_FLAGS[*]}")"
+ )
+
+ if use cuda; then
+ export NVCC_CCBIN
+ NVCC_CCBIN="$(cuda_gccdir)"
+
+ if [[ -n ${CUDAARCHS} ]]; then
+ emakeargs+=(
+ CUDA_ARCHITECTURES="${CUDAARCHS}"
+ )
+ fi
+
+ if has_version "=dev-util/nvidia-cuda-toolkit-12*"; then
+ emakeargs+=(
+ CUDA_12_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
+ CUDA_12_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
+ )
+ fi
+
+ if has_version "=dev-util/nvidia-cuda-toolkit-11*"; then
+ emakeargs+=(
+ CUDA_11_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
+ CUDA_11_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
+ )
+ fi
+
+ cuda_add_sandbox -w
+ else
+ emakeargs+=( OLLAMA_SKIP_CUDA_GENERATE="1" )
+ fi
+
+ if use rocm; then
+ emakeargs+=(
+ HIP_ARCHS="$(get_amdgpu_flags)"
+ HIP_PATH="${EPREFIX}/usr"
+ )
+
+ check_amdgpu
+ else
+ emakeargs+=( OLLAMA_SKIP_ROCM_GENERATE="1" )
+ fi
+
+ emake "${emakeargs[@]}" help-runners
+ export emakeargs
+}
+
+src_compile() {
+ emake "${emakeargs[@]}" dist
+}
+
+src_install() {
+ dobin "dist/linux-${ARCH}/bin/ollama"
+
+ if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
+ insinto /usr/lib
+ doins -r "dist/linux-${ARCH}/lib/ollama"
+ fi
+
+ newinitd "${FILESDIR}"/ollama.init ollama
+}
+
+pkg_preinst() {
+ keepdir /var/log/ollama
+ fowners ollama:ollama /var/log/ollama
+}
+
+pkg_postinst() {
+ einfo "Quick guide:"
+ einfo "ollama serve"
+ einfo "ollama run llama3:70b"
+ einfo "See available models at https://ollama.com/library"
+}
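For reference, a minimal sketch of pre-selecting the new USE flags before emerging; the package.use path and flag selection are illustrative only:

    # /etc/portage/package.use/ollama (hypothetical example)
    app-misc/ollama cuda cpu_flags_x86_avx cpu_flags_x86_avx2

    emerge --ask app-misc/ollama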
diff --git a/app-misc/ollama/ollama-0.5.7.ebuild b/app-misc/ollama/ollama-0.5.7.ebuild
deleted file mode 100644
index f1373d6fa..000000000
--- a/app-misc/ollama/ollama-0.5.7.ebuild
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2024-2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-ROCM_VERSION=6.1
-inherit go-module rocm
-
-DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
-HOMEPAGE="https://ollama.com"
-SRC_URI="https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz"
-SRC_URI+=" https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz"
-S="${WORKDIR}/${PN}-${PV}"
-LICENSE="MIT"
-SLOT="0"
-
-KEYWORDS="~amd64"
-
-IUSE="cuda video_cards_amdgpu
-cpu_flags_x86_avx cpu_flags_x86_avx2
-cpu_flags_x86_avx512f cpu_flags_x86_avx512vbmi cpu_flags_x86_avx512_vnni cpu_flags_x86_avx512_bf16
-"
-
-REQUIRED_USE="
- cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
- cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
- cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
- cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
- cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
-"
-
-RDEPEND="
- acct-group/ollama
- acct-user/ollama
-"
-
-DEPEND="
- >=dev-lang/go-1.23.4
- >=dev-build/cmake-3.24
- >=sys-devel/gcc-11.4.0
- cuda? ( dev-util/nvidia-cuda-toolkit )
- video_cards_amdgpu? (
- sci-libs/hipBLAS[${ROCM_USEDEP}]
- )
-"
-
-pkg_pretend() {
- if use video_cards_amdgpu; then
- ewarn "WARNING: AMD support in this ebuild are experimental"
- einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime"
- einfo "you may try the docker image here https://github.com/ROCm/ROCm-docker"
- einfo "and follow instructions here"
- einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
- fi
-}
-
-src_prepare() {
- default
-
- if use video_cards_amdgpu; then
- # --hip-version gets appended to the compile flags which isn't a known flag.
- # This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
- # Use nuclear option to fix this.
- # Disable -Werror's from go modules.
- find "${S}" -name ".go" -exec sed -i "s/ -Werror / /g" {} + || die
- fi
-}
-
-src_compile() {
- CUSTOM_CPU_FLAGS=""
- use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+="avx"
- use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=",avx2"
- use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=",avx512"
- use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=",avx512vbmi"
- use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=",avx512vnni"
- use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=",avx512bf16"
-
- # Build basic ollama executable with cpu features built in
- export CUSTOM_CPU_FLAGS
-
- if use video_cards_amdgpu; then
- export HIP_ARCHS=$(get_amdgpu_flags)
- export HIP_PATH="/usr"
- else
- export OLLAMA_SKIP_ROCM_GENERATE=1
- fi
-
- if ! use cuda; then
- export OLLAMA_SKIP_CUDA_GENERATE=1
- fi
- emake dist
-}
-
-src_install() {
- dobin dist/linux-${ARCH}/bin/ollama
-
- if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
- insinto /usr/lib
- doins -r dist/linux-${ARCH}/lib/ollama
- fi
-
- doinitd "${FILESDIR}"/ollama
-}
-
-pkg_preinst() {
- keepdir /var/log/ollama
- fowners ollama:ollama /var/log/ollama
-}
-
-pkg_postinst() {
- einfo "Quick guide:"
- einfo "ollama serve"
- einfo "ollama run llama3:70b"
- einfo "See available models at https://ollama.com/library"
-}
* [gentoo-commits] repo/proj/guru:dev commit in: app-misc/ollama/, app-misc/ollama/files/
@ 2025-02-13 9:04 James McGeehan
From: James McGeehan @ 2025-02-13 9:04 UTC (permalink / raw)
To: gentoo-commits
commit: d0c98d31d827335e76ca9d7acf914bc57b9ec4cd
Author: James McGeehan IV <bivsk <AT> tutanota <DOT> com>
AuthorDate: Thu Feb 13 08:56:51 2025 +0000
Commit: James McGeehan <bivsk <AT> tutanota <DOT> com>
CommitDate: Thu Feb 13 09:03:40 2025 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=d0c98d31
app-misc/ollama: install systemd service file
The ollama documentation refers to a systemd service file for starting
the service; see:
https://github.com/ollama/ollama/blob/main/docs/linux.md
Signed-off-by: James McGeehan IV <bivsk <AT> tutanota.com>
app-misc/ollama/files/ollama.service | 14 ++++++++
app-misc/ollama/ollama-0.5.7-r1.ebuild | 28 ++++++++-------
app-misc/ollama/ollama-9999.ebuild | 64 +++++++++++++++++++---------------
3 files changed, 65 insertions(+), 41 deletions(-)
diff --git a/app-misc/ollama/files/ollama.service b/app-misc/ollama/files/ollama.service
new file mode 100644
index 000000000..9deee1da0
--- /dev/null
+++ b/app-misc/ollama/files/ollama.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Ollama Service
+After=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ollama serve
+User=ollama
+Group=ollama
+Restart=always
+RestartSec=3
+Environment="PATH=$PATH"
+
+[Install]
+WantedBy=default.target
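For reference, a minimal sketch of activating the installed unit (plain systemctl usage; assumes a systemd-based system):

    # pick up the new unit file, then enable and start the service
    systemctl daemon-reload
    systemctl enable --now ollama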
diff --git a/app-misc/ollama/ollama-0.5.7-r1.ebuild b/app-misc/ollama/ollama-0.5.7-r1.ebuild
index 120a9b5ff..7146a2fda 100644
--- a/app-misc/ollama/ollama-0.5.7-r1.ebuild
+++ b/app-misc/ollama/ollama-0.5.7-r1.ebuild
@@ -5,7 +5,7 @@ EAPI=8
ROCM_VERSION=6.1
inherit cuda rocm
-inherit go-module
+inherit go-module systemd
DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
HOMEPAGE="https://ollama.com"
@@ -32,7 +32,7 @@ X86_CPU_FLAGS=(
avx512_vnni
avx512_bf16
)
-CPU_FLAGS=( "${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}" )
+CPU_FLAGS=("${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}")
IUSE="${CPU_FLAGS[*]} cuda rocm"
REQUIRED_USE="
@@ -91,17 +91,20 @@ src_prepare() {
src_configure() {
local CUSTOM_CPU_FLAGS=()
- use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+=( "avx" )
- use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=( "avx2" )
- use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=( "avx512" )
- use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=( "avx512vbmi" )
- use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=( "avx512vnni" )
- use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=( "avx512bf16" )
+ use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+=("avx")
+ use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=("avx2")
+ use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=("avx512")
+ use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=("avx512vbmi")
+ use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=("avx512vnni")
+ use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=("avx512bf16")
# Build basic ollama executable with cpu features built in
emakeargs=(
# CCACHE=""
- "CUSTOM_CPU_FLAGS=$( IFS=','; echo "${CUSTOM_CPU_FLAGS[*]}")"
+ "CUSTOM_CPU_FLAGS=$(
+ IFS=','
+ echo "${CUSTOM_CPU_FLAGS[*]}"
+ )"
)
if use cuda; then
@@ -130,7 +133,7 @@ src_configure() {
cuda_add_sandbox -w
else
- emakeargs+=( OLLAMA_SKIP_CUDA_GENERATE="1" )
+ emakeargs+=(OLLAMA_SKIP_CUDA_GENERATE="1")
fi
if use rocm; then
@@ -141,7 +144,7 @@ src_configure() {
check_amdgpu
else
- emakeargs+=( OLLAMA_SKIP_ROCM_GENERATE="1" )
+ emakeargs+=(OLLAMA_SKIP_ROCM_GENERATE="1")
fi
emake "${emakeargs[@]}" help-runners
@@ -155,7 +158,7 @@ src_compile() {
src_install() {
dobin "dist/linux-${ARCH}/bin/ollama"
- if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
+ if [[ -d "dist/linux-${ARCH}/lib/ollama" ]]; then
insinto /usr/lib
doins -r "dist/linux-${ARCH}/lib/ollama"
fi
@@ -165,6 +168,7 @@ src_install() {
fi
newinitd "${FILESDIR}"/ollama.init ollama
+ systemd_dounit "${FILESDIR}"/ollama.service
}
pkg_preinst() {
diff --git a/app-misc/ollama/ollama-9999.ebuild b/app-misc/ollama/ollama-9999.ebuild
index 79153a59f..54746c5fb 100644
--- a/app-misc/ollama/ollama-9999.ebuild
+++ b/app-misc/ollama/ollama-9999.ebuild
@@ -6,7 +6,7 @@ EAPI=8
ROCM_VERSION=6.1
inherit cuda rocm
inherit cmake
-inherit go-module toolchain-funcs
+inherit go-module systemd toolchain-funcs
DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
HOMEPAGE="https://ollama.com"
@@ -34,7 +34,7 @@ X86_CPU_FLAGS=(
amx_tile
amx_int8
)
-CPU_FLAGS=( "${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}" )
+CPU_FLAGS=("${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}")
IUSE="${CPU_FLAGS[*]} cuda blas mkl rocm"
# IUSE+=" opencl vulkan"
@@ -91,53 +91,58 @@ src_prepare() {
fi
if
! use cpu_flags_x86_avx ||
- ! use cpu_flags_x86_f16c ||
- ! use cpu_flags_x86_avx2 ||
- ! use cpu_flags_x86_fma3; then
+ ! use cpu_flags_x86_f16c ||
+ ! use cpu_flags_x86_avx2 ||
+ ! use cpu_flags_x86_fma3
+ then
sed -e "/ggml_add_cpu_backend_variant(haswell/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
# AVX F16C AVX2 FMA)
fi
if
! use cpu_flags_x86_avx ||
- ! use cpu_flags_x86_f16c ||
- ! use cpu_flags_x86_avx2 ||
- ! use cpu_flags_x86_fma3 ||
- ! use cpu_flags_x86_avx512f; then
- sed -e "/ggml_add_cpu_backend_variant(skylakex/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
+ ! use cpu_flags_x86_f16c ||
+ ! use cpu_flags_x86_avx2 ||
+ ! use cpu_flags_x86_fma3 ||
+ ! use cpu_flags_x86_avx512f
+ then
+ sed -e "/ggml_add_cpu_backend_variant(skylakex/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
# AVX F16C AVX2 FMA AVX512)
fi
if
! use cpu_flags_x86_avx ||
- ! use cpu_flags_x86_f16c ||
- ! use cpu_flags_x86_avx2 ||
- ! use cpu_flags_x86_fma3 ||
- ! use cpu_flags_x86_avx512f ||
- ! use cpu_flags_x86_avx512vbmi ||
- ! use cpu_flags_x86_avx512_vnni; then
+ ! use cpu_flags_x86_f16c ||
+ ! use cpu_flags_x86_avx2 ||
+ ! use cpu_flags_x86_fma3 ||
+ ! use cpu_flags_x86_avx512f ||
+ ! use cpu_flags_x86_avx512vbmi ||
+ ! use cpu_flags_x86_avx512_vnni
+ then
sed -e "/ggml_add_cpu_backend_variant(icelake/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
# AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI)
fi
if
! use cpu_flags_x86_avx ||
- ! use cpu_flags_x86_f16c ||
- ! use cpu_flags_x86_avx2 ||
- ! use cpu_flags_x86_fma3 ||
- ! use cpu_flags_x86_avx_vnni; then
+ ! use cpu_flags_x86_f16c ||
+ ! use cpu_flags_x86_avx2 ||
+ ! use cpu_flags_x86_fma3 ||
+ ! use cpu_flags_x86_avx_vnni
+ then
sed -e "/ggml_add_cpu_backend_variant(alderlake/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
# AVX F16C AVX2 FMA AVX_VNNI)
fi
if
! use cpu_flags_x86_avx ||
- ! use cpu_flags_x86_f16c ||
- ! use cpu_flags_x86_avx2 ||
- ! use cpu_flags_x86_fma3 ||
- ! use cpu_flags_x86_avx512f ||
- ! use cpu_flags_x86_avx512vbmi ||
- ! use cpu_flags_x86_avx512_vnni ||
- ! use cpu_flags_x86_avx512_bf16 ||
- ! use cpu_flags_x86_amx_tile ||
- ! use cpu_flags_x86_amx_int8 ; then
+ ! use cpu_flags_x86_f16c ||
+ ! use cpu_flags_x86_avx2 ||
+ ! use cpu_flags_x86_fma3 ||
+ ! use cpu_flags_x86_avx512f ||
+ ! use cpu_flags_x86_avx512vbmi ||
+ ! use cpu_flags_x86_avx512_vnni ||
+ ! use cpu_flags_x86_avx512_bf16 ||
+ ! use cpu_flags_x86_amx_tile ||
+ ! use cpu_flags_x86_amx_int8
+ then
sed -e "/ggml_add_cpu_backend_variant(sapphirerapids/s/^/# /g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
#AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8)
fi
@@ -271,6 +276,7 @@ src_install() {
fi
newinitd "${FILESDIR}"/ollama.init ollama
+ systemd_dounit "${FILESDIR}"/ollama.service
}
pkg_preinst() {
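For reference, both ebuilds gate their ggml CPU backend variants on CPU_FLAGS_X86_* USE flags; a minimal sketch of deriving the right flags for the build host with app-portage/cpuid2cpuflags (the sample output line is illustrative):

    # prints a CPU_FLAGS_X86 line suitable for make.conf or package.use
    cpuid2cpuflags
    # e.g. CPU_FLAGS_X86: avx avx2 f16c fma3 ...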