Alexander Rødseth pushed to branch main at Arch Linux / Packaging / Packages / ollama-cuda


Commits:
e5c7bf39 by Alexander F. Rødseth at 2024-02-06T18:43:17+01:00
upgpkg: 0.1.23-1

- - - - -


2 changed files:

- .SRCINFO
- PKGBUILD


Changes:

=====================================
.SRCINFO
=====================================
@@ -1,7 +1,7 @@
 pkgbase = ollama-cuda
        pkgdesc = Create, run and share large language models (LLMs) with CUDA
-       pkgver = 0.1.22
-       pkgrel = 2
+       pkgver = 0.1.23
+       pkgrel = 1
        url = https://github.com/jmorganca/ollama
        arch = x86_64
        license = MIT
@@ -11,8 +11,8 @@ pkgbase = ollama-cuda
        makedepends = go
        provides = ollama
        conflicts = ollama
-       source = git+https://github.com/jmorganca/ollama#tag=v0.1.22
-       source = llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=cd4fddb29f81d6a1f6d51a0c016bc6b486d68def
+       source = git+https://github.com/jmorganca/ollama#tag=v0.1.23
+       source = llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=d2f650cb5b04ee2726663e79b47da5efe196ce00
        source = sysusers.conf
        source = tmpfiles.d
        source = ollama.service

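(A note on the .SRCINFO hunk above: .SRCINFO is generated metadata rather than a hand-edited file, so after the pkgver/pkgrel bump in the PKGBUILD it is normally regenerated with makepkg, for example:

    makepkg --printsrcinfo > .SRCINFO

That is why the same version bump appears in both changed files; the exact command the maintainer ran is not part of this commit.)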

=====================================
PKGBUILD
=====================================
@@ -3,15 +3,15 @@
 
 pkgname=ollama-cuda
 pkgdesc='Create, run and share large language models (LLMs) with CUDA'
-pkgver=0.1.22
-pkgrel=2
+pkgver=0.1.23
+pkgrel=1
 arch=(x86_64)
 url='https://github.com/jmorganca/ollama'
 license=(MIT)
-_ollamacommit=a47d8b2557259ffc9881817df97fbf6d6824e89e # tag: v0.1.22
+_ollamacommit=09a6f76f4c30fb8a9708680c519d08feeb504197 # tag: v0.1.23
 # The llama.cpp git submodule commit hash can be found here:
-# https://github.com/jmorganca/ollama/tree/v0.1.22/llm
-_llama_cpp_commit=cd4fddb29f81d6a1f6d51a0c016bc6b486d68def
+# https://github.com/jmorganca/ollama/tree/v0.1.23/llm
+_llama_cpp_commit=d2f650cb5b04ee2726663e79b47da5efe196ce00
 makedepends=(cmake cuda git go)
 provides=(ollama)
 conflicts=(ollama)
@@ -36,6 +36,9 @@ prepare() {
 
   # Turn LTO on and set the build type to Release
 sed -i 's,T_CODE=on,T_CODE=on -D LLAMA_LTO=on -D CMAKE_BUILD_TYPE=Release,g' llm/generate/gen_linux.sh
+
+  # Display a more helpful error message
+  sed -i "s|could not connect to ollama server, run 'ollama serve' to start it|ollama is not running, try 'systemctl start ollama'|g" cmd/cmd.go
 }
 
 build() {


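(On the pinned hashes above: the PKGBUILD comment points to https://github.com/jmorganca/ollama/tree/v0.1.23/llm for the llama.cpp submodule commit. One way to cross-check that value from a local checkout of the ollama sources, assuming the submodule is registered at llm/llama.cpp as in the upstream repository at that tag, is git ls-tree:

    # run from the directory containing the cloned ollama repo
    git -C ollama ls-tree v0.1.23 llm/llama.cpp
    # expected gitlink entry, matching _llama_cpp_commit in the PKGBUILD:
    # 160000 commit d2f650cb5b04ee2726663e79b47da5efe196ce00  llm/llama.cpp

This is only a sketch for verification; the packaging itself relies on the hash written into the PKGBUILD and .SRCINFO.)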

View it on GitLab: https://gitlab.archlinux.org/archlinux/packaging/packages/ollama-cuda/-/commit/e5c7bf3987de98333e60ea5a1080114573546cc4
