Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package ollama for openSUSE:Factory checked 
in at 2024-07-15 19:49:07
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/ollama (Old)
 and      /work/SRC/openSUSE:Factory/.ollama.new.17339 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "ollama"

Mon Jul 15 19:49:07 2024 rev:12 rq:1187407 version:0.2.5

Changes:
--------
--- /work/SRC/openSUSE:Factory/ollama/ollama.changes    2024-07-08 19:08:47.559908351 +0200
+++ /work/SRC/openSUSE:Factory/.ollama.new.17339/ollama.changes 2024-07-15 19:49:51.660971428 +0200
@@ -1,0 +2,47 @@
+Sun Jul 14 17:48:36 UTC 2024 - eyadlore...@gmail.com
+
+- Update to version 0.2.5:
+  * Fixed issue where a model's SYSTEM message would not be applied
+
+- Update to version 0.2.4:
+  * Fixed issue where context, load_duration and total_duration 
+    fields would not be set in the /api/generate endpoint.
+  * Ollama will no longer error when loading models larger than
+    system memory, provided disk space is available
+
+- Update to version 0.2.3:
+  * Fix issue where system prompt would not be applied
+
+- Update to version 0.2.2:
+  * Fixed errors that occurred when using Ollama with Nvidia V100 
+    GPUs
+  * glm4 models will no longer fail to load due to out-of-memory
+    errors
+  * Fixed error that would occur when running deepseek-v2 and 
+    deepseek-coder-v2 models
+  * Fixed a series of out-of-memory issues when using Nvidia
+    GPUs
+  * Fixed a series of errors that would occur when using multiple 
+    Radeon GPUs
+
+- Update to version 0.2.1:
+  * Fixed issue where setting OLLAMA_NUM_PARALLEL would cause 
+    models to be reloaded after each request
+
+- Update to version 0.2.0:
+  * Ollama 0.2.0 is now available with concurrency support.
+    This unlocks two features:
+    ~ Ollama can now serve multiple requests at the same time
+    ~ Ollama now supports loading different models at the same time
+  * New models: GLM-4: A strong multilingual general language
+    model with performance competitive with Llama 3.
+  * New models: CodeGeeX4: A versatile model for AI software 
+    development scenarios, including code completion.
+  * New models: Gemma 2: Improved output quality, and base text
+    generation models are now available
+  * Ollama will now show a better error if a model architecture 
+    isn't supported
+  * Improved handling of quotes and spaces in Modelfile FROM lines
+  * Ollama will now return an error if the system does not have 
+    enough memory to run a model on Linux
+-------------------------------------------------------------------
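
The concurrency work in 0.2.0/0.2.1 above is controlled through environment
variables, and the 0.2.4 entry restores the timing fields of non-streaming
/api/generate responses. A minimal sketch of both, assuming a local server
and a placeholder model name ("llama3"); the parallelism values are
illustrative, not recommendations:

    # OLLAMA_NUM_PARALLEL is the variable whose reload bug 0.2.1 fixed;
    # OLLAMA_MAX_LOADED_MODELS caps how many models stay loaded at once.
    export OLLAMA_NUM_PARALLEL=4
    export OLLAMA_MAX_LOADED_MODELS=2
    ollama serve &

    # 0.2.4 fixed context, load_duration and total_duration (nanoseconds)
    # not being set in /api/generate responses.
    curl -s http://localhost:11434/api/generate \
        -d '{"model": "llama3", "prompt": "Hello", "stream": false}'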

Old:
----
  ollama-0.1.48.obscpio

New:
----
  ollama-0.2.5.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ ollama.spec ++++++
--- /var/tmp/diff_new_pack.Ofgihb/_old  2024-07-15 19:49:53.429036565 +0200
+++ /var/tmp/diff_new_pack.Ofgihb/_new  2024-07-15 19:49:53.433036713 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           ollama
-Version:        0.1.48
+Version:        0.2.5
 Release:        0
 Summary:        Tool for running AI models on-premise
 License:        MIT

++++++ _service ++++++
--- /var/tmp/diff_new_pack.Ofgihb/_old  2024-07-15 19:49:53.461037744 +0200
+++ /var/tmp/diff_new_pack.Ofgihb/_new  2024-07-15 19:49:53.465037892 +0200
@@ -3,7 +3,7 @@
   <service name="obs_scm" mode="manual">
     <param name="url">https://github.com/ollama/ollama.git</param>
     <param name="scm">git</param>
-    <param name="revision">v0.1.48</param>
+    <param name="revision">v0.2.5</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>
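
Since the obs_scm service above runs in mode="manual", it is only executed
on demand. A sketch of the local update step, assuming a checked-out package
and standard osc tooling:

    # Re-run the manual services: clone the git URL, check out tag v0.2.5,
    # and derive the version via versionrewrite-pattern v(.*) -> 0.2.5.
    osc service manualrun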

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.Ofgihb/_old  2024-07-15 19:49:53.485038628 +0200
+++ /var/tmp/diff_new_pack.Ofgihb/_new  2024-07-15 19:49:53.489038776 +0200
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
                 <param name="url">https://github.com/ollama/ollama.git</param>
-              <param name="changesrevision">717f7229eb4f9220d4070aae617923950643d327</param></service></servicedata>
+              <param name="changesrevision">f7ee0123008dbdb3fd5954438d12196951b58b78</param></service></servicedata>
 (No newline at EOF)
 

++++++ enable-lto.patch ++++++
--- /var/tmp/diff_new_pack.Ofgihb/_old  2024-07-15 19:49:53.501039218 +0200
+++ /var/tmp/diff_new_pack.Ofgihb/_new  2024-07-15 19:49:53.501039218 +0200
@@ -1,28 +1,28 @@
 diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
-index 28ce1f2..4193a43 100755
+index db2c6c3..8194cd9 100755
 --- a/llm/generate/gen_linux.sh
 +++ b/llm/generate/gen_linux.sh
 @@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
      fi
  fi
- COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
-+COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+ COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
  source $(dirname $0)/gen_common.sh
  init_vars
  git_module_setup
 @@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
          init_vars
          echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
-         CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+        CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on"
+         CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++        CMAKE_DEFS="${CMAKE_DEFS} -DGGML_LTO=on"
          BUILD_DIR="../build/linux/${ARCH}/cpu"
          echo "Building custom CPU"
          build
 @@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
-         # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
+         # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
  
-         COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
-+        COMMON_CPU_DEFS="-DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+         COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
++        COMMON_CPU_DEFS="-DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
          if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
              #
              # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
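
The refreshed patch follows upstream's rename of the llama.cpp build options
from LLAMA_* to GGML_* while still injecting link-time optimization. Outside
the packaging script, the equivalent standalone configure step would look
roughly like this (a sketch with an assumed source checkout, not the
package's exact build invocation):

    # Configure with the same flags the patch appends: LTO plus an
    # explicit Release build type, on top of the non-native CPU baseline.
    cmake -B build \
        -DBUILD_SHARED_LIBS=off \
        -DCMAKE_POSITION_INDEPENDENT_CODE=on \
        -DGGML_NATIVE=off \
        -DGGML_LTO=on \
        -DCMAKE_BUILD_TYPE=Release
    cmake --build build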

++++++ ollama-0.1.48.obscpio -> ollama-0.2.5.obscpio ++++++
/work/SRC/openSUSE:Factory/ollama/ollama-0.1.48.obscpio /work/SRC/openSUSE:Factory/.ollama.new.17339/ollama-0.2.5.obscpio differ: char 49, line 1

++++++ ollama.obsinfo ++++++
--- /var/tmp/diff_new_pack.Ofgihb/_old  2024-07-15 19:49:53.545040839 +0200
+++ /var/tmp/diff_new_pack.Ofgihb/_new  2024-07-15 19:49:53.549040986 +0200
@@ -1,5 +1,5 @@
 name: ollama
-version: 0.1.48
-mtime: 1719628771
-commit: 717f7229eb4f9220d4070aae617923950643d327
+version: 0.2.5
+mtime: 1720908480
+commit: f7ee0123008dbdb3fd5954438d12196951b58b78
 

++++++ vendor.tar.zstd ++++++
Binary files /var/tmp/diff_new_pack.Ofgihb/_old and /var/tmp/diff_new_pack.Ofgihb/_new differ
