Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package llamacpp for openSUSE:Factory 
checked in at 2025-05-09 18:52:00
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/llamacpp (Old)
 and      /work/SRC/openSUSE:Factory/.llamacpp.new.30101 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "llamacpp"

Fri May  9 18:52:00 2025 rev:7 rq:1276203 version:5321

Changes:
--------
--- /work/SRC/openSUSE:Factory/llamacpp/llamacpp.changes        2025-04-20 20:02:18.554778936 +0200
+++ /work/SRC/openSUSE:Factory/.llamacpp.new.30101/llamacpp.changes     2025-05-09 18:54:00.451691188 +0200
@@ -1,0 +2,17 @@
+Fri May  9 09:25:51 UTC 2025 - Eyad Issa <eyadlore...@gmail.com>
+
+- Use source urls instead of obs_scm
+
+- Add libllava and libmtmd libraries
+
+- Update to version 5321:
+  * A new binary llama-mtmd-cli is introduced to replace llava-cli,
+    minicpmv-cli, gemma3-cli (#13012) and qwen2vl-cli (#13141);
+    libllava will be deprecated
+  * Full changes here:
+    https://github.com/ggml-org/llama.cpp/compare/b5158...b5321
+
+- Delete patch 0002-build-main-cli.patch: build system changed
+  upstream
+
+-------------------------------------------------------------------

Old:
----
  0002-build-main-cli.patch
  llamacpp-5158.obscpio

New:
----
  llamacpp-5321.tar.gz

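The changelog entry above introduces llama-mtmd-cli as the single replacement
for the per-model vision CLIs. For orientation only, an invocation might look
roughly like the sketch below; model.gguf, mmproj.gguf and photo.jpg are
hypothetical local files, and the flags should be checked against the packaged
binary's --help output:

  # hedged sketch: running a multimodal model with the consolidated CLI
  llama-mtmd-cli -m model.gguf --mmproj mmproj.gguf \
      --image photo.jpg -p "Describe this image."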

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ llamacpp.spec ++++++
--- /var/tmp/diff_new_pack.izmkGH/_old  2025-05-09 18:54:01.187721805 +0200
+++ /var/tmp/diff_new_pack.izmkGH/_new  2025-05-09 18:54:01.187721805 +0200
@@ -17,14 +17,13 @@
 
 
 Name:           llamacpp
-Version:        5158
+Version:        5321
 Release:        0
 Summary:        llama-cli tool to run inference using the llama.cpp library
 License:        MIT
-URL:            https://github.com/ggerganov/llama.cpp
-Source:         %{name}-%{version}.tar.gz
-Patch0:         0001-dl-load-path.patch
-Patch1:         0002-build-main-cli.patch
+URL:            https://github.com/ggml-org/llama.cpp
+Source:         https://github.com/ggml-org/llama.cpp/archive/b%{version}/%{name}-%{version}.tar.gz
+Patch1:         0001-dl-load-path.patch
 BuildRequires:  cmake >= 3.14
 BuildRequires:  gcc-c++
 BuildRequires:  git
@@ -118,8 +117,28 @@
 This package includes the development files necessary for building applications
 that depend on ggml.
 
+%package -n libmtmd
+Summary:        Library to run multimodal inference models
+
+%description -n libmtmd
+As outlined in the history, libmtmd is the modern library designed to
+replace the original llava.cpp implementation for handling multimodal inputs.
+
+Built upon clip.cpp (similar to llava.cpp), libmtmd offers several advantages:
+- Unified Interface: Aims to consolidate interaction for various multimodal models.
+- Improved UX/DX: Features a more intuitive API, inspired by the Processor class
+  in the Hugging Face transformers library.
+- Flexibility: Designed to support multiple input types (text, audio, images) while
+  respecting the wide variety of chat templates used by different models.
+
+%package -n libllava
+Summary:        Library to run multimodal inference models
+
+%description -n libllava
+Library to handle multimodal inputs for llama.cpp.
+
 %prep
-%autosetup -p1
+%autosetup -p1 -n llama.cpp-b%{version}
 
 %build
 %define _lto_cflags %{nil}
@@ -128,11 +147,13 @@
 %cmake \
     -DCMAKE_SKIP_RPATH=ON \
     -DLLAMA_BUILD_TESTS=OFF \
+    -DLLAMA_BUILD_EXAMPLES=OFF \
+    -DLLAMA_BUILD_TOOLS=ON \
+    -DLLAMA_CURL=ON \
     -DGGML_CPU=ON \
     -DGGML_VULKAN=ON \
     -DGGML_OPENCL=ON \
     -DGGML_OPENCL_USE_ADRENO_KERNELS=OFF \
-    -DLLAMA_CURL=ON
 
 %cmake_build
 
@@ -141,42 +162,55 @@
 
 # used for shader compilation only
 rm %{buildroot}%{_bindir}/vulkan-shaders-gen
-# remove dev scripts
+# dev scripts
 rm %{buildroot}%{_bindir}/convert_hf_to_gguf.py
 
 %files
 %doc README.md
 %license LICENSE
-
-%{_bindir}/llama-cli
-%{_bindir}/llama-server
-%{_bindir}/llama-bench
+%{_bindir}/llama-*
 
 %files -n libllama
+%license LICENSE
 %{_libdir}/libllama.so
 
 %files -n libggml
+%license LICENSE
 %{_libdir}/libggml.so
 
 %files -n libggml-base
+%license LICENSE
 %{_libdir}/libggml-base.so
 
 %files -n libggml-cpu
+%license LICENSE
 %{_libdir}/libggml-cpu.so
 
 %files -n libggml-vulkan
+%license LICENSE
 %{_libdir}/libggml-vulkan.so
 
 %files -n libggml-opencl
+%license LICENSE
 %{_libdir}/libggml-opencl.so
 
 %files devel
+%license LICENSE
 %{_includedir}/llama*
 %{_libdir}/cmake/llama
 %{_libdir}/pkgconfig/llama.pc
 
 %files -n ggml-devel
+%license LICENSE
 %{_includedir}/ggml*.h
 %{_includedir}/gguf.h
 %{_libdir}/cmake/ggml
 
+%files -n libmtmd
+%license LICENSE
+%{_libdir}/libmtmd_shared.so
+
+%files -n libllava
+%license LICENSE
+%{_libdir}/libllava_shared.so
+
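
As a usage note, the devel package continues to ship llama.pc, so a downstream
build against libllama can be sketched like this; my_app.cpp is a hypothetical
source file and the module name simply follows the packaged pkg-config file:

  # hedged sketch: compiling against the packaged libllama via pkg-config
  g++ $(pkg-config --cflags llama) my_app.cpp $(pkg-config --libs llama) -o my_app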

++++++ _service ++++++
--- /var/tmp/diff_new_pack.izmkGH/_old  2025-05-09 18:54:01.227723469 +0200
+++ /var/tmp/diff_new_pack.izmkGH/_new  2025-05-09 18:54:01.231723636 +0200
@@ -1,24 +1,5 @@
 <services>
   <service name="format_spec_file" mode="manual" />
-  <service name="obs_scm" mode="manual">
-    <param name="filename">llamacpp</param>
-    <param name="url">https://github.com/ggml-org/llama.cpp.git</param>
-    <param name="scm">git</param>
-    <param name="revision">b5158</param>
-    <param name="versionformat">@PARENT_TAG@</param>
-    <param name="versionrewrite-pattern">b(.*)</param>
-    <param name="changesgenerate">enable</param>
-    <param name="submodules">enable</param>
-  </service>
-  <service name="set_version" mode="manual" />
-
-  <service name="tar" mode="buildtime">
-    <param name="package-meta">yes</param>
-  </service>
-  <service name="recompress" mode="buildtime">
-    <param name="compression">gz</param>
-    <param name="file">llamacpp-*.tar</param>
-  </service>
-
+  <service name="download_files" mode="manual" />
 </services>
 
