This is an automated email from the ASF dual-hosted git repository.

xtsong pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-agents.git

commit 4a2fc467c3bfc3602805419b6edf8db0e904d7ba
Author: WenjinXie <[email protected]>
AuthorDate: Tue Jan 20 20:50:37 2026 +0800

    [integration][python] Update ollama sdk version and support disabling think.
---
 docs/content/docs/development/chat_models.md       | 23 +++++++++++-----------
 .../integrations/chat_models/ollama_chat_model.py  | 10 +++++++++-
 python/pyproject.toml                              |  2 +-
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/docs/content/docs/development/chat_models.md b/docs/content/docs/development/chat_models.md
index d6659425..8894eb78 100644
--- a/docs/content/docs/development/chat_models.md
+++ b/docs/content/docs/development/chat_models.md
@@ -346,17 +346,18 @@ Ollama provides local chat models that run on your machine, offering privacy, co
 
 {{< tab "Python" >}}
 
-| Parameter | Type | Default | Description |
-|-----------|------|---------|-------------|
-| `connection` | str | Required | Reference to connection method name |
-| `model` | str | Required | Name of the chat model to use |
-| `prompt` | Prompt \| str | None | Prompt template or reference to prompt resource |
-| `tools` | List[str] | None | List of tool names available to the model |
-| `temperature` | float | `0.75` | Sampling temperature (0.0 to 1.0) |
-| `num_ctx` | int | `2048` | Maximum number of context tokens |
-| `keep_alive` | str \| float | `"5m"` | How long to keep model loaded in memory |
-| `extract_reasoning` | bool | `True` | Extract reasoning content from response |
-| `additional_kwargs` | dict | `{}` | Additional Ollama API parameters |
+| Parameter           | Type                                     | Default  | Description                                     |
+|---------------------|------------------------------------------|----------|-------------------------------------------------|
+| `connection`        | str                                      | Required | Reference to connection method name             |
+| `model`             | str                                      | Required | Name of the chat model to use                   |
+| `prompt`            | Prompt \| str                            | None     | Prompt template or reference to prompt resource |
+| `tools`             | List[str]                                | None     | List of tool names available to the model       |
+| `temperature`       | float                                    | `0.75`   | Sampling temperature (0.0 to 1.0)               |
+| `num_ctx`           | int                                      | `2048`   | Maximum number of context tokens                |
+| `keep_alive`        | str \| float                             | `"5m"`   | How long to keep model loaded in memory         |
+| `extract_reasoning` | bool                                     | `True`   | Extract reasoning content from response         |
+| `additional_kwargs` | dict                                     | `{}`     | Additional Ollama API parameters                |
+| `think`             | bool \| Literal["low", "medium", "high"] | `True`   | Whether to enable model thinking                |
 {{< /tab >}}
 
 {{< tab "Java" >}}
diff --git a/python/flink_agents/integrations/chat_models/ollama_chat_model.py b/python/flink_agents/integrations/chat_models/ollama_chat_model.py
index 4bbc74b6..a879dcf9 100644
--- a/python/flink_agents/integrations/chat_models/ollama_chat_model.py
+++ b/python/flink_agents/integrations/chat_models/ollama_chat_model.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 ################################################################################
 import uuid
-from typing import Any, Dict, List, Sequence
+from typing import Any, Dict, List, Literal, Sequence
 
 from ollama import Client, Message
 from pydantic import Field
@@ -103,6 +103,7 @@ class OllamaChatModelConnection(BaseChatModelConnection):
             tools=ollama_tools,
             options=kwargs,
             keep_alive=kwargs.get("keep_alive", False),
+            think=kwargs.get("think", True),
         )
 
         ollama_tool_calls = response.message.tool_calls
@@ -224,6 +225,10 @@ class OllamaChatModelSetup(BaseChatModelSetup):
         "stores it in additional_kwargs.",
     )
 
+    think: bool | Literal["low", "medium", "high"] = Field(
+        default=True, description="Whether or not to enable thinking for thinking models."
+    )
+
     def __init__(
         self,
         connection: str,
@@ -233,6 +238,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
         request_timeout: float | None = DEFAULT_REQUEST_TIMEOUT,
         additional_kwargs: Dict[str, Any] | None = None,
         keep_alive: float | str | None = None,
+        think: bool | Literal["low", "medium", "high"] = True,
         extract_reasoning: bool | None = True,
         **kwargs: Any,
     ) -> None:
@@ -247,6 +253,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
             request_timeout=request_timeout,
             additional_kwargs=additional_kwargs,
             keep_alive=keep_alive,
+            think=think,
             extract_reasoning=extract_reasoning,
             **kwargs,
         )
@@ -259,6 +266,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
             "temperature": self.temperature,
             "num_ctx": self.num_ctx,
             "keep_alive": self.keep_alive,
+            "think": self.think,
             "extract_reasoning": self.extract_reasoning,
         }
         return {
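
For context, a minimal usage sketch of the new option (the connection and model
names below are hypothetical; only the `think` parameter itself comes from this
change). A bool toggles thinking on or off, while "low"/"medium"/"high" request
a thinking effort level on models that support it:

    from flink_agents.integrations.chat_models.ollama_chat_model import (
        OllamaChatModelSetup,
    )

    # Disable thinking entirely; pass "low", "medium", or "high" instead of a
    # bool to request a specific thinking effort where the model supports it.
    chat_model = OllamaChatModelSetup(
        connection="ollama_connection",  # hypothetical connection resource name
        model="qwen3:8b",                # hypothetical thinking-capable model
        think=False,
    )
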
diff --git a/python/pyproject.toml b/python/pyproject.toml
index a4a17a90..40b13bd2 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -49,7 +49,7 @@ dependencies = [
     "setuptools>=75.3",
     "find_libpython",
     #TODO: Separate integration dependencies from project
-    "ollama==0.4.8",
+    "ollama==0.6.1",
     "dashscope~=1.24.2",
     "openai>=1.66.3",
     "anthropic>=0.64.0",
