This is an automated email from the ASF dual-hosted git repository.

skrawcz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/burr.git

commit 182d46247ddd6706bd541804c70bd45cb78cdffc
Author: André Ahlert <[email protected]>
AuthorDate: Thu Apr 9 09:48:00 2026 -0300

    fix: use gpt-4o for routing actions, gpt-4o-mini for generation
    
    choose_mode uses gpt-4o (replaces gpt-4o-mini) to preserve the intentional
    two-tier model design. chat_response uses gpt-4o-mini (replaces 
gpt-3.5-turbo).
---
 examples/multi-modal-chatbot/application.py      | 2 +-
 examples/streaming-overview/application.py       | 2 +-
 examples/streaming-overview/async_application.py | 2 +-
 examples/test-case-creation/application.py       | 2 +-
 examples/tracing-and-spans/application.py        | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/multi-modal-chatbot/application.py 
b/examples/multi-modal-chatbot/application.py
index 91148640..663d22d4 100644
--- a/examples/multi-modal-chatbot/application.py
+++ b/examples/multi-modal-chatbot/application.py
@@ -72,7 +72,7 @@ def choose_mode(state: State) -> State:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4o-mini",
+        model="gpt-4o",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
diff --git a/examples/streaming-overview/application.py 
b/examples/streaming-overview/application.py
index 1c458771..911ee5f7 100644
--- a/examples/streaming-overview/application.py
+++ b/examples/streaming-overview/application.py
@@ -60,7 +60,7 @@ def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4o-mini",
+        model="gpt-4o",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
diff --git a/examples/streaming-overview/async_application.py 
b/examples/streaming-overview/async_application.py
index 1caa6d88..4ca8402a 100644
--- a/examples/streaming-overview/async_application.py
+++ b/examples/streaming-overview/async_application.py
@@ -61,7 +61,7 @@ async def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = await _get_openai_client().chat.completions.create(
-        model="gpt-4o-mini",
+        model="gpt-4o",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
diff --git a/examples/test-case-creation/application.py 
b/examples/test-case-creation/application.py
index e582510c..20a0a1a0 100644
--- a/examples/test-case-creation/application.py
+++ b/examples/test-case-creation/application.py
@@ -49,7 +49,7 @@ def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4o-mini",
+        model="gpt-4o",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
diff --git a/examples/tracing-and-spans/application.py 
b/examples/tracing-and-spans/application.py
index bc9f6855..a07ce093 100644
--- a/examples/tracing-and-spans/application.py
+++ b/examples/tracing-and-spans/application.py
@@ -69,7 +69,7 @@ def choose_mode(state: State, __tracer: TracerFactory) -> 
Tuple[dict, State]:
             client = _get_openai_client()
         with __tracer("query_openai") as tracer:
             result = client.chat.completions.create(
-                model="gpt-4o-mini",
+                model="gpt-4o",
                 messages=[
                     {"role": "system", "content": "You are a helpful 
assistant"},
                     {"role": "user", "content": prompt},

Reply via email to