This is an automated email from the ASF dual-hosted git repository.

skrawcz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/burr.git

commit c86901dee25a4e8866edc1efc4d751c1cc776ddd
Author: André Ahlert <[email protected]>
AuthorDate: Thu Apr 9 09:30:18 2026 -0300

    fix: update deprecated OpenAI models to gpt-4o-mini in examples
    
    Replaces gpt-3.5-turbo and gpt-4 references with gpt-4o-mini across
    8 example files. Closes #521.
---
 examples/multi-modal-chatbot/application.py         | 4 ++--
 examples/other-examples/hamilton-multi-modal/dag.py | 2 +-
 examples/simple-chatbot-intro/application.py        | 2 +-
 examples/streaming-fastapi/application.py           | 2 +-
 examples/streaming-overview/application.py          | 4 ++--
 examples/streaming-overview/async_application.py    | 4 ++--
 examples/test-case-creation/application.py          | 2 +-
 examples/tracing-and-spans/application.py           | 4 ++--
 8 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/examples/multi-modal-chatbot/application.py 
b/examples/multi-modal-chatbot/application.py
index 63b49536..91148640 100644
--- a/examples/multi-modal-chatbot/application.py
+++ b/examples/multi-modal-chatbot/application.py
@@ -72,7 +72,7 @@ def choose_mode(state: State) -> State:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4",
+        model="gpt-4o-mini",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
@@ -100,7 +100,7 @@ def prompt_for_more(state: State) -> State:
 
 @action(reads=["prompt", "chat_history", "mode"], writes=["response"])
 def chat_response(
-    state: State, prepend_prompt: str, display_type: str = "text", model: str 
= "gpt-3.5-turbo"
+    state: State, prepend_prompt: str, display_type: str = "text", model: str 
= "gpt-4o-mini"
 ) -> State:
     chat_history = copy.deepcopy(state["chat_history"])
     chat_history[-1]["content"] = f"{prepend_prompt}: 
{chat_history[-1]['content']}"
diff --git a/examples/other-examples/hamilton-multi-modal/dag.py 
b/examples/other-examples/hamilton-multi-modal/dag.py
index df7b9d53..61d52d4b 100644
--- a/examples/other-examples/hamilton-multi-modal/dag.py
+++ b/examples/other-examples/hamilton-multi-modal/dag.py
@@ -34,7 +34,7 @@ def client() -> openai.Client:
 
 
 def text_model() -> str:
-    return "gpt-3.5-turbo"
+    return "gpt-4o-mini"
 
 
 def image_model() -> str:
diff --git a/examples/simple-chatbot-intro/application.py 
b/examples/simple-chatbot-intro/application.py
index c96f9516..702de7ac 100644
--- a/examples/simple-chatbot-intro/application.py
+++ b/examples/simple-chatbot-intro/application.py
@@ -43,7 +43,7 @@ def ai_response(state: State) -> Tuple[dict, State]:
     client = openai.Client()  # replace this with your favorite LLM client 
library
     content = (
         client.chat.completions.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             messages=state["chat_history"],
         )
         .choices[0]
diff --git a/examples/streaming-fastapi/application.py 
b/examples/streaming-fastapi/application.py
index d90a5e33..5685627e 100644
--- a/examples/streaming-fastapi/application.py
+++ b/examples/streaming-fastapi/application.py
@@ -95,7 +95,7 @@ async def prompt_for_more(state: State) -> 
AsyncGenerator[Tuple[dict, Optional[S
 
 @streaming_action(reads=["prompt", "chat_history", "mode"], 
writes=["response"])
 async def chat_response(
-    state: State, prepend_prompt: str, model: str = "gpt-3.5-turbo"
+    state: State, prepend_prompt: str, model: str = "gpt-4o-mini"
 ) -> AsyncGenerator[Tuple[dict, Optional[State]], None]:
     """Streaming action, as we don't have the result immediately. This makes 
it more interactive"""
     chat_history = copy.deepcopy(state["chat_history"])
diff --git a/examples/streaming-overview/application.py 
b/examples/streaming-overview/application.py
index 34fe1b20..1c458771 100644
--- a/examples/streaming-overview/application.py
+++ b/examples/streaming-overview/application.py
@@ -60,7 +60,7 @@ def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4",
+        model="gpt-4o-mini",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
@@ -89,7 +89,7 @@ def prompt_for_more(state: State) -> Tuple[dict, State]:
 
 @streaming_action(reads=["prompt", "chat_history", "mode"], 
writes=["response"])
 def chat_response(
-    state: State, prepend_prompt: str, model: str = "gpt-3.5-turbo"
+    state: State, prepend_prompt: str, model: str = "gpt-4o-mini"
 ) -> Generator[Tuple[dict, Optional[State]], None, None]:
     """Streaming action, as we don't have the result immediately. This makes 
it more interactive"""
     chat_history = state["chat_history"].copy()
diff --git a/examples/streaming-overview/async_application.py 
b/examples/streaming-overview/async_application.py
index 11185233..1caa6d88 100644
--- a/examples/streaming-overview/async_application.py
+++ b/examples/streaming-overview/async_application.py
@@ -61,7 +61,7 @@ async def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = await _get_openai_client().chat.completions.create(
-        model="gpt-4",
+        model="gpt-4o-mini",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
@@ -90,7 +90,7 @@ def prompt_for_more(state: State) -> Tuple[dict, State]:
 
 @streaming_action(reads=["prompt", "chat_history", "mode"], 
writes=["response"])
 async def chat_response(
-    state: State, prepend_prompt: str, model: str = "gpt-3.5-turbo"
+    state: State, prepend_prompt: str, model: str = "gpt-4o-mini"
 ) -> Tuple[dict, State]:
     """Streaming action, as we don't have the result immediately. This makes 
it more interactive"""
     chat_history = state["chat_history"].copy()
diff --git a/examples/test-case-creation/application.py 
b/examples/test-case-creation/application.py
index a10786d8..e582510c 100644
--- a/examples/test-case-creation/application.py
+++ b/examples/test-case-creation/application.py
@@ -49,7 +49,7 @@ def choose_mode(state: State) -> Tuple[dict, State]:
     )
 
     result = _get_openai_client().chat.completions.create(
-        model="gpt-4",
+        model="gpt-4o-mini",
         messages=[
             {"role": "system", "content": "You are a helpful assistant"},
             {"role": "user", "content": prompt},
diff --git a/examples/tracing-and-spans/application.py 
b/examples/tracing-and-spans/application.py
index 1116bf71..bc9f6855 100644
--- a/examples/tracing-and-spans/application.py
+++ b/examples/tracing-and-spans/application.py
@@ -69,7 +69,7 @@ def choose_mode(state: State, __tracer: TracerFactory) -> 
Tuple[dict, State]:
             client = _get_openai_client()
         with __tracer("query_openai") as tracer:
             result = client.chat.completions.create(
-                model="gpt-4",
+                model="gpt-4o-mini",
                 messages=[
                     {"role": "system", "content": "You are a helpful 
assistant"},
                     {"role": "user", "content": prompt},
@@ -107,7 +107,7 @@ def chat_response(
     state: State,
     prepend_prompt: str,
     __tracer: TracerFactory,
-    model: str = "gpt-3.5-turbo",
+    model: str = "gpt-4o-mini",
 ) -> Tuple[dict, State]:
     __tracer.log_attributes(model=model, prepend_prompt=prepend_prompt)
     with __tracer("process_chat_history"):

Reply via email to