This is an automated email from the ASF dual-hosted git repository.

skrawcz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/burr.git

commit c73b24cede7cb742ea021ef7c8dce1d0f6ed9ff5
Author: André Ahlert <[email protected]>
AuthorDate: Thu Apr 9 10:28:05 2026 -0300

    fix: update deprecated OpenAI models in example notebooks
    
    Replaces gpt-3.5-turbo with gpt-4o-mini and gpt-4-turbo-preview with
    gpt-4o in 5 example notebooks. Skips parallelism/notebook.ipynb which
    intentionally uses multiple models to demonstrate parallel execution.
---
 examples/conversational-rag/graph_db_example/notebook.ipynb | 4 ++--
 examples/multi-modal-chatbot/burr_demo.ipynb                | 2 +-
 examples/simple-chatbot-intro/notebook.ipynb                | 2 +-
 examples/talks/data_for_ai_oct_2024.ipynb                   | 2 +-
 examples/tracing-and-spans/burr_otel_demo.ipynb             | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/conversational-rag/graph_db_example/notebook.ipynb 
b/examples/conversational-rag/graph_db_example/notebook.ipynb
index 13758a35..7625cbe2 100644
--- a/examples/conversational-rag/graph_db_example/notebook.ipynb
+++ b/examples/conversational-rag/graph_db_example/notebook.ipynb
@@ -242,7 +242,7 @@
     "    messages = state[\"chat_history\"]\n",
     "    # Call the function\n",
     "    response = client.chat.completions.create(\n",
-    "        model=\"gpt-4-turbo-preview\",\n",
+    "        model=\"gpt-4o\",\n",
     "        messages=messages,\n",
     "        tools=[run_cypher_query_tool_description],\n",
     "        tool_choice=\"auto\",\n",
@@ -315,7 +315,7 @@
     "    \"\"\"AI step to generate the response given the current chat 
history.\"\"\"\n",
     "    messages = state[\"chat_history\"]\n",
     "    response = client.chat.completions.create(\n",
-    "        model=\"gpt-4-turbo-preview\",\n",
+    "        model=\"gpt-4o\",\n",
     "        messages=messages,\n",
     "    )  # get a new response from the model where it can see the function 
response\n",
     "    response_message = response.choices[0].message\n",
diff --git a/examples/multi-modal-chatbot/burr_demo.ipynb 
b/examples/multi-modal-chatbot/burr_demo.ipynb
index 8ab0712f..6637ab86 100644
--- a/examples/multi-modal-chatbot/burr_demo.ipynb
+++ b/examples/multi-modal-chatbot/burr_demo.ipynb
@@ -174,7 +174,7 @@
     "\n",
     "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], 
writes=[\"response\"])\n",
     "def chat_response(\n",
-    "        state: State, prepend_prompt: str, model: str = 
\"gpt-3.5-turbo\"\n",
+    "        state: State, prepend_prompt: str, model: str = 
\"gpt-4o-mini\"\n",
     ") -> State:\n",
     "\n",
     "    chat_history = copy.deepcopy(state[\"chat_history\"])\n",
diff --git a/examples/simple-chatbot-intro/notebook.ipynb 
b/examples/simple-chatbot-intro/notebook.ipynb
index 3e4956fa..9b475aa3 100644
--- a/examples/simple-chatbot-intro/notebook.ipynb
+++ b/examples/simple-chatbot-intro/notebook.ipynb
@@ -89,7 +89,7 @@
     "    but we wanted to keep it simple to demonstrate\"\"\"\n",
     "    client = openai.Client()  # replace with your favorite LLM client 
library\n",
     "    content = client.chat.completions.create(\n",
-    "        model=\"gpt-3.5-turbo\",\n",
+    "        model=\"gpt-4o-mini\",\n",
     "        messages=state[\"chat_history\"],\n",
     "    ).choices[0].message.content\n",
     "    chat_item = {\n",
diff --git a/examples/talks/data_for_ai_oct_2024.ipynb 
b/examples/talks/data_for_ai_oct_2024.ipynb
index 49eb22f6..d135741c 100644
--- a/examples/talks/data_for_ai_oct_2024.ipynb
+++ b/examples/talks/data_for_ai_oct_2024.ipynb
@@ -547,7 +547,7 @@
     "\n",
     "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], 
writes=[\"response\"])\n",
     "def chat_response(\n",
-    "        state: State, prepend_prompt: str, model: str = 
\"gpt-3.5-turbo\"\n",
+    "        state: State, prepend_prompt: str, model: str = 
\"gpt-4o-mini\"\n",
     ") -> State:\n",
     "    \n",
     "    chat_history = copy.deepcopy(state[\"chat_history\"])\n",
diff --git a/examples/tracing-and-spans/burr_otel_demo.ipynb 
b/examples/tracing-and-spans/burr_otel_demo.ipynb
index 2d446573..95d0958c 100644
--- a/examples/tracing-and-spans/burr_otel_demo.ipynb
+++ b/examples/tracing-and-spans/burr_otel_demo.ipynb
@@ -183,7 +183,7 @@
     "\n",
     "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], 
writes=[\"response\"])\n",
     "def chat_response(\n",
-    "        state: State, prepend_prompt: str, model: str = 
\"gpt-3.5-turbo\"\n",
+    "        state: State, prepend_prompt: str, model: str = 
\"gpt-4o-mini\"\n",
     ") -> State:\n",
     "    \n",
     "    chat_history = copy.deepcopy(state[\"chat_history\"])\n",

Reply via email to