Skip to content

Commit c73b24c

Browse files
andreahlertskrawcz
authored and committed
fix: update deprecated OpenAI models in example notebooks
Replaces gpt-3.5-turbo with gpt-4o-mini and gpt-4-turbo-preview with gpt-4o in 5 example notebooks. Skips parallelism/notebook.ipynb which intentionally uses multiple models to demonstrate parallel execution.
1 parent 182d462 commit c73b24c

5 files changed

Lines changed: 6 additions & 6 deletions

File tree

examples/conversational-rag/graph_db_example/notebook.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@
242242
" messages = state[\"chat_history\"]\n",
243243
" # Call the function\n",
244244
" response = client.chat.completions.create(\n",
245-
" model=\"gpt-4-turbo-preview\",\n",
245+
" model=\"gpt-4o\",\n",
246246
" messages=messages,\n",
247247
" tools=[run_cypher_query_tool_description],\n",
248248
" tool_choice=\"auto\",\n",
@@ -315,7 +315,7 @@
315315
" \"\"\"AI step to generate the response given the current chat history.\"\"\"\n",
316316
" messages = state[\"chat_history\"]\n",
317317
" response = client.chat.completions.create(\n",
318-
" model=\"gpt-4-turbo-preview\",\n",
318+
" model=\"gpt-4o\",\n",
319319
" messages=messages,\n",
320320
" ) # get a new response from the model where it can see the function response\n",
321321
" response_message = response.choices[0].message\n",

examples/multi-modal-chatbot/burr_demo.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@
174174
"\n",
175175
"@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\n",
176176
"def chat_response(\n",
177-
" state: State, prepend_prompt: str, model: str = \"gpt-3.5-turbo\"\n",
177+
" state: State, prepend_prompt: str, model: str = \"gpt-4o-mini\"\n",
178178
") -> State:\n",
179179
"\n",
180180
" chat_history = copy.deepcopy(state[\"chat_history\"])\n",

examples/simple-chatbot-intro/notebook.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@
8989
" but we wanted to keep it simple to demonstrate\"\"\"\n",
9090
" client = openai.Client() # replace with your favorite LLM client library\n",
9191
" content = client.chat.completions.create(\n",
92-
" model=\"gpt-3.5-turbo\",\n",
92+
" model=\"gpt-4o-mini\",\n",
9393
" messages=state[\"chat_history\"],\n",
9494
" ).choices[0].message.content\n",
9595
" chat_item = {\n",

examples/talks/data_for_ai_oct_2024.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -547,7 +547,7 @@
547547
"\n",
548548
"@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\n",
549549
"def chat_response(\n",
550-
" state: State, prepend_prompt: str, model: str = \"gpt-3.5-turbo\"\n",
550+
" state: State, prepend_prompt: str, model: str = \"gpt-4o-mini\"\n",
551551
") -> State:\n",
552552
" \n",
553553
" chat_history = copy.deepcopy(state[\"chat_history\"])\n",

examples/tracing-and-spans/burr_otel_demo.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@
183183
"\n",
184184
"@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\n",
185185
"def chat_response(\n",
186-
" state: State, prepend_prompt: str, model: str = \"gpt-3.5-turbo\"\n",
186+
" state: State, prepend_prompt: str, model: str = \"gpt-4o-mini\"\n",
187187
") -> State:\n",
188188
" \n",
189189
" chat_history = copy.deepcopy(state[\"chat_history\"])\n",

0 commit comments

Comments (0)