Merge pull request #3 from DanielMeixner/p4nb
"Add Phase 4 - Efficiency code" - Jupyter Notebook
denniszielke authored May 22, 2024
2 parents 09f22ee + be6ad5d commit 91a449e
Showing 3 changed files with 512 additions and 0 deletions.
1 change: 1 addition & 0 deletions requirements.txt
@@ -6,3 +6,4 @@ pydantic==2.7.1
python-dotenv==1.0.1
quadrant==1.0
requests==2.32.0
tiktoken==0.7.0
280 changes: 280 additions & 0 deletions src-agents/phase3/notebook.ipynb
@@ -0,0 +1,280 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Phase 3 - Function Calling\n",
"If not already done run this in the top level folder:\n",
"```\n",
"pip install -r requirements.txt\n",
"```\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import requests\n",
"import os\n",
"import openai\n",
"from enum import Enum\n",
"from pydantic import BaseModel\n",
"from openai import AzureOpenAI\n",
"from dotenv import load_dotenv\n",
"\n",
"# Load environment variables\n",
"if load_dotenv():\n",
" print(\"Found Azure OpenAI API Base Endpoint: \" + os.getenv(\"AZURE_OPENAI_ENDPOINT\"))\n",
"else: \n",
" print(\"Azure OpenAI API Base Endpoint not found. Have you configured the .env file?\")\n",
" \n",
"API_KEY = os.getenv(\"AZURE_OPENAI_API_KEY\")\n",
"API_VERSION = os.getenv(\"OPENAI_API_VERSION\")\n",
"RESOURCE_ENDPOINT = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n",
"\n",
"\n",
"client = AzureOpenAI(\n",
" azure_endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n",
" api_key = os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
" api_version = os.getenv(\"AZURE_OPENAI_VERSION\")\n",
")\n",
"deployment_name = os.getenv(\"AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME\")\n",
"model_name = os.getenv(\"AZURE_OPENAI_COMPLETION_MODEL\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is the object model for receiving questions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"class QuestionType(str, Enum):\n",
" multiple_choice = \"multiple_choice\"\n",
" true_false = \"true_false\"\n",
" popular_choice = \"popular_choice\"\n",
" estimation = \"estimation\"\n",
"\n",
"class Ask(BaseModel):\n",
" question: str | None = None\n",
" type: QuestionType\n",
" correlationToken: str | None = None\n",
"\n",
"class Answer(BaseModel):\n",
" answer: str\n",
" correlationToken: str | None = None\n",
" promptTokensUsed: int | None = None\n",
" completionTokensUsed: int | None = None\n"
]
},
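{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small illustration (not part of the original service code), the two models can be instantiated directly; the values below are made up and only show the field names and defaults."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical example objects to illustrate the request/response shapes\n",
"example_ask = Ask(question=\"What time is it in New York?\", type=QuestionType.estimation)\n",
"example_answer = Answer(answer=\"10:30:00 AM\", correlationToken=example_ask.correlationToken)\n",
"print(example_ask.model_dump())\n",
"print(example_answer.model_dump())"
]
},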
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sample Function "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pytz\n",
"from datetime import datetime\n",
"\n",
"def get_current_time(location):\n",
" try:\n",
" # Get the timezone for the city\n",
" timezone = pytz.timezone(location)\n",
"\n",
" # Get the current time in the timezone\n",
" now = datetime.now(timezone)\n",
" current_time = now.strftime(\"%I:%M:%S %p\")\n",
"\n",
" return current_time\n",
" except:\n",
" return \"Sorry, I couldn't find the timezone for that location.\""
]
},
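{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the helper (illustration only, assuming pytz is installed from requirements.txt): a known IANA name resolves, an unknown location falls back to the error message."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical check: a valid IANA timezone name and an invalid location\n",
"print(get_current_time(\"Europe/London\"))\n",
"print(get_current_time(\"Atlantis\"))"
]
},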
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Make the model aware of the function\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"functions = [\n",
" {\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"get_current_time\",\n",
" \"description\": \"Get the current time in a given location\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"location\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The location name. The pytz is used to get the timezone for that location. Location names should be in a format like America/New_York, Asia/Bangkok, Europe/London\",\n",
" }\n",
" },\n",
" \"required\": [\"location\"],\n",
" },\n",
" }\n",
" }\n",
" ] \n",
"available_functions = {\n",
" \"get_current_time\": get_current_time \n",
" } "
]
},
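{
"cell_type": "markdown",
"metadata": {},
"source": [
"For illustration only: this is roughly how a tool call returned by the model is dispatched through available_functions. The arguments string below is made up; ask_question further down does the same thing with real tool calls."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical tool-call arguments, as the model would return them (a JSON string)\n",
"fake_arguments = '{\"location\": \"America/New_York\"}'\n",
"fn = available_functions[\"get_current_time\"]\n",
"print(fn(**json.loads(fake_arguments)))"
]
},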
{
"cell_type": "markdown",
"metadata": {},
"source": [
"YOUR Mission: Adjust the function below and reuse it in the main.py file later to deploy to Azure and to update your service\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"async def ask_question(ask: Ask):\n",
" \"\"\"\n",
" Ask a question\n",
" \"\"\"\n",
" \n",
" question = ask.question\n",
" messages= [{\"role\" : \"assistant\", \"content\" : question}\n",
" , { \"role\" : \"system\", \"content\" : \"if asked for time answer in the format HH:MM:SS AM/PM only. Do not answer with a full sentence.\"}\n",
" ]\n",
" first_response = client.chat.completions.create(\n",
" model = deployment_name,\n",
" messages = messages,\n",
" tools = functions,\n",
" tool_choice = \"auto\",\n",
" )\n",
"\n",
" print(first_response)\n",
" response_message = first_response.choices[0].message\n",
" tool_calls = response_message.tool_calls\n",
"\n",
" # Step 2: check if GPT wanted to call a function\n",
" if tool_calls:\n",
" print(\"Recommended Function call:\")\n",
" print(tool_calls)\n",
" print()\n",
" \n",
" # Step 3: call the function\n",
" messages.append(response_message)\n",
"\n",
" for tool_call in tool_calls:\n",
" function_name = tool_call.function.name\n",
" # verify function exists\n",
" if function_name not in available_functions:\n",
" return \"Function \" + function_name + \" does not exist\"\n",
" else:\n",
" print(\"Calling function: \" + function_name)\n",
" function_to_call = available_functions[function_name]\n",
" function_args = json.loads(tool_call.function.arguments)\n",
" print(function_args)\n",
" function_response = function_to_call(**function_args)\n",
" messages.append(\n",
" {\n",
" \"tool_call_id\": tool_call.id,\n",
" \"role\": \"tool\",\n",
" \"name\": function_name,\n",
" \"content\": function_response,\n",
" }\n",
" ) \n",
" print(\"Addding this message to the next prompt:\") \n",
" print (messages)\n",
" \n",
" # extend conversation with function response\n",
" second_response = client.chat.completions.create(\n",
" model = model_name,\n",
" messages = messages) # get a new response from the model where it can see the function response\n",
" \n",
" print(\"second_response\")\n",
" \n",
" return second_response.choices[0].message.content\n",
" \n",
"\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Use this snippet to try your method with several questions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"ask = Ask(question=\"What time is it in New York?\", type=QuestionType.estimation)\n",
"answer = await ask_question(ask)\n",
"print('Answer:', answer )\n"
]
},
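{
"cell_type": "markdown",
"metadata": {},
"source": [
"A second, made-up question that needs no time lookup can confirm that the path without a tool call also returns an answer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"ask = Ask(question=\"Is Bangkok east of London? Answer with true or false.\", type=QuestionType.true_false)\n",
"answer = await ask_question(ask)\n",
"print('Answer:', answer)\n"
]
},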
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Make sure you transfer your code changes into main.py (or additional files). Then redeploy your container using this command.\n",
"```\n",
"bash ./azd-hooks/deploy.sh phase1 $AZURE_ENV_NAME\n",
"```\n",
"Make sure to provide the URL of your endpoint in the team portal!"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}