diff --git a/AGENTS.md b/AGENTS.md
index 0ba6330..19ee864 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -29,10 +29,13 @@ All example scripts are located in the root directory. They follow a consistent
 - `chat_safety.py` - Content safety filter exception handling
 
 **Function Calling Scripts:**
-- `function_calling_basic.py` - Single function declaration, prints tool calls
-- `function_calling_call.py` - Executes the function when requested
-- `function_calling_extended.py` - Full round-trip with function execution and response
-- `function_calling_multiple.py` - Multiple functions, demonstrates choice logic
+- `function_calling_basic.py` - Single function declaration, prints tool calls (no execution)
+- `function_calling_call.py` - Executes the function once if the model requests it
+- `function_calling_extended.py` - Full round-trip: executes, returns tool output, gets final answer
+- `function_calling_errors.py` - Same as extended but with robust error handling (malformed JSON args, missing tool, tool exceptions, JSON serialization)
+- `function_calling_parallel.py` - Shows model requesting multiple tools in one response
+- `function_calling_while_loop.py` - Conversation loop that keeps executing sequential tool calls until the model produces a final natural language answer (with error handling)
+- `function_calling_fewshots.py` - Combines function calling with few-shot example tool calls to reinforce argument schemas and response style
 
 **Structured Outputs Scripts:**
 - `structured_outputs_basic.py` - Basic Pydantic model extraction
diff --git a/README.md b/README.md
index 5b371db..093ee41 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,10 @@ Scripts (in increasing order of capability):
 
 1. [`function_calling_basic.py`](./function_calling_basic.py): Declares a single `lookup_weather` function and prompts the model. It prints the tool call (if any) or falls back to the model's normal content. No actual function execution occurs.
 2. [`function_calling_call.py`](./function_calling_call.py): Executes the `lookup_weather` function if the model requests it by parsing the returned arguments JSON and calling the local Python function.
 3. [`function_calling_extended.py`](./function_calling_extended.py): Shows a full round‑trip: after executing the function, it appends a `tool` role message containing the function result and asks the model again so it can incorporate real data into a final user-facing response.
-4. [`function_calling_multiple.py`](./function_calling_multiple.py): Exposes multiple functions (`lookup_weather`, `lookup_movies`) so you can see how the model chooses among them and how multiple tool calls could be returned.
+4. [`function_calling_errors.py`](./function_calling_errors.py): Same as the extended example but adds robust error handling (malformed JSON arguments, missing tool implementations, execution exceptions, JSON serialization fallback).
+5. [`function_calling_parallel.py`](./function_calling_parallel.py): Demonstrates the model returning multiple tool calls in a single response.
+6. [`function_calling_while_loop.py`](./function_calling_while_loop.py): An iterative conversation loop that keeps executing sequential tool calls (with error handling) until the model produces a final natural language answer.
+7. [`function_calling_fewshots.py`](./function_calling_fewshots.py): Seeds the conversation with few-shot example tool calls and results so the model sees the expected argument format before answering the real question.
 
 You must use a model that supports function calling (such as the defaults `gpt-4o`, `gpt-4o-mini`, etc.). Some local or older models may not support the `tools` parameter.
diff --git a/function_calling_basic.py b/function_calling_basic.py
index 70aeeae..d6da190 100644
--- a/function_calling_basic.py
+++ b/function_calling_basic.py
@@ -1,3 +1,4 @@
+import logging
 import os
 
 import azure.identity
@@ -6,6 +7,7 @@
 
 # Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
 load_dotenv(override=True)
+logging.basicConfig(level=logging.DEBUG)
 API_HOST = os.getenv("API_HOST", "github")
 
 if API_HOST == "azure":
diff --git a/function_calling_errors.py b/function_calling_errors.py
new file mode 100644
index 0000000..6e0e068
--- /dev/null
+++ b/function_calling_errors.py
@@ -0,0 +1,170 @@
+import json
+import os
+from collections.abc import Callable
+from typing import Any
+
+import azure.identity
+import openai
+from dotenv import load_dotenv
+
+# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
+load_dotenv(override=True)
+API_HOST = os.getenv("API_HOST", "github")
+
+if API_HOST == "azure":
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
+    client = openai.OpenAI(
+        base_url=os.environ["AZURE_OPENAI_ENDPOINT"],
+        api_key=token_provider,
+    )
+    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+
+elif API_HOST == "ollama":
+    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+
+elif API_HOST == "github":
+    client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"])
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o")
+
+else:
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]
+
+
+# ---------------------------------------------------------------------------
+# Tool implementation(s)
+# ---------------------------------------------------------------------------
+def search_database(search_query: str, price_filter: dict | None = None) -> list[dict[str, Any]]:
+    """Search database for relevant products based on user query"""
+    if not search_query:
+        raise ValueError("search_query is required")
+    if price_filter:
+        if "comparison_operator" not in price_filter or "value" not in price_filter:
+            raise ValueError("Both comparison_operator and value are required in price_filter")
+        if price_filter["comparison_operator"] not in {">", "<", ">=", "<=", "="}:
+            raise ValueError("Invalid comparison_operator in price_filter")
+        if not isinstance(price_filter["value"], int | float):
+            raise ValueError("Value in price_filter must be a number")
+    return [{"id": "123", "name": "Example Product", "price": 19.99}]
+
+
+tool_mapping: dict[str, Callable[..., Any]] = {
+    "search_database": search_database,
+}
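+
+# The dict above acts as a dispatch table: the model only returns a tool *name*
+# plus JSON arguments, so the script looks the name up here to find the matching
+# local callable. Unknown names are turned into an error string further below.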
'red shoes'", + }, + "price_filter": { + "type": "object", + "description": "Filter search results based on price of the product", + "properties": { + "comparison_operator": { + "type": "string", + "description": "Operator to compare the column value, either '>', '<', '>=', '<=', '='", # noqa + }, + "value": { + "type": "number", + "description": "Value to compare against, e.g. 30", + }, + }, + }, + }, + "required": ["search_query"], + }, + }, + } +] + +messages: list[dict[str, Any]] = [ + {"role": "system", "content": "You are a product search assistant."}, + {"role": "user", "content": "Find me a red shirt under $20."}, +] + +print(f"Model: {MODEL_NAME} on Host: {API_HOST}\n") + +# First model response (may include tool call) +response = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + tool_choice="auto", + parallel_tool_calls=False, +) + +assistant_msg = response.choices[0].message + +# If no tool calls were requested, just print the answer. +if not assistant_msg.tool_calls: + print("Assistant:") + print(assistant_msg.content) +else: + # Append assistant message including tool call metadata + messages.append( + { + "role": "assistant", + "content": assistant_msg.content or "", + "tool_calls": [tc.model_dump() for tc in assistant_msg.tool_calls], + } + ) + + # Process each requested tool sequentially (though usually one here) + for tool_call in assistant_msg.tool_calls: + fn_name = tool_call.function.name + raw_args = tool_call.function.arguments or "{}" + print(f"Tool request: {fn_name}({raw_args})") + + target = tool_mapping.get(fn_name) + if not target: + tool_result: Any = f"ERROR: No implementation registered for tool '{fn_name}'" + else: + # Parse arguments safely + try: + parsed_args = json.loads(raw_args) if raw_args.strip() else {} + except json.JSONDecodeError: + parsed_args = {} + tool_result = "Warning: Malformed JSON arguments received; proceeding with empty args" + else: + try: + tool_result = target(**parsed_args) + except Exception as e: # safeguard tool execution + tool_result = f"Tool execution error in {fn_name}: {e}" + + # Serialize tool output (dict or str) as JSON string for the model + try: + tool_content = json.dumps(tool_result) + except Exception: + # Fallback to string conversion if something isn't JSON serializable + tool_content = json.dumps({"result": str(tool_result)}) + + messages.append( + { + "role": "tool", + "tool_call_id": tool_call.id, + "name": fn_name, + "content": tool_content, + } + ) + + # Follow-up model response after supplying tool outputs + followup = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + ) + final_msg = followup.choices[0].message + print("Assistant (final):") + print(final_msg.content) diff --git a/function_calling_fewshots.py b/function_calling_fewshots.py new file mode 100644 index 0000000..2f54f1d --- /dev/null +++ b/function_calling_fewshots.py @@ -0,0 +1,209 @@ +import json +import os +from collections.abc import Callable +from typing import Any + +import azure.identity +import openai +from dotenv import load_dotenv + +# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API +load_dotenv(override=True) +API_HOST = os.getenv("API_HOST", "github") + +if API_HOST == "azure": + token_provider = azure.identity.get_bearer_token_provider( + azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + client = openai.OpenAI( + base_url=os.environ["AZURE_OPENAI_ENDPOINT"], + 
api_key=token_provider, + ) + MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"] + +elif API_HOST == "ollama": + client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded") + MODEL_NAME = os.environ["OLLAMA_MODEL"] + +elif API_HOST == "github": + client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"]) + MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o") + +else: + client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"]) + MODEL_NAME = os.environ["OPENAI_MODEL"] + + +# --------------------------------------------------------------------------- +# Tool implementation(s) +# --------------------------------------------------------------------------- +def search_database(search_query: str, price_filter: dict | None = None) -> dict[str, str]: + """Search database for relevant products based on user query""" + if not search_query: + raise ValueError("search_query is required") + if price_filter: + if "comparison_operator" not in price_filter or "value" not in price_filter: + raise ValueError("Both comparison_operator and value are required in price_filter") + if price_filter["comparison_operator"] not in {">", "<", ">=", "<=", "="}: + raise ValueError("Invalid comparison_operator in price_filter") + if not isinstance(price_filter["value"], int | float): + raise ValueError("Value in price_filter must be a number") + return [{"id": "123", "name": "Example Product", "price": 19.99}] + + +tool_mapping: dict[str, Callable[..., Any]] = { + "search_database": search_database, +} + +tools = [ + { + "type": "function", + "function": { + "name": "search_database", + "description": "Search database for relevant products based on user query", + "parameters": { + "type": "object", + "properties": { + "search_query": { + "type": "string", + "description": "Query string to use for full text search, e.g. 'red shoes'", + }, + "price_filter": { + "type": "object", + "description": "Filter search results based on price of the product", + "properties": { + "comparison_operator": { + "type": "string", + "description": "Operator to compare the column value, either '>', '<', '>=', '<=', '='", # noqa + }, + "value": { + "type": "number", + "description": "Value to compare against, e.g. 
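+
+# The conversation below is seeded with two fabricated rounds of tool calls and
+# tool results (a few-shot transcript) before the real user question, so the
+# model can imitate the argument shapes. The call_abc123/call_abc456 IDs are
+# synthetic; each one only needs to match between an assistant tool_call and
+# the tool message that answers it.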
30", + }, + }, + }, + }, + "required": ["search_query"], + }, + }, + } +] + +messages: list[dict[str, Any]] = [ + {"role": "system", "content": "You are a product search assistant."}, + {"role": "user", "content": "good options for climbing gear that can be used outside?"}, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": {"name": "search_database", "arguments": '{"search_query":"climbing gear outside"}'}, + } + ], + }, + { + "role": "tool", + "tool_call_id": "call_abc123", + "name": "search_database", + "content": "Search results for climbing gear that can be used outside: ...", + }, + {"role": "user", "content": "are there any shoes less than $50?"}, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_abc456", + "type": "function", + "function": { + "name": "search_database", + "arguments": '{"search_query":"tenis","price_filter":{"comparison_operator":"<","value":50}}', + }, + } + ], + }, + { + "role": "tool", + "tool_call_id": "call_abc456", + "name": "search_database", + "content": "Search results for shoes cheaper than 50: ...", + }, + {"role": "user", "content": "Find me a red shirt under $20."}, +] + +print(f"Model: {MODEL_NAME} on Host: {API_HOST}\n") + +# First model response (may include tool call) +response = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + tool_choice="auto", + parallel_tool_calls=False, +) + +assistant_msg = response.choices[0].message + +# If no tool calls were requested, just print the answer. +if not assistant_msg.tool_calls: + print("Assistant:") + print(assistant_msg.content) +else: + # Append assistant message including tool call metadata + messages.append( + { + "role": "assistant", + "content": assistant_msg.content or "", + "tool_calls": [tc.model_dump() for tc in assistant_msg.tool_calls], + } + ) + + # Process each requested tool sequentially (though usually one here) + for tool_call in assistant_msg.tool_calls: + fn_name = tool_call.function.name + raw_args = tool_call.function.arguments or "{}" + print(f"Tool request: {fn_name}({raw_args})") + + target = tool_mapping.get(fn_name) + if not target: + tool_result: Any = f"ERROR: No implementation registered for tool '{fn_name}'" + else: + # Parse arguments safely + try: + parsed_args = json.loads(raw_args) if raw_args.strip() else {} + except json.JSONDecodeError: + parsed_args = {} + tool_result = "Warning: Malformed JSON arguments received; proceeding with empty args" + else: + try: + tool_result = target(**parsed_args) + except Exception as e: # safeguard tool execution + tool_result = f"Tool execution error in {fn_name}: {e}" + + # Serialize tool output (dict or str) as JSON string for the model + try: + tool_content = json.dumps(tool_result) + except Exception: + # Fallback to string conversion if something isn't JSON serializable + tool_content = json.dumps({"result": str(tool_result)}) + + messages.append( + { + "role": "tool", + "tool_call_id": tool_call.id, + "name": fn_name, + "content": tool_content, + } + ) + + # Follow-up model response after supplying tool outputs + followup = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + ) + final_msg = followup.choices[0].message + print("Assistant (final):") + print(final_msg.content) diff --git a/function_calling_multiple.py b/function_calling_multiple.py deleted file mode 100644 index f2d3003..0000000 --- a/function_calling_multiple.py +++ /dev/null @@ -1,92 
+0,0 @@ -import os - -import azure.identity -import openai -from dotenv import load_dotenv - -# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API -load_dotenv(override=True) -API_HOST = os.getenv("API_HOST", "github") - -if API_HOST == "azure": - token_provider = azure.identity.get_bearer_token_provider( - azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" - ) - client = openai.OpenAI( - base_url=os.environ["AZURE_OPENAI_ENDPOINT"], - api_key=token_provider, - ) - MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"] - -elif API_HOST == "ollama": - client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded") - MODEL_NAME = os.environ["OLLAMA_MODEL"] - -elif API_HOST == "github": - client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"]) - MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o") - -else: - client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"]) - MODEL_NAME = os.environ["OPENAI_MODEL"] - - -tools = [ - { - "type": "function", - "function": { - "name": "lookup_weather", - "description": "Lookup the weather for a given city name or zip code.", - "parameters": { - "type": "object", - "properties": { - "city_name": { - "type": "string", - "description": "The city name", - }, - "zip_code": { - "type": "string", - "description": "The zip code", - }, - }, - "additionalProperties": False, - }, - }, - }, - { - "type": "function", - "function": { - "name": "lookup_movies", - "description": "Lookup movies playing in a given city name or zip code.", - "parameters": { - "type": "object", - "properties": { - "city_name": { - "type": "string", - "description": "The city name", - }, - "zip_code": { - "type": "string", - "description": "The zip code", - }, - }, - "additionalProperties": False, - }, - }, - }, -] - -response = client.chat.completions.create( - model=MODEL_NAME, - messages=[ - {"role": "system", "content": "You are a tourism chatbot."}, - {"role": "user", "content": "is it rainy enough in sydney to watch movies and which ones are on?"}, - ], - tools=tools, - tool_choice="auto", -) - -print(f"Response from {MODEL_NAME} on {API_HOST}: \n") -for message in response.choices[0].message.tool_calls: - print(message.function.name) - print(message.function.arguments) diff --git a/function_calling_parallel.py b/function_calling_parallel.py new file mode 100644 index 0000000..e368bbd --- /dev/null +++ b/function_calling_parallel.py @@ -0,0 +1,160 @@ +import json +import os +from concurrent.futures import ThreadPoolExecutor + +import azure.identity +import openai +from dotenv import load_dotenv + +# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API +load_dotenv(override=True) +API_HOST = os.getenv("API_HOST", "github") + +if API_HOST == "azure": + token_provider = azure.identity.get_bearer_token_provider( + azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + client = openai.OpenAI( + base_url=os.environ["AZURE_OPENAI_ENDPOINT"], + api_key=token_provider, + ) + MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"] + +elif API_HOST == "ollama": + client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded") + MODEL_NAME = os.environ["OLLAMA_MODEL"] + +elif API_HOST == "github": + client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"]) + MODEL_NAME = os.getenv("GITHUB_MODEL", 
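+# Two tools are declared so the model can fan out: weather to decide whether to
+# stay indoors, movies to recommend what to watch.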
"openai/gpt-4o") + +else: + client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"]) + MODEL_NAME = os.environ["OPENAI_MODEL"] + + +tools = [ + { + "type": "function", + "function": { + "name": "lookup_weather", + "description": "Lookup the weather for a given city name or zip code.", + "parameters": { + "type": "object", + "properties": { + "city_name": { + "type": "string", + "description": "The city name", + }, + "zip_code": { + "type": "string", + "description": "The zip code", + }, + }, + "additionalProperties": False, + }, + }, + }, + { + "type": "function", + "function": { + "name": "lookup_movies", + "description": "Lookup movies playing in a given city name or zip code.", + "parameters": { + "type": "object", + "properties": { + "city_name": { + "type": "string", + "description": "The city name", + }, + "zip_code": { + "type": "string", + "description": "The zip code", + }, + }, + "additionalProperties": False, + }, + }, + }, +] + + +# --------------------------------------------------------------------------- +# Tool (function) implementations +# --------------------------------------------------------------------------- +def lookup_weather(city_name: str | None = None, zip_code: str | None = None) -> str: + """Looks up the weather for given city_name and zip_code.""" + location = city_name or zip_code or "unknown" + # In a real implementation, call an external weather API here. + return { + "location": location, + "condition": "rain showers", + "rain_mm_last_24h": 7, + "recommendation": "Good day for indoor activities if you dislike drizzle.", + } + + +def lookup_movies(city_name: str | None = None, zip_code: str | None = None) -> str: + """Returns a list of movies playing in the given location.""" + location = city_name or zip_code or "unknown" + # A real implementation could query a cinema listings API. 
+    return {
+        "location": location,
+        "movies": [
+            {"title": "The Quantum Reef", "rating": "PG-13"},
+            {"title": "Storm Over Harbour Bay", "rating": "PG"},
+            {"title": "Midnight Koala", "rating": "R"},
+        ],
+    }
+
+
+messages = [
+    {"role": "system", "content": "You are a tourism chatbot."},
+    {"role": "user", "content": "is it rainy enough in sydney to watch movies and which ones are on?"},
+]
+response = client.chat.completions.create(
+    model=MODEL_NAME,
+    messages=messages,
+    tools=tools,
+    tool_choice="auto",
+)
+
+print(f"Response from {MODEL_NAME} on {API_HOST}: \n")
+
+# Map function names to actual functions
+available_functions = {
+    "lookup_weather": lookup_weather,
+    "lookup_movies": lookup_movies,
+}
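+
+# Every tool_call_id in the assistant message must receive a matching "tool"
+# message before the follow-up request, or the API rejects the conversation;
+# here every requested name is expected to be present in available_functions.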
"description": "The city name", + }, + "zip_code": { + "type": "string", + "description": "The zip code", + }, + }, + "additionalProperties": False, + }, + }, + }, + { + "type": "function", + "function": { + "name": "lookup_movies", + "description": "Lookup movies playing in a given city name or zip code.", + "parameters": { + "type": "object", + "properties": { + "city_name": { + "type": "string", + "description": "The city name", + }, + "zip_code": { + "type": "string", + "description": "The zip code", + }, + }, + "additionalProperties": False, + }, + }, + }, +] + + +# --------------------------------------------------------------------------- +# Tool (function) implementations +# --------------------------------------------------------------------------- +def lookup_weather(city_name: str | None = None, zip_code: str | None = None) -> str: + """Looks up the weather for given city_name and zip_code.""" + location = city_name or zip_code or "unknown" + # In a real implementation, call an external weather API here. + return { + "location": location, + "condition": "rain showers", + "rain_mm_last_24h": 7, + "recommendation": "Good day for indoor activities if you dislike drizzle.", + } + + +def lookup_movies(city_name: str | None = None, zip_code: str | None = None) -> str: + """Returns a list of movies playing in the given location.""" + location = city_name or zip_code or "unknown" + # A real implementation could query a cinema listings API. + return { + "location": location, + "movies": [ + {"title": "The Quantum Reef", "rating": "PG-13"}, + {"title": "Storm Over Harbour Bay", "rating": "PG"}, + {"title": "Midnight Koala", "rating": "R"}, + ], + } + + +tool_mapping = { + "lookup_weather": lookup_weather, + "lookup_movies": lookup_movies, +} + + +# --------------------------------------------------------------------------- +# Conversation loop +# --------------------------------------------------------------------------- +messages = [ + {"role": "system", "content": "You are a tourism chatbot."}, + {"role": "user", "content": "Is it rainy enough in Sydney to watch movies and which ones are on?"}, +] + +print(f"Model: {MODEL_NAME} on Host: {API_HOST}\n") + +while True: + print("Calling model...\n") + response = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, # includes prior tool outputs + tools=tools, + tool_choice="auto", + parallel_tool_calls=False, # ensure sequential tool calls + ) + + assistant_message = response.choices[0].message + # If the assistant returned standard content with no tool calls, we're done. + if not assistant_message.tool_calls: + print("Assistant:") + print(assistant_message.content) + break + + # Append the assistant tool request message to conversation + messages.append( + { + "role": "assistant", + "content": assistant_message.content or "", + "tool_calls": [tc.model_dump() for tc in assistant_message.tool_calls], + } + ) + + # Execute each requested tool sequentially. 
+    for tool_call in assistant_message.tool_calls:
+        fn_name = tool_call.function.name
+        raw_args = tool_call.function.arguments or "{}"
+        print(f"Tool request: {fn_name}({raw_args})")
+        target_tool = tool_mapping.get(fn_name)
+        if not target_tool:
+            tool_result = f"ERROR: No implementation registered for tool '{fn_name}'"
+        else:
+            try:
+                parsed_args = json.loads(raw_args) if raw_args.strip() else {}
+                tool_result = target_tool(**parsed_args)
+            except json.JSONDecodeError:
+                tool_result = f"ERROR: Malformed JSON arguments for {fn_name}; tool was not executed"
+            except Exception as e:  # safeguard tool execution
+                tool_result = f"Tool execution error in {fn_name}: {e}"
+        tool_result_str = json.dumps(tool_result)
+        # Provide the tool output back to the model
+        messages.append(
+            {
+                "role": "tool",
+                "tool_call_id": tool_call.id,
+                "name": fn_name,
+                "content": tool_result_str,
+            }
+        )
diff --git a/spanish/README.md b/spanish/README.md
index 5495ed2..7f7904b 100644
--- a/spanish/README.md
+++ b/spanish/README.md
@@ -20,16 +20,16 @@ Este repositorio contiene una colección de scripts en Python que demuestran có
 
 ### Completados de chat de OpenAI
 
 Estos scripts usan el paquete `openai` de Python para demostrar cómo utilizar la API de Chat Completions. En orden creciente de complejidad:
-
-1. [`chat.py`](../chat.py): Script simple que muestra cómo generar un completado de chat.
-2. [`chat_stream.py`](../chat_stream.py): Añade `stream=True` para recibir el completado progresivamente.
-3. [`chat_history.py`](../chat_history.py): Añade un chat bidireccional que conserva el historial y lo reenvía en cada llamada.
-4. [`chat_history_stream.py`](../chat_history_stream.py): Igual que el anterior pero además con `stream=True`.
+1. [`chat.py`](chat.py): Script simple que muestra cómo generar un completado de chat.
+2. [`chat_stream.py`](chat_stream.py): Añade `stream=True` para recibir el completado progresivamente.
+3. [`chat_history.py`](chat_history.py): Añade un chat bidireccional que conserva el historial y lo reenvía en cada llamada.
+4. [`chat_history_stream.py`](chat_history_stream.py): Igual que el anterior pero además con `stream=True`.
 
 Scripts adicionales de características:
 
-* [`chat_safety.py`](../chat_safety.py): Manejo de excepciones para filtros de seguridad de contenido (Azure AI Content Safety).
-* [`chat_async.py`](../chat_async.py): Uso de clientes asíncronos y envío concurrente de múltiples solicitudes con `asyncio.gather`.
+* [`chat_safety.py`](chat_safety.py): Manejo de excepciones para filtros de seguridad de contenido (Azure AI Content Safety).
+* [`chat_async.py`](chat_async.py): Uso de clientes asíncronos y envío concurrente de múltiples solicitudes con `asyncio.gather`.
+* [`few_shot_examples.py`](few_shot_examples.py): Demuestra patrones de few‑shot (proporcionar ejemplos en el prompt) para guiar respuestas del modelo.
 
 ### Llamadas a funciones (Function calling)
@@ -39,10 +39,13 @@ En todos los ejemplos se declara una lista de funciones en el parámetro `tools`
 
 Scripts (en orden de capacidad):
 
-1. [`function_calling_basic.py`](../function_calling_basic.py): Declara una sola función `lookup_weather` y muestra la llamada (si existe) o el contenido normal.
-2. [`function_calling_call.py`](../function_calling_call.py): Ejecuta `lookup_weather` si el modelo la solicita, parseando los argumentos JSON.
-3. [`function_calling_extended.py`](../function_calling_extended.py): Hace el ciclo completo: tras ejecutar la función, añade un mensaje de rol `tool` con el resultado y vuelve a consultar al modelo para incorporar los datos reales.
-4. [`function_calling_multiple.py`](../function_calling_multiple.py): Expone múltiples funciones (`lookup_weather`, `lookup_movies`) para observar cómo el modelo elige y cómo podrían devolverse múltiples llamadas.
+1. [`function_calling_basic.py`](function_calling_basic.py): Declara una sola función `lookup_weather` y muestra la llamada (si existe) o el contenido normal (no ejecuta la función).
+2. [`function_calling_call.py`](function_calling_call.py): Ejecuta `lookup_weather` si el modelo la solicita, parseando los argumentos JSON.
+3. [`function_calling_extended.py`](function_calling_extended.py): Hace el ciclo completo: tras ejecutar la función, añade un mensaje de rol `tool` con el resultado y vuelve a consultar al modelo para incorporar los datos reales.
+4. [`function_calling_errors.py`](function_calling_errors.py): Igual que el ejemplo extendido pero con manejo robusto de errores (JSON malformado, herramienta inexistente, excepciones de ejecución, serialización JSON de respaldo).
+5. [`function_calling_parallel.py`](function_calling_parallel.py): Demuestra el modelo devolviendo múltiples llamadas a herramientas en una sola respuesta.
+6. [`function_calling_while_loop.py`](function_calling_while_loop.py): Bucle conversacional iterativo que sigue ejecutando llamadas secuenciales (con manejo de errores) hasta que el modelo da una respuesta final en lenguaje natural.
+7. [`function_calling_fewshots.py`](function_calling_fewshots.py): Combina function calling con ejemplos few‑shot para reforzar esquemas y estilos de respuesta.
 
 Debe usarse un modelo que soporte function calling (por ejemplo, `gpt-4o`, `gpt-4o-mini`, etc.). Algunos modelos locales o antiguos no soportan `tools`.
@@ -58,22 +61,31 @@ python -m pip install -r requirements-rag.txt
 
 Luego ejecuta (en orden de complejidad):
 
-* [`rag_csv.py`](../rag_csv.py): Recupera filas coincidentes de un CSV y las usa para responder.
-* [`rag_multiturn.py`](../rag_multiturn.py): Igual, pero con chat multi‑turno y preservación de historial.
-* [`rag_queryrewrite.py`](../rag_queryrewrite.py): Añade reescritura de la consulta del usuario para mejorar la recuperación.
-* [`rag_documents_ingestion.py`](../rag_documents_ingestion.py): Ingesta de PDFs: convierte a Markdown (pymupdf), divide en fragmentos (LangChain), genera embeddings (OpenAI) y guarda en un JSON local.
-* [`rag_documents_flow.py`](../rag_documents_flow.py): Flujo RAG que consulta el JSON creado anteriormente.
-* [`rag_documents_hybrid.py`](../rag_documents_hybrid.py): Recuperación híbrida (vector + keywords), fusión con RRF y re‑ranking semántico con un modelo cross‑encoder.
+* [`rag_csv.py`](rag_csv.py): Recupera filas coincidentes de un CSV y las usa para responder.
+* [`rag_multiturn.py`](rag_multiturn.py): Igual, pero con chat multi‑turno y preservación de historial.
+* [`rag_queryrewrite.py`](rag_queryrewrite.py): Añade reescritura de la consulta del usuario para mejorar la recuperación.
+* [`rag_documents_ingestion.py`](rag_documents_ingestion.py): Ingesta de PDFs: convierte a Markdown (pymupdf), divide en fragmentos (LangChain), genera embeddings (OpenAI) y guarda en un JSON local.
+* [`rag_documents_flow.py`](rag_documents_flow.py): Flujo RAG que consulta el JSON creado anteriormente.
+* [`rag_documents_hybrid.py`](rag_documents_hybrid.py): Recuperación híbrida (vector + keywords), fusión con RRF y re‑ranking semántico con un modelo cross‑encoder.
+* [`retrieval_augmented_generation.py`](retrieval_augmented_generation.py): Variante alternativa de RAG con un flujo simplificado de recuperación + generación.
 
 ### Salidas estructuradas
 
 Estos scripts muestran cómo generar respuestas estructuradas usando modelos Pydantic:
 
-* [`structured_outputs_basic.py`](../structured_outputs_basic.py): Extrae información simple de un evento.
-* [`structured_outputs_description.py`](../structured_outputs_description.py): Añade descripciones en campos para guiar el formato.
-* [`structured_outputs_enum.py`](../structured_outputs_enum.py): Usa enumeraciones para restringir valores.
-* [`structured_outputs_function_calling.py`](../structured_outputs_function_calling.py): Usa funciones definidas con Pydantic para llamadas automáticas.
-* [`structured_outputs_nested.py`](../structured_outputs_nested.py): Modelos anidados para estructuras más complejas (por ejemplo, eventos con participantes detallados).
+* [`structured_outputs_basic.py`](structured_outputs_basic.py): Extrae información simple de un evento.
+* [`structured_outputs_description.py`](structured_outputs_description.py): Añade descripciones en campos para guiar el formato.
+* [`structured_outputs_enum.py`](structured_outputs_enum.py): Usa enumeraciones para restringir valores.
+* [`structured_outputs_function_calling.py`](structured_outputs_function_calling.py): Usa funciones definidas con Pydantic para llamadas automáticas.
+* [`structured_outputs_nested.py`](structured_outputs_nested.py): Modelos anidados para estructuras más complejas (por ejemplo, eventos con participantes detallados).
+
+### Ingeniería de prompts y otros
+
+Scripts adicionales fuera de las categorías anteriores:
+
+* [`prompt_engineering.py`](prompt_engineering.py): Técnicas de ingeniería de prompts (roles, instrucciones, delimitadores, control de formato).
+* [`chained_calls.py`](chained_calls.py): Llamadas encadenadas; la salida de una respuesta alimenta la siguiente (pipeline de pasos).
+* [`retrieval_augmented_generation.py`](retrieval_augmented_generation.py): (Listado también en RAG) Alternativa minimalista de flujo RAG.
 
 ## Configuración del entorno de Python
diff --git a/spanish/function_calling_basic.py b/spanish/function_calling_basic.py
index 3363ef8..5269c1a 100644
--- a/spanish/function_calling_basic.py
+++ b/spanish/function_calling_basic.py
@@ -36,7 +36,7 @@
         "type": "function",
         "function": {
             "name": "lookup_weather",
-            "description": "Buscar el clima para un nombre de ciudad o código postal dado.",
+            "description": "Busca el clima para un nombre de ciudad o código postal dado.",
             "parameters": {
                 "type": "object",
                 "properties": {
diff --git a/spanish/function_calling_call.py b/spanish/function_calling_call.py
index 2bc73ac..f0ae902 100644
--- a/spanish/function_calling_call.py
+++ b/spanish/function_calling_call.py
@@ -33,7 +33,7 @@
 
 
 def lookup_weather(city_name=None, zip_code=None):
-    """Buscar el clima para un nombre de ciudad o código postal dado."""
+    """Busca el clima para un nombre de ciudad o código postal dado."""
     print(f"Buscando el clima para {city_name or zip_code}...")
     return "¡Está soleado!"
@@ -43,7 +43,7 @@ def lookup_weather(city_name=None, zip_code=None):
         "type": "function",
         "function": {
             "name": "lookup_weather",
-            "description": "Buscar el clima para un nombre de ciudad o código postal dado.",
+            "description": "Busca el clima para un nombre de ciudad o código postal dado.",
             "parameters": {
                 "type": "object",
                 "properties": {
@@ -67,7 +67,7 @@ def lookup_weather(city_name=None, zip_code=None):
     model=MODEL_NAME,
     messages=[
         {"role": "system", "content": "Eres un chatbot del clima."},
-        {"role": "user", "content": "¿está soleado en esa pequeña ciudad cerca de Sydney donde vive Anthony?"},
+        {"role": "user", "content": "¿está soleado en Berkeley, California?"},
     ],
     tools=tools,
     tool_choice="auto",
@@ -81,3 +81,5 @@ def lookup_weather(city_name=None, zip_code=None):
         arguments = json.loads(tool_call.function.arguments)
         if function_name == "lookup_weather":
             lookup_weather(**arguments)
+else:
+    print(response.choices[0].message.content)
diff --git a/spanish/function_calling_errors.py b/spanish/function_calling_errors.py
new file mode 100644
index 0000000..62b9a71
--- /dev/null
+++ b/spanish/function_calling_errors.py
@@ -0,0 +1,178 @@
+import json
+import os
+from collections.abc import Callable
+from typing import Any
+
+import azure.identity
+import openai
+from dotenv import load_dotenv
+
+# Configura el cliente de OpenAI para usar Azure, OpenAI.com, Ollama o GitHub Models (según variables de entorno)
+load_dotenv(override=True)
+API_HOST = os.getenv("API_HOST", "github")
+
+if API_HOST == "azure":
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
+    client = openai.OpenAI(
+        base_url=os.environ["AZURE_OPENAI_ENDPOINT"],
+        api_key=token_provider,
+    )
+    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+
+elif API_HOST == "ollama":
+    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+
+elif API_HOST == "github":
+    client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"])
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o")
+
+else:
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]
+
+
+# ---------------------------------------------------------------------------
+# Implementación de la tool(s)
+# ---------------------------------------------------------------------------
+def search_database(search_query: str, price_filter: dict | None = None) -> list[dict[str, Any]]:
+    """Busca productos relevantes en la base de datos usando el query del usuario.
+
+    search_query: texto que quieres buscar (por ejemplo "playera roja").
+    price_filter: objeto opcional con filtros de precio. Debe incluir:
+        - comparison_operator: uno de ">", "<", ">=", "<=", "="
+        - value: número límite para comparar.
+
+    Regresa una lista con productos dummy (ejemplo) para mostrar el flujo de function calling.
+    """
+    if not search_query:
+        raise ValueError("search_query es requerido")
+    if price_filter:
+        if "comparison_operator" not in price_filter or "value" not in price_filter:
+            raise ValueError("Se requieren comparison_operator y value en price_filter")
+        if price_filter["comparison_operator"] not in {">", "<", ">=", "<=", "="}:
+            raise ValueError("comparison_operator inválido en price_filter")
+        if not isinstance(price_filter["value"], int | float):
+            raise ValueError("value en price_filter debe ser numérico")
+    return [{"id": "123", "name": "Producto Ejemplo", "price": 19.99}]
+
+
+tool_mapping: dict[str, Callable[..., Any]] = {
+    "search_database": search_database,
+}
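+
+# El dict anterior funciona como tabla de despacho: el modelo solo devuelve el
+# *nombre* de la tool más argumentos JSON, y aquí se busca la función local
+# correspondiente; los nombres desconocidos se reportan como error más abajo.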
+ """ + if not search_query: + raise ValueError("search_query es requerido") + if price_filter: + if "comparison_operator" not in price_filter or "value" not in price_filter: + raise ValueError("Se requieren comparison_operator y value en price_filter") + if price_filter["comparison_operator"] not in {">", "<", ">=", "<=", "="}: + raise ValueError("comparison_operator inválido en price_filter") + if not isinstance(price_filter["value"], int | float): + raise ValueError("value en price_filter debe ser numérico") + return [{"id": "123", "name": "Producto Ejemplo", "price": 19.99}] + + +tool_mapping: dict[str, Callable[..., Any]] = { + "search_database": search_database, +} + +tools = [ + { + "type": "function", + "function": { + "name": "search_database", + "description": "Busca en la base de datos productos relevantes según el query del usuario", + "parameters": { + "type": "object", + "properties": { + "search_query": { + "type": "string", + "description": "Texto (query) para búsqueda full text, ej: 'tenis rojos'", + }, + "price_filter": { + "type": "object", + "description": "Filtra resultados según el precio del producto", + "properties": { + "comparison_operator": { + "type": "string", + "description": "Operador para comparar el valor de la columna: '>', '<', '>=', '<=', '='", # noqa + }, + "value": { + "type": "number", + "description": "Valor límite para comparar, ej: 30", + }, + }, + }, + }, + "required": ["search_query"], + }, + }, + } +] + +messages: list[dict[str, Any]] = [ + {"role": "system", "content": "Eres un assistant que ayuda a buscar productos."}, + {"role": "user", "content": "Búscame una camiseta roja que cueste menos de $20."}, +] + +print(f"Modelo: {MODEL_NAME} en Host: {API_HOST}\n") + +# Primera respuesta del model (puede incluir una tool call) +response = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + tool_choice="auto", + parallel_tool_calls=False, +) + +assistant_msg = response.choices[0].message + +# Si el model no pidió ninguna tool call, solo imprime la respuesta. 
+if not assistant_msg.tool_calls: + print("Assistant:") + print(assistant_msg.content) +else: + # Agrega el mensaje del assistant incluyendo metadata de la tool call + messages.append( + { + "role": "assistant", + "content": assistant_msg.content or "", + "tool_calls": [tc.model_dump() for tc in assistant_msg.tool_calls], + } + ) + + # Procesa cada tool pedida de forma secuencial (normalmente solo una aquí) + for tool_call in assistant_msg.tool_calls: + fn_name = tool_call.function.name + raw_args = tool_call.function.arguments or "{}" + print(f"Tool request: {fn_name}({raw_args})") + + target = tool_mapping.get(fn_name) + if not target: + tool_result: Any = f"ERROR: No hay implementación registrada para la tool '{fn_name}'" + else: + # Parseo seguro de argumentos JSON + try: + parsed_args = json.loads(raw_args) if raw_args.strip() else {} + except json.JSONDecodeError: + parsed_args = {} + tool_result = "Warning: JSON arguments malformados; sigo con args vacíos" + else: + try: + tool_result = target(**parsed_args) + except Exception as e: # safeguard tool execution + tool_result = f"Error ejecutando la tool {fn_name}: {e}" + + # Serializa el output de la tool (dict o str) como JSON string para el model + try: + tool_content = json.dumps(tool_result) + except Exception: + # Fallback a string si algo no es serializable a JSON + tool_content = json.dumps({"result": str(tool_result)}) + + messages.append( + { + "role": "tool", + "tool_call_id": tool_call.id, + "name": fn_name, + "content": tool_content, + } + ) + + # Segunda respuesta del model después de darle los tool outputs + followup = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + ) + final_msg = followup.choices[0].message + print("Assistant (final):") + print(final_msg.content) diff --git a/spanish/function_calling_fewshots.py b/spanish/function_calling_fewshots.py new file mode 100644 index 0000000..09edd7c --- /dev/null +++ b/spanish/function_calling_fewshots.py @@ -0,0 +1,217 @@ +import json +import os +from collections.abc import Callable +from typing import Any + +import azure.identity +import openai +from dotenv import load_dotenv + +# Setup del cliente OpenAI para usar Azure, OpenAI.com, Ollama o GitHub Models (según vars de entorno) +load_dotenv(override=True) +API_HOST = os.getenv("API_HOST", "github") + +if API_HOST == "azure": + token_provider = azure.identity.get_bearer_token_provider( + azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + client = openai.OpenAI( + base_url=os.environ["AZURE_OPENAI_ENDPOINT"], + api_key=token_provider, + ) + MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"] + +elif API_HOST == "ollama": + client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded") + MODEL_NAME = os.environ["OLLAMA_MODEL"] + +elif API_HOST == "github": + client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"]) + MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o") + +else: + client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"]) + MODEL_NAME = os.environ["OPENAI_MODEL"] + + +# --------------------------------------------------------------------------- +# Implementación de la tool(s) +# --------------------------------------------------------------------------- +def search_database(search_query: str, price_filter: dict | None = None) -> dict[str, str]: + """Busca productos relevantes en la base de datos según el query del usuario. 
+
+    search_query: texto para buscar (ej: "equipo escalada" o "tenis rojos").
+    price_filter: objeto opcional con:
+        - comparison_operator: uno de ">", "<", ">=", "<=", "="
+        - value: número límite.
+
+    Retorna lista dummy para mostrar el flujo de function calling.
+    """
+    if not search_query:
+        raise ValueError("search_query es requerido")
+    if price_filter:
+        if "comparison_operator" not in price_filter or "value" not in price_filter:
+            raise ValueError("Se requieren comparison_operator y value en price_filter")
+        if price_filter["comparison_operator"] not in {">", "<", ">=", "<=", "="}:
+            raise ValueError("comparison_operator inválido en price_filter")
+        if not isinstance(price_filter["value"], int | float):
+            raise ValueError("value en price_filter debe ser numérico")
+    return [{"id": "123", "name": "Producto Ejemplo", "price": 19.99}]
+
+
+tool_mapping: dict[str, Callable[..., Any]] = {
+    "search_database": search_database,
+}
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "search_database",
+            "description": "Busca productos relevantes según el query del usuario",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "search_query": {
+                        "type": "string",
+                        "description": "Texto (query) para búsqueda full text, ej: 'tenis rojos'",
+                    },
+                    "price_filter": {
+                        "type": "object",
+                        "description": "Filtra resultados según el precio del producto",
+                        "properties": {
+                            "comparison_operator": {
+                                "type": "string",
+                                "description": "Operador para comparar el valor de la columna: '>', '<', '>=', '<=', '='",  # noqa
+                            },
+                            "value": {
+                                "type": "number",
+                                "description": "Valor límite para comparar, ej: 30",
+                            },
+                        },
+                    },
+                },
+                "required": ["search_query"],
+            },
+        },
+    }
+]
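+
+# Los mensajes siguientes siembran la conversación con dos rondas fabricadas de
+# tool calls y resultados (una transcripción few-shot) antes de la pregunta real,
+# para que el modelo imite la forma de los argumentos. Los IDs call_abc123 y
+# call_abc456 son sintéticos; solo deben coincidir entre cada tool_call y su
+# mensaje de resultado.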
+messages: list[dict[str, Any]] = [
+    {"role": "system", "content": "Eres un asistente que ayuda a buscar productos."},
+    {"role": "user", "content": "¿Buenas opciones de equipo de escalada para usar afuera?"},
+    {
+        "role": "assistant",
+        "content": "",
+        "tool_calls": [
+            {
+                "id": "call_abc123",
+                "type": "function",
+                "function": {"name": "search_database", "arguments": '{"search_query":"equipo escalada exterior"}'},
+            }
+        ],
+    },
+    {
+        "role": "tool",
+        "tool_call_id": "call_abc123",
+        "name": "search_database",
+        "content": json.dumps({"result": "Resultados de búsqueda para equipo de escalada exterior: ..."}),
+    },
+    {"role": "user", "content": "¿Hay tenis por menos de $50?"},
+    {
+        "role": "assistant",
+        "content": "",
+        "tool_calls": [
+            {
+                "id": "call_abc456",
+                "type": "function",
+                "function": {
+                    "name": "search_database",
+                    "arguments": '{"search_query":"tenis","price_filter":{"comparison_operator":"<","value":50}}',
+                },
+            }
+        ],
+    },
+    {
+        "role": "tool",
+        "tool_call_id": "call_abc456",
+        "name": "search_database",
+        "content": json.dumps({"result": "Resultados de búsqueda para tenis más baratos que 50: ..."}),
+    },
+    {"role": "user", "content": "Búscame una camiseta roja por menos de $20."},
+]
+
+print(f"Modelo: {MODEL_NAME} en Host: {API_HOST}\n")
+
+# Primera respuesta del modelo (puede incluir tool call)
+response = client.chat.completions.create(
+    model=MODEL_NAME,
+    messages=messages,
+    tools=tools,
+    tool_choice="auto",
+    parallel_tool_calls=False,
+)
+
+assistant_msg = response.choices[0].message
+
+# Si el modelo no pidió ninguna tool call, imprime la respuesta.
+if not assistant_msg.tool_calls:
+    print("Asistente:")
+    print(assistant_msg.content)
+else:
+    # Agrega el mensaje del assistant con metadata de las tool calls
+    messages.append(
+        {
+            "role": "assistant",
+            "content": assistant_msg.content or "",
+            "tool_calls": [tc.model_dump() for tc in assistant_msg.tool_calls],
+        }
+    )
+
+    # Procesa cada tool pedida de forma secuencial (normalmente solo una aquí)
+    for tool_call in assistant_msg.tool_calls:
+        fn_name = tool_call.function.name
+        raw_args = tool_call.function.arguments or "{}"
+        print(f"Tool request: {fn_name}({raw_args})")
+
+        target = tool_mapping.get(fn_name)
+        if not target:
+            tool_result: Any = f"ERROR: No hay implementación registrada para la tool '{fn_name}'"
+        else:
+            # Parseo seguro de argumentos JSON
+            try:
+                parsed_args = json.loads(raw_args) if raw_args.strip() else {}
+            except json.JSONDecodeError:
+                tool_result = "Warning: JSON arguments malformados; la tool no se ejecutó"
+            else:
+                try:
+                    tool_result = target(**parsed_args)
+                except Exception as e:  # protege la ejecución de la tool
+                    tool_result = f"Error ejecutando la tool {fn_name}: {e}"
+
+        # Serializa el output de la tool (dict o str) como JSON string para el modelo
+        try:
+            tool_content = json.dumps(tool_result)
+        except Exception:
+            # Fallback a string si no se puede serializar a JSON
+            tool_content = json.dumps({"result": str(tool_result)})
+
+        messages.append(
+            {
+                "role": "tool",
+                "tool_call_id": tool_call.id,
+                "name": fn_name,
+                "content": tool_content,
+            }
+        )
+
+    # Segunda respuesta del modelo después de dar los tool outputs
+    followup = client.chat.completions.create(
+        model=MODEL_NAME,
+        messages=messages,
+        tools=tools,
+    )
+    final_msg = followup.choices[0].message
+    print("Asistente (final):")
+    print(final_msg.content)
diff --git a/spanish/function_calling_multiple.py b/spanish/function_calling_multiple.py
deleted file mode 100644
index 91d2002..0000000
--- a/spanish/function_calling_multiple.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-
-import azure.identity
-import openai
-from dotenv import load_dotenv
-
-# Configura el cliente de OpenAI para usar la API de Azure, OpenAI.com u Ollama
-load_dotenv(override=True)
-API_HOST = os.getenv("API_HOST", "github")
-
-if API_HOST == "azure":
-    token_provider = azure.identity.get_bearer_token_provider(
-        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
-    )
-    client = openai.OpenAI(
-        base_url=os.environ["AZURE_OPENAI_ENDPOINT"],
-        api_key=token_provider,
-    )
-    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
-
-elif API_HOST == "ollama":
-    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
-    MODEL_NAME = os.environ["OLLAMA_MODEL"]
-
-elif API_HOST == "github":
-    client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o")
-
-else:
-    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
-    MODEL_NAME = os.environ["OPENAI_MODEL"]
-
-
-tools = [
-    {
-        "type": "function",
-        "function": {
-            "name": "lookup_weather",
-            "description": "Busca el clima según nombre de ciudad o código postal.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "city_name": {
-                        "type": "string",
-                        "description": "El nombre de la ciudad",
-                    },
-                    "zip_code": {
-                        "type": "string",
-                        "description": "El código postal",
-                    },
-                },
-                "additionalProperties": False,
-            },
-        },
-    },
-    {
-        "type": "function",
-        "function": {
-            "name": "lookup_movies",
-            "description": "Buscar películas en cines según nombre de ciudad o código postal.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "city_name": {
-                        "type": "string",
-                        "description": "El nombre de la ciudad",
-                    },
-                    "zip_code": {
-                        "type": "string",
-                        "description": "El código postal",
-                    },
-                },
-                "additionalProperties": False,
-            },
-        },
-    },
-]
-
-response = client.chat.completions.create(
-    model=MODEL_NAME,
-    messages=[
-        {"role": "system", "content": "Eres un chatbot de turismo."},
-        {
-            "role": "user",
-            "content": "¿Está lloviendo lo suficiente en Sídney como para ver películas y cuáles estan en los cines?",
-        },
-    ],
-    tools=tools,
-    tool_choice="auto",
-)
-
-print(f"Respuesta de {API_HOST}: \n")
-for message in response.choices[0].message.tool_calls:
-    print(message.function.name)
-    print(message.function.arguments)
diff --git a/spanish/function_calling_parallel.py b/spanish/function_calling_parallel.py
new file mode 100644
index 0000000..ba5c9fa
--- /dev/null
+++ b/spanish/function_calling_parallel.py
@@ -0,0 +1,163 @@
+import json
+import os
+from concurrent.futures import ThreadPoolExecutor
+
+import azure.identity
+import openai
+from dotenv import load_dotenv
+
+# Configura el cliente de OpenAI para usar la API de Azure, OpenAI.com u Ollama
+load_dotenv(override=True)
+API_HOST = os.getenv("API_HOST", "github")
+
+if API_HOST == "azure":
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
+    client = openai.OpenAI(
+        base_url=os.environ["AZURE_OPENAI_ENDPOINT"],
+        api_key=token_provider,
+    )
+    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+
+elif API_HOST == "ollama":
+    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+
+elif API_HOST == "github":
+    client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"])
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o")
+
+else:
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]
+
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "lookup_weather",
+            "description": "Busca el clima según nombre de ciudad o código postal.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "city_name": {
+                        "type": "string",
+                        "description": "El nombre de la ciudad",
+                    },
+                    "zip_code": {
+                        "type": "string",
+                        "description": "El código postal",
+                    },
+                },
+                "additionalProperties": False,
+            },
+        },
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "lookup_movies",
+            "description": "Busca películas en cines según nombre de ciudad o código postal.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "city_name": {
+                        "type": "string",
+                        "description": "El nombre de la ciudad",
+                    },
+                    "zip_code": {
+                        "type": "string",
+                        "description": "El código postal",
+                    },
+                },
+                "additionalProperties": False,
+            },
+        },
+    },
+]
+
+
+# ---------------------------------------------------------------------------
+# Implementaciones de herramientas (funciones)
+# ---------------------------------------------------------------------------
+def lookup_weather(city_name: str | None = None, zip_code: str | None = None) -> dict:
+    """Busca el clima para la ciudad o el código postal dados."""
+    location = city_name or zip_code or "unknown"
+    # En una implementación real, aquí se llamaría a una API externa de clima.
+    return {
+        "location": location,
+        "condition": "rain showers",
+        "rain_mm_last_24h": 7,
+        "recommendation": "Good day for indoor activities if you dislike drizzle.",
+    }
+
+
+def lookup_movies(city_name: str | None = None, zip_code: str | None = None) -> dict[str, Any]:
+    """Returns a list of movies playing in the given location."""
+    location = city_name or zip_code or "unknown"
+    # A real implementation could query a cinema listings API.
+    return {
+        "location": location,
+        "movies": [
+            {"title": "The Quantum Reef", "rating": "PG-13"},
+            {"title": "Storm Over Harbour Bay", "rating": "PG"},
+            {"title": "Midnight Koala", "rating": "R"},
+        ],
+    }
+
+
+messages = [
+    {"role": "system", "content": "Eres un chatbot de turismo."},
+    {
+        "role": "user",
+        "content": "¿Está lloviendo lo suficiente en Sídney como para ver películas y cuáles están en los cines?",
+    },
+]
+response = client.chat.completions.create(
+    model=MODEL_NAME,
+    messages=messages,
+    tools=tools,
+    tool_choice="auto",
+)
+
+print(f"Respuesta de {MODEL_NAME} en {API_HOST}: \n")
+
+# Map function names to actual functions
+available_functions = {
+    "lookup_weather": lookup_weather,
+    "lookup_movies": lookup_movies,
+}
+
+# Execute all tool calls in parallel using ThreadPoolExecutor
+if response.choices[0].message.tool_calls:
+    tool_calls = response.choices[0].message.tool_calls
+    print(f"El modelo solicitó {len(tool_calls)} llamada(s) de herramienta:\n")
+
+    # Add the assistant's message (with tool calls) to the conversation
+    messages.append(response.choices[0].message)
+
+    with ThreadPoolExecutor() as executor:
+        # Submit all tool calls to the thread pool
+        futures = []
+        for tool_call in tool_calls:
+            function_name = tool_call.function.name
+            arguments = json.loads(tool_call.function.arguments)
+            print(f"Solicitud de herramienta: {function_name}({arguments})")
+
+            if function_name in available_functions:
+                future = executor.submit(available_functions[function_name], **arguments)
+                futures.append((tool_call, function_name, future))
+            else:
+                # Answer unknown tool requests with an error so every tool_call_id gets a response
+                error_content = json.dumps({"error": f"No implementation registered for tool '{function_name}'"})
+                messages.append({"role": "tool", "tool_call_id": tool_call.id, "content": error_content})
+
+        # Add each tool result to the conversation
+        for tool_call, function_name, future in futures:
+            result = future.result()
+            messages.append({"role": "tool", "tool_call_id": tool_call.id, "content": json.dumps(result)})
+
+    # Get final response from the model with all tool results
+    final_response = client.chat.completions.create(model=MODEL_NAME, messages=messages, tools=tools)
+    print("Asistente:")
+    print(final_response.choices[0].message.content)
else:
+    print(response.choices[0].message.content)
diff --git a/spanish/function_calling_while_loop.py b/spanish/function_calling_while_loop.py
new file mode 100644
index 0000000..1c2dd4e
--- /dev/null
+++ b/spanish/function_calling_while_loop.py
@@ -0,0 +1,153 @@
+import json
+import os
+from collections.abc import Callable
+from typing import Any
+
+import azure.identity
+import openai
+from dotenv import load_dotenv
+
+# Configura el cliente de OpenAI para usar Azure, OpenAI.com, GitHub Models u Ollama
+load_dotenv(override=True)
+API_HOST = os.getenv("API_HOST", "github")
+
+if API_HOST == "azure":
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
+    client = openai.OpenAI(
+        base_url=os.environ["AZURE_OPENAI_ENDPOINT"],
+        api_key=token_provider,
+    )
+    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "ollama":
+    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
+    MODEL_NAME = 
os.environ["OLLAMA_MODEL"] +elif API_HOST == "github": + client = openai.OpenAI(base_url="https://models.github.ai/inference", api_key=os.environ["GITHUB_TOKEN"]) + MODEL_NAME = os.getenv("GITHUB_MODEL", "openai/gpt-4o") +else: + client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"]) + MODEL_NAME = os.environ["OPENAI_MODEL"] + + +tools = [ + { + "type": "function", + "function": { + "name": "lookup_weather", + "description": "Lookup the weather for a given city name or zip code.", + "parameters": { + "type": "object", + "properties": { + "city_name": {"type": "string", "description": "The city name"}, + "zip_code": {"type": "string", "description": "The zip code"}, + }, + "additionalProperties": False, + }, + }, + }, + { + "type": "function", + "function": { + "name": "lookup_movies", + "description": "Lookup movies playing in a given city name or zip code.", + "parameters": { + "type": "object", + "properties": { + "city_name": {"type": "string", "description": "The city name"}, + "zip_code": {"type": "string", "description": "The zip code"}, + }, + "additionalProperties": False, + }, + }, + }, +] + + +# --------------------------------------------------------------------------- +# Implementaciones de herramientas +# --------------------------------------------------------------------------- +def lookup_weather(city_name: str | None = None, zip_code: str | None = None) -> dict[str, Any]: + """Devuelve un clima simulado para la ubicación proporcionada.""" + location = city_name or zip_code or "desconocido" + return { + "ubicacion": location, + "condicion": "chubascos", + "lluvia_mm_ult_24h": 7, + "recomendacion": "Buen día para actividades bajo techo si no te gusta la llovizna.", + } + + +def lookup_movies(city_name: str | None = None, zip_code: str | None = None) -> dict[str, Any]: + """Devuelve una lista simulada de películas en cartelera.""" + location = city_name or zip_code or "desconocido" + return { + "ubicacion": location, + "peliculas": [ + {"titulo": "El Arrecife Cuántico", "clasificacion": "PG-13"}, + {"titulo": "Tormenta Sobre Bahía Puerto", "clasificacion": "PG"}, + {"titulo": "Koala de Medianoche", "clasificacion": "R"}, + ], + } + + +tool_mapping: dict[str, Callable[..., Any]] = { + "lookup_weather": lookup_weather, + "lookup_movies": lookup_movies, +} + + +# --------------------------------------------------------------------------- +# Bucle conversacional +# --------------------------------------------------------------------------- +messages: list[dict[str, Any]] = [ + {"role": "system", "content": "Eres un chatbot de turismo."}, + {"role": "user", "content": "¿Llueve lo suficiente en Sídney como para ir al cine y qué películas hay?"}, +] + +print(f"Modelo: {MODEL_NAME} en Host: {API_HOST}\n") + +while True: + print("Invocando el modelo...\n") + response = client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + tools=tools, + tool_choice="auto", + parallel_tool_calls=False, + ) + + choice = response.choices[0] + assistant_message = choice.message + + if not assistant_message.tool_calls: + print("Asistente:") + print(assistant_message.content) + break + + messages.append( + { + "role": "assistant", + "content": assistant_message.content or "", + "tool_calls": [tc.model_dump() for tc in assistant_message.tool_calls], + } + ) + + for tool_call in assistant_message.tool_calls: + fn_name = tool_call.function.name + raw_args = tool_call.function.arguments or "{}" + print(f"Solicitud de herramienta: {fn_name}({raw_args})") + target_tool = 
tool_mapping.get(fn_name)
+        if not target_tool:
+            tool_result: Any = f"ERROR: No hay implementación registrada para la herramienta '{fn_name}'"
+        else:
+            try:
+                parsed_args = json.loads(raw_args) if raw_args.strip() else {}
+                tool_result = target_tool(**parsed_args)
+            except json.JSONDecodeError:
+                tool_result = "Error: argumentos JSON malformados"
+            except Exception as e:  # protege la ejecución de la herramienta
+                tool_result = f"Error ejecutando la herramienta {fn_name}: {e}"
+        # Serializa el resultado de la herramienta, con respaldo a str() si no es serializable
+        try:
+            tool_result_str = json.dumps(tool_result)
+        except Exception:
+            tool_result_str = json.dumps({"result": str(tool_result)})
+        # Agrega la respuesta de la herramienta a la conversación
+        messages.append(
+            {
+                "role": "tool",
+                "tool_call_id": tool_call.id,
+                "name": fn_name,
+                "content": tool_result_str,
+            }
+        )
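+
+    # El bucle while vuelve a invocar al modelo con los resultados de las herramientas;
+    # termina (con break) cuando el modelo responde sin tool_calls, es decir,
+    # cuando produce su respuesta final en lenguaje natural.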