|
| 1 | +import json |
1 | 2 | import os |
| 3 | +from concurrent.futures import ThreadPoolExecutor |
2 | 4 |
|
3 | 5 | import azure.identity |
4 | 6 | import openai |
|
76 | 78 | }, |
77 | 79 | ] |
78 | 80 |
|
| 81 | + |
| 82 | +# --------------------------------------------------------------------------- |
| 83 | +# Tool (function) implementations |
| 84 | +# --------------------------------------------------------------------------- |
| 85 | +def lookup_weather(city_name: str | None = None, zip_code: str | None = None) -> str: |
| 86 | + """Looks up the weather for given city_name and zip_code.""" |
| 87 | + location = city_name or zip_code or "unknown" |
| 88 | + # In a real implementation, call an external weather API here. |
| 89 | + return { |
| 90 | + "location": location, |
| 91 | + "condition": "rain showers", |
| 92 | + "rain_mm_last_24h": 7, |
| 93 | + "recommendation": "Good day for indoor activities if you dislike drizzle.", |
| 94 | + } |
| 95 | + |
| 96 | + |
| 97 | +def lookup_movies(city_name: str | None = None, zip_code: str | None = None) -> str: |
| 98 | + """Returns a list of movies playing in the given location.""" |
| 99 | + location = city_name or zip_code or "unknown" |
| 100 | + # A real implementation could query a cinema listings API. |
| 101 | + return { |
| 102 | + "location": location, |
| 103 | + "movies": [ |
| 104 | + {"title": "The Quantum Reef", "rating": "PG-13"}, |
| 105 | + {"title": "Storm Over Harbour Bay", "rating": "PG"}, |
| 106 | + {"title": "Midnight Koala", "rating": "R"}, |
| 107 | + ], |
| 108 | + } |
| 109 | + |
| 110 | + |
# Conversation seed: a Spanish-speaking tourism-bot persona plus the
# user's question about rain and cinema listings in Sydney.
system_message = {"role": "system", "content": "Eres un chatbot de turismo."}
user_message = {
    "role": "user",
    "content": "¿Está lloviendo lo suficiente en Sídney como para ver películas y cuáles estan en los cines?",
}
messages = [system_message, user_message]

# First round-trip: the model may answer directly or request tool calls
# (tool_choice="auto" leaves that decision to the model).
response = client.chat.completions.create(
    model=MODEL_NAME,
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
91 | 124 |
|
print(f"Respuesta de {MODEL_NAME} en {API_HOST}: \n")

# Map tool names (as declared in `tools`) to their Python implementations.
available_functions = {
    "lookup_weather": lookup_weather,
    "lookup_movies": lookup_movies,
}

if response.choices[0].message.tool_calls:
    tool_calls = response.choices[0].message.tool_calls
    print(f"El modelo solicitó {len(tool_calls)} llamada(s) de herramienta:\n")

    # The assistant message carrying the tool calls must precede the tool
    # results in the conversation, or the follow-up request is rejected.
    messages.append(response.choices[0].message)

    # Run the requested tools concurrently; real implementations would be
    # I/O-bound (HTTP calls), so a thread pool overlaps the waits.
    with ThreadPoolExecutor() as executor:
        futures = []
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Solicitud de herramienta: {function_name}({arguments})")

            if function_name in available_functions:
                future = executor.submit(available_functions[function_name], **arguments)
                futures.append((tool_call, future))
            else:
                # BUG FIX: every tool_call_id needs a matching "tool" message;
                # silently skipping an unknown name left the call unanswered
                # and made the follow-up API request fail. Report it instead.
                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": json.dumps({"error": f"Unknown function: {function_name}"}),
                    }
                )

        # Collect results in request order; one "tool" message per call.
        for tool_call, future in futures:
            result = future.result()
            messages.append({"role": "tool", "tool_call_id": tool_call.id, "content": json.dumps(result)})

    # Second round-trip: the model composes the final answer from the tool output.
    final_response = client.chat.completions.create(model=MODEL_NAME, messages=messages, tools=tools)
    print("Asistente:")
    print(final_response.choices[0].message.content)
else:
    # No tools requested: the first response already contains the answer.
    print(response.choices[0].message.content)
0 commit comments