diff --git a/.github/.sync-sha b/.github/.sync-sha index cb63f35cb..9a30e8825 100644 --- a/.github/.sync-sha +++ b/.github/.sync-sha @@ -1 +1 @@ -e448696a5a619683d4c940e9bfe666bd23d3f8a5 +b606a7dcc200a54e39178bac4ff4430384f2c2ef diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 07a19a4a6..e6754052e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,11 +33,12 @@ /samples/java/quickstart/create-agent/src/main/java/com/azure/ai/agents/CreateAgent.java @microsoft-foundry/AI-Platform-Docs /samples/python/enterprise-agent-tutorial/1-idea-to-prototype/evaluate.py @microsoft-foundry/AI-Platform-Docs /samples/python/enterprise-agent-tutorial/1-idea-to-prototype/main.py @microsoft-foundry/AI-Platform-Docs +/samples/python/foundry-models/model-router/model-router-chat-completions.py @microsoft-foundry/AI-Platform-Docs +/samples/python/foundry-models/model-router/model-router-foundry-responses.py @microsoft-foundry/AI-Platform-Docs /samples/python/quickstart/chat-with-agent/quickstart-chat-with-agent.py @microsoft-foundry/AI-Platform-Docs /samples/python/quickstart/create-agent/quickstart-create-agent.py @microsoft-foundry/AI-Platform-Docs /samples/python/quickstart/responses/quickstart-responses.py @microsoft-foundry/AI-Platform-Docs /samples/typescript/quickstart/chat-with-agent/src/quickstart-chat-with-agent.ts @microsoft-foundry/AI-Platform-Docs /samples/typescript/quickstart/create-agent/src/quickstart-create-agent.ts @microsoft-foundry/AI-Platform-Docs /samples/typescript/quickstart/responses/src/quickstart-responses.ts @microsoft-foundry/AI-Platform-Docs -samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow @microsoft-foundry/foundry-vscode-extension -samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow @microsoft-foundry/foundry-vscode-extension + diff --git a/infrastructure/infrastructure-setup-bicep/15-private-network-standard-agent-setup/README.md 
b/infrastructure/infrastructure-setup-bicep/15-private-network-standard-agent-setup/README.md index 1ced00130..9bffa6610 100644 --- a/infrastructure/infrastructure-setup-bicep/15-private-network-standard-agent-setup/README.md +++ b/infrastructure/infrastructure-setup-bicep/15-private-network-standard-agent-setup/README.md @@ -136,6 +136,25 @@ To use an existing VNet and subnets, set the existingVnetResourceId parameter to To use an existing Cosmos DB for NoSQL resource, set cosmosDBResourceId parameter to the full Azure Resource ID of the target Cosmos DB. - param azureCosmosDBAccountResourceId string = /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName} +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> When creating the Cosmos DB connection (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. +> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` + 3. 
**Use an existing Azure AI Search resource** diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/README.md b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/README.md new file mode 100644 index 000000000..cffad0739 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/README.md @@ -0,0 +1,212 @@ +--- +description: This set of templates demonstrates how to set up a network-secured Azure AI Foundry environment for evaluation scenarios without Cosmos DB, AI Search, or project capability host. +page_type: sample +products: +- azure +- azure-resource-manager +urlFragment: network-secured-evaluation-only +languages: +- bicep +- json +--- + +# Azure AI Foundry: Evaluation-Only Setup with Private Network Isolation + +> **IMPORTANT** +> +> This template is a simplified version of the [standard agent setup](../15-private-network-standard-agent-setup/) designed for **evaluation scenarios only**. It does **not** deploy Cosmos DB, AI Search, or a project capability host. If you need full agent capabilities (thread storage, vector search, stateful agents), use the standard agent setup instead. + +--- +## Overview +This infrastructure-as-code (IaC) solution deploys a **minimal** network-secured Azure AI Foundry environment with private networking and role-based access control (RBAC), intended for evaluation and testing purposes. 
+ +Unlike the full standard agent setup, this template: +- **Does NOT** create an Azure Cosmos DB account (no thread/conversation storage) +- **Does NOT** create an Azure AI Search resource (no vector stores) +- **Does NOT** create a project capability host (no stateful agent support) + +What it **does** deploy: +- Azure AI Services account with a model deployment +- An AI Foundry project with a storage connection +- An Azure Storage account (or uses an existing one) +- A VNet with private endpoints for AI Services and Storage +- Private DNS zones for secure name resolution +- RBAC role assignments for the project on the storage account + +--- + +## Key Information + +**Region and Resource Placement Requirements** +- **All Foundry workspace resources should be in the same region as the VNet**, including the Storage Account, Foundry Account, Project, and Managed Identity. The only exception is within the Foundry Account, you may choose to deploy your model to a different region. + - **Note:** Your Virtual Network can be in a different resource group than your Foundry workspace resources. + +--- + +## Prerequisites + +1. **Active Azure subscription with appropriate permissions** + - **Azure AI Account Owner**: Needed to create a cognitive services account and project + - **Owner or Role Based Access Administrator**: Needed to assign RBAC to the storage account + - **Azure AI User**: Needed to create and use evaluation workloads + +1. **Register Resource Providers** + + ```bash + az provider register --namespace 'Microsoft.KeyVault' + az provider register --namespace 'Microsoft.CognitiveServices' + az provider register --namespace 'Microsoft.Storage' + az provider register --namespace 'Microsoft.Network' + az provider register --namespace 'Microsoft.App' + az provider register --namespace 'Microsoft.ContainerService' + ``` + +1. Network administrator permissions (if operating in a restricted or enterprise environment) + +1. 
Sufficient quota for all resources in your target Azure region + * If no parameters are passed in, this template creates an Azure AI Foundry resource, Foundry project, and Azure Storage account + +1. Azure CLI installed and configured on your local workstation or deployment pipeline server + +--- + +## Pre-Deployment Steps + +### Networking Requirements +1. Review network requirements and plan Virtual Network address space (e.g., 192.168.0.0/16) + +2. Two subnets are needed: + - **Agent Subnet** (e.g., 192.168.0.0/24): Hosts Agent client for workloads, delegated to Microsoft.App/environments + - **Private endpoint Subnet** (e.g., 192.168.1.0/24): Hosts private endpoints + - Ensure that the address spaces do not overlap with any existing networks + + > **Notes:** + - If you do not provide an existing virtual network, the template will create a new virtual network with the default address spaces and subnets described above. + - You must ensure the subnet is not already in use by another account. + - You must ensure the subnet is exclusively delegated to __Microsoft.App/environments__. + +--- + +## Template Customization + +Note: If not provided, the following resources will be created automatically for you: +- VNet and two subnets +- Azure Storage + +### Parameters + +1. **Use Existing Virtual Network and Subnets** + +To use an existing VNet and subnets, set the `existingVnetResourceId` parameter to the full Azure Resource ID of the target VNet: +``` +param existingVnetResourceId = "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/" +param agentSubnetName string = 'agent-subnet' +param peSubnetName string = 'pe-subnet' +``` + +2. 
**Use an existing Azure Storage account** + +To use an existing Azure Storage account: +``` +param azureStorageAccountResourceId string = /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{storageAccountName} +``` + +--- + +## Deploy the bicep template + +**Option 1: Manually deploy the bicep template** +- **Create a New (or Use Existing) Resource Group** + + ```bash + az group create --name --location + ``` +- Deploy the main.bicep file + + ```bash + az deployment group create --resource-group --template-file main.bicep --parameters main.bicepparam + ``` + +> **Note:** To access your Foundry resource securely, use either a VM, VPN, or ExpressRoute. + +--- + +## Architecture + +### Azure Resources Created + +| Resource | Type | Description | +|----------|------|-------------| +| Azure AI Foundry | `Microsoft.CognitiveServices/accounts` | AI Services account with disabled public access | +| AI Model Deployment | `Microsoft.CognitiveServices/accounts/deployments` | Model deployment (e.g., gpt-4.1) | +| Foundry Project | `Microsoft.CognitiveServices/accounts/projects` | Project with system-assigned managed identity | +| Storage Account | `Microsoft.Storage/storageAccounts` | StorageV2 with disabled public access | +| Virtual Network | `Microsoft.Network/virtualNetworks` | VNet with agent and PE subnets | +| Private Endpoints | `Microsoft.Network/privateEndpoints` | For AI Services and Storage | + +### Network Security Design + +**Private Endpoints** are created for: +- Azure AI Foundry (account) +- Azure Storage (blob) + +**Private DNS Zones**: +| Private Link Resource Type | Sub Resource | Private DNS Zone Name | +|----------------------------|--------------|------------------------| +| **Azure AI Foundry** | account | `privatelink.cognitiveservices.azure.com`
`privatelink.openai.azure.com`
`privatelink.services.ai.azure.com` | +| **Azure Storage** | blob | `privatelink.blob.core.windows.net` | + +### Role Assignments + +- **AI Services Account** + - Azure AI User (`53ca6127-db72-4b80-b1b0-d745d6d5456d`) — grants the project MI data-plane access +- **Azure Storage Account** + - Storage Blob Data Contributor (`ba92f5b4-2d11-453d-a403-e96b0029c9fe`) + - Storage Blob Data Owner (`b7e6dc6d-f1e8-4753-8033-0f276bb0955b`) — scoped to project containers + +--- + +## Module Structure + +```text +modules-network-secured/ +├── ai-account-identity.bicep # Azure AI Foundry deployment and configuration +├── ai-account-role-assignment.bicep # Azure AI User role assignment on the account +├── ai-project-identity.bicep # Foundry project deployment with storage connection +├── azure-storage-account-role-assignment.bicep # Storage Account RBAC configuration +├── blob-storage-container-role-assignments.bicep # Blob Storage Container RBAC configuration +├── existing-vnet.bicep # Bring your existing virtual network +├── format-project-workspace-id.bicep # Formatting the project workspace ID +├── network-agent-vnet.bicep # Logic for routing virtual network set-up +├── private-endpoint-and-dns.bicep # Private endpoints and DNS zones (AI Services + Storage only) +├── standard-dependent-resources.bicep # Deploying Storage Account +├── subnet.bicep # Setting the subnet +├── validate-existing-resources.bicep # Validate existing Storage Account +└── vnet.bicep # Deploying a new virtual network +``` + +--- + +## Comparison with Standard Agent Setup + +| Feature | This Template (Evaluation-Only) | Standard Agent Setup (15) | +|---------|-------------------------------|--------------------------| +| AI Services + Model | ✅ | ✅ | +| Project | ✅ | ✅ | +| Storage Account | ✅ | ✅ | +| VNet + Private Endpoints | ✅ (AI + Storage) | ✅ (AI + Storage + Search + Cosmos) | +| Cosmos DB | ❌ | ✅ | +| AI Search | ❌ | ✅ | +| Project Capability Host | ❌ | ✅ | +| Stateful Agents | ❌ | ✅ | + +--- + 
+## Maintenance + +### Troubleshooting + +1. Verify private endpoint connectivity +2. Check DNS resolution +3. Validate role assignments +4. Review network security groups diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/azuredeploy.json b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/azuredeploy.json new file mode 100644 index 000000000..58cc95a6a --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/azuredeploy.json @@ -0,0 +1,1942 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "3823237790915182165" + } + }, + "parameters": { + "location": { + "type": "string", + "defaultValue": "eastus", + "allowedValues": [ + "westus", + "eastus", + "eastus2", + "japaneast", + "francecentral", + "spaincentral", + "uaenorth", + "southcentralus", + "italynorth", + "germanywestcentral", + "brazilsouth", + "southafricanorth", + "australiaeast", + "swedencentral", + "canadaeast", + "westeurope", + "westus3", + "uksouth", + "southindia", + "koreacentral", + "polandcentral", + "switzerlandnorth", + "norwayeast" + ], + "metadata": { + "description": "Location for all resources." + } + }, + "aiServices": { + "type": "string", + "defaultValue": "aiservices", + "metadata": { + "description": "Name for your AI Services resource." 
+ } + }, + "modelName": { + "type": "string", + "defaultValue": "gpt-4.1", + "metadata": { + "description": "The name of the model you want to deploy" + } + }, + "modelFormat": { + "type": "string", + "defaultValue": "OpenAI", + "metadata": { + "description": "The provider of your model" + } + }, + "modelVersion": { + "type": "string", + "defaultValue": "2025-04-14", + "metadata": { + "description": "The version of your model" + } + }, + "modelSkuName": { + "type": "string", + "defaultValue": "GlobalStandard", + "metadata": { + "description": "The sku of your model deployment" + } + }, + "modelCapacity": { + "type": "int", + "defaultValue": 30, + "metadata": { + "description": "The tokens per minute (TPM) of your model deployment" + } + }, + "deploymentTimestamp": { + "type": "string", + "defaultValue": "[utcNow('yyyyMMddHHmmss')]" + }, + "firstProjectName": { + "type": "string", + "defaultValue": "project", + "metadata": { + "description": "Name for your project resource." + } + }, + "projectDescription": { + "type": "string", + "defaultValue": "A project for the AI Foundry account with network secured evaluation setup", + "metadata": { + "description": "This project will be a sub-resource of your account" + } + }, + "displayName": { + "type": "string", + "defaultValue": "network secured evaluation project", + "metadata": { + "description": "The display name of the project" + } + }, + "vnetName": { + "type": "string", + "defaultValue": "agent-vnet-test", + "metadata": { + "description": "Virtual Network name for the Agent to create new or existing virtual network" + } + }, + "agentSubnetName": { + "type": "string", + "defaultValue": "agent-subnet", + "metadata": { + "description": "The name of Agents Subnet to create new or existing subnet for agents" + } + }, + "peSubnetName": { + "type": "string", + "defaultValue": "pe-subnet", + "metadata": { + "description": "The name of Private Endpoint subnet to create new or existing subnet for private endpoints" + } + }, + 
"existingVnetResourceId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Resource ID of the existing Virtual Network" + } + }, + "vnetAddressPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address space for the VNet (only used for new VNet)" + } + }, + "agentSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the agent subnet. The default value is 192.168.0.0/24 but you can choose any size /26 or any private range such as 10.0.0.0 or 172.16.0.0" + } + }, + "peSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the private endpoint subnet" + } + }, + "azureStorageAccountResourceId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "The AI Storage Account full ARM Resource ID. This is an optional field, and if not provided, the resource will be created." + } + }, + "dnsZonesSubscriptionId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Subscription ID where existing private DNS zones are located. Leave empty to use current subscription."
+ } + }, + "existingDnsZones": { + "type": "object", + "defaultValue": { + "privatelink.services.ai.azure.com": "", + "privatelink.openai.azure.com": "", + "privatelink.cognitiveservices.azure.com": "", + "privatelink.blob.core.windows.net": "" + }, + "metadata": { + "description": "Object mapping DNS zone names to their resource group, or empty string to indicate creation" + } + }, + "dnsZoneNames": { + "type": "array", + "defaultValue": [ + "privatelink.services.ai.azure.com", + "privatelink.openai.azure.com", + "privatelink.cognitiveservices.azure.com", + "privatelink.blob.core.windows.net" + ], + "metadata": { + "description": "Zone Names for Validation of existing Private Dns Zones" + } + } + }, + "variables": { + "uniqueSuffix": "[substring(uniqueString(format('{0}-{1}', resourceGroup().id, parameters('deploymentTimestamp'))), 0, 4)]", + "accountName": "[toLower(format('{0}{1}', parameters('aiServices'), variables('uniqueSuffix')))]", + "projectName": "[toLower(format('{0}{1}', parameters('firstProjectName'), variables('uniqueSuffix')))]", + "azureStorageName": "[toLower(format('{0}{1}storage', parameters('aiServices'), variables('uniqueSuffix')))]", + "storagePassedIn": "[not(equals(parameters('azureStorageAccountResourceId'), ''))]", + "existingVnetPassedIn": "[not(equals(parameters('existingVnetResourceId'), ''))]", + "storageParts": "[split(parameters('azureStorageAccountResourceId'), '/')]", + "azureStorageSubscriptionId": "[if(variables('storagePassedIn'), variables('storageParts')[2], subscription().subscriptionId)]", + "azureStorageResourceGroupName": "[if(variables('storagePassedIn'), variables('storageParts')[4], resourceGroup().name)]", + "vnetParts": "[split(parameters('existingVnetResourceId'), '/')]", + "vnetSubscriptionId": "[if(variables('existingVnetPassedIn'), variables('vnetParts')[2], subscription().subscriptionId)]", + "vnetResourceGroupName": "[if(variables('existingVnetPassedIn'), variables('vnetParts')[4], resourceGroup().name)]", + 
"existingVnetName": "[if(variables('existingVnetPassedIn'), last(variables('vnetParts')), parameters('vnetName'))]", + "trimVnetName": "[trim(variables('existingVnetName'))]", + "resolvedDnsZonesSubscriptionId": "[if(empty(parameters('dnsZonesSubscriptionId')), subscription().subscriptionId, parameters('dnsZonesSubscriptionId'))]" + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "location": { + "value": "[parameters('location')]" + }, + "vnetName": { + "value": "[variables('trimVnetName')]" + }, + "useExistingVnet": { + "value": "[variables('existingVnetPassedIn')]" + }, + "existingVnetResourceGroupName": { + "value": "[variables('vnetResourceGroupName')]" + }, + "agentSubnetName": { + "value": "[parameters('agentSubnetName')]" + }, + "peSubnetName": { + "value": "[parameters('peSubnetName')]" + }, + "vnetAddressPrefix": { + "value": "[parameters('vnetAddressPrefix')]" + }, + "agentSubnetPrefix": { + "value": "[parameters('agentSubnetPrefix')]" + }, + "peSubnetPrefix": { + "value": "[parameters('peSubnetPrefix')]" + }, + "existingVnetSubscriptionId": { + "value": "[variables('vnetSubscriptionId')]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "17284036880023313732" + } + }, + "parameters": { + "location": { + "type": "string", + "metadata": { + "description": "Azure region for the deployment" + } + }, + "vnetName": { + "type": "string", + "metadata": { + "description": "The name of the virtual network" + } + }, + "useExistingVnet": { + "type": "bool", + "defaultValue": false, + "metadata": 
{ + "description": "Indicates if an existing VNet should be used" + } + }, + "existingVnetSubscriptionId": { + "type": "string", + "defaultValue": "[subscription().subscriptionId]", + "metadata": { + "description": "Subscription ID of the existing VNet (if different from current subscription)" + } + }, + "existingVnetResourceGroupName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "Resource Group name of the existing VNet (if different from current resource group)" + } + }, + "agentSubnetName": { + "type": "string", + "defaultValue": "agent-subnet", + "metadata": { + "description": "The name of Agents Subnet" + } + }, + "peSubnetName": { + "type": "string", + "defaultValue": "pe-subnet", + "metadata": { + "description": "The name of Private Endpoint subnet" + } + }, + "vnetAddressPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address space for the VNet (only used for new VNet)" + } + }, + "agentSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the agent subnet" + } + }, + "peSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the private endpoint subnet" + } + } + }, + "resources": [ + { + "condition": "[not(parameters('useExistingVnet'))]", + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "vnet-deployment", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "location": { + "value": "[parameters('location')]" + }, + "vnetName": { + "value": "[parameters('vnetName')]" + }, + "agentSubnetName": { + "value": "[parameters('agentSubnetName')]" + }, + "peSubnetName": { + "value": "[parameters('peSubnetName')]" + }, + "vnetAddressPrefix": { + "value": "[parameters('vnetAddressPrefix')]" + }, + "agentSubnetPrefix": { + "value": 
"[parameters('agentSubnetPrefix')]" + }, + "peSubnetPrefix": { + "value": "[parameters('peSubnetPrefix')]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "7044568158736020946" + } + }, + "parameters": { + "location": { + "type": "string", + "metadata": { + "description": "Azure region for the deployment" + } + }, + "vnetName": { + "type": "string", + "defaultValue": "agents-vnet-test", + "metadata": { + "description": "The name of the virtual network" + } + }, + "agentSubnetName": { + "type": "string", + "defaultValue": "agent-subnet", + "metadata": { + "description": "The name of Agents Subnet" + } + }, + "peSubnetName": { + "type": "string", + "defaultValue": "pe-subnet", + "metadata": { + "description": "The name of Private Endpoint subnet" + } + }, + "vnetAddressPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address space for the VNet" + } + }, + "agentSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the agent subnet" + } + }, + "peSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the private endpoint subnet" + } + } + }, + "variables": { + "defaultVnetAddressPrefix": "192.168.0.0/16", + "vnetAddress": "[if(empty(parameters('vnetAddressPrefix')), variables('defaultVnetAddressPrefix'), parameters('vnetAddressPrefix'))]", + "agentSubnet": "[if(empty(parameters('agentSubnetPrefix')), cidrSubnet(variables('vnetAddress'), 24, 0), parameters('agentSubnetPrefix'))]", + "peSubnet": "[if(empty(parameters('peSubnetPrefix')), cidrSubnet(variables('vnetAddress'), 24, 1), parameters('peSubnetPrefix'))]" + }, + "resources": [ + { + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2024-05-01", + "name":
"[parameters('vnetName')]", + "location": "[parameters('location')]", + "properties": { + "addressSpace": { + "addressPrefixes": [ + "[variables('vnetAddress')]" + ] + }, + "subnets": [ + { + "name": "[parameters('agentSubnetName')]", + "properties": { + "addressPrefix": "[variables('agentSubnet')]", + "delegations": [ + { + "name": "Microsoft.app/environments", + "properties": { + "serviceName": "Microsoft.App/environments" + } + } + ] + } + }, + { + "name": "[parameters('peSubnetName')]", + "properties": { + "addressPrefix": "[variables('peSubnet')]" + } + } + ] + } + } + ], + "outputs": { + "peSubnetName": { + "type": "string", + "value": "[parameters('peSubnetName')]" + }, + "agentSubnetName": { + "type": "string", + "value": "[parameters('agentSubnetName')]" + }, + "agentSubnetId": { + "type": "string", + "value": "[format('{0}/subnets/{1}', resourceId('Microsoft.Network/virtualNetworks', parameters('vnetName')), parameters('agentSubnetName'))]" + }, + "peSubnetId": { + "type": "string", + "value": "[format('{0}/subnets/{1}', resourceId('Microsoft.Network/virtualNetworks', parameters('vnetName')), parameters('peSubnetName'))]" + }, + "virtualNetworkName": { + "type": "string", + "value": "[parameters('vnetName')]" + }, + "virtualNetworkId": { + "type": "string", + "value": "[resourceId('Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "virtualNetworkResourceGroup": { + "type": "string", + "value": "[resourceGroup().name]" + }, + "virtualNetworkSubscriptionId": { + "type": "string", + "value": "[subscription().subscriptionId]" + } + } + } + } + }, + { + "condition": "[parameters('useExistingVnet')]", + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "existing-vnet-deployment", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "vnetName": { + "value": "[parameters('vnetName')]" + }, + "vnetResourceGroupName": { + "value": 
"[parameters('existingVnetResourceGroupName')]" + }, + "vnetSubscriptionId": { + "value": "[parameters('existingVnetSubscriptionId')]" + }, + "agentSubnetName": { + "value": "[parameters('agentSubnetName')]" + }, + "peSubnetName": { + "value": "[parameters('peSubnetName')]" + }, + "agentSubnetPrefix": { + "value": "[parameters('agentSubnetPrefix')]" + }, + "peSubnetPrefix": { + "value": "[parameters('peSubnetPrefix')]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "9399102347973763543" + } + }, + "parameters": { + "vnetName": { + "type": "string", + "metadata": { + "description": "The name of the existing virtual network" + } + }, + "vnetSubscriptionId": { + "type": "string", + "defaultValue": "[subscription().subscriptionId]", + "metadata": { + "description": "Subscription ID of virtual network (if different from current subscription)" + } + }, + "vnetResourceGroupName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "Resource Group name of the existing VNet (if different from current resource group)" + } + }, + "agentSubnetName": { + "type": "string", + "defaultValue": "agent-subnet", + "metadata": { + "description": "The name of Agents Subnet" + } + }, + "peSubnetName": { + "type": "string", + "defaultValue": "pe-subnet", + "metadata": { + "description": "The name of Private Endpoint subnet" + } + }, + "agentSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the agent subnet (only needed if creating new subnet)" + } + }, + "peSubnetPrefix": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Address prefix for the private endpoint subnet (only needed if creating new subnet)" + } + } + }, + "resources": [ + { + "type": 
"Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('agent-subnet-{0}', uniqueString(deployment().name, parameters('agentSubnetName')))]", + "resourceGroup": "[parameters('vnetResourceGroupName')]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "vnetName": { + "value": "[parameters('vnetName')]" + }, + "subnetName": { + "value": "[parameters('agentSubnetName')]" + }, + "addressPrefix": "[if(empty(parameters('agentSubnetPrefix')), createObject('value', cidrSubnet(reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName')), '2024-05-01').addressSpace.addressPrefixes[0], 24, 0)), createObject('value', parameters('agentSubnetPrefix')))]", + "delegations": { + "value": [ + { + "name": "Microsoft.App/environments", + "properties": { + "serviceName": "Microsoft.App/environments" + } + } + ] + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "8293031405760656222" + } + }, + "parameters": { + "vnetName": { + "type": "string", + "metadata": { + "description": "Name of the virtual network" + } + }, + "subnetName": { + "type": "string", + "metadata": { + "description": "Name of the subnet" + } + }, + "addressPrefix": { + "type": "string", + "metadata": { + "description": "Address prefix for the subnet" + } + }, + "delegations": { + "type": "array", + "defaultValue": [], + "metadata": { + "description": "Array of subnet delegations" + } + } + }, + "resources": [ + { + "type": "Microsoft.Network/virtualNetworks/subnets", + "apiVersion": "2024-05-01", + "name": "[format('{0}/{1}', parameters('vnetName'), 
parameters('subnetName'))]", + "properties": { + "addressPrefix": "[parameters('addressPrefix')]", + "delegations": "[parameters('delegations')]" + } + } + ], + "outputs": { + "subnetId": { + "type": "string", + "value": "[resourceId('Microsoft.Network/virtualNetworks/subnets', split(format('{0}/{1}', parameters('vnetName'), parameters('subnetName')), '/')[0], split(format('{0}/{1}', parameters('vnetName'), parameters('subnetName')), '/')[1])]" + }, + "subnetName": { + "type": "string", + "value": "[parameters('subnetName')]" + } + } + } + } + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('pe-subnet-{0}', uniqueString(deployment().name, parameters('peSubnetName')))]", + "resourceGroup": "[parameters('vnetResourceGroupName')]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "vnetName": { + "value": "[parameters('vnetName')]" + }, + "subnetName": { + "value": "[parameters('peSubnetName')]" + }, + "addressPrefix": "[if(empty(parameters('peSubnetPrefix')), createObject('value', cidrSubnet(reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName')), '2024-05-01').addressSpace.addressPrefixes[0], 24, 1)), createObject('value', parameters('peSubnetPrefix')))]", + "delegations": { + "value": [] + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "8293031405760656222" + } + }, + "parameters": { + "vnetName": { + "type": "string", + "metadata": { + "description": "Name of the virtual network" + } + }, + "subnetName": { + "type": "string", + "metadata": { + "description": "Name of the subnet" + } + }, + 
"addressPrefix": { + "type": "string", + "metadata": { + "description": "Address prefix for the subnet" + } + }, + "delegations": { + "type": "array", + "defaultValue": [], + "metadata": { + "description": "Array of subnet delegations" + } + } + }, + "resources": [ + { + "type": "Microsoft.Network/virtualNetworks/subnets", + "apiVersion": "2024-05-01", + "name": "[format('{0}/{1}', parameters('vnetName'), parameters('subnetName'))]", + "properties": { + "addressPrefix": "[parameters('addressPrefix')]", + "delegations": "[parameters('delegations')]" + } + } + ], + "outputs": { + "subnetId": { + "type": "string", + "value": "[resourceId('Microsoft.Network/virtualNetworks/subnets', split(format('{0}/{1}', parameters('vnetName'), parameters('subnetName')), '/')[0], split(format('{0}/{1}', parameters('vnetName'), parameters('subnetName')), '/')[1])]" + }, + "subnetName": { + "type": "string", + "value": "[parameters('subnetName')]" + } + } + } + } + } + ], + "outputs": { + "peSubnetName": { + "type": "string", + "value": "[parameters('peSubnetName')]" + }, + "agentSubnetName": { + "type": "string", + "value": "[parameters('agentSubnetName')]" + }, + "agentSubnetId": { + "type": "string", + "value": "[format('{0}/subnets/{1}', extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName')), parameters('agentSubnetName'))]" + }, + "peSubnetId": { + "type": "string", + "value": "[format('{0}/subnets/{1}', extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName')), parameters('peSubnetName'))]" + }, + "virtualNetworkName": { + "type": "string", + "value": "[parameters('vnetName')]" + }, + "virtualNetworkId": { + "type": "string", + "value": 
"[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "virtualNetworkResourceGroup": { + "type": "string", + "value": "[parameters('vnetResourceGroupName')]" + }, + "virtualNetworkSubscriptionId": { + "type": "string", + "value": "[parameters('vnetSubscriptionId')]" + } + } + } + } + } + ], + "outputs": { + "virtualNetworkName": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.virtualNetworkName.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.virtualNetworkName.value)]" + }, + "virtualNetworkId": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.virtualNetworkId.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.virtualNetworkId.value)]" + }, + "virtualNetworkSubscriptionId": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.virtualNetworkSubscriptionId.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.virtualNetworkSubscriptionId.value)]" + }, + "virtualNetworkResourceGroup": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.virtualNetworkResourceGroup.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.virtualNetworkResourceGroup.value)]" + }, + "agentSubnetName": { + "type": "string", + "value": 
"[parameters('agentSubnetName')]" + }, + "peSubnetName": { + "type": "string", + "value": "[parameters('peSubnetName')]" + }, + "agentSubnetId": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.agentSubnetId.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.agentSubnetId.value)]" + }, + "peSubnetId": { + "type": "string", + "value": "[if(parameters('useExistingVnet'), reference(resourceId('Microsoft.Resources/deployments', 'existing-vnet-deployment'), '2025-04-01').outputs.peSubnetId.value, reference(resourceId('Microsoft.Resources/deployments', 'vnet-deployment'), '2025-04-01').outputs.peSubnetId.value)]" + } + } + } + } + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "accountName": { + "value": "[variables('accountName')]" + }, + "location": { + "value": "[parameters('location')]" + }, + "modelName": { + "value": "[parameters('modelName')]" + }, + "modelFormat": { + "value": "[parameters('modelFormat')]" + }, + "modelVersion": { + "value": "[parameters('modelVersion')]" + }, + "modelSkuName": { + "value": "[parameters('modelSkuName')]" + }, + "modelCapacity": { + "value": "[parameters('modelCapacity')]" + }, + "agentSubnetId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))), '2025-04-01').outputs.agentSubnetId.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + 
"version": "0.42.1.51946", + "templateHash": "1167841404925031150" + } + }, + "parameters": { + "accountName": { + "type": "string" + }, + "location": { + "type": "string" + }, + "modelName": { + "type": "string" + }, + "modelFormat": { + "type": "string" + }, + "modelVersion": { + "type": "string" + }, + "modelSkuName": { + "type": "string" + }, + "modelCapacity": { + "type": "int" + }, + "agentSubnetId": { + "type": "string" + }, + "networkInjection": { + "type": "string", + "defaultValue": "true" + } + }, + "resources": [ + { + "type": "Microsoft.CognitiveServices/accounts", + "apiVersion": "2025-04-01-preview", + "name": "[parameters('accountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "S0" + }, + "kind": "AIServices", + "identity": { + "type": "SystemAssigned" + }, + "properties": { + "allowProjectManagement": true, + "customSubDomainName": "[parameters('accountName')]", + "networkAcls": { + "defaultAction": "Deny", + "virtualNetworkRules": [], + "ipRules": [], + "bypass": "AzureServices" + }, + "publicNetworkAccess": "Disabled", + "networkInjections": "[if(equals(parameters('networkInjection'), 'true'), createArray(createObject('scenario', 'agent', 'subnetArmId', parameters('agentSubnetId'), 'useMicrosoftManagedNetwork', false())), null())]", + "disableLocalAuth": false + } + }, + { + "type": "Microsoft.CognitiveServices/accounts/deployments", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}', parameters('accountName'), parameters('modelName'))]", + "sku": { + "capacity": "[parameters('modelCapacity')]", + "name": "[parameters('modelSkuName')]" + }, + "properties": { + "model": { + "name": "[parameters('modelName')]", + "format": "[parameters('modelFormat')]", + "version": "[parameters('modelVersion')]" + } + }, + "dependsOn": [ + "[resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName'))]" + ] + } + ], + "outputs": { + "accountName": { + "type": "string", + "value": 
"[parameters('accountName')]" + }, + "accountID": { + "type": "string", + "value": "[resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName'))]" + }, + "accountTarget": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName')), '2025-04-01-preview').endpoint]" + }, + "accountPrincipalId": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName')), '2025-04-01-preview', 'full').identity.principalId]" + } + } + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('validate-existing-resources-{0}-deployment', variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "azureStorageAccountResourceId": { + "value": "[parameters('azureStorageAccountResourceId')]" + }, + "existingDnsZones": { + "value": "[parameters('existingDnsZones')]" + }, + "dnsZoneNames": { + "value": "[parameters('dnsZoneNames')]" + }, + "dnsZonesSubscriptionId": { + "value": "[variables('resolvedDnsZonesSubscriptionId')]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "11208930521589341070" + } + }, + "parameters": { + "azureStorageAccountResourceId": { + "type": "string", + "metadata": { + "description": "Resource ID of the Azure Storage Account." 
+ } + }, + "existingDnsZones": { + "type": "object", + "metadata": { + "description": "Object mapping DNS zone names to their resource group, or empty string to indicate creation" + } + }, + "dnsZonesSubscriptionId": { + "type": "string", + "metadata": { + "description": "Subscription ID where existing private DNS zones are located. Should be resolved to current subscription if empty." + } + }, + "dnsZoneNames": { + "type": "array", + "metadata": { + "description": "List of private DNS zone names to validate" + } + } + }, + "variables": { + "storagePassedIn": "[not(equals(parameters('azureStorageAccountResourceId'), ''))]", + "storageParts": "[split(parameters('azureStorageAccountResourceId'), '/')]", + "azureStorageSubscriptionId": "[if(and(variables('storagePassedIn'), greater(length(variables('storageParts')), 2)), variables('storageParts')[2], subscription().subscriptionId)]", + "azureStorageResourceGroupName": "[if(and(variables('storagePassedIn'), greater(length(variables('storageParts')), 4)), variables('storageParts')[4], resourceGroup().name)]" + }, + "resources": [], + "outputs": { + "azureStorageExists": { + "type": "bool", + "value": "[and(variables('storagePassedIn'), equals(last(split(parameters('azureStorageAccountResourceId'), '/')), variables('storageParts')[8]))]" + }, + "azureStorageSubscriptionId": { + "type": "string", + "value": "[variables('azureStorageSubscriptionId')]" + }, + "azureStorageResourceGroupName": { + "type": "string", + "value": "[variables('azureStorageResourceGroupName')]" + }, + "dnsZoneExists": { + "type": "array", + "copy": { + "count": "[length(parameters('dnsZoneNames'))]", + "input": { + "name": "[parameters('dnsZoneNames')[copyIndex()]]", + "exists": "[not(empty(parameters('existingDnsZones')[parameters('dnsZoneNames')[copyIndex()]]))]" + } + } + } + } + } + } + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('dependencies-{0}-deployment', 
variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "location": { + "value": "[parameters('location')]" + }, + "azureStorageName": { + "value": "[variables('azureStorageName')]" + }, + "azureStorageAccountResourceId": { + "value": "[parameters('azureStorageAccountResourceId')]" + }, + "azureStorageExists": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('validate-existing-resources-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageExists.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "4263282633179188093" + } + }, + "parameters": { + "location": { + "type": "string", + "metadata": { + "description": "Azure region of the deployment" + } + }, + "azureStorageName": { + "type": "string", + "metadata": { + "description": "Name of the storage account" + } + }, + "azureStorageAccountResourceId": { + "type": "string", + "metadata": { + "description": "The AI Storage Account full ARM Resource ID. This is an optional field, and if not provided, the resource will be created." 
+ } + }, + "azureStorageExists": { + "type": "bool" + }, + "noZRSRegions": { + "type": "array", + "defaultValue": [ + "southindia", + "westus" + ] + }, + "sku": { + "type": "object", + "defaultValue": "[if(contains(parameters('noZRSRegions'), parameters('location')), createObject('name', 'Standard_GRS'), createObject('name', 'Standard_ZRS'))]" + } + }, + "variables": { + "azureStorageParts": "[split(parameters('azureStorageAccountResourceId'), '/')]" + }, + "resources": [ + { + "condition": "[not(parameters('azureStorageExists'))]", + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2023-05-01", + "name": "[parameters('azureStorageName')]", + "location": "[parameters('location')]", + "kind": "StorageV2", + "sku": "[parameters('sku')]", + "properties": { + "minimumTlsVersion": "TLS1_2", + "allowBlobPublicAccess": false, + "publicNetworkAccess": "Disabled", + "networkAcls": { + "bypass": "AzureServices", + "defaultAction": "Deny", + "virtualNetworkRules": [] + }, + "allowSharedKeyAccess": false + } + } + ], + "outputs": { + "azureStorageName": { + "type": "string", + "value": "[if(parameters('azureStorageExists'), variables('azureStorageParts')[8], parameters('azureStorageName'))]" + }, + "azureStorageId": { + "type": "string", + "value": "[if(parameters('azureStorageExists'), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', variables('azureStorageParts')[2], variables('azureStorageParts')[4]), 'Microsoft.Storage/storageAccounts', variables('azureStorageParts')[8]), resourceId('Microsoft.Storage/storageAccounts', parameters('azureStorageName')))]" + }, + "azureStorageResourceGroupName": { + "type": "string", + "value": "[if(parameters('azureStorageExists'), variables('azureStorageParts')[4], resourceGroup().name)]" + }, + "azureStorageSubscriptionId": { + "type": "string", + "value": "[if(parameters('azureStorageExists'), variables('azureStorageParts')[2], subscription().subscriptionId)]" + } + } + } + }, + "dependsOn": [ + 
"[resourceId('Microsoft.Resources/deployments', format('validate-existing-resources-{0}-deployment', variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('{0}-private-endpoint', variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "aiAccountName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix'))), '2025-04-01').outputs.accountName.value]" + }, + "storageName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageName.value]" + }, + "vnetName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))), '2025-04-01').outputs.virtualNetworkName.value]" + }, + "peSubnetName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))), '2025-04-01').outputs.peSubnetName.value]" + }, + "suffix": { + "value": "[variables('uniqueSuffix')]" + }, + "vnetResourceGroupName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))), '2025-04-01').outputs.virtualNetworkResourceGroup.value]" + }, + "vnetSubscriptionId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix'))), '2025-04-01').outputs.virtualNetworkSubscriptionId.value]" + }, + "storageAccountResourceGroupName": { + "value": "[variables('azureStorageResourceGroupName')]" + }, + "storageAccountSubscriptionId": { + 
"value": "[variables('azureStorageSubscriptionId')]" + }, + "existingDnsZones": { + "value": "[parameters('existingDnsZones')]" + }, + "dnsZonesSubscriptionId": { + "value": "[variables('resolvedDnsZonesSubscriptionId')]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "14317702821708481391" + } + }, + "parameters": { + "aiAccountName": { + "type": "string", + "metadata": { + "description": "Name of the AI Foundry account" + } + }, + "storageName": { + "type": "string", + "metadata": { + "description": "Name of the storage account" + } + }, + "vnetName": { + "type": "string", + "metadata": { + "description": "Name of the Vnet" + } + }, + "peSubnetName": { + "type": "string", + "metadata": { + "description": "Name of the Customer subnet" + } + }, + "suffix": { + "type": "string", + "metadata": { + "description": "Suffix for unique resource names" + } + }, + "vnetResourceGroupName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "Resource Group name for existing Virtual Network (if different from current resource group)" + } + }, + "vnetSubscriptionId": { + "type": "string", + "defaultValue": "[subscription().subscriptionId]", + "metadata": { + "description": "Subscription ID for Virtual Network" + } + }, + "storageAccountResourceGroupName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "Resource Group name for Storage Account" + } + }, + "storageAccountSubscriptionId": { + "type": "string", + "defaultValue": "[subscription().subscriptionId]", + "metadata": { + "description": "Subscription ID for Storage account" + } + }, + "existingDnsZones": { + "type": "object", + "defaultValue": { + "privatelink.services.ai.azure.com": "", + "privatelink.openai.azure.com": "", 
+ "privatelink.cognitiveservices.azure.com": "", + "[format('privatelink.blob.{0}', environment().suffixes.storage)]": "" + }, + "metadata": { + "description": "Map of DNS zone FQDNs to resource group names. If provided, reference existing DNS zones in this resource group instead of creating them." + } + }, + "dnsZonesSubscriptionId": { + "type": "string", + "metadata": { + "description": "Subscription ID where existing private DNS zones are located. Should be resolved to current subscription if empty." + } + } + }, + "variables": { + "aiServicesDnsZoneName": "privatelink.services.ai.azure.com", + "openAiDnsZoneName": "privatelink.openai.azure.com", + "cognitiveServicesDnsZoneName": "privatelink.cognitiveservices.azure.com", + "storageDnsZoneName": "[format('privatelink.blob.{0}', environment().suffixes.storage)]", + "aiServicesDnsZoneRG": "[parameters('existingDnsZones')[variables('aiServicesDnsZoneName')]]", + "openAiDnsZoneRG": "[parameters('existingDnsZones')[variables('openAiDnsZoneName')]]", + "cognitiveServicesDnsZoneRG": "[parameters('existingDnsZones')[variables('cognitiveServicesDnsZoneName')]]", + "storageDnsZoneRG": "[parameters('existingDnsZones')[variables('storageDnsZoneName')]]", + "aiServicesDnsZoneId": "[if(empty(variables('aiServicesDnsZoneRG')), resourceId('Microsoft.Network/privateDnsZones', variables('aiServicesDnsZoneName')), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('dnsZonesSubscriptionId'), variables('aiServicesDnsZoneRG')), 'Microsoft.Network/privateDnsZones', variables('aiServicesDnsZoneName')))]", + "openAiDnsZoneId": "[if(empty(variables('openAiDnsZoneRG')), resourceId('Microsoft.Network/privateDnsZones', variables('openAiDnsZoneName')), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('dnsZonesSubscriptionId'), variables('openAiDnsZoneRG')), 'Microsoft.Network/privateDnsZones', variables('openAiDnsZoneName')))]", + "cognitiveServicesDnsZoneId": 
"[if(empty(variables('cognitiveServicesDnsZoneRG')), resourceId('Microsoft.Network/privateDnsZones', variables('cognitiveServicesDnsZoneName')), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('dnsZonesSubscriptionId'), variables('cognitiveServicesDnsZoneRG')), 'Microsoft.Network/privateDnsZones', variables('cognitiveServicesDnsZoneName')))]", + "storageDnsZoneId": "[if(empty(variables('storageDnsZoneRG')), resourceId('Microsoft.Network/privateDnsZones', variables('storageDnsZoneName')), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('dnsZonesSubscriptionId'), variables('storageDnsZoneRG')), 'Microsoft.Network/privateDnsZones', variables('storageDnsZoneName')))]" + }, + "resources": [ + { + "type": "Microsoft.Network/privateEndpoints", + "apiVersion": "2024-05-01", + "name": "[format('{0}-private-endpoint', parameters('aiAccountName'))]", + "location": "[resourceGroup().location]", + "properties": { + "subnet": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks/subnets', parameters('vnetName'), parameters('peSubnetName'))]" + }, + "privateLinkServiceConnections": [ + { + "name": "[format('{0}-private-link-service-connection', parameters('aiAccountName'))]", + "properties": { + "privateLinkServiceId": "[resourceId('Microsoft.CognitiveServices/accounts', parameters('aiAccountName'))]", + "groupIds": [ + "account" + ] + } + } + ] + } + }, + { + "type": "Microsoft.Network/privateEndpoints", + "apiVersion": "2024-05-01", + "name": "[format('{0}-private-endpoint', parameters('storageName'))]", + "location": "[resourceGroup().location]", + "properties": { + "subnet": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks/subnets', 
parameters('vnetName'), parameters('peSubnetName'))]" + }, + "privateLinkServiceConnections": [ + { + "name": "[format('{0}-private-link-service-connection', parameters('storageName'))]", + "properties": { + "privateLinkServiceId": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('storageAccountSubscriptionId'), parameters('storageAccountResourceGroupName')), 'Microsoft.Storage/storageAccounts', parameters('storageName'))]", + "groupIds": [ + "blob" + ] + } + } + ] + } + }, + { + "condition": "[empty(variables('aiServicesDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones", + "apiVersion": "2020-06-01", + "name": "[variables('aiServicesDnsZoneName')]", + "location": "global" + }, + { + "condition": "[empty(variables('openAiDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones", + "apiVersion": "2020-06-01", + "name": "[variables('openAiDnsZoneName')]", + "location": "global" + }, + { + "condition": "[empty(variables('cognitiveServicesDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones", + "apiVersion": "2020-06-01", + "name": "[variables('cognitiveServicesDnsZoneName')]", + "location": "global" + }, + { + "condition": "[empty(variables('storageDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones", + "apiVersion": "2020-06-01", + "name": "[variables('storageDnsZoneName')]", + "location": "global" + }, + { + "condition": "[empty(variables('aiServicesDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones/virtualNetworkLinks", + "apiVersion": "2024-06-01", + "name": "[format('{0}/{1}', variables('aiServicesDnsZoneName'), format('aiServices-{0}-link', parameters('suffix')))]", + "location": "global", + "properties": { + "virtualNetwork": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "registrationEnabled": false + }, + 
"dependsOn": [ + "[resourceId('Microsoft.Network/privateDnsZones', variables('aiServicesDnsZoneName'))]" + ] + }, + { + "condition": "[empty(variables('openAiDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones/virtualNetworkLinks", + "apiVersion": "2024-06-01", + "name": "[format('{0}/{1}', variables('openAiDnsZoneName'), format('aiServicesOpenAI-{0}-link', parameters('suffix')))]", + "location": "global", + "properties": { + "virtualNetwork": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "registrationEnabled": false + }, + "dependsOn": [ + "[resourceId('Microsoft.Network/privateDnsZones', variables('openAiDnsZoneName'))]" + ] + }, + { + "condition": "[empty(variables('cognitiveServicesDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones/virtualNetworkLinks", + "apiVersion": "2024-06-01", + "name": "[format('{0}/{1}', variables('cognitiveServicesDnsZoneName'), format('aiServicesCognitiveServices-{0}-link', parameters('suffix')))]", + "location": "global", + "properties": { + "virtualNetwork": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "registrationEnabled": false + }, + "dependsOn": [ + "[resourceId('Microsoft.Network/privateDnsZones', variables('cognitiveServicesDnsZoneName'))]" + ] + }, + { + "condition": "[empty(variables('storageDnsZoneRG'))]", + "type": "Microsoft.Network/privateDnsZones/virtualNetworkLinks", + "apiVersion": "2024-06-01", + "name": "[format('{0}/{1}', variables('storageDnsZoneName'), format('storage-{0}-link', parameters('suffix')))]", + "location": "global", + "properties": { + "virtualNetwork": { + "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', 
parameters('vnetSubscriptionId'), parameters('vnetResourceGroupName')), 'Microsoft.Network/virtualNetworks', parameters('vnetName'))]" + }, + "registrationEnabled": false + }, + "dependsOn": [ + "[resourceId('Microsoft.Network/privateDnsZones', variables('storageDnsZoneName'))]" + ] + }, + { + "type": "Microsoft.Network/privateEndpoints/privateDnsZoneGroups", + "apiVersion": "2024-05-01", + "name": "[format('{0}/{1}', format('{0}-private-endpoint', parameters('aiAccountName')), format('{0}-dns-group', parameters('aiAccountName')))]", + "properties": { + "privateDnsZoneConfigs": [ + { + "name": "[format('{0}-dns-aiserv-config', parameters('aiAccountName'))]", + "properties": { + "privateDnsZoneId": "[variables('aiServicesDnsZoneId')]" + } + }, + { + "name": "[format('{0}-dns-openai-config', parameters('aiAccountName'))]", + "properties": { + "privateDnsZoneId": "[variables('openAiDnsZoneId')]" + } + }, + { + "name": "[format('{0}-dns-cogserv-config', parameters('aiAccountName'))]", + "properties": { + "privateDnsZoneId": "[variables('cognitiveServicesDnsZoneId')]" + } + } + ] + }, + "dependsOn": [ + "[resourceId('Microsoft.Network/privateEndpoints', format('{0}-private-endpoint', parameters('aiAccountName')))]", + "[resourceId('Microsoft.Network/privateDnsZones/virtualNetworkLinks', variables('aiServicesDnsZoneName'), format('aiServices-{0}-link', parameters('suffix')))]", + "[resourceId('Microsoft.Network/privateDnsZones', variables('aiServicesDnsZoneName'))]", + "[resourceId('Microsoft.Network/privateDnsZones/virtualNetworkLinks', variables('cognitiveServicesDnsZoneName'), format('aiServicesCognitiveServices-{0}-link', parameters('suffix')))]", + "[resourceId('Microsoft.Network/privateDnsZones', variables('cognitiveServicesDnsZoneName'))]", + "[resourceId('Microsoft.Network/privateDnsZones/virtualNetworkLinks', variables('openAiDnsZoneName'), format('aiServicesOpenAI-{0}-link', parameters('suffix')))]", + "[resourceId('Microsoft.Network/privateDnsZones', 
variables('openAiDnsZoneName'))]" + ] + }, + { + "type": "Microsoft.Network/privateEndpoints/privateDnsZoneGroups", + "apiVersion": "2024-05-01", + "name": "[format('{0}/{1}', format('{0}-private-endpoint', parameters('storageName')), format('{0}-dns-group', parameters('storageName')))]", + "properties": { + "privateDnsZoneConfigs": [ + { + "name": "[format('{0}-dns-config', parameters('storageName'))]", + "properties": { + "privateDnsZoneId": "[variables('storageDnsZoneId')]" + } + } + ] + }, + "dependsOn": [ + "[resourceId('Microsoft.Network/privateDnsZones/virtualNetworkLinks', variables('storageDnsZoneName'), format('storage-{0}-link', parameters('suffix')))]", + "[resourceId('Microsoft.Network/privateDnsZones', variables('storageDnsZoneName'))]", + "[resourceId('Microsoft.Network/privateEndpoints', format('{0}-private-endpoint', parameters('storageName')))]" + ] + } + ] + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('vnet-{0}-{1}-deployment', variables('trimVnetName'), variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "projectName": { + "value": "[variables('projectName')]" + }, + "projectDescription": { + "value": "[parameters('projectDescription')]" + }, + "displayName": { + "value": "[parameters('displayName')]" + }, + "location": { + "value": "[parameters('location')]" + }, + "azureStorageName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', 
format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageName.value]" + }, + "azureStorageSubscriptionId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageSubscriptionId.value]" + }, + "azureStorageResourceGroupName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageResourceGroupName.value]" + }, + "accountName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix'))), '2025-04-01').outputs.accountName.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "15731446090362490111" + } + }, + "parameters": { + "accountName": { + "type": "string" + }, + "location": { + "type": "string" + }, + "projectName": { + "type": "string" + }, + "projectDescription": { + "type": "string" + }, + "displayName": { + "type": "string" + }, + "azureStorageName": { + "type": "string" + }, + "azureStorageSubscriptionId": { + "type": "string" + }, + "azureStorageResourceGroupName": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.CognitiveServices/accounts/projects/connections", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}/{2}', parameters('accountName'), parameters('projectName'), parameters('azureStorageName'))]", + "properties": { + "category": "AzureStorageAccount", + "target": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('azureStorageSubscriptionId'), parameters('azureStorageResourceGroupName')), 
'Microsoft.Storage/storageAccounts', parameters('azureStorageName')), '2023-05-01').primaryEndpoints.blob]", + "authType": "AAD", + "metadata": { + "ApiType": "Azure", + "ResourceId": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('azureStorageSubscriptionId'), parameters('azureStorageResourceGroupName')), 'Microsoft.Storage/storageAccounts', parameters('azureStorageName'))]", + "location": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', parameters('azureStorageSubscriptionId'), parameters('azureStorageResourceGroupName')), 'Microsoft.Storage/storageAccounts', parameters('azureStorageName')), '2023-05-01', 'full').location]" + } + }, + "dependsOn": [ + "[resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('accountName'), parameters('projectName'))]" + ] + }, + { + "type": "Microsoft.CognitiveServices/accounts/projects", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}', parameters('accountName'), parameters('projectName'))]", + "location": "[parameters('location')]", + "identity": { + "type": "SystemAssigned" + }, + "properties": { + "description": "[parameters('projectDescription')]", + "displayName": "[parameters('displayName')]" + } + } + ], + "outputs": { + "projectName": { + "type": "string", + "value": "[parameters('projectName')]" + }, + "projectId": { + "type": "string", + "value": "[resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('accountName'), parameters('projectName'))]" + }, + "projectPrincipalId": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('accountName'), parameters('projectName')), '2025-04-01-preview', 'full').identity.principalId]" + }, + "projectWorkspaceId": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('accountName'), parameters('projectName')), '2025-04-01-preview').internalId]" + 
}, + "azureStorageConnection": { + "type": "string", + "value": "[parameters('azureStorageName')]" + } + } + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-private-endpoint', variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('format-project-workspace-id-{0}-deployment', variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "projectWorkspaceId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix'))), '2025-04-01').outputs.projectWorkspaceId.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "5729541472937730744" + } + }, + "parameters": { + "projectWorkspaceId": { + "type": "string" + } + }, + "variables": { + "part1": "[substring(parameters('projectWorkspaceId'), 0, 8)]", + "part2": "[substring(parameters('projectWorkspaceId'), 8, 4)]", + "part3": "[substring(parameters('projectWorkspaceId'), 12, 4)]", + "part4": "[substring(parameters('projectWorkspaceId'), 16, 4)]", + "part5": "[substring(parameters('projectWorkspaceId'), 20, 12)]", + "formattedGuid": "[format('{0}-{1}-{2}-{3}-{4}', variables('part1'), variables('part2'), variables('part3'), variables('part4'), variables('part5'))]" + }, + "resources": [], + "outputs": { + "projectWorkspaceIdGuid": { + "type": "string", + "value": 
"[variables('formattedGuid')]" + } + } + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('storage-{0}-{1}-deployment', variables('azureStorageName'), variables('uniqueSuffix'))]", + "subscriptionId": "[variables('azureStorageSubscriptionId')]", + "resourceGroup": "[variables('azureStorageResourceGroupName')]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "azureStorageName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageName.value]" + }, + "projectPrincipalId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix'))), '2025-04-01').outputs.projectPrincipalId.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "6805612394357697185" + } + }, + "parameters": { + "azureStorageName": { + "type": "string" + }, + "projectPrincipalId": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2022-04-01", + "scope": "[resourceId('Microsoft.Storage/storageAccounts', parameters('azureStorageName'))]", + "name": "[guid(parameters('projectPrincipalId'), resourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'), resourceId('Microsoft.Storage/storageAccounts', parameters('azureStorageName')))]", + "properties": { + "principalId": "[parameters('projectPrincipalId')]", + 
"roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "principalType": "ServicePrincipal" + } + } + ] + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-private-endpoint', variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('ai-account-ra-{0}-deployment', variables('uniqueSuffix'))]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "accountName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix'))), '2025-04-01').outputs.accountName.value]" + }, + "projectPrincipalId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix'))), '2025-04-01').outputs.projectPrincipalId.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "2328134986536572193" + } + }, + "parameters": { + "accountName": { + "type": "string", + "metadata": { + "description": "Name of the AI Services account" + } + }, + "projectPrincipalId": { + "type": "string", + "metadata": { + "description": "Principal ID of the AI project" + } + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2022-04-01", + "scope": 
"[resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName'))]", + "name": "[guid(parameters('projectPrincipalId'), resourceId('Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d'), resourceId('Microsoft.CognitiveServices/accounts', parameters('accountName')))]", + "properties": { + "principalId": "[parameters('projectPrincipalId')]", + "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d')]", + "principalType": "ServicePrincipal" + } + } + ] + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('accountName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-private-endpoint', variables('uniqueSuffix')))]" + ] + }, + { + "type": "Microsoft.Resources/deployments", + "apiVersion": "2025-04-01", + "name": "[format('storage-containers-ra-{0}-deployment', variables('uniqueSuffix'))]", + "subscriptionId": "[variables('azureStorageSubscriptionId')]", + "resourceGroup": "[variables('azureStorageResourceGroupName')]", + "properties": { + "expressionEvaluationOptions": { + "scope": "inner" + }, + "mode": "Incremental", + "parameters": { + "aiProjectPrincipalId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix'))), '2025-04-01').outputs.projectPrincipalId.value]" + }, + "storageName": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix'))), '2025-04-01').outputs.azureStorageName.value]" + }, + "workspaceId": { + "value": "[reference(resourceId('Microsoft.Resources/deployments', format('format-project-workspace-id-{0}-deployment', 
variables('uniqueSuffix'))), '2025-04-01').outputs.projectWorkspaceIdGuid.value]" + } + }, + "template": { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.42.1.51946", + "templateHash": "14861833638743424490" + } + }, + "parameters": { + "storageName": { + "type": "string", + "metadata": { + "description": "Name of the storage account" + } + }, + "aiProjectPrincipalId": { + "type": "string", + "metadata": { + "description": "Principal ID of the AI Project" + } + }, + "workspaceId": { + "type": "string", + "metadata": { + "description": "Workspace Id of the AI Project" + } + } + }, + "variables": { + "conditionStr": "[format('((!(ActionMatches{{''Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read''}}) AND !(ActionMatches{{''Microsoft.Storage/storageAccounts/blobServices/containers/blobs/filter/action''}}) AND !(ActionMatches{{''Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write''}}) ) OR (@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:name] StringStartsWithIgnoreCase ''{0}'' AND @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:name] StringLikeIgnoreCase ''*-azureml-agent''))', parameters('workspaceId'))]" + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2022-04-01", + "scope": "[resourceId('Microsoft.Storage/storageAccounts', parameters('storageName'))]", + "name": "[guid(resourceId('Microsoft.Authorization/roleDefinitions', 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b'), resourceId('Microsoft.Storage/storageAccounts', parameters('storageName')))]", + "properties": { + "principalId": "[parameters('aiProjectPrincipalId')]", + "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "principalType": "ServicePrincipal", + 
"conditionVersion": "2.0", + "condition": "[variables('conditionStr')]" + } + } + ] + } + }, + "dependsOn": [ + "[resourceId('Microsoft.Resources/deployments', format('dependencies-{0}-deployment', variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('{0}-{1}-deployment', variables('projectName'), variables('uniqueSuffix')))]", + "[resourceId('Microsoft.Resources/deployments', format('format-project-workspace-id-{0}-deployment', variables('uniqueSuffix')))]", + "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', variables('azureStorageSubscriptionId'), variables('azureStorageResourceGroupName')), 'Microsoft.Resources/deployments', format('storage-{0}-{1}-deployment', variables('azureStorageName'), variables('uniqueSuffix')))]" + ] + } + ] +} \ No newline at end of file diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicep new file mode 100644 index 000000000..7a4816fed --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicep @@ -0,0 +1,297 @@ +/* +Evaluation-Only Network Secured Setup - main.bicep +----------------------------------- +A simplified private-network setup for evaluation scenarios. +This template does NOT deploy Cosmos DB, AI Search, or project capability host. +It deploys only the AI Services account, a project with a storage connection, +a VNet with private endpoints, and the required role assignments. 
+*/ +@description('Location for all resources.') +@allowed([ + 'westus' + 'eastus' + 'eastus2' + 'japaneast' + 'francecentral' + 'spaincentral' + 'uaenorth' + 'southcentralus' + 'italynorth' + 'germanywestcentral' + 'brazilsouth' + 'southafricanorth' + 'australiaeast' + 'swedencentral' + 'canadaeast' + 'westeurope' + 'westus3' + 'uksouth' + 'southindia' + + //only class B and C + 'koreacentral' + 'polandcentral' + 'switzerlandnorth' + 'norwayeast' +]) +param location string = 'eastus' + +@description('Name for your AI Services resource.') +param aiServices string = 'aiservices' + +// Model deployment parameters +@description('The name of the model you want to deploy') +param modelName string = 'gpt-4.1' +@description('The provider of your model') +param modelFormat string = 'OpenAI' +@description('The version of your model') +param modelVersion string = '2025-04-14' +@description('The sku of your model deployment') +param modelSkuName string = 'GlobalStandard' +@description('The tokens per minute (TPM) of your model deployment') +param modelCapacity int = 30 + +// Create a short, unique suffix, that will be unique to each resource group +param deploymentTimestamp string = utcNow('yyyyMMddHHmmss') +var uniqueSuffix = substring(uniqueString('${resourceGroup().id}-${deploymentTimestamp}'), 0, 4) +var accountName = toLower('${aiServices}${uniqueSuffix}') + +@description('Name for your project resource.') +param firstProjectName string = 'project' + +@description('This project will be a sub-resource of your account') +param projectDescription string = 'A project for the AI Foundry account with network secured evaluation setup' + +@description('The display name of the project') +param displayName string = 'network secured evaluation project' + +// Existing Virtual Network parameters +@description('Virtual Network name for the Agent to create new or existing virtual network') +param vnetName string = 'agent-vnet-test' + +@description('The name of Agents Subnet to create 
new or existing subnet for agents') +param agentSubnetName string = 'agent-subnet' + +@description('The name of Private Endpoint subnet to create new or existing subnet for private endpoints') +param peSubnetName string = 'pe-subnet' + +//Existing standard Agent required resources +@description('Existing Virtual Network name Resource ID') +param existingVnetResourceId string = '' + +@description('Address space for the VNet (only used for new VNet)') +param vnetAddressPrefix string = '' + +@description('Address prefix for the agent subnet. The default value is 192.168.0.0/24 but you can choose any size /26 or any class like 10.0.0.0 or 172.168.0.0') +param agentSubnetPrefix string = '' + +@description('Address prefix for the private endpoint subnet') +param peSubnetPrefix string = '' + +@description('The AI Storage Account full ARM Resource ID. This is an optional field, and if not provided, the resource will be created.') +param azureStorageAccountResourceId string = '' + +@description('Subscription ID where existing private DNS zones are located. 
Leave empty to use current subscription.') +param dnsZonesSubscriptionId string = '' + +@description('Object mapping DNS zone names to their resource group, or empty string to indicate creation') +param existingDnsZones object = { + 'privatelink.services.ai.azure.com': '' + 'privatelink.openai.azure.com': '' + 'privatelink.cognitiveservices.azure.com': '' + 'privatelink.blob.core.windows.net': '' +} + +@description('Zone Names for Validation of existing Private Dns Zones') +param dnsZoneNames array = [ + 'privatelink.services.ai.azure.com' + 'privatelink.openai.azure.com' + 'privatelink.cognitiveservices.azure.com' + 'privatelink.blob.core.windows.net' +] + + +var projectName = toLower('${firstProjectName}${uniqueSuffix}') +var azureStorageName = toLower('${aiServices}${uniqueSuffix}storage') + +// Check if existing resources have been passed in +var storagePassedIn = azureStorageAccountResourceId != '' +var existingVnetPassedIn = existingVnetResourceId != '' + +var storageParts = split(azureStorageAccountResourceId, '/') +var azureStorageSubscriptionId = storagePassedIn ? storageParts[2] : subscription().subscriptionId +var azureStorageResourceGroupName = storagePassedIn ? storageParts[4] : resourceGroup().name + +var vnetParts = split(existingVnetResourceId, '/') +var vnetSubscriptionId = existingVnetPassedIn ? vnetParts[2] : subscription().subscriptionId +var vnetResourceGroupName = existingVnetPassedIn ? vnetParts[4] : resourceGroup().name +var existingVnetName = existingVnetPassedIn ? last(vnetParts) : vnetName +var trimVnetName = trim(existingVnetName) + +// Resolve DNS zones subscription ID - use current subscription if not specified +var resolvedDnsZonesSubscriptionId = empty(dnsZonesSubscriptionId) ? 
subscription().subscriptionId : dnsZonesSubscriptionId + +// Create Virtual Network and Subnets +module vnet 'modules-network-secured/network-agent-vnet.bicep' = { + name: 'vnet-${trimVnetName}-${uniqueSuffix}-deployment' + params: { + location: location + vnetName: trimVnetName + useExistingVnet: existingVnetPassedIn + existingVnetResourceGroupName: vnetResourceGroupName + agentSubnetName: agentSubnetName + peSubnetName: peSubnetName + vnetAddressPrefix: vnetAddressPrefix + agentSubnetPrefix: agentSubnetPrefix + peSubnetPrefix: peSubnetPrefix + existingVnetSubscriptionId: vnetSubscriptionId + } +} + +/* + Create the AI Services account and model deployment +*/ +module aiAccount 'modules-network-secured/ai-account-identity.bicep' = { + name: '${accountName}-${uniqueSuffix}-deployment' + params: { + accountName: accountName + location: location + modelName: modelName + modelFormat: modelFormat + modelVersion: modelVersion + modelSkuName: modelSkuName + modelCapacity: modelCapacity + agentSubnetId: vnet.outputs.agentSubnetId + } +} + +/* + Validate existing resources + This module will check if the Storage Account already exists. 
+*/ +module validateExistingResources 'modules-network-secured/validate-existing-resources.bicep' = { + name: 'validate-existing-resources-${uniqueSuffix}-deployment' + params: { + azureStorageAccountResourceId: azureStorageAccountResourceId + existingDnsZones: existingDnsZones + dnsZoneNames: dnsZoneNames + dnsZonesSubscriptionId: resolvedDnsZonesSubscriptionId + } +} + +// This module will create a new Storage Account if one does not already exist +module aiDependencies 'modules-network-secured/standard-dependent-resources.bicep' = { + name: 'dependencies-${uniqueSuffix}-deployment' + params: { + location: location + azureStorageName: azureStorageName + + // Storage Account + azureStorageAccountResourceId: azureStorageAccountResourceId + azureStorageExists: validateExistingResources.outputs.azureStorageExists + } +} + +resource storage 'Microsoft.Storage/storageAccounts@2022-05-01' existing = { + name: aiDependencies.outputs.azureStorageName + scope: resourceGroup(azureStorageSubscriptionId, azureStorageResourceGroupName) +} + +// Private Endpoint and DNS Configuration +// This module sets up private network access for AI Services and Storage: +// 1. Creates private endpoints in the specified subnet +// 2. Sets up private DNS zones for each service +// 3. 
Links private DNS zones to the VNet for name resolution +module privateEndpointAndDNS 'modules-network-secured/private-endpoint-and-dns.bicep' = { + name: '${uniqueSuffix}-private-endpoint' + params: { + aiAccountName: aiAccount.outputs.accountName + storageName: aiDependencies.outputs.azureStorageName + vnetName: vnet.outputs.virtualNetworkName + peSubnetName: vnet.outputs.peSubnetName + suffix: uniqueSuffix + vnetResourceGroupName: vnet.outputs.virtualNetworkResourceGroup + vnetSubscriptionId: vnet.outputs.virtualNetworkSubscriptionId + storageAccountResourceGroupName: azureStorageResourceGroupName + storageAccountSubscriptionId: azureStorageSubscriptionId + existingDnsZones: existingDnsZones + dnsZonesSubscriptionId: resolvedDnsZonesSubscriptionId + } + dependsOn: [ + storage + ] +} + +/* + Creates a new project (sub-resource of the AI Services account) +*/ +module aiProject 'modules-network-secured/ai-project-identity.bicep' = { + name: '${projectName}-${uniqueSuffix}-deployment' + params: { + projectName: projectName + projectDescription: projectDescription + displayName: displayName + location: location + + azureStorageName: aiDependencies.outputs.azureStorageName + azureStorageSubscriptionId: aiDependencies.outputs.azureStorageSubscriptionId + azureStorageResourceGroupName: aiDependencies.outputs.azureStorageResourceGroupName + + accountName: aiAccount.outputs.accountName + } + dependsOn: [ + privateEndpointAndDNS + storage + ] +} + +module formatProjectWorkspaceId 'modules-network-secured/format-project-workspace-id.bicep' = { + name: 'format-project-workspace-id-${uniqueSuffix}-deployment' + params: { + projectWorkspaceId: aiProject.outputs.projectWorkspaceId + } +} + +/* + Assigns the project SMI the storage blob data contributor role on the storage account +*/ +module storageAccountRoleAssignment 'modules-network-secured/azure-storage-account-role-assignment.bicep' = { + name: 'storage-${azureStorageName}-${uniqueSuffix}-deployment' + scope: 
resourceGroup(azureStorageSubscriptionId, azureStorageResourceGroupName) + params: { + azureStorageName: aiDependencies.outputs.azureStorageName + projectPrincipalId: aiProject.outputs.projectPrincipalId + } + dependsOn: [ + storage + privateEndpointAndDNS + ] +} + +/* + Assigns the project SMI the Azure AI User role on the AI Services account +*/ +module aiAccountRoleAssignment 'modules-network-secured/ai-account-role-assignment.bicep' = { + name: 'ai-account-ra-${uniqueSuffix}-deployment' + params: { + accountName: aiAccount.outputs.accountName + projectPrincipalId: aiProject.outputs.projectPrincipalId + } + dependsOn: [ + privateEndpointAndDNS + ] +} + +// The Storage Blob Data Owner role assignment +module storageContainersRoleAssignment 'modules-network-secured/blob-storage-container-role-assignments.bicep' = { + name: 'storage-containers-ra-${uniqueSuffix}-deployment' + scope: resourceGroup(azureStorageSubscriptionId, azureStorageResourceGroupName) + params: { + aiProjectPrincipalId: aiProject.outputs.projectPrincipalId + storageName: aiDependencies.outputs.azureStorageName + workspaceId: formatProjectWorkspaceId.outputs.projectWorkspaceIdGuid + } + dependsOn: [ + storageAccountRoleAssignment + ] +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicepparam b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicepparam new file mode 100644 index 000000000..91e6a3962 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/main.bicepparam @@ -0,0 +1,66 @@ +using './main.bicep' + +param location = 'westus' +param aiServices = 'foundry' +param modelName = 'gpt-4.1' +param modelFormat = 'OpenAI' +param modelVersion = '2025-04-14' +param modelSkuName = 'GlobalStandard' +param modelCapacity = 30 +param firstProjectName = 'project' +param projectDescription = 'A project for the AI Foundry account with network secured evaluation 
setup' +param displayName = 'project' +param peSubnetName = 'pe-subnet' + +// Resource IDs for existing resources +// If you provide these, the deployment will use the existing resources instead of creating new ones +param existingVnetResourceId = '' +param vnetName = 'agent-vnet-test' +param agentSubnetName = 'agent-subnet' +param azureStorageAccountResourceId = '' + +// Subscription ID where DNS zones are located (leave empty to use deployment subscription) +// ⚠️ If set to a different subscription, ALL zones below MUST have resource groups specified +param dnsZonesSubscriptionId = '' + +// DNS zone map: provide resource group name to use existing zone, or leave empty to create new +// Note: Empty values only allowed when dnsZonesSubscriptionId is empty or matches current subscription +param existingDnsZones = { + 'privatelink.services.ai.azure.com': '' + 'privatelink.openai.azure.com': '' + 'privatelink.cognitiveservices.azure.com': '' + 'privatelink.blob.core.windows.net': '' +} + +//DNSZones names for validating if they exist +param dnsZoneNames = [ + 'privatelink.services.ai.azure.com' + 'privatelink.openai.azure.com' + 'privatelink.cognitiveservices.azure.com' + 'privatelink.blob.core.windows.net' +] + + +// Network configuration (behavior depends on `existingVnetResourceId`) +// +// - NEW VNet (existingVnetResourceId is empty): +// The values below are used to CREATE the VNet and the two subnets. +// Provide explicit, non-overlapping CIDR ranges when creating a new VNet. +// +// - EXISTING VNet (existingVnetResourceId is provided): +// The module will reference the existing VNet. Subnet handling depends on the +// values you provide: +// * If `agentSubnetPrefix` or `peSubnetPrefix` are empty, the module may +// auto-derive subnet CIDRs from the existing VNet's address space +// (using cidrSubnet). This can produce /24 (or configured) subnets +// starting at index 0, 1, etc. 
+// * If you provide explicit subnet prefixes, the module will attempt to +// create or update subnets with those prefixes in the existing VNet. +// +// Important operational notes and risks (when existingVnetResourceId is provided): +// - Avoid CIDR overlaps with any existing subnets in the target VNet. Overlap +// leads to `NetcfgSubnetRangesOverlap` and failed deployments. +// - For highest safety when using an existing VNet, supply the existing `agentSubnetPrefix` and `peSubnetPrefix`. +param vnetAddressPrefix = '' +param agentSubnetPrefix = '' +param peSubnetPrefix = '' diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/metadata.json b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/metadata.json new file mode 100644 index 000000000..e8af1d95a --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/metadata.json @@ -0,0 +1,12 @@ +{ + "$schema": "https://aka.ms/azure-quickstart-templates-metadata-schema#", + "type": "QuickStart", + "itemDisplayName": "Network Secured Evaluation-Only Setup", + "description": "This set of templates demonstrates how to set up a network-secured Azure AI Foundry environment for evaluation scenarios without Cosmos DB, AI Search, or capability host.", + "summary": "This set of templates demonstrates how to use Azure AI Foundry with your own virtual network for evaluation purposes.", + "githubUsername": "fosteramanda", + "dateUpdated": "2025-06-24", + "environments": [ + "AzureCloud" + ] +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-identity.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-identity.bicep new file mode 100644 index 000000000..47f72a3be --- /dev/null +++ 
b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-identity.bicep @@ -0,0 +1,63 @@ +param accountName string +param location string +param modelName string +param modelFormat string +param modelVersion string +param modelSkuName string +param modelCapacity int +param agentSubnetId string +param networkInjection string = 'true' + +#disable-next-line BCP036 +resource account 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = { + name: accountName + location: location + sku: { + name: 'S0' + } + kind: 'AIServices' + identity: { + type: 'SystemAssigned' + } + properties: { + allowProjectManagement: true + customSubDomainName: accountName + networkAcls: { + defaultAction: 'Deny' + virtualNetworkRules: [] + ipRules: [] + bypass:'AzureServices' + } + publicNetworkAccess: 'Disabled' + networkInjections:((networkInjection == 'true') ? [ + { + scenario: 'agent' + subnetArmId: agentSubnetId + useMicrosoftManagedNetwork: false + } + ] : null ) + disableLocalAuth: false + } +} + +#disable-next-line BCP081 +resource modelDeployment 'Microsoft.CognitiveServices/accounts/deployments@2025-04-01-preview'= { + parent: account + name: modelName + sku : { + capacity: modelCapacity + name: modelSkuName + } + properties: { + model:{ + name: modelName + format: modelFormat + version: modelVersion + } + } +} + +output accountName string = account.name +output accountID string = account.id +output accountTarget string = account.properties.endpoint +output accountPrincipalId string = account.identity.principalId diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-role-assignment.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-role-assignment.bicep new file mode 100644 index 000000000..7f9c999a2 --- /dev/null +++ 
b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-account-role-assignment.bicep @@ -0,0 +1,28 @@ +// Assigns the Azure AI User role to the project managed identity on the AI Services account + +@description('Name of the AI Services account') +param accountName string + +@description('Principal ID of the AI project') +param projectPrincipalId string + +resource account 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = { + name: accountName + scope: resourceGroup() +} + +// Azure AI User: 53ca6127-db72-4b80-b1b0-d745d6d5456d +resource azureAIUserRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + name: '53ca6127-db72-4b80-b1b0-d745d6d5456d' + scope: resourceGroup() +} + +resource azureAIUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: account + name: guid(projectPrincipalId, azureAIUserRole.id, account.id) + properties: { + principalId: projectPrincipalId + roleDefinitionId: azureAIUserRole.id + principalType: 'ServicePrincipal' + } +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-project-identity.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-project-identity.bicep new file mode 100644 index 000000000..6a765caf8 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/ai-project-identity.bicep @@ -0,0 +1,56 @@ +param accountName string +param location string +param projectName string +param projectDescription string +param displayName string + +param azureStorageName string +param azureStorageSubscriptionId string +param azureStorageResourceGroupName string + +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = { + name: azureStorageName + scope: 
resourceGroup(azureStorageSubscriptionId, azureStorageResourceGroupName) +} + +resource account 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = { + name: accountName + scope: resourceGroup() +} + +resource project 'Microsoft.CognitiveServices/accounts/projects@2025-04-01-preview' = { + parent: account + name: projectName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + description: projectDescription + displayName: displayName + } + + resource project_connection_azure_storage 'connections@2025-04-01-preview' = { + name: azureStorageName + properties: { + category: 'AzureStorageAccount' + target: storageAccount.properties.primaryEndpoints.blob + authType: 'AAD' + metadata: { + ApiType: 'Azure' + ResourceId: storageAccount.id + location: storageAccount.location + } + } + } +} + +output projectName string = project.name +output projectId string = project.id +output projectPrincipalId string = project.identity.principalId + +#disable-next-line BCP053 +output projectWorkspaceId string = project.properties.internalId + +// return the BYO connection names +output azureStorageConnection string = azureStorageName diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/azure-storage-account-role-assignment.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/azure-storage-account-role-assignment.bicep new file mode 100644 index 000000000..afc355a48 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/azure-storage-account-role-assignment.bicep @@ -0,0 +1,24 @@ +param azureStorageName string +param projectPrincipalId string + +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = { + name: azureStorageName + scope: resourceGroup() +} + +// Blob Storage Owner: b7e6dc6d-f1e8-4753-8033-0f276bb0955b 
+// Blob Storage Contributor: ba92f5b4-2d11-453d-a403-e96b0029c9fe +resource storageBlobDataContributor 'Microsoft.Authorization/roleDefinitions@2022-05-01-preview' existing = { + name: 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' + scope: resourceGroup() +} + +resource storageBlobDataContributorRoleAssignmentProject 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: storageAccount + name: guid(projectPrincipalId, storageBlobDataContributor.id, storageAccount.id) + properties: { + principalId: projectPrincipalId + roleDefinitionId: storageBlobDataContributor.id + principalType: 'ServicePrincipal' + } +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/blob-storage-container-role-assignments.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/blob-storage-container-role-assignments.bicep new file mode 100644 index 000000000..71abc97d6 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/blob-storage-container-role-assignments.bicep @@ -0,0 +1,36 @@ +@description('Name of the storage account') +param storageName string + +@description('Principal ID of the AI Project') +param aiProjectPrincipalId string + +@description('Workspace Id of the AI Project') +param workspaceId string + + +// Reference existing storage account +resource storage 'Microsoft.Storage/storageAccounts@2022-05-01' existing = { + name: storageName + scope: resourceGroup() +} + +// Storage Blob Data Owner Role +resource storageBlobDataOwner 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + name: 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' // Built-in role ID + scope: resourceGroup() +} + +var conditionStr= '((!(ActionMatches{\'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read\'}) AND 
!(ActionMatches{\'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/filter/action\'}) AND !(ActionMatches{\'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write\'}) ) OR (@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:name] StringStartsWithIgnoreCase \'${workspaceId}\' AND @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:name] StringLikeIgnoreCase \'*-azureml-agent\'))' + +// Assign Storage Blob Data Owner role +resource storageBlobDataOwnerAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: storage + name: guid(storageBlobDataOwner.id, storage.id) + properties: { + principalId: aiProjectPrincipalId + roleDefinitionId: storageBlobDataOwner.id + principalType: 'ServicePrincipal' + conditionVersion: '2.0' + condition: conditionStr + } +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/existing-vnet.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/existing-vnet.bicep new file mode 100644 index 000000000..b371d61e5 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/existing-vnet.bicep @@ -0,0 +1,89 @@ +/* +Virtual Network Module +This module works with existing virtual networks and required subnets. + +1. Flexibility: + - Works with any existing VNet address space + - Can use existing subnets or create new ones + - Cross-resource group support + +2. 
Security Features: + - Network isolation + - Subnet delegation for containerized workloads + - Private endpoint subnet for secure connectivity +*/ + + +@description('The name of the existing virtual network') +param vnetName string + +@description('Subscription ID of virtual network (if different from current subscription)') +param vnetSubscriptionId string = subscription().subscriptionId + +@description('Resource Group name of the existing VNet (if different from current resource group)') +param vnetResourceGroupName string = resourceGroup().name + +@description('The name of Agents Subnet') +param agentSubnetName string = 'agent-subnet' + +@description('The name of Private Endpoint subnet') +param peSubnetName string = 'pe-subnet' + +@description('Address prefix for the agent subnet (only needed if creating new subnet)') +param agentSubnetPrefix string = '' + +@description('Address prefix for the private endpoint subnet (only needed if creating new subnet)') +param peSubnetPrefix string = '' + +// Get the address space (array of CIDR strings) +var vnetAddressSpace = existingVNet.properties.addressSpace.addressPrefixes[0] + +var agentSubnetSpaces = empty(agentSubnetPrefix) ? cidrSubnet(vnetAddressSpace, 24, 0) : agentSubnetPrefix +var peSubnetSpaces = empty(peSubnetPrefix) ? 
cidrSubnet(vnetAddressSpace, 24, 1) : peSubnetPrefix + +// Reference the existing virtual network +resource existingVNet 'Microsoft.Network/virtualNetworks@2024-05-01' existing = { + name: vnetName + scope: resourceGroup(vnetResourceGroupName) +} + +// Create the agent subnet if requested +module agentSubnet 'subnet.bicep' = { + name: 'agent-subnet-${uniqueString(deployment().name, agentSubnetName)}' + scope: resourceGroup(vnetResourceGroupName) + params: { + vnetName: vnetName + subnetName: agentSubnetName + addressPrefix: agentSubnetSpaces + delegations: [ + { + name: 'Microsoft.App/environments' + properties: { + serviceName: 'Microsoft.App/environments' + } + } + ] + } +} + +// Create the private endpoint subnet if requested +module peSubnet 'subnet.bicep' = { + name: 'pe-subnet-${uniqueString(deployment().name, peSubnetName)}' + scope: resourceGroup(vnetResourceGroupName) + params: { + vnetName: vnetName + subnetName: peSubnetName + addressPrefix: peSubnetSpaces + delegations: [] + } +} + +// Output variables +output peSubnetName string = peSubnetName +output agentSubnetName string = agentSubnetName +output agentSubnetId string = '${existingVNet.id}/subnets/${agentSubnetName}' +output peSubnetId string = '${existingVNet.id}/subnets/${peSubnetName}' +output virtualNetworkName string = existingVNet.name +output virtualNetworkId string = existingVNet.id +output virtualNetworkResourceGroup string = vnetResourceGroupName +output virtualNetworkSubscriptionId string = vnetSubscriptionId diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/format-project-workspace-id.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/format-project-workspace-id.bicep new file mode 100644 index 000000000..ac7d0c3f2 --- /dev/null +++ 
b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/format-project-workspace-id.bicep @@ -0,0 +1,12 @@ + +param projectWorkspaceId string + +var part1 = substring(projectWorkspaceId, 0, 8) // First 8 characters +var part2 = substring(projectWorkspaceId, 8, 4) // Next 4 characters +var part3 = substring(projectWorkspaceId, 12, 4) // Next 4 characters +var part4 = substring(projectWorkspaceId, 16, 4) // Next 4 characters +var part5 = substring(projectWorkspaceId, 20, 12) // Remaining 12 characters + +var formattedGuid = '${part1}-${part2}-${part3}-${part4}-${part5}' + +output projectWorkspaceIdGuid string = formattedGuid diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/network-agent-vnet.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/network-agent-vnet.bicep new file mode 100644 index 000000000..bad8a4f27 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/network-agent-vnet.bicep @@ -0,0 +1,67 @@ +@description('Azure region for the deployment') +param location string + +@description('The name of the virtual network') +param vnetName string + +@description('Indicates if an existing VNet should be used') +param useExistingVnet bool = false + +@description('Subscription ID of the existing VNet (if different from current subscription)') +param existingVnetSubscriptionId string = subscription().subscriptionId + +@description('Resource Group name of the existing VNet (if different from current resource group)') +param existingVnetResourceGroupName string = resourceGroup().name + +@description('The name of Agents Subnet') +param agentSubnetName string = 'agent-subnet' + +@description('The name of Private Endpoint subnet') +param peSubnetName string = 'pe-subnet' + +@description('Address space for the 
VNet (only used for new VNet)') +param vnetAddressPrefix string = '' + +@description('Address prefix for the agent subnet') +param agentSubnetPrefix string = '' + +@description('Address prefix for the private endpoint subnet') +param peSubnetPrefix string = '' + +// Create new VNet if needed +module newVNet 'vnet.bicep' = if (!useExistingVnet) { + name: 'vnet-deployment' + params: { + location: location + vnetName: vnetName + agentSubnetName: agentSubnetName + peSubnetName: peSubnetName + vnetAddressPrefix: vnetAddressPrefix + agentSubnetPrefix: agentSubnetPrefix + peSubnetPrefix: peSubnetPrefix + } +} + +// Use existing VNet if requested +module existingVNet 'existing-vnet.bicep' = if (useExistingVnet) { + name: 'existing-vnet-deployment' + params: { + vnetName: vnetName + vnetResourceGroupName: existingVnetResourceGroupName + vnetSubscriptionId: existingVnetSubscriptionId + agentSubnetName: agentSubnetName + peSubnetName: peSubnetName + agentSubnetPrefix: agentSubnetPrefix + peSubnetPrefix: peSubnetPrefix + } +} + +// Provide unified outputs regardless of which module was used +output virtualNetworkName string = useExistingVnet ? existingVNet.outputs.virtualNetworkName : newVNet.outputs.virtualNetworkName +output virtualNetworkId string = useExistingVnet ? existingVNet.outputs.virtualNetworkId : newVNet.outputs.virtualNetworkId +output virtualNetworkSubscriptionId string = useExistingVnet ? existingVNet.outputs.virtualNetworkSubscriptionId : newVNet.outputs.virtualNetworkSubscriptionId +output virtualNetworkResourceGroup string = useExistingVnet ? existingVNet.outputs.virtualNetworkResourceGroup : newVNet.outputs.virtualNetworkResourceGroup +output agentSubnetName string = agentSubnetName +output peSubnetName string = peSubnetName +output agentSubnetId string = useExistingVnet ? existingVNet.outputs.agentSubnetId : newVNet.outputs.agentSubnetId +output peSubnetId string = useExistingVnet ? 
existingVNet.outputs.peSubnetId : newVNet.outputs.peSubnetId diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/private-endpoint-and-dns.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/private-endpoint-and-dns.bicep new file mode 100644 index 000000000..53c45d77c --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/private-endpoint-and-dns.bicep @@ -0,0 +1,238 @@ +/* +Private Endpoint and DNS Configuration Module (Evaluation-Only) +------------------------------------------ +This module configures private network access for AI Services and Storage only. +No Cosmos DB or AI Search private endpoints are created. + +1. Private Endpoints: + - AI Foundry Account + - Azure Storage (blob) + +2. Private DNS Zones: + - privatelink.services.ai.azure.com + - privatelink.openai.azure.com + - privatelink.cognitiveservices.azure.com + - privatelink.blob.core.windows.net + +3. 
DNS Zone Links and DNS Zone Groups +*/ + +// Resource names and identifiers +@description('Name of the AI Foundry account') +param aiAccountName string +@description('Name of the storage account') +param storageName string +@description('Name of the Vnet') +param vnetName string +@description('Name of the Customer subnet') +param peSubnetName string +@description('Suffix for unique resource names') +param suffix string + +@description('Resource Group name for existing Virtual Network (if different from current resource group)') +param vnetResourceGroupName string = resourceGroup().name + +@description('Subscription ID for Virtual Network') +param vnetSubscriptionId string = subscription().subscriptionId + +@description('Resource Group name for Storage Account') +param storageAccountResourceGroupName string = resourceGroup().name + +@description('Subscription ID for Storage account') +param storageAccountSubscriptionId string = subscription().subscriptionId + +@description('Map of DNS zone FQDNs to resource group names. If provided, reference existing DNS zones in this resource group instead of creating them.') +param existingDnsZones object = { + 'privatelink.services.ai.azure.com': '' + 'privatelink.openai.azure.com': '' + 'privatelink.cognitiveservices.azure.com': '' + 'privatelink.blob.${environment().suffixes.storage}': '' +} + +@description('Subscription ID where existing private DNS zones are located. 
Should be resolved to current subscription if empty.') +param dnsZonesSubscriptionId string + +// ---- Resource references ---- +resource aiAccount 'Microsoft.CognitiveServices/accounts@2023-05-01' existing = { + name: aiAccountName + scope: resourceGroup() +} + +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = { + name: storageName + scope: resourceGroup(storageAccountSubscriptionId, storageAccountResourceGroupName) +} + +// Reference existing network resources +resource vnet 'Microsoft.Network/virtualNetworks@2024-05-01' existing = { + name: vnetName + scope: resourceGroup(vnetSubscriptionId, vnetResourceGroupName) +} +resource peSubnet 'Microsoft.Network/virtualNetworks/subnets@2024-05-01' existing = { + parent: vnet + name: peSubnetName +} + +/* -------------------------------------------- AI Foundry Account Private Endpoint -------------------------------------------- */ + +resource aiAccountPrivateEndpoint 'Microsoft.Network/privateEndpoints@2024-05-01' = { + name: '${aiAccountName}-private-endpoint' + location: resourceGroup().location + properties: { + subnet: { id: peSubnet.id } + privateLinkServiceConnections: [ + { + name: '${aiAccountName}-private-link-service-connection' + properties: { + privateLinkServiceId: aiAccount.id + groupIds: [ 'account' ] + } + } + ] + } +} + +/* -------------------------------------------- Storage Private Endpoint -------------------------------------------- */ + +resource storagePrivateEndpoint 'Microsoft.Network/privateEndpoints@2024-05-01' = { + name: '${storageName}-private-endpoint' + location: resourceGroup().location + properties: { + subnet: { id: peSubnet.id } + privateLinkServiceConnections: [ + { + name: '${storageName}-private-link-service-connection' + properties: { + privateLinkServiceId: storageAccount.id + groupIds: [ 'blob' ] + } + } + ] + } +} + +/* -------------------------------------------- Private DNS Zones -------------------------------------------- */ + +var 
aiServicesDnsZoneName = 'privatelink.services.ai.azure.com' +var openAiDnsZoneName = 'privatelink.openai.azure.com' +var cognitiveServicesDnsZoneName = 'privatelink.cognitiveservices.azure.com' +var storageDnsZoneName = 'privatelink.blob.${environment().suffixes.storage}' + +// ---- DNS Zone Resource Group lookups ---- +var aiServicesDnsZoneRG = existingDnsZones[aiServicesDnsZoneName] +var openAiDnsZoneRG = existingDnsZones[openAiDnsZoneName] +var cognitiveServicesDnsZoneRG = existingDnsZones[cognitiveServicesDnsZoneName] +var storageDnsZoneRG = existingDnsZones[storageDnsZoneName] + +// ---- DNS Zone Resources and References ---- +resource aiServicesPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' = if (empty(aiServicesDnsZoneRG)) { + name: aiServicesDnsZoneName + location: 'global' +} + +resource existingAiServicesPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' existing = if (!empty(aiServicesDnsZoneRG)) { + name: aiServicesDnsZoneName + scope: resourceGroup(dnsZonesSubscriptionId, aiServicesDnsZoneRG) +} +var aiServicesDnsZoneId = empty(aiServicesDnsZoneRG) ? aiServicesPrivateDnsZone.id : existingAiServicesPrivateDnsZone.id + +resource openAiPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' = if (empty(openAiDnsZoneRG)) { + name: openAiDnsZoneName + location: 'global' +} + +resource existingOpenAiPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' existing = if (!empty(openAiDnsZoneRG)) { + name: openAiDnsZoneName + scope: resourceGroup(dnsZonesSubscriptionId, openAiDnsZoneRG) +} +var openAiDnsZoneId = empty(openAiDnsZoneRG) ? 
openAiPrivateDnsZone.id : existingOpenAiPrivateDnsZone.id + +resource cognitiveServicesPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' = if (empty(cognitiveServicesDnsZoneRG)) { + name: cognitiveServicesDnsZoneName + location: 'global' +} + +resource existingCognitiveServicesPrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' existing = if (!empty(cognitiveServicesDnsZoneRG)) { + name: cognitiveServicesDnsZoneName + scope: resourceGroup(dnsZonesSubscriptionId, cognitiveServicesDnsZoneRG) +} +var cognitiveServicesDnsZoneId = empty(cognitiveServicesDnsZoneRG) ? cognitiveServicesPrivateDnsZone.id : existingCognitiveServicesPrivateDnsZone.id + +resource storagePrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' = if (empty(storageDnsZoneRG)) { + name: storageDnsZoneName + location: 'global' +} + +resource existingStoragePrivateDnsZone 'Microsoft.Network/privateDnsZones@2020-06-01' existing = if (!empty(storageDnsZoneRG)) { + name: storageDnsZoneName + scope: resourceGroup(dnsZonesSubscriptionId, storageDnsZoneRG) +} +var storageDnsZoneId = empty(storageDnsZoneRG) ? 
storagePrivateDnsZone.id : existingStoragePrivateDnsZone.id + +// ---- DNS VNet Links ---- +resource aiServicesLink 'Microsoft.Network/privateDnsZones/virtualNetworkLinks@2024-06-01' = if (empty(aiServicesDnsZoneRG)) { + parent: aiServicesPrivateDnsZone + location: 'global' + name: 'aiServices-${suffix}-link' + properties: { + virtualNetwork: { id: vnet.id } + registrationEnabled: false + } +} +resource openAiLink 'Microsoft.Network/privateDnsZones/virtualNetworkLinks@2024-06-01' = if (empty(openAiDnsZoneRG)) { + parent: openAiPrivateDnsZone + location: 'global' + name: 'aiServicesOpenAI-${suffix}-link' + properties: { + virtualNetwork: { id: vnet.id } + registrationEnabled: false + } +} +resource cognitiveServicesLink 'Microsoft.Network/privateDnsZones/virtualNetworkLinks@2024-06-01' = if (empty(cognitiveServicesDnsZoneRG)) { + parent: cognitiveServicesPrivateDnsZone + location: 'global' + name: 'aiServicesCognitiveServices-${suffix}-link' + properties: { + virtualNetwork: { id: vnet.id } + registrationEnabled: false + } +} +resource storageLink 'Microsoft.Network/privateDnsZones/virtualNetworkLinks@2024-06-01' = if (empty(storageDnsZoneRG)) { + parent: storagePrivateDnsZone + location: 'global' + name: 'storage-${suffix}-link' + properties: { + virtualNetwork: { id: vnet.id } + registrationEnabled: false + } +} + +// ---- DNS Zone Groups ---- +resource aiServicesDnsGroup 'Microsoft.Network/privateEndpoints/privateDnsZoneGroups@2024-05-01' = { + parent: aiAccountPrivateEndpoint + name: '${aiAccountName}-dns-group' + properties: { + privateDnsZoneConfigs: [ + { name: '${aiAccountName}-dns-aiserv-config', properties: { privateDnsZoneId: aiServicesDnsZoneId } } + { name: '${aiAccountName}-dns-openai-config', properties: { privateDnsZoneId: openAiDnsZoneId } } + { name: '${aiAccountName}-dns-cogserv-config', properties: { privateDnsZoneId: cognitiveServicesDnsZoneId } } + ] + } + dependsOn: [ + empty(aiServicesDnsZoneRG) ? 
aiServicesLink : null + empty(openAiDnsZoneRG) ? openAiLink : null + empty(cognitiveServicesDnsZoneRG) ? cognitiveServicesLink : null + ] +} +resource storageDnsGroup 'Microsoft.Network/privateEndpoints/privateDnsZoneGroups@2024-05-01' = { + parent: storagePrivateEndpoint + name: '${storageName}-dns-group' + properties: { + privateDnsZoneConfigs: [ + { name: '${storageName}-dns-config', properties: { privateDnsZoneId: storageDnsZoneId } } + ] + } + dependsOn: [ + empty(storageDnsZoneRG) ? storageLink : null + ] +} diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/standard-dependent-resources.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/standard-dependent-resources.bicep new file mode 100644 index 000000000..3068221c2 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/standard-dependent-resources.bicep @@ -0,0 +1,48 @@ +// Creates Azure dependent resources for Azure AI evaluation-only setup +// Only creates Storage Account (no Cosmos DB or AI Search) + +@description('Azure region of the deployment') +param location string + +@description('Name of the storage account') +param azureStorageName string + +@description('The AI Storage Account full ARM Resource ID. 
This is an optional field, and if not provided, the resource will be created.') +param azureStorageAccountResourceId string + +param azureStorageExists bool + +var azureStorageParts = split(azureStorageAccountResourceId, '/') + +resource existingAzureStorageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = if (azureStorageExists) { + name: azureStorageParts[8] + scope: resourceGroup(azureStorageParts[2], azureStorageParts[4]) +} + +// Some regions don't support Standard Zone-Redundant storage, need to use Geo-redundant storage +param noZRSRegions array = ['southindia', 'westus'] +param sku object = contains(noZRSRegions, location) ? { name: 'Standard_GRS' } : { name: 'Standard_ZRS' } + +// Storage creation +resource storage 'Microsoft.Storage/storageAccounts@2023-05-01' = if(!azureStorageExists) { + name: azureStorageName + location: location + kind: 'StorageV2' + sku: sku + properties: { + minimumTlsVersion: 'TLS1_2' + allowBlobPublicAccess: false + publicNetworkAccess: 'Disabled' + networkAcls: { + bypass: 'AzureServices' + defaultAction: 'Deny' + virtualNetworkRules: [] + } + allowSharedKeyAccess: false + } +} + +output azureStorageName string = azureStorageExists ? existingAzureStorageAccount.name : storage.name +output azureStorageId string = azureStorageExists ? existingAzureStorageAccount.id : storage.id +output azureStorageResourceGroupName string = azureStorageExists ? azureStorageParts[4] : resourceGroup().name +output azureStorageSubscriptionId string = azureStorageExists ?
azureStorageParts[2] : subscription().subscriptionId diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/subnet.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/subnet.bicep new file mode 100644 index 000000000..bf81553d8 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/subnet.bicep @@ -0,0 +1,22 @@ +@description('Name of the virtual network') +param vnetName string + +@description('Name of the subnet') +param subnetName string + +@description('Address prefix for the subnet') +param addressPrefix string + +@description('Array of subnet delegations') +param delegations array = [] + +resource subnet 'Microsoft.Network/virtualNetworks/subnets@2024-05-01' = { + name: '${vnetName}/${subnetName}' + properties: { + addressPrefix: addressPrefix + delegations: delegations + } +} + +output subnetId string = subnet.id +output subnetName string = subnetName diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/validate-existing-resources.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/validate-existing-resources.bicep new file mode 100644 index 000000000..6d3293fca --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/validate-existing-resources.bicep @@ -0,0 +1,41 @@ +// Validate existing resources for evaluation-only setup (Storage Account only) + +@description('Resource ID of the Azure Storage Account.') +param azureStorageAccountResourceId string + +// Check if existing resources have been passed in +var storagePassedIn = azureStorageAccountResourceId != '' + +var storageParts = split(azureStorageAccountResourceId, '/') +var azureStorageSubscriptionId = 
storagePassedIn && length(storageParts) > 2 ? storageParts[2] : subscription().subscriptionId +var azureStorageResourceGroupName = storagePassedIn && length(storageParts) > 4 ? storageParts[4] : resourceGroup().name + +// Validate Storage Account +resource azureStorageAccount 'Microsoft.Storage/storageAccounts@2024-01-01' existing = if (storagePassedIn) { + name: last(split(azureStorageAccountResourceId, '/')) + scope: resourceGroup(azureStorageSubscriptionId, azureStorageResourceGroupName) +} + +output azureStorageExists bool = storagePassedIn && (azureStorageAccount.name == storageParts[8]) + +output azureStorageSubscriptionId string = azureStorageSubscriptionId +output azureStorageResourceGroupName string = azureStorageResourceGroupName + +// Adding DNS Zone Check + +@description('Object mapping DNS zone names to their resource group, or empty string to indicate creation') +param existingDnsZones object + +@description('Subscription ID where existing private DNS zones are located. Should be resolved to current subscription if empty.') +param dnsZonesSubscriptionId string + +@description('List of private DNS zone names to validate') +param dnsZoneNames array + +// Output whether each DNS zone exists +output dnsZoneExists array = [ + for zoneName in dnsZoneNames: { + name: zoneName + exists: !empty(existingDnsZones[zoneName]) + } +] diff --git a/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/vnet.bicep b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/vnet.bicep new file mode 100644 index 000000000..d5b8db277 --- /dev/null +++ b/infrastructure/infrastructure-setup-bicep/15a-private-network-evaluation-only-setup/modules-network-secured/vnet.bicep @@ -0,0 +1,83 @@ +/* +Virtual Network Module +This module deploys the core network infrastructure with security controls: + +1. 
Address Space: + - VNet CIDR: 172.16.0.0/16 OR 192.168.0.0/16 + - Agents Subnet: 172.16.0.0/24 OR 192.168.0.0/24 + - Private Endpoint Subnet: 172.16.1.0/24 OR 192.168.1.0/24 + +2. Security Features: + - Network isolation + - Subnet delegation + - Private endpoint subnet +*/ + +@description('Azure region for the deployment') +param location string + +@description('The name of the virtual network') +param vnetName string = 'agents-vnet-test' + +@description('The name of Agents Subnet') +param agentSubnetName string = 'agent-subnet' + +@description('The name of Private Endpoint subnet') +param peSubnetName string = 'pe-subnet' + + +@description('Address space for the VNet') +param vnetAddressPrefix string = '' + +@description('Address prefix for the agent subnet') +param agentSubnetPrefix string = '' + +@description('Address prefix for the private endpoint subnet') +param peSubnetPrefix string = '' +var defaultVnetAddressPrefix = '192.168.0.0/16' +var vnetAddress = empty(vnetAddressPrefix) ? defaultVnetAddressPrefix : vnetAddressPrefix +var agentSubnet = empty(agentSubnetPrefix) ? cidrSubnet(vnetAddress, 24, 0) : agentSubnetPrefix +var peSubnet = empty(peSubnetPrefix) ?
cidrSubnet(vnetAddress, 24, 1) : peSubnetPrefix + +resource virtualNetwork 'Microsoft.Network/virtualNetworks@2024-05-01' = { + name: vnetName + location: location + properties: { + addressSpace: { + addressPrefixes: [ + vnetAddress + ] + } + subnets: [ + { + name: agentSubnetName + properties: { + addressPrefix: agentSubnet + delegations: [ + { + name: 'Microsoft.app/environments' + properties: { + serviceName: 'Microsoft.App/environments' + } + } + ] + } + } + { + name: peSubnetName + properties: { + addressPrefix: peSubnet + } + } + ] + } +} +// Output variables +output peSubnetName string = peSubnetName +output agentSubnetName string = agentSubnetName +output agentSubnetId string = '${virtualNetwork.id}/subnets/${agentSubnetName}' +output peSubnetId string = '${virtualNetwork.id}/subnets/${peSubnetName}' +output virtualNetworkName string = virtualNetwork.name +output virtualNetworkId string = virtualNetwork.id +output virtualNetworkResourceGroup string = resourceGroup().name +output virtualNetworkSubscriptionId string = subscription().subscriptionId diff --git a/infrastructure/infrastructure-setup-bicep/16-private-network-standard-agent-apim-setup-preview/README.md b/infrastructure/infrastructure-setup-bicep/16-private-network-standard-agent-apim-setup-preview/README.md index 6baed465e..6777a09e9 100644 --- a/infrastructure/infrastructure-setup-bicep/16-private-network-standard-agent-apim-setup-preview/README.md +++ b/infrastructure/infrastructure-setup-bicep/16-private-network-standard-agent-apim-setup-preview/README.md @@ -143,6 +143,25 @@ To use an existing VNet and subnets, set the existingVnetResourceId parameter to To use an existing Cosmos DB for NoSQL resource, set cosmosDBResourceId parameter to the full Azure Resource ID of the target Cosmos DB. 
- param azureCosmosDBAccountResourceId string = /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName} +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> When creating the Cosmos DB connection (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. +> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` + 3. **Use an existing Azure AI Search resource** diff --git a/infrastructure/infrastructure-setup-bicep/17-private-network-standard-user-assigned-identity-agent-setup/README.md b/infrastructure/infrastructure-setup-bicep/17-private-network-standard-user-assigned-identity-agent-setup/README.md index be8028faa..e098f8941 100644 --- a/infrastructure/infrastructure-setup-bicep/17-private-network-standard-user-assigned-identity-agent-setup/README.md +++ b/infrastructure/infrastructure-setup-bicep/17-private-network-standard-user-assigned-identity-agent-setup/README.md @@ -138,6 +138,25 @@ To use an existing VNet and subnets, set the existingVnetResourceId parameter to To use an existing Cosmos DB for NoSQL resource, set cosmosDBResourceId parameter to the full Azure Resource ID of the target Cosmos DB. 
- param azureCosmosDBAccountResourceId string = /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName} +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> When creating the Cosmos DB connection (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. +> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` + 3. **Use an existing Azure AI Search resource** diff --git a/infrastructure/infrastructure-setup-bicep/18-managed-virtual-network-preview/README.md b/infrastructure/infrastructure-setup-bicep/18-managed-virtual-network-preview/README.md index 9a7a8198b..6f216c2ba 100644 --- a/infrastructure/infrastructure-setup-bicep/18-managed-virtual-network-preview/README.md +++ b/infrastructure/infrastructure-setup-bicep/18-managed-virtual-network-preview/README.md @@ -103,6 +103,25 @@ To use an existing VNet and subnet, set the existingVnetResourceId parameter to To use an existing Cosmos DB for NoSQL resource, set cosmosDBResourceId parameter to the full Azure Resource ID of the target Cosmos DB. 
- param azureCosmosDBAccountResourceId string = /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName} +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> When creating the Cosmos DB connection (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. +> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` + 3. **Use an existing Azure AI Search resource** diff --git a/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/README.md b/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/README.md index b5ff1f14c..f67457cc9 100644 --- a/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/README.md +++ b/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/README.md @@ -178,6 +178,25 @@ Then configure private DNS zone for Container Apps (see TESTING-GUIDE.md Step 6. ## Parameters +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> If you are creating the Cosmos DB connection manually (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. 
This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. +> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` + | Parameter | Description | Default | |-----------|-------------|---------| | `location` | Azure region | `eastus2` | diff --git a/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/a2a-server/main.py b/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/a2a-server/main.py index becbda6f0..4a54e2acb 100644 --- a/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/a2a-server/main.py +++ b/infrastructure/infrastructure-setup-bicep/19-hybrid-private-resources-agent-setup/a2a-server/main.py @@ -8,6 +8,7 @@ GET /.well-known/agent.json - Agent card (A2A spec standard) GET /.well-known/agent-card.json - Agent card (Azure SDK default path) POST / - JSON-RPC 2.0 task endpoint + POST /a2a - JSON-RPC 2.0 task endpoint (DataProxy-compatible path) GET /healthz - Container health check """ @@ -72,7 +73,10 @@ async def get_agent_card(request: Request): host = request.headers.get("x-forwarded-host", request.headers.get("host", "")) base_url = f"{scheme}://{host}" card = dict(AGENT_CARD) - card["url"] = base_url + "/" + # Use /a2a path instead of root "/" to ensure compatibility with the + # Foundry DataProxy, which requires a non-empty path after 
the hostname + # in its /v1/https/{serviceName}/{remainder} route. + card["url"] = base_url + "/a2a" return card @@ -173,6 +177,7 @@ def _compute(op: str, a: float, b: float) -> str: @app.post("/") +@app.post("/a2a") async def handle_jsonrpc(request: Request): """Handle A2A JSON-RPC 2.0 requests.""" try: diff --git a/infrastructure/infrastructure-setup-bicep/41-standard-agent-setup/README.md b/infrastructure/infrastructure-setup-bicep/41-standard-agent-setup/README.md index 991acda34..b4e8c2673 100644 --- a/infrastructure/infrastructure-setup-bicep/41-standard-agent-setup/README.md +++ b/infrastructure/infrastructure-setup-bicep/41-standard-agent-setup/README.md @@ -30,3 +30,22 @@ For more details on the standard agent setup, see the [standard agent setup conc **Azure Cosmos DB for NoSQL** - Your existing Azure Cosmos DB for NoSQL Account used in standard setup must have at least a total throughput limit of at least 3000 RU/s. Both Provisioned Thoughtput and Serverless are supported. - 3 containers will be provisioned in your existing Cosmos DB account and each need 1000 RU/s + +> **⚠️ Important: Cosmos DB Connection Requirements** +> +> When creating the Cosmos DB connection (e.g., via REST API or ARM), ensure the following: +> - The `authType` **must** be set to `AAD`. This is the only supported authentication type for the Cosmos DB connection used by the Agent Service. +> - The `metadata` section **must** include the `ResourceId` property, set to the full Azure Resource ID of your Cosmos DB account. The Agent Service relies on this property to correctly identify and connect to your Cosmos DB resource. Omitting `ResourceId` from the metadata will cause the connection to fail. 
+> +> Example connection properties: +> ```json +> { +> "category": "CosmosDB", +> "authType": "AAD", +> "metadata": { +> "ApiType": "Azure", +> "ResourceId": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{cosmosDbAccountName}", +> "location": "{region}" +> } +> } +> ``` diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/AgentThreadAndHITL.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/AgentThreadAndHITL.csproj deleted file mode 100644 index c438e5e08..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/AgentThreadAndHITL.csproj +++ /dev/null @@ -1,21 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NoWarn);MEAI001 - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Dockerfile deleted file mode 100644 index 0d3e5757c..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:9.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY . . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:9.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . 
- -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentsInWorkflows.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Program.cs deleted file mode 100644 index d356d491e..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/Program.cs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -using System.ComponentModel; -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.AgentServer.AgentFramework.Persistence; -using Azure.AI.OpenAI; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; -using OpenAI; -using OpenAI.Chat; - -namespace AgengThreadAndHITL; - -public partial class Program -{ - public static async Task Main() - { - var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); - var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - - // Create a sample function tool that the agent can use. - [Description("Get the weather for a given location.")] - static string GetWeather([Description("The location to get the weather for.")] string location) - => $"The weather in {location} is cloudy with a high of 15°C."; - - // Create the chat client and agent. - // Note that we are wrapping the function tool with ApprovalRequiredAIFunction to require user approval before invoking it. - // user should reply with 'approve' or 'reject' when prompted. 
- - AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .AsAIAgent( - instructions: "You are a helpful assistant", - tools: [new ApprovalRequiredAIFunction(AIFunctionFactory.Create(GetWeather))] - ); - - var threadRespository = new InMemoryAgentThreadRepository(agent); - await agent.RunAIAgentAsync(telemetrySourceName: "Agents", threadRepository: threadRespository); - } -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/README.md deleted file mode 100644 index 0d818a725..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/README.md +++ /dev/null @@ -1,104 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. 
- -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use AI agents as executors within a workflow, hosted using -[Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Weather Assistant Agent - -This sample demonstrates the integration of AI agents with a function tool involving human approval. - -The agents are connected sequentially in a workflow, creating a translation chain that demonstrates: - -- How AgentServer adapter use a ThreadRepository to manage conversation history. -- The AgentServer adapter converts human approval request to a FunctionCall with function name `__hosted_agent_adapter_hitl__` -- User approve or deny the request by responding `approved` or `denied` as a FunctionCallOutput. - -### Agent Hosting - -The agent workflow is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent workflow using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent workflow can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. 
- -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. Azure CLI installed and authenticated (`az login`) -4. .NET 9.0 SDK or later installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_DEPLOYMENT_NAME` - The deployment name for your chat model (optional, defaults to `gpt-4o-mini`) - -**PowerShell:** - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -dotnet run -``` - -This will start the hosted agent workflow locally on `http://localhost:8088/`. - -### Interacting with the Agent - -You can interact with the agent workflow using: - -- The `test_requests.py` file in this directory to test and prompt the agent. It now generates a valid `conversation.id` automatically and reuses it across turns. -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8088/`. For this HITL sample, you must provide a stable `conversation.id` in every request (including the first one) to keep thread state and pending approvals. - -Try providing text to ask the weather assistant agent about the weather in a city. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. 
- -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Add this line at the top of your `Dockerfile`: - -```dockerfile -FROM --platform=linux/amd64 python:3.12-slim -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/agent.yaml deleted file mode 100644 index cbd0cde1b..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/agent.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: AgentThreadAndHITL -displayName: "Weather Assistant Agent" -description: > - A Weather Assistant Agent that provides weather information and forecasts. It - demonstrates how to use Azure AI AgentServer with Human-in-the-Loop (HITL) - capabilities to get human approval for funtional calls. 
- -metadata: - authors: - - Hosted Agent Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Human-in-the-Loop -template: - kind: hosted - name: AgentThreadAndHITL - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - name: chat - kind: model - id: gpt-4o diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/test_requests.py b/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/test_requests.py deleted file mode 100644 index 025be5e45..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentThreadAndHITL/test_requests.py +++ /dev/null @@ -1,92 +0,0 @@ -import json -import os -import secrets -import string - -import requests - - -base_url = os.getenv("AGENTSERVER_URL", "http://localhost:8088").rstrip("/") -url = base_url if base_url.endswith("/responses") else f"{base_url}/responses" -stream = False - - -alphanum = string.ascii_letters + string.digits - - -def create_conversation_id(): - # Match AgentServer expected format: conv_<18-char partition><32-char entropy> - return "conv_" + "".join(secrets.choice(alphanum) for _ in range(50)) - - -def extract_conversation_id(response_detail): - conversation = response_detail.get("conversation") - if isinstance(conversation, dict): - conversation_id = conversation.get("id") - if isinstance(conversation_id, str) and conversation_id: - return conversation_id - return None - - -user_input = "What is the weather like in Vancouver?" 
-conversation_id = create_conversation_id() -payload = { - "agent": {"name": "local_agent", "type": "agent_reference"}, - "tools": [], - "stream": stream, - "input": user_input, - "conversation": {"id": conversation_id}, -} - -call_id = None - -try: - response = requests.post(url, json=payload) - response.raise_for_status() - - response_detail = response.json() - print(json.dumps(response_detail, indent=2)) - - returned_conversation_id = extract_conversation_id(response_detail) - if returned_conversation_id: - conversation_id = returned_conversation_id - - output = response_detail.get("output", []) - if isinstance(output, list): - for item in output: - if item.get("type") == "function_call" and item.get("name") == "__hosted_agent_adapter_hitl__": - call_id = item.get("call_id") - break -except Exception as e: - print(f"Error: {e}") - -print("\n\n") -print(f"conversation_id: {conversation_id}") -print(f"call_id: {call_id}") - -if not call_id: - print("Failed to parse hitl request info") -else: - human_feedback = { - "call_id": call_id, - "output": "approve", - "type": "function_call_output", - } - - feedback_payload = { - "agent": {"name": "local_agent", "type": "agent_reference"}, - "tools": [], - "stream": stream, - "input": [human_feedback], - "conversation": {"id": conversation_id}, - } - - try: - print("\n\nsending feedback...") - print(json.dumps(feedback_payload, indent=2)) - response = requests.post(url, json=feedback_payload) - response.raise_for_status() - print("\n\nagent response:") - print(json.dumps(response.json(), indent=2)) - except Exception as e: - print(f"Error: {e}") diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/AgentWithHostedMCP.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/AgentWithHostedMCP.csproj deleted file mode 100644 index dfba6f51b..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/AgentWithHostedMCP.csproj +++ /dev/null @@ -1,21 +0,0 @@ - - - - Exe - 
net9.0 - - enable - enable - - $(NoWarn);MEAI001;OPENAI001 - - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Dockerfile deleted file mode 100644 index 776f81041..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:9.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY . . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:9.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . - -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentWithHostedMCP.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Program.cs deleted file mode 100644 index 3a8908876..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/Program.cs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -// This sample shows how to create and use a simple AI agent with OpenAI Responses as the backend, that uses a Hosted MCP Tool. -// In this case the OpenAI responses service will invoke any MCP tools as required. MCP tools are not invoked by the Agent Framework. -// The sample demonstrates how to use MCP tools with auto approval by setting ApprovalMode to NeverRequire. 
- -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.OpenAI; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; -using OpenAI; -using OpenAI.Responses; - -var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -// Create an MCP tool that can be called without approval. -AITool mcpTool = new HostedMcpServerTool(serverName: "microsoft_learn", serverAddress: "https://learn.microsoft.com/api/mcp") -{ - AllowedTools = ["microsoft_docs_search"], - ApprovalMode = HostedMcpServerToolApprovalMode.NeverRequire -}; - -// Create an agent with the MCP tool using Azure OpenAI Responses. -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new DefaultAzureCredential()) - .GetResponsesClient(deploymentName) - .AsAIAgent( - instructions: "You answer questions by searching the Microsoft Learn content only.", - name: "MicrosoftLearnAgent", - tools: [mcpTool]); - -await agent.RunAIAgentAsync(); diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/README.md deleted file mode 100644 index 14f3f0c71..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/README.md +++ /dev/null @@ -1,107 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. 
Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use a Hosted Model Context Protocol (MCP) server with a -[Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview#ai-agents) AI agent and -host it using [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### MCP Integration - -This sample uses a Hosted Model Context Protocol (MCP) server to provide external tools to the agent. The MCP workflow operates as follows: - -1. The agent is configured with a `HostedMcpServerTool` pointing to `https://learn.microsoft.com/api/mcp` -2. Only the `microsoft_docs_search` tool is enabled from the available MCP tools -3. Approval mode is set to `NeverRequire`, allowing automatic tool execution without user confirmation -4. 
When you ask questions, the Azure OpenAI Responses service automatically invokes the MCP tool to search Microsoft Learn documentation -5. The agent returns answers based on the retrieved Microsoft Learn content - -**Note**: In this configuration, the Azure OpenAI Responses service manages tool invocation directly - the Agent Framework does not handle MCP tool calls. - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. Azure CLI installed and authenticated (`az login`) -4. 
.NET 9.0 SDK or later installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_DEPLOYMENT_NAME` - The deployment name for your chat model (optional, defaults to `gpt-4o-mini`) - -**PowerShell:** - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -dotnet run -``` - -This will start the hosted agent locally on `http://localhost:8080/`. - -### Interacting with the Agent - -You can interact with the agent using: - -- The `run-requests.http` file in this directory to test and prompt the agent -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8080/` - -Try asking questions about Microsoft documentation and technologies to see the MCP tool in action. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Add this line at the top of your `Dockerfile`: - -```dockerfile -FROM --platform=linux/amd64 python:3.12-slim -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/agent.yaml deleted file mode 100644 index d37c30457..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: AgentWithHostedMCP -displayName: "Microsoft Learn Response Agent with MCP" -description: > - An AI agent that uses Azure OpenAI Responses with a Hosted Model Context Protocol (MCP) server. - The agent answers questions by searching Microsoft Learn documentation using MCP tools. - This demonstrates how MCP tools can be integrated with Azure OpenAI Responses where the service - itself handles tool invocation. -metadata: - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Model Context Protocol - - MCP - - Tool Call Approval -template: - kind: hosted - name: AgentWithHostedMCP - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - name: chat - kind: model - id: gpt-4o-mini diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/run-requests.http deleted file mode 100644 index cc26f43b9..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithHostedMCP/run-requests.http +++ /dev/null @@ -1,30 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input - Ask about MCP Tools -POST {{endpoint}} -Content-Type: application/json -{ - "input": "Please summarize the Azure AI Agent documentation related to MCP Tool calling?" 
-} - -### Explicit input - Ask about Agent Framework -POST {{endpoint}} -Content-Type: application/json -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "What is the Microsoft Agent Framework?" - } - ] - } - ] -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/.dockerignore b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/.dockerignore deleted file mode 100644 index 3f4d104ea..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/.dockerignore +++ /dev/null @@ -1,29 +0,0 @@ -# Build outputs -bin/ -obj/ -out/ - -# Environment files with secrets -.env -.env.* -*.local -appsettings.*.json -!appsettings.json - -# IDE and editor files -.vs/ -.vscode/ -*.user -*.suo -*.sln.docstates - -# Git -.git/ -.gitignore - -# Documentation and samples (not needed in container) -*.md -*.http - -# Test results -TestResults/ diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/AgentWithLocalTools.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/AgentWithLocalTools.csproj deleted file mode 100644 index 8600536a6..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/AgentWithLocalTools.csproj +++ /dev/null @@ -1,17 +0,0 @@ - - - Exe - net10.0 - enable - enable - true - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Program.cs deleted file mode 100644 index 5b2859632..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Program.cs +++ /dev/null @@ -1,136 +0,0 @@ -// Seattle Hotel Agent - A simple agent with a tool to find hotels in Seattle. -// Uses Microsoft Agent Framework with Azure AI Foundry. -// Ready for deployment to Foundry Hosted Agent service. 
- -using System.ComponentModel; -using System.Globalization; -using System.Text; -using System.ClientModel.Primitives; -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.OpenAI; -using Azure.AI.Projects; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; - -// Get configuration from environment variables -var endpoint = Environment.GetEnvironmentVariable("AZURE_AI_PROJECT_ENDPOINT") - ?? throw new InvalidOperationException("AZURE_AI_PROJECT_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("MODEL_DEPLOYMENT_NAME") ?? "gpt-4.1-mini"; -Console.WriteLine($"Project Endpoint: {endpoint}"); -Console.WriteLine($"Model Deployment: {deploymentName}"); -// Simulated hotel data for Seattle -var seattleHotels = new[] -{ - new Hotel("Contoso Suites", 189, 4.5, "Downtown"), - new Hotel("Fabrikam Residences", 159, 4.2, "Pike Place Market"), - new Hotel("Alpine Ski House", 249, 4.7, "Seattle Center"), - new Hotel("Margie's Travel Lodge", 219, 4.4, "Waterfront"), - new Hotel("Northwind Inn", 139, 4.0, "Capitol Hill"), - new Hotel("Relecloud Hotel", 99, 3.8, "University District"), -}; - -[Description("Get available hotels in Seattle for the specified dates. This simulates a call to a hotel availability API.")] -string GetAvailableHotels( - [Description("Check-in date in YYYY-MM-DD format")] string checkInDate, - [Description("Check-out date in YYYY-MM-DD format")] string checkOutDate, - [Description("Maximum price per night in USD (optional, defaults to 500)")] int maxPrice = 500) -{ - try - { - // Parse dates - if (!DateTime.TryParseExact(checkInDate, "yyyy-MM-dd", CultureInfo.InvariantCulture, DateTimeStyles.None, out var checkIn)) - { - return $"Error parsing check-in date. Please use YYYY-MM-DD format."; - } - - if (!DateTime.TryParseExact(checkOutDate, "yyyy-MM-dd", CultureInfo.InvariantCulture, DateTimeStyles.None, out var checkOut)) - { - return $"Error parsing check-out date. 
Please use YYYY-MM-DD format."; - } - - // Validate dates - if (checkOut <= checkIn) - { - return "Error: Check-out date must be after check-in date."; - } - - var nights = (checkOut - checkIn).Days; - - // Filter hotels by price - var availableHotels = seattleHotels.Where(h => h.PricePerNight <= maxPrice).ToList(); - - if (availableHotels.Count == 0) - { - return $"No hotels found in Seattle within your budget of ${maxPrice}/night."; - } - - // Build response - var result = new StringBuilder(); - result.AppendLine($"Available hotels in Seattle from {checkInDate} to {checkOutDate} ({nights} nights):"); - result.AppendLine(); - - foreach (var hotel in availableHotels) - { - var totalCost = hotel.PricePerNight * nights; - result.AppendLine($"**{hotel.Name}**"); - result.AppendLine($" Location: {hotel.Location}"); - result.AppendLine($" Rating: {hotel.Rating}/5"); - result.AppendLine($" ${hotel.PricePerNight}/night (Total: ${totalCost})"); - result.AppendLine(); - } - - return result.ToString(); - } - catch (Exception ex) - { - return $"Error processing request. Details: {ex.Message}"; - } -} - -// Create chat client using AIProjectClient to get the OpenAI connection from the project -var credential = new DefaultAzureCredential(); -AIProjectClient projectClient = new AIProjectClient(new Uri(endpoint), credential); - -// Get the OpenAI connection from the project -ClientConnection connection = projectClient.GetConnection(typeof(AzureOpenAIClient).FullName!); - -if (!connection.TryGetLocatorAsUri(out Uri? 
openAiEndpoint) || openAiEndpoint is null) -{ - throw new InvalidOperationException("Failed to get OpenAI endpoint from project connection."); -} -openAiEndpoint = new Uri($"https://{openAiEndpoint.Host}"); -Console.WriteLine($"OpenAI Endpoint: {openAiEndpoint}"); - -var chatClient = new AzureOpenAIClient(openAiEndpoint, credential) - .GetChatClient(deploymentName) - .AsIChatClient() - .AsBuilder() - .UseOpenTelemetry(sourceName: "Agents", configure: cfg => cfg.EnableSensitiveData = false) - .Build(); - -var agent = new ChatClientAgent(chatClient, - name: "SeattleHotelAgent", - instructions: """ - You are a helpful travel assistant specializing in finding hotels in Seattle, Washington. - - When a user asks about hotels in Seattle: - 1. Ask for their check-in and check-out dates if not provided - 2. Ask about their budget preferences if not mentioned - 3. Use the GetAvailableHotels tool to find available options - 4. Present the results in a friendly, informative way - 5. Offer to help with additional questions about the hotels or Seattle - - Be conversational and helpful. If users ask about things outside of Seattle hotels, - politely let them know you specialize in Seattle hotel recommendations. 
- """, - tools: [AIFunctionFactory.Create(GetAvailableHotels)]) - .AsBuilder() - .UseOpenTelemetry(sourceName: "Agents", configure: cfg => cfg.EnableSensitiveData = false) - .Build(); - -Console.WriteLine("Seattle Hotel Agent Server running on http://localhost:8088"); -await agent.RunAIAgentAsync(telemetrySourceName: "Agents"); - -// Hotel record for simulated data -record Hotel(string Name, int PricePerNight, double Rating, string Location); diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/README.md deleted file mode 100644 index 62bc75491..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/README.md +++ /dev/null @@ -1,132 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. 
- -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates a **key advantage of code-based hosted agents**: - -- **Local C# tool execution** - Run custom C# methods as agent tools - -Code-based agents can execute **any C# code** you write. This sample includes a Seattle Hotel Agent with a `GetAvailableHotels` tool that searches for available hotels based on check-in/check-out dates and budget preferences. - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and can be deployed to Microsoft Foundry using the Azure Developer CLI. - -## How It Works - -### Local Tools Integration - -In [Program.cs](Program.cs), the agent uses a local C# method (`GetAvailableHotels`) that simulates a hotel availability API. This demonstrates how code-based agents can execute custom server-side logic that prompt agents cannot access. - -The tool accepts: -- **checkInDate** - Check-in date in YYYY-MM-DD format -- **checkOutDate** - Check-out date in YYYY-MM-DD format -- **maxPrice** - Maximum price per night in USD (optional, defaults to $500) - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. - -### Agent Deployment - -The hosted agent can be deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. 
- -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure AI Foundry Project** - - Project created in [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry#microsoft-foundry-portals) - - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`) - - Note your project endpoint URL and model deployment name - -2. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -3. **.NET 10.0 SDK or later** - - Verify your version: `dotnet --version` - - Download from [https://dotnet.microsoft.com/download](https://dotnet.microsoft.com/download) - -### Environment Variables - -Set the following environment variables (matching `agent.yaml`): - -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint URL (required) -- `MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (defaults to `gpt-4.1-mini`) - -**PowerShell:** - -```powershell -# Replace with your actual values -$env:AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" -``` - -**Bash:** - -```bash -export AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -export MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```bash -dotnet run -``` - -This will start the hosted agent locally on `http://localhost:8088/`. 
- -### Interacting with the Agent - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under $200 per night" - stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "Find me hotels in Seattle for March 20-23, 2025 under $200 per night","stream":false}' -``` - -You can also use the `run-requests.http` file in this directory with the VS Code REST Client extension. - -The agent will use the `GetAvailableHotels` tool to search for available hotels matching your criteria. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/agent.yaml deleted file mode 100644 index 993cc5178..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/agent.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Unique identifier/name for this agent -name: seattle-hotel-agent -# Brief description of what this agent does -description: > - A travel assistant agent that helps users find hotels in Seattle. - Demonstrates local C# tool execution - a key advantage of code-based - hosted agents over prompt agents. -metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Local Tools - - Travel Assistant - - Hotel Search -template: - name: seattle-hotel-agent - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} - - name: MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4.1-mini - name: chat diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/run-requests.http deleted file mode 100644 index 4f2e87e09..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/run-requests.http +++ /dev/null @@ -1,52 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple hotel search - budget under $200 -POST {{endpoint}} -Content-Type: application/json - -{ - "input": "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under $200 per night", - "stream": false -} - -### Hotel search with higher budget -POST {{endpoint}} -Content-Type: application/json - -{ - "input": "Find me hotels in Seattle for March 20-23, 2025 
under $250 per night", - "stream": false -} - -### Ask for recommendations without dates (agent should ask for clarification) -POST {{endpoint}} -Content-Type: application/json - -{ - "input": "What hotels do you recommend in Seattle?", - "stream": false -} - -### Explicit input format -POST {{endpoint}} -Content-Type: application/json - -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "I'm looking for a hotel in Seattle from 2025-04-01 to 2025-04-05, my budget is $150 per night maximum" - } - ] - } - ], - "stream": false -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj deleted file mode 100644 index ff2c1ca0c..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj +++ /dev/null @@ -1,19 +0,0 @@ - - - - Exe - net9.0 - - enable - enable - - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Dockerfile deleted file mode 100644 index b494ad225..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:9.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY . . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:9.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . 
- -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentWithTextSearchRag.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Program.cs deleted file mode 100644 index c3389988f..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/Program.cs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -// This sample shows how to use TextSearchProvider to add retrieval augmented generation (RAG) -// capabilities to an AI agent. The provider runs a search against an external knowledge base -// before each model invocation and injects the results into the model context. - -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.OpenAI; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; -using OpenAI.Chat; - -var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -TextSearchProviderOptions textSearchOptions = new() -{ - // Run the search prior to every model invocation and keep a short rolling window of conversation context. - SearchTime = TextSearchProviderOptions.TextSearchBehavior.BeforeAIInvoke, - RecentMessageMemoryLimit = 6, -}; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new DefaultAzureCredential()) - .GetChatClient(deploymentName) - .AsAIAgent(new ChatClientAgentOptions - { - ChatOptions = new ChatOptions - { - Instructions = "You are a helpful support specialist for Contoso Outdoors. 
Answer questions using the provided context and cite the source document when available.", - }, - AIContextProviders = [new TextSearchProvider(MockSearchAsync, textSearchOptions)] - }); - -await agent.RunAIAgentAsync(); - -static Task> MockSearchAsync(string query, CancellationToken cancellationToken) -{ - // The mock search inspects the user's question and returns pre-defined snippets - // that resemble documents stored in an external knowledge source. - List results = new(); - - if (query.Contains("return", StringComparison.OrdinalIgnoreCase) || query.Contains("refund", StringComparison.OrdinalIgnoreCase)) - { - results.Add(new() - { - SourceName = "Contoso Outdoors Return Policy", - SourceLink = "https://contoso.com/policies/returns", - Text = "Customers may return any item within 30 days of delivery. Items should be unused and include original packaging. Refunds are issued to the original payment method within 5 business days of inspection." - }); - } - - if (query.Contains("shipping", StringComparison.OrdinalIgnoreCase)) - { - results.Add(new() - { - SourceName = "Contoso Outdoors Shipping Guide", - SourceLink = "https://contoso.com/help/shipping", - Text = "Standard shipping is free on orders over $50 and typically arrives in 3-5 business days within the continental United States. Expedited options are available at checkout." - }); - } - - if (query.Contains("tent", StringComparison.OrdinalIgnoreCase) || query.Contains("fabric", StringComparison.OrdinalIgnoreCase)) - { - results.Add(new() - { - SourceName = "TrailRunner Tent Care Instructions", - SourceLink = "https://contoso.com/manuals/trailrunner-tent", - Text = "Clean the tent fabric with lukewarm water and a non-detergent soap. Allow it to air dry completely before storage and avoid prolonged UV exposure to extend the lifespan of the waterproof coating." 
- }); - } - - return Task.FromResult>(results); -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/README.md deleted file mode 100644 index cd3a91e0a..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/README.md +++ /dev/null @@ -1,111 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. 
- -# What this sample demonstrates - -This sample demonstrates how to use the TextSearchProvider to add retrieval augmented generation (RAG) capabilities to a -[Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview#ai-agents) AI agent and -host it using [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Retrieval Augmented Generation (RAG) with TextSearchProvider - -This sample uses a **mock search function** to demonstrate the RAG pattern. The RAG workflow operates as follows: - -1. When the user asks a question, the TextSearchProvider intercepts it -2. The search function looks for relevant documents based on the query -3. Retrieved documents are injected into the model's context -4. The AI responds using both its training and the provided context -5. The agent can cite specific source documents in its answers - -**Note**: The mock search function returns pre-defined snippets for demonstration purposes. In a production scenario, replace this with actual searches against your knowledge base (e.g., Azure AI Search, vector database, or other data sources). - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. 
-The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. Azure CLI installed and authenticated (`az login`) -4. .NET 9.0 SDK or later installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_DEPLOYMENT_NAME` - The deployment name for your chat model (optional, defaults to `gpt-4o-mini`) - -**PowerShell:** - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -dotnet run -``` - -This will start the hosted agent locally on `http://localhost:8080/`. - -### Interacting with the Agent - -You can interact with the agent using: - -- The `run-requests.http` file in this directory to test and prompt the agent -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8080/` - -Try asking questions about: - -- Contoso return policy -- Shipping information -- Product care instructions - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. 
- -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Add this line at the top of your `Dockerfile`: - -```dockerfile -FROM --platform=linux/amd64 python:3.12-slim -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/agent.yaml deleted file mode 100644 index 31acfb1ce..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: AgentWithTextSearchRag -displayName: "Text Search RAG Agent" -description: > - An AI agent that uses TextSearchProvider for retrieval augmented generation (RAG) capabilities. - The agent runs searches against an external knowledge base before each model invocation and - injects the results into the model context. It can answer questions about Contoso Outdoors - policies and products, including return policies, refunds, shipping options, and product care - instructions such as tent maintenance. 
-metadata: - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Retrieval-Augmented Generation - - RAG -template: - kind: hosted - name: AgentWithTextSearchRag - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - name: chat - kind: model - id: gpt-4o-mini diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/run-requests.http deleted file mode 100644 index 4bfb02d8f..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTextSearchRag/run-requests.http +++ /dev/null @@ -1,30 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input -POST {{endpoint}} -Content-Type: application/json -{ - "input": "Hi! I need help understanding the return policy." -} - -### Explicit input -POST {{endpoint}} -Content-Type: application/json -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "How long does standard shipping usually take?" 
- } - ] - } - ] -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/AgentWithTools.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/AgentWithTools.csproj deleted file mode 100644 index 048213625..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/AgentWithTools.csproj +++ /dev/null @@ -1,17 +0,0 @@ - - - Exe - net9.0 - - enable - enable - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Dockerfile deleted file mode 100644 index 2b6e5b6d1..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:9.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY . . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:9.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . - -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentWithTools.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Program.cs deleted file mode 100644 index 3b77d054c..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/Program.cs +++ /dev/null @@ -1,42 +0,0 @@ -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; -using Azure.AI.OpenAI; -using Azure.Identity; - -// Get configuration from environment variables -var openAiEndpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -var toolConnectionId = Environment.GetEnvironmentVariable("TOOL_CONNECTION_ID") ?? throw new InvalidOperationException("TOOL_CONNECTION_ID is not set."); - -var credential = new DefaultAzureCredential(); - -// Create chat client -var chatClient = new AzureOpenAIClient(new Uri(openAiEndpoint), credential) - .GetChatClient(deploymentName) - .AsIChatClient() - .AsBuilder() - .UseFoundryTools(new { type = "mcp", project_connection_id = toolConnectionId }, new { type = "code_interpreter" }) - .UseOpenTelemetry(sourceName: "Agents", configure: (cfg) => cfg.EnableSensitiveData = true) - .Build(); - - -var agent = new ChatClientAgent(chatClient, - name: "AgentWithTools", - instructions: @"You are a helpful assistant with access to tools for fetching Microsoft documentation. - - IMPORTANT: When the user asks about Microsoft Learn articles or documentation: - 1. You MUST use the microsoft_docs_fetch tool to retrieve the actual content - 2. Do NOT rely on your training data - 3. 
Always fetch the latest information from the provided URL - - Available tools: - - microsoft_docs_fetch: Fetches and converts Microsoft Learn documentation - - microsoft_docs_search: Searches Microsoft/Azure documentation - - microsoft_code_sample_search: Searches for code examples") - .AsBuilder() - .UseOpenTelemetry(sourceName: "Agents", configure: (cfg) => cfg.EnableSensitiveData = true) - .Build(); - -// Run agent with tool support using ToolDefinition objects -await agent.RunAIAgentAsync(telemetrySourceName: "Agents"); diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/README.md deleted file mode 100644 index 808df7e13..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/README.md +++ /dev/null @@ -1,112 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. 
- -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use Foundry tools (MCP and code interpreter) with a -[Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview#ai-agents) AI agent and -host it using [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Foundry Tools Integration - -This sample uses Foundry tools to provide external capabilities to the agent. The workflow operates as follows: - -1. The agent registers Foundry tools using `UseFoundryTools`, including an MCP tool connection and a code interpreter tool -2. The MCP tool connection (identified by `TOOL_CONNECTION_ID`) provides tools such as `microsoft_docs_fetch`, `microsoft_docs_search`, and `microsoft_code_sample_search` -3. The agent instructions require the `microsoft_docs_fetch` tool when responding to Microsoft Learn documentation questions -4. The code interpreter tool is available for calculations or data transformations -5. The agent returns answers based on tool outputs and model reasoning - -**Note**: The MCP tool connection must be configured in your Foundry project (for example, a Microsoft Learn MCP server connection) and its connection ID provided via `TOOL_CONNECTION_ID`. 
- -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. A Foundry MCP tool connection and its connection ID -4. Azure CLI installed and authenticated (`az login`) -5. .NET 9.0 SDK or later installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_DEPLOYMENT_NAME` - The deployment name for your chat model (optional, defaults to `gpt-4o-mini`) -- `TOOL_CONNECTION_ID` - The Foundry MCP tool connection ID (required) - -**PowerShell:** - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o-mini" - -# Required: Foundry MCP tool connection ID -$env:TOOL_CONNECTION_ID="your-tool-connection-id" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -dotnet run -``` - -This will start the hosted agent locally on `http://localhost:8088/`. 
- -### Interacting with the Agent - -You can interact with the agent using: - -- The `run-requests.http` file in this directory to test and prompt the agent -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8088/` - -Try asking questions that require Microsoft Learn content, or request a small calculation to exercise the code interpreter tool. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Add this line at the top of your `Dockerfile`: - -```dockerfile -FROM --platform=linux/amd64 python:3.12-slim -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/agent.yaml deleted file mode 100644 index d75eb834f..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: AgentWithTools -displayName: "Agent with Tools" -description: > - An AI agent that uses Foundry tools (MCP and code interpreter) with Azure OpenAI Responses. - The agent can fetch Microsoft Learn documentation and run code when needed. 
-metadata: - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Tools - - MCP - - Code Interpreter -template: - kind: hosted - name: AgentWithTools - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_DEPLOYMENT_NAME - value: "{{chat}}" - - name: TOOL_CONNECTION_ID - value: ${TOOL_CONNECTION_ID} -resources: - - name: chat - kind: model - id: gpt-4o-mini diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/run-requests.http deleted file mode 100644 index 22a37ff54..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithTools/run-requests.http +++ /dev/null @@ -1,30 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input -POST {{endpoint}} -Content-Type: application/json -{ - "input": "Please use the microsoft_docs_fetch tool to fetch and summarize the Microsoft Learn article at https://learn.microsoft.com/azure/ai-services/openai/overview" -} - -### Explicit input -POST {{endpoint}} -Content-Type: application/json -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "Please use the microsoft_docs_fetch tool to fetch and summarize the Microsoft Learn article at https://learn.microsoft.com/azure/ai-services/openai/overview" - } - ] - } - ] -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/AgentsInWorkflows.csproj b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/AgentsInWorkflows.csproj deleted file mode 100644 index f6184dee0..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/AgentsInWorkflows.csproj +++ /dev/null @@ -1,19 +0,0 @@ - - - - Exe - net9.0 - - enable - enable - - - 
- - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Dockerfile deleted file mode 100644 index 0d3e5757c..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:9.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY . . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:9.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . - -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentsInWorkflows.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Program.cs deleted file mode 100644 index ede01fa33..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/Program.cs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -// This sample demonstrates how to integrate AI agents into a workflow pipeline. -// Three translation agents are connected sequentially to create a translation chain: -// English → French → Spanish → English, showing how agents can be composed as workflow executors. - -using Azure.AI.AgentServer.AgentFramework; -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.OpenAI; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Agents.AI.Workflows; -using Microsoft.Extensions.AI; - -// Set up the Azure OpenAI client -var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -IChatClient chatClient = new AzureOpenAIClient(new Uri(endpoint), new DefaultAzureCredential()) - .GetChatClient(deploymentName) - .AsIChatClient(); - -// Create agents -AIAgent frenchAgent = GetTranslationAgent("French", chatClient); -AIAgent spanishAgent = GetTranslationAgent("Spanish", chatClient); -AIAgent englishAgent = GetTranslationAgent("English", chatClient); - -WorkflowAgentFactory factory = () => -{ - // Build the workflow and turn it into an agent - AIAgent agent = new WorkflowBuilder(frenchAgent) - .AddEdge(frenchAgent, spanishAgent) - .AddEdge(spanishAgent, englishAgent) - .Build() - .AsAIAgent(); - return Task.FromResult(agent); -}; - -await factory.RunWorkflowAgentAsync(telemetrySourceName: "Agents"); - -static ChatClientAgent GetTranslationAgent(string targetLanguage, IChatClient chatClient) => - new(chatClient, $"You are a translation assistant that translates the provided text to {targetLanguage}."); diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/README.md b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/README.md deleted file mode 100644 index 8a32e228b..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/README.md +++ /dev/null @@ -1,108 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. 
Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use AI agents as executors within a workflow, hosted using -[Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Agents in Workflows - -This sample demonstrates the integration of AI agents within a workflow pipeline. The workflow operates as follows: - -1. **French Agent** - Receives input text and translates it to French -2. **Spanish Agent** - Takes the French translation and translates it to Spanish -3. 
**English Agent** - Takes the Spanish translation and translates it back to English - -The agents are connected sequentially in a workflow, creating a translation chain that demonstrates: - -- How AI-powered agents can be seamlessly integrated into workflow pipelines -- Sequential execution patterns where each agent's output becomes the next agent's input -- Composable agent architectures for multi-step processing - -### Agent Hosting - -The agent workflow is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent workflow using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent workflow can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. Azure CLI installed and authenticated (`az login`) -4. 
.NET 9.0 SDK or later installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_DEPLOYMENT_NAME` - The deployment name for your chat model (optional, defaults to `gpt-4o-mini`) - -**PowerShell:** - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -dotnet run -``` - -This will start the hosted agent workflow locally on `http://localhost:8080/`. - -### Interacting with the Agent - -You can interact with the agent workflow using: - -- The `run-requests.http` file in this directory to test and prompt the agent -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8080/` - -Try providing text in English to see it translated through the workflow chain (English ? French ? Spanish ? English). - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Add this line at the top of your `Dockerfile`: - -```dockerfile -FROM --platform=linux/amd64 python:3.12-slim -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/agent.yaml deleted file mode 100644 index 53081f422..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/agent.yaml +++ /dev/null @@ -1,28 +0,0 @@ -name: AgentsInWorkflows -displayName: "Translation Chain Workflow Agent" -description: > - A workflow agent that performs sequential translation through multiple languages. - The agent translates text from English to French, then to Spanish, and finally back - to English, leveraging AI-powered translation capabilities in a pipeline workflow. -metadata: - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Workflows -template: - kind: hosted - name: AgentsInWorkflows - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - name: chat - kind: model - id: gpt-4o-mini diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/run-requests.http deleted file mode 100644 index 5c33700a9..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AgentsInWorkflows/run-requests.http +++ /dev/null @@ -1,30 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input -POST {{endpoint}} -Content-Type: application/json -{ - "input": "Hello, how are you today?" -} - -### Explicit input -POST {{endpoint}} -Content-Type: application/json -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "Hello, how are you today?" 
- } - ] - } - ] -} diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/.dockerignore b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/.dockerignore deleted file mode 100644 index 6bfa65a8f..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/.dockerignore +++ /dev/null @@ -1,57 +0,0 @@ -# Build outputs -bin/ -obj/ -out/ - -# IDE and editor files -.vs/ -.vscode/ -*.user -*.suo -*.sln.docstates -.foundry/ - -# Git -.git/ -.gitignore - -# Documentation and samples (not needed in container) -*.md -*.http - -# Ignore files -.dockerignore - -# Logs -*.log - -# Temporary files -*.tmp -*.temp - -# OS files -.DS_Store -Thumbs.db - -# Package manager directories -node_modules/ -packages/ - -# Test results -TestResults/ -*.trx - -# Coverage reports -coverage/ -*.coverage -*.coveragexml - -# Environment files with secrets -.env -.env.* -*.local -appsettings.*.json -!appsettings.json - -.venv/ -__pycache__/ diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/AzureAIAgentsInWorkflow.csproj b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/AzureAIAgentsInWorkflow.csproj deleted file mode 100644 index dd6ab3c5f..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/AzureAIAgentsInWorkflow.csproj +++ /dev/null @@ -1,19 +0,0 @@ - - - Exe - net10.0 - enable - enable - true - - - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Dockerfile b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Dockerfile deleted file mode 100644 index 102e36e08..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Build the application -FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build -WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container -COPY 
. . - -RUN dotnet restore -RUN dotnet build -c Release --no-restore -RUN dotnet publish -c Release --no-build -o /app -f net10.0 - -# Run the application -FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final -WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. -COPY --from=build /app . - -EXPOSE 8088 -ENTRYPOINT ["dotnet", "AzureAIAgentsInWorkflow.dll"] diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Program.cs b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Program.cs deleted file mode 100644 index c80b373e7..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/Program.cs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -// This sample demonstrates a multi-agent workflow with Writer and Reviewer agents -// using Azure AI Foundry AIProjectClient and the Agent Framework WorkflowBuilder. - -using Azure.AI.AgentServer.AgentFramework.Extensions; -using Azure.AI.Projects; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Agents.AI.Workflows; - -var endpoint = Environment.GetEnvironmentVariable("AZURE_AI_PROJECT_ENDPOINT") - ?? throw new InvalidOperationException("AZURE_AI_PROJECT_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("MODEL_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -Console.WriteLine($"Using Azure AI endpoint: {endpoint}"); -Console.WriteLine($"Using model deployment: {deploymentName}"); - -// WARNING: DefaultAzureCredential is convenient for development but requires careful consideration in production. -// In production, consider using a specific credential (e.g., ManagedIdentityCredential) to avoid -// latency issues, unintended credential probing, and potential security risks from fallback mechanisms. 
-AIProjectClient aiProjectClient = new(new Uri(endpoint), new DefaultAzureCredential()); - -// Create Foundry agents -AIAgent writerAgent = await aiProjectClient.CreateAIAgentAsync( - name: "Writer", - model: deploymentName, - instructions: "You are an excellent content writer. You create new content and edit contents based on the feedback."); - -AIAgent reviewerAgent = await aiProjectClient.CreateAIAgentAsync( - name: "Reviewer", - model: deploymentName, - instructions: "You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content. Provide the feedback in the most concise manner possible."); - -try -{ - var workflow = new WorkflowBuilder(writerAgent) - .AddEdge(writerAgent, reviewerAgent) - .Build(); - - Console.WriteLine("Starting Writer-Reviewer Workflow Agent Server on http://localhost:8088"); - await workflow.AsAgent().RunAIAgentAsync(); -} -finally -{ - // Cleanup server-side agents - await aiProjectClient.Agents.DeleteAgentAsync(writerAgent.Name); - await aiProjectClient.Agents.DeleteAgentAsync(reviewerAgent.Name); -} \ No newline at end of file diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/agent.yaml b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/agent.yaml deleted file mode 100644 index 283c1a194..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/agent.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml - -kind: hosted -name: AzureAIAgentsInWorkflow -description: > - A multi-agent workflow featuring a Writer and Reviewer that collaborate - to create and refine content. 
-metadata: - authors: - - Microsoft - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Multi-Agent Workflow - - Writer-Reviewer - - Content Creation -protocols: - - protocol: responses - version: v1 -environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - name: chat - kind: model - id: gpt-4o-mini diff --git a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/run-requests.http b/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/run-requests.http deleted file mode 100644 index 2fcdb2499..000000000 --- a/samples/csharp/hosted-agents/AgentFramework/AzureAIAgentsInWorkflow/run-requests.http +++ /dev/null @@ -1,34 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input - Content creation request -POST {{endpoint}} -Content-Type: application/json - -{ - "input": "Create a slogan for a new electric SUV that is affordable and fun to drive", - "stream": false -} - -### Explicit input format -POST {{endpoint}} -Content-Type: application/json - -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "Write a short product description for a smart water bottle that tracks hydration" - } - ] - } - ], - "stream": false -} diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Dockerfile b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Dockerfile deleted file mode 100644 index 89c4907b0..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build -WORKDIR /src -COPY . . 
-RUN dotnet restore -RUN dotnet publish -c Release -o /app/publish - -FROM mcr.microsoft.com/dotnet/aspnet:8.0 -WORKDIR /app -COPY --from=build /app/publish . - -ENV ASPNETCORE_URLS=http://0.0.0.0:8088 -EXPOSE 8088 - -ENTRYPOINT ["dotnet", "SystemUtilityAgent.dll"] diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Program.cs b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Program.cs deleted file mode 100644 index 73f51d985..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Program.cs +++ /dev/null @@ -1,10 +0,0 @@ -using Azure.AI.AgentServer.Core.Context; -using Azure.AI.AgentServer.Responses.Invocation; -using Microsoft.Extensions.DependencyInjection; - -// Run Agent Server with customized agent invocation factory -// Uses DI to provide IAgentInvocation. -await AgentServerApplication.RunAsync(new ApplicationOptions( - ConfigureServices: services => services.AddSingleton(), - TelemetrySourceName: "SystemUtilityAgent" -)).ConfigureAwait(false); diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Properties/launchSettings.json b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Properties/launchSettings.json deleted file mode 100644 index e7e6c5907..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/Properties/launchSettings.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "$schema": "https://json.schemastore.org/launchsettings.json", - "profiles": { - "SystemUtilityAgent": { - "commandName": "Project", - "dotnetRunMessages": true, - "environmentVariables": { - "AZURE_AI_PROJECT_ENDPOINT": "", - "AZURE_AI_MODEL_DEPLOYMENT_NAME": "gpt-4o-mini", - "OPENAI_API_VERSION": "2025-03-01-preview", - "AZURE_OPENAI_API_KEY": "", - "AGENT_MAX_TURNS": "10", - "AGENT_APP_INSIGHTS_ENABLED": "true", - "APPLICATIONINSIGHTS_CONNECTION_STRING": "" - } - } - } -} diff --git 
a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/README.md b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/README.md deleted file mode 100644 index a280b5089..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/README.md +++ /dev/null @@ -1,148 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates a **C# System Utility Agent** hosted using the Azure AI AgentServer SDK. 
- -It shows: - -- How to host a custom agent by implementing `IAgentInvocation` -- How to emit OpenAI Responses-compatible REST responses (streaming and non-streaming) -- How to add **custom spans** into hosted agent traces (via `ActivitySource`) -- How to call **Azure OpenAI** (Responses API) and use the model to decide which tool(s) to invoke (tool-calling loop) -- A set of cross-platform “system utility” actions (processes, ports, DNS, env vars) that are container-aware - -## How It Works - -### System Utility Agent - -This agent is designed for diagnostics and “what’s running here?” questions. It is **container-aware** and will report whether the agent is likely seeing the container namespace vs the host. - -The agent exposes a small set of utility actions and returns results as JSON. - -At runtime, the agent: - -1. Sends the user request to Azure OpenAI (Responses API) along with the available tool definitions -2. Executes any tool calls the model requests -3. Feeds tool results back to the model as `function_call_output` -4. Repeats until the model returns a final assistant message - -Actions supported: - -1. **capability_report** - Report what the agent can likely observe (host vs container scope) -2. **system_info** - OS / runtime metadata -3. **resource_snapshot** - Process + GC + disk snapshot (best-effort cross-platform) -4. **list_processes** - List running processes (visibility depends on container scope) -5. **process_details** - Get details for a specific PID -6. **check_port** - Check whether a TCP port is reachable -7. **dns_lookup** - Resolve a hostname -8. **list_environment_variables** - List environment variables (supports redaction) - -### Tracing (custom spans) - -This sample demonstrates how to add **custom spans** to hosted agent traces using .NET `ActivitySource`. 
- -Spans are created for: - -- Each tool-calling iteration of the agent loop (`SystemUtilityAgent.agent_run_iteration`) -- Each tool call execution (`SystemUtilityAgent.tool_call_execution`), including tags such as: - - `gen_ai.tool.name` - - `gen_ai.tool.call.arguments` (truncated) - - `gen_ai.tool.call.result` - -The hosted endpoint also emits the standard AgentServer request spans for the overall invocation. - -### Agent Hosting - -The agent is hosted using the Azure AI AgentServer SDK, which provisions a REST API endpoint compatible with the OpenAI Responses protocol. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. .NET 8 SDK installed -2. An Azure OpenAI endpoint configured -3. A deployment of a chat model (e.g., `gpt-4o-mini`) - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The Azure OpenAI model deployment name (optional, defaults to `gpt-4o-mini`) - -Authentication (choose one): - -- `AZURE_OPENAI_API_KEY` for key-based auth, OR -- `az login` so `DefaultAzureCredential` can acquire a token - -Optional: - -- `OPENAI_API_VERSION` - Defaults to `2025-03-01-preview` - -```powershell -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" - -# Option A: key-based -$env:AZURE_OPENAI_API_KEY="" - -# Option B: AAD-based -# az login - -# Optional -$env:OPENAI_API_VERSION="2025-03-01-preview" -``` - -### Running the Sample - -From the project folder: - -```powershell -cd samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent - -dotnet run -``` - -By default, the AgentServer host listens on `http://localhost:8088`. 
- -### Interacting with the Agent - -You can interact with the agent workflow using: - -- The `run-requests.http` file in this directory to test and prompt the agent -- Any OpenAI Responses compatible client by sending requests to `http://localhost:8080/` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### After deployed, the agent appears stateless (chat history is not preserved) - -This agent will remain stateless before Azure.AI.Projects supports conversation client. - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgent.csproj b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgent.csproj deleted file mode 100644 index f46080b1d..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgent.csproj +++ /dev/null @@ -1,17 +0,0 @@ - - - Exe - net8.0 - enable - enable - $(NoWarn);OPENAI001 - - - - - - - - - - diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgentInvocation.cs b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgentInvocation.cs deleted file mode 100644 index c8a8cbb8b..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/SystemUtilityAgentInvocation.cs +++ /dev/null @@ -1,673 +0,0 @@ -using Azure.AI.AgentServer.Contracts.Generated.OpenAI; -using Azure.AI.AgentServer.Contracts.Generated.Responses; -using Azure.AI.AgentServer.Core.Common.Http.Json; -using Azure.AI.AgentServer.Core.Common.Id; -using Azure.AI.AgentServer.Responses.Invocation; -using Azure.Identity; -using System.Diagnostics; -using System.Net; -using System.Net.Sockets; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; -using System.Text.Json; -using OpenAIFunctionTool = OpenAI.Responses.FunctionTool; - - -public sealed class SystemUtilityAgentInvocation : IAgentInvocation -{ - private static readonly ActivitySource ActivitySource = new("SystemUtilityAgent"); - - private static readonly object ApiClientLock = new(); - private static Azure.AI.OpenAI.AzureOpenAIClient? 
ApiClient; - - private static Azure.AI.OpenAI.AzureOpenAIClient GetOrCreateApiClient() - { - if (ApiClient is not null) - { - return ApiClient; - } - - lock (ApiClientLock) - { - if (ApiClient is not null) - { - return ApiClient; - } - - var aiProjectEndpoint = Environment.GetEnvironmentVariable("AZURE_AI_PROJECT_ENDPOINT"); - - if (string.IsNullOrWhiteSpace(aiProjectEndpoint)) - { - throw new InvalidOperationException("Missing required environment variable 'AZURE_AI_PROJECT_ENDPOINT'."); - } - - var aoaiEndpoint = ToAzureOpenAIEndpoint(aiProjectEndpoint); - var apiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY"); - - if (!string.IsNullOrWhiteSpace(apiKey)) - { - var credential = new System.ClientModel.ApiKeyCredential(apiKey); - ApiClient = new Azure.AI.OpenAI.AzureOpenAIClient(new Uri(aoaiEndpoint), credential); - } - else - { - var credential = new DefaultAzureCredential(); - ApiClient = new Azure.AI.OpenAI.AzureOpenAIClient(new Uri(aoaiEndpoint), credential); - } - - return ApiClient; - } - } - - - private const string SystemPrompt = - "You are a System Utility Agent.\n" + - "You can inspect the runtime environment using tools (processes, ports, resources, DNS, environment variables).\n" + - "Important:\n" + - "- Call capability_report early when user questions depend on host vs container visibility.\n" + - "- Never claim you can see host-wide processes/ports unless capability_report indicates it.\n" + - "- Prefer using tools over guessing.\n" + - "- Keep outputs clear and actionable."; - - private static readonly JsonSerializerOptions JsonOptions = new() - { - PropertyNamingPolicy = JsonNamingPolicy.CamelCase, - WriteIndented = false - }; - - public Task InvokeAsync(AgentRunContext context, CancellationToken cancellationToken = default) - { - return InvokeAsyncInternal(context, cancellationToken); - } - - private async Task InvokeAsyncInternal(AgentRunContext context, CancellationToken cancellationToken) - { - var request = context.Request; - 
var activity = Activity.Current; - - var inputText = GetInputText(request); - activity?.SetTag("gen_ai.conversation.id", context.ConversationId); - - var responseText = await RunAgentLoopAsync(inputText, context.ConversationId, cancellationToken).ConfigureAwait(false); - - IList contents = - [new ItemContentOutputText(text: responseText, annotations: [])]; - - IList outputs = - [ - new ResponsesAssistantMessageItemResource( - id: Guid.NewGuid().ToString(), - status: ResponsesMessageItemResourceStatus.Completed, - content: contents - ) - ]; - - return ToResponse(request, context, output: outputs); - } - - public async IAsyncEnumerable InvokeStreamAsync(AgentRunContext context, - [EnumeratorCancellation] CancellationToken cancellationToken = default) - { - var request = context.Request; - var activity = Activity.Current; - activity?.SetTag("gen_ai.conversation.id", context.ConversationId); - - - var seq = -1; - - yield return new ResponseCreatedEvent(++seq, - ToResponse(request, context, status: ResponseStatus.InProgress)); - - var itemId = context.IdGenerator.GenerateMessageId(); - yield return new ResponseOutputItemAddedEvent(++seq, 0, - item: new ResponsesAssistantMessageItemResource( - id: itemId, - status: ResponsesMessageItemResourceStatus.InProgress, - content: [] - ) - ); - - yield return new ResponseContentPartAddedEvent(++seq, itemId, 0, 0, - new ItemContentOutputText(text: "", annotations: []) - ); - - var inputText = GetInputText(request); - - var responseText = await RunAgentLoopAsync(inputText, context.ConversationId, cancellationToken).ConfigureAwait(false); - - foreach (var part in responseText.Split(" ")) - { - await Task.Delay(20, cancellationToken).ConfigureAwait(false); - yield return new ResponseTextDeltaEvent(++seq, itemId, 0, 0, part + " "); - } - - yield return new ResponseTextDoneEvent(++seq, itemId, 0, 0, responseText); - - var content = new ItemContentOutputText(text: responseText, annotations: []); - yield return new 
ResponseContentPartDoneEvent(++seq, itemId, 0, 0, content); - - var item = new ResponsesAssistantMessageItemResource(id: itemId, ResponsesMessageItemResourceStatus.Completed, - content: [content]); - yield return new ResponseOutputItemDoneEvent(++seq, 0, item); - - yield return new ResponseCompletedEvent(++seq, - ToResponse(request, context, status: ResponseStatus.Completed, output: [item])); - } - - private static async Task RunAgentLoopAsync(string userInput, string conversationId, CancellationToken cancellationToken) - { - var maxTurns = GetIntEnv("AGENT_MAX_TURNS", 10); - - // Build initial inputs in OpenAI Responses input format. - var inputs = new List - { - OpenAI.Responses.ResponseItem.CreateSystemMessageItem(SystemPrompt), - OpenAI.Responses.ResponseItem.CreateUserMessageItem(userInput) - }; - - // This check conversation existence will always return false, before Azure.AI.Projects supports conversation client. - var conversationExists = false; - if (!string.IsNullOrWhiteSpace(conversationId)) - { - var conversationClient = GetOrCreateApiClient().GetConversationClient(); - try - { - var conversation = await conversationClient.GetConversationAsync(conversationId); - conversationExists = conversation is not null; - } - catch (Exception) - { - conversationExists = false; - } - } - - var totalInputTokens = 0; - var totalOutputTokens = 0; - for (var turn = 0; turn < maxTurns; turn++) - { - using var iter = ActivitySource.StartActivity("SystemUtilityAgent.agent_run_iteration", ActivityKind.Internal); - - iter?.SetTag("current_iteration", turn); - - var response = await CallAzureOpenAIResponsesAsync(inputs, conversationExists ? conversationId : null, cancellationToken).ConfigureAwait(false); - - var usage = response.Usage; - if (usage is not null) - { - totalInputTokens += usage?.InputTokenCount ?? 0; - totalOutputTokens += usage?.OutputTokenCount ?? 0; - iter?.SetTag("gen_ai.usage.input_tokens", usage?.InputTokenCount ?? 
0); - iter?.SetTag("gen_ai.usage.output_tokens", usage?.OutputTokenCount ?? 0); - } - - var calledAny = false; - var assistantTextChunks = new List(); - foreach (var outputItem in response.OutputItems) - { - inputs.Add(outputItem); - if (outputItem is OpenAI.Responses.FunctionCallResponseItem functionResponse) - { - using var functionCalActivity = ActivitySource.StartActivity("SystemUtilityAgent.tool_call_execution", ActivityKind.Internal); - var functionName = functionResponse.FunctionName; - var arguments = ParseArguments(functionResponse.FunctionArguments); - var functionResult = InvokeTool(functionName, arguments, cancellationToken); - calledAny = true; - inputs.Add(OpenAI.Responses.ResponseItem.CreateFunctionCallOutputItem( - functionResponse.CallId, - JsonSerializer.Serialize(functionResult, JsonOptions))); - functionCalActivity?.SetTag("gen_ai.tool.name", functionName); - functionCalActivity?.SetTag("gen_ai.tool.type", "function"); - functionCalActivity?.SetTag("gen_ai.tool.call.arguments", Truncate(functionResponse.FunctionArguments?.ToString() ?? "", 1024)); - functionCalActivity?.SetTag("gen_ai.tool.call.result", JsonSerializer.Serialize(functionResult, JsonOptions)); - } - else if (outputItem is OpenAI.Responses.MessageResponseItem messageResponse) - { - if (messageResponse.Content is null) - { - continue; - } - - foreach (var c in messageResponse.Content) - { - assistantTextChunks.Add(c.Text); - } - } - - if (conversationExists) - { - inputs.Clear(); - } - } - if (!calledAny) - { - var finalText = string.Join("", assistantTextChunks).Trim(); - return string.IsNullOrWhiteSpace(finalText) - ? "(No assistant text returned.)" - : finalText; - } - } - - var activity = Activity.Current; - activity?.SetTag("gen_ai.usage.input_tokens", totalInputTokens); - activity?.SetTag("gen_ai.usage.output_tokens", totalOutputTokens); - return $"I hit the {maxTurns} max turn limit for this request. 
Try rephrasing."; - } - - private static async Task CallAzureOpenAIResponsesAsync( - List inputs, - string? conversationId, - CancellationToken cancellationToken) - { - var activity = Activity.Current; - - var deploymentName = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME")?? "gpt-5"; - var chatHistoryLength = GetIntEnv("AGENT_CHAT_HISTORY_LENGTH", 20); - - activity?.SetTag("gen_ai.request.model", deploymentName); - - var apiClient = GetOrCreateApiClient(); - - var createResponseOptions = new OpenAI.Responses.CreateResponseOptions - { - StreamingEnabled = false - }; - - if (!string.IsNullOrWhiteSpace(conversationId)) - { - createResponseOptions.ConversationOptions = new OpenAI.Responses.ResponseConversationOptions(conversationId); - } - - foreach (var item in inputs.TakeLast(chatHistoryLength)) - { - createResponseOptions.InputItems.Add(item); - } - - var tools = ToolDefinitions(); - foreach (var tool in tools) - { - createResponseOptions.Tools.Add(tool); - } - - var responseClient = apiClient.GetResponsesClient(deploymentName); - activity?.SetTag("gen_ai.input.messages", JsonSerializer.Serialize(createResponseOptions.InputItems)); - var response = await responseClient - .CreateResponseAsync(createResponseOptions) - .ConfigureAwait(false); - - return response; - } - - private static OpenAIFunctionTool ToolDef(string name, string description, object parameters) - { - return OpenAIFunctionTool.CreateFunctionTool( - name, - BinaryData.FromObjectAsJson(parameters), - false, - description); - } - - private static OpenAIFunctionTool[] ToolDefinitions() - { - return - [ - ToolDef("capability_report", "Report what the agent can likely observe (host vs container scope).", new { type = "object", properties = new { }, required = Array.Empty() }), - ToolDef("system_info", "Return OS/runtime metadata.", new { type = "object", properties = new { }, required = Array.Empty() }), - ToolDef("resource_snapshot", "Return process + GC + disk snapshot.", new { type 
= "object", properties = new { }, required = Array.Empty() }), - ToolDef("list_processes", "List running processes (best-effort).", new { type = "object", properties = new { }, required = Array.Empty() }), - ToolDef("process_details", "Get details for a PID.", new { type = "object", properties = new { pid = new { type = "integer" } }, required = new[] { "pid" } }), - ToolDef("check_port", "Check whether a TCP port is reachable.", new { type = "object", properties = new { host = new { type = "string" }, port = new { type = "integer" } }, required = new[] { "host", "port" } }), - ToolDef("dns_lookup", "Resolve a hostname.", new { type = "object", properties = new { host = new { type = "string" } }, required = new[] { "host" } }), - ToolDef("list_environment_variables", "List environment variables. Supports redaction.", new { type = "object", properties = new { redact = new { type = "boolean" } }, required = Array.Empty() }) - ]; - } - - private static Dictionary ParseArguments(BinaryData? 
data) - { - if (data is null) - { - return new Dictionary(StringComparer.OrdinalIgnoreCase); - } - - using var doc = JsonDocument.Parse(data.ToString()); - if (doc.RootElement.ValueKind != JsonValueKind.Object) - { - return new Dictionary(StringComparer.OrdinalIgnoreCase); - } - - return doc.RootElement.EnumerateObject() - .ToDictionary(p => p.Name, p => p.Value.Clone(), StringComparer.OrdinalIgnoreCase); - } - - private static object InvokeTool(string name, Dictionary args, CancellationToken cancellationToken) - { - return name switch - { - "capability_report" => new { supported = true, reason = (string?)null, data = CapabilityReport() }, - "system_info" => new { supported = true, reason = (string?)null, data = SystemInfo() }, - "resource_snapshot" => new { supported = true, reason = (string?)null, data = ResourceSnapshot() }, - "list_processes" => new { supported = true, reason = (string?)null, data = ListProcesses() }, - "process_details" => ProcessDetailsTool(args), - "check_port" => CheckPortTool(args, cancellationToken), - "dns_lookup" => DnsLookupTool(args), - "list_environment_variables" => ListEnvTool(args), - _ => new { supported = false, reason = $"Unknown tool: {name}", data = (object?)null } - }; - } - - private static object ProcessDetailsTool(Dictionary args) - { - if (!args.TryGetValue("pid", out var pidEl) || !pidEl.TryGetInt32(out var pid) || pid <= 0) - { - return new { supported = false, reason = "Missing/invalid 'pid'", data = (object?)null }; - } - - try - { - return new { supported = true, reason = (string?)null, data = ProcessDetails(pid) }; - } - catch (Exception ex) - { - return new { supported = false, reason = $"{ex.GetType().Name}: {ex.Message}", data = (object?)null }; - } - } - - private static object CheckPortTool(Dictionary args, CancellationToken cancellationToken) - { - var host = args.TryGetValue("host", out var hostEl) ? 
hostEl.GetString() : null; - var port = 0; - var portOk = args.TryGetValue("port", out var portEl) && portEl.TryGetInt32(out port); - if (string.IsNullOrWhiteSpace(host) || !portOk || port <= 0 || port > 65535) - { - return new { supported = false, reason = "Missing/invalid 'host' or 'port'", data = (object?)null }; - } - - return new { supported = true, reason = (string?)null, data = CheckPort(host!, port, cancellationToken) }; - } - - private static object DnsLookupTool(Dictionary args) - { - var host = args.TryGetValue("host", out var hostEl) ? hostEl.GetString() : null; - if (string.IsNullOrWhiteSpace(host)) - { - return new { supported = false, reason = "Missing/invalid 'host'", data = (object?)null }; - } - - return new { supported = true, reason = (string?)null, data = DnsLookup(host!) }; - } - - private static object ListEnvTool(Dictionary args) - { - var redact = true; - if (args.TryGetValue("redact", out var r) && r.ValueKind is JsonValueKind.True or JsonValueKind.False) - { - redact = r.GetBoolean(); - } - - return new { supported = true, reason = (string?)null, data = ListEnvironmentVariables(redact) }; - } - - private static int GetIntEnv(string name, int fallback) - => int.TryParse(Environment.GetEnvironmentVariable(name), out var v) ? v : fallback; - - private static object CapabilityReport() - { - var os = RuntimeInformation.OSDescription; - var isLinux = RuntimeInformation.IsOSPlatform(OSPlatform.Linux); - - var inContainer = isLinux && (File.Exists("/.dockerenv") || CGroupLooksContainerized()); - var scope = inContainer ? "container" : "host"; - - return new - { - supported = true, - scope, - data = new - { - os, - framework = RuntimeInformation.FrameworkDescription, - in_container = inContainer, - process_visibility = new - { - supported = true, - scope, - notes = "In containers, you usually only see container processes (PID namespace)." 
- }, - network_visibility = new - { - supported = true, - scope, - notes = "In containers, ports reflect the container network namespace unless using host networking." - } - } - }; - } - - private static bool CGroupLooksContainerized() - { - try - { - var path = "/proc/1/cgroup"; - if (!File.Exists(path)) return false; - var txt = File.ReadAllText(path); - return txt.Contains("docker", StringComparison.OrdinalIgnoreCase) - || txt.Contains("containerd", StringComparison.OrdinalIgnoreCase) - || txt.Contains("kubepods", StringComparison.OrdinalIgnoreCase); - } - catch - { - return false; - } - } - - private static object SystemInfo() - { - return new - { - os = RuntimeInformation.OSDescription, - framework = RuntimeInformation.FrameworkDescription, - process_arch = RuntimeInformation.ProcessArchitecture.ToString(), - machine_name = Environment.MachineName, - user = Environment.UserName, - uptime = Environment.TickCount64 / 1000.0, - processors = Environment.ProcessorCount - }; - } - - private static object ResourceSnapshot() - { - var proc = Process.GetCurrentProcess(); - var memInfo = GC.GetGCMemoryInfo(); - - var drives = DriveInfo.GetDrives() - .Where(d => d.IsReady) - .Select(d => new - { - name = d.Name, - format = d.DriveFormat, - total_bytes = d.TotalSize, - free_bytes = d.TotalFreeSpace - }) - .ToList(); - - return new - { - process = new - { - pid = proc.Id, - working_set_bytes = proc.WorkingSet64, - private_memory_bytes = proc.PrivateMemorySize64, - threads = proc.Threads.Count - }, - gc = new - { - heap_size_bytes = GC.GetTotalMemory(forceFullCollection: false), - total_available_memory_bytes = memInfo.TotalAvailableMemoryBytes, - high_memory_load_threshold_bytes = memInfo.HighMemoryLoadThresholdBytes, - memory_load_bytes = memInfo.MemoryLoadBytes - }, - disks = drives - }; - } - - private static object ListProcesses() - { - var processes = Process.GetProcesses() - .OrderBy(p => p.ProcessName) - .Take(200) - .Select(p => new - { - pid = p.Id, - name = 
p.ProcessName - }) - .ToList(); - - return new - { - count = processes.Count, - sample = processes, - notes = "Process visibility can be limited in containers." - }; - } - - private static object ProcessDetails(int pid) - { - if (pid <= 0) throw new ArgumentException("Provide a PID (e.g., 'process_details 1234')."); - var p = Process.GetProcessById(pid); - - DateTimeOffset? start = null; - try { start = p.StartTime; } catch { } - - return new - { - pid = p.Id, - name = p.ProcessName, - start_time = start, - working_set_bytes = Safe(() => p.WorkingSet64), - private_memory_bytes = Safe(() => p.PrivateMemorySize64), - total_processor_time = Safe(() => p.TotalProcessorTime) - }; - } - - private static object CheckPort(string host, int port, CancellationToken cancellationToken) - { - if (port <= 0 || port > 65535) throw new ArgumentException("Provide a TCP port between 1 and 65535."); - - using var client = new TcpClient(); - var connectTask = client.ConnectAsync(host, port, cancellationToken); - - try - { - connectTask.GetAwaiter().GetResult(); - return new { host, port, reachable = true }; - } - catch (Exception ex) - { - return new { host, port, reachable = false, reason = $"{ex.GetType().Name}: {ex.Message}" }; - } - } - - private static object DnsLookup(string host) - { - if (string.IsNullOrWhiteSpace(host)) throw new ArgumentException("Provide a hostname."); - var addrs = Dns.GetHostAddresses(host); - return new - { - host, - addresses = addrs.Select(a => a.ToString()).ToArray() - }; - } - - private static object ListEnvironmentVariables(bool redact) - { - var vars = Environment.GetEnvironmentVariables(); - var dict = new SortedDictionary(StringComparer.OrdinalIgnoreCase); - - foreach (var keyObj in vars.Keys) - { - var key = keyObj?.ToString() ?? ""; - if (string.IsNullOrWhiteSpace(key)) - { - continue; - } - var val = Environment.GetEnvironmentVariable(key); - dict[key] = redact ? 
RedactIfSensitive(key, val) : val; - } - - return new - { - redact, - count = dict.Count, - variables = dict - }; - } - - private static string? RedactIfSensitive(string key, string? val) - { - if (val is null) return null; - var upper = key.ToUpperInvariant(); - var sensitive = upper.Contains("KEY") - || upper.Contains("SECRET") - || upper.Contains("TOKEN") - || upper.Contains("PASSWORD") - || upper.Contains("CONNECTION") - || upper.Contains("SAS"); - - return sensitive ? "***REDACTED***" : val; - } - - private static T? Safe(Func f) - { - try { return f(); } catch { return default; } - } - - private static string Truncate(string s, int max) - => s.Length <= max ? s : s[..max]; - - private static string GetInputText(CreateResponseRequest request) - { - var items = request.Input.ToObject>(); - if (items is { Count: > 0 }) - { - return items.Select(item => - { - return item switch - { - ResponsesUserMessageItemParam userMessage => userMessage.Content - .ToObject>()? - .FirstOrDefault()? - .Text ?? "", - _ => "" - }; - }) - .FirstOrDefault() ?? ""; - } - - // implicit user message of text input - return request.Input.ToString(); - } - - private static Response ToResponse(CreateResponseRequest request, AgentRunContext context, - ResponseStatus status = ResponseStatus.Completed, - IEnumerable? 
output = null) - { - return request.ToResponse(context: context, output: output, status: status); - } - - private static string ToAzureOpenAIEndpoint(string projectEndpoint) - { - if (string.IsNullOrWhiteSpace(projectEndpoint)) - throw new ArgumentException("URL cannot be null or empty.", nameof(projectEndpoint)); - - var uri = new Uri(projectEndpoint); - - // Expect something like: {resource}.services.ai.azure.com - var hostParts = uri.Host.Split('.'); - if (hostParts.Length < 5 || hostParts[1] != "services" || hostParts[2] != "ai") - throw new ArgumentException("Input URL is not a valid Azure AI Services URL.", nameof(projectEndpoint)); - - var resourceName = hostParts[0]; - - return $"https://{resourceName}.openai.azure.com/openai/v1/"; - } - -} diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/agent.yaml b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/agent.yaml deleted file mode 100644 index fdff44441..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/agent.yaml +++ /dev/null @@ -1,38 +0,0 @@ -name: SystemUtilityAgent -description: |- - System Utility Agent (cross-OS, container-aware) — NO local files required. - - Tools included (per your request): - - capability_report - - system_info - - resource_snapshot - - list_processes - - process_details - - check_port - - dns_lookup - - list_environment_variables -metadata: - example: - - role: user - content: |- - What is the current value of AZURE_AI_MODEL_DEPLOYMENT_NAME? 
- tags: - - example - - learning - authors: - - mengla -template: - name: SystemUtilityAgent - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: gpt-5 - - name: AGENT_MAX_TURNS - value: 10 - - name: AGENT_CHAT_HISTORY_LENGTH - value: 20 \ No newline at end of file diff --git a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/run-requests.http b/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/run-requests.http deleted file mode 100644 index 4a91d0cd7..000000000 --- a/samples/csharp/hosted-agents/AgentWithCustomFramework/SystemUtilityAgent/run-requests.http +++ /dev/null @@ -1,30 +0,0 @@ -@host = http://localhost:8088 -@endpoint = {{host}}/responses - -### Health Check -GET {{host}}/readiness - -### Simple string input -POST {{endpoint}} -Content-Type: application/json -{ - "input": "What environment are you running in? Summarize what you can observe" -} - -### Explicit input -POST {{endpoint}} -Content-Type: application/json -{ - "input": [ - { - "type": "message", - "role": "user", - "content": [ - { - "type": "input_text", - "text": "What environment are you running in? Summarize what you can observe" - } - ] - } - ] -} diff --git a/samples/csharp/hosted-agents/README.md b/samples/csharp/hosted-agents/README.md new file mode 100644 index 000000000..bdbea3c3e --- /dev/null +++ b/samples/csharp/hosted-agents/README.md @@ -0,0 +1,274 @@ +# Microsoft Foundry — Hosted Agent Samples (.NET) + +Samples for building, deploying, and managing hosted agents on [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/hosted-agents). Each sample is a starter template — fork it, change the system prompt and tools, deploy with `azd up`. 
+ +> **Every sample includes Application Insights and OpenTelemetry tracing out of the box.** You get production-ready logging, distributed traces, and metrics from the first sample you run. + +### Quickstart + +> **Prerequisites:** Install the Azure Developer CLI with the Foundry AI extension. See [Set up azd for hosted agents](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=azd) if you haven't already. + +```bash +mkdir my-agent && cd my-agent +azd ai agent init -m ../agent-framework/hello-world/agent.manifest.yaml +azd up +``` + +You'll have a running agent in minutes. Or, if you prefer VS Code, use the [Foundry extension quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=vscode) to build and deploy directly from the editor. + +Read on to pick the right sample for your scenario, or jump to the [learning path](#learning-path) for a guided walkthrough. + +--- + +## Two protocols: Responses and Invocations + +Hosted agents support two protocols. Pick the one that matches your scenario. + +| Scenario | Protocol | Why | +|----------|----------|-----| +| Conversational chatbot or assistant | **Responses** | The platform manages conversation history, streaming events, and session lifecycle — use any OpenAI-compatible SDK as the client. | +| Agent published to Teams or M365 | **Responses** + **Activity** | The Responses protocol powers the agent logic; the Activity protocol handles the Teams channel integration. | +| Multi-turn Q&A with RAG or tools | **Responses** | Built-in `conversation_id` threading and tool result handling. | +| Background / async processing | **Responses** | `background: true` with platform-managed polling and cancellation — no custom code needed. | +| Webhook receiver (GitHub, Stripe, Jira, etc.) | **Invocations** | The external system sends its own payload format — you can't change it to match `/responses`. 
| +| Non-conversational processing (classification, extraction, batch) | **Invocations** | The input is structured data, not a chat message. Arbitrary JSON in, arbitrary JSON out. | +| Custom streaming protocol (AG-UI, etc.) | **Invocations** | AG-UI and other agent-UI protocols aren't OpenAI-compatible — you need raw SSE control. | +| Async job with custom progress, polling, or non-OpenAI callers | **Invocations** | Custom progress reporting, intermediate results, and polling semantics beyond what Responses `background: true` provides. | +| Protocol bridge (GitHub Copilot, proprietary systems) | **Invocations** | The caller has its own protocol that doesn't map to `/responses`. | +| Inter-service orchestration (Durable Functions, Logic Apps) | **Invocations** | The caller sends structured task payloads, not chat messages. | + +> **Still not sure?** Start with **Responses**. You can always add an Invocations endpoint later — a hosted agent can support both protocols simultaneously by listing both in `agent.yaml`. + +> **Other protocols:** Hosted agents can also expose the **Activity** protocol (for Teams and M365 integration) and the **A2A** protocol (for agent-to-agent delegation). + +
+Protocol comparison details + +| | **Responses** | **Invocations** | +|---|---|---| +| **Best for** | Most agents — the platform manages conversation history, streaming lifecycle, and background polling | Agents that need full HTTP control, custom payloads, or custom async workflows | +| **Payload** | OpenAI-compatible `/responses` contract | Arbitrary JSON via `/invocations` — you define the schema | +| **Client SDK** | Any OpenAI-compatible SDK (Python, JS, C#) works out of the box | Custom client — you define the contract | +| **Session history** | Framework-managed via `conversation_id` | You manage sessions (in-memory, Cosmos DB, etc.) | +| **Streaming** | Framework-managed `ResponseEventStream` with lifecycle events (`created`, `in_progress`, `delta`, `completed`) | Raw SSE — you format and write events directly | +| **Background / long-running** | Built-in (`background: true` + platform-managed polling) | Manual task tracking and custom polling endpoints | +| **Server SDK** | `Azure.AI.AgentServer.Responses` | `Azure.AI.AgentServer.Invocations` | +| **agent.yaml** | `protocol: responses`, `version: v0.1.0` | `protocol: invocations`, `version: v0.0.1` | +
+ +--- + +## Pick your framework + +Hosted agents run any code you can put in a container. These samples cover three frameworks — pick the one that matches where you are. + +| | **Agent Framework** | **Bring Your Own** | +|---|---|---| +| **Best for** | Starting fresh on Foundry — also supports AutoGen and Semantic Kernel | Already built with your own .NET stack or framework | +| **SDK** | `Microsoft.Agents.AI.Foundry` + `Azure.AI.Projects` | `Azure.AI.AgentServer.Responses` / `Azure.AI.AgentServer.Invocations`, or `Azure.AI.AgentServer.Core` for fully custom HTTP | +| **Foundry integration** | Native — sessions, tools, memory, streaming all built in | Core adapter hosts the web server and exposes `/invocations` and `/responses` endpoints; you supply the agent logic | +| **Protocols** | Responses and Invocations | Responses and Invocations | +| **Language support** | C# and Python | Any language (C# and Python samples provided) | +| **Start here** | [Hello World →](agent-framework/hello-world/) | [Hello World →](bring-your-own/responses/HelloWorld/) | + +> **Which should I choose?** If you're building a new agent — or already using AutoGen or Semantic Kernel — start with **Agent Framework**. It has the tightest Foundry integration, supports those orchestrators natively, and has the most samples to learn from. If you have existing .NET agent code, **Bring Your Own** shows how to containerize and deploy it unchanged. For LangGraph or CrewAI (Python-only frameworks), see the [Python samples](../../python/hosted-agents/). + +--- + +## Agent Framework samples + +The recommended path for building hosted agents. Agent Framework gives you native session management, built-in tool wiring, streaming, and the full Foundry feature set. + +Samples are split by protocol. Start with **Responses** (the common path) — then explore **Invocations** when you need full HTTP control or long-running workflows.
+ +### Responses protocol + +The platform manages conversation history, streaming lifecycle, and background execution. This is the default for most agents. + +#### Which sample should I start with? + +| I want to... | Start here | Then try | +|--------------|-----------|----------| +| Get a hosted agent running as fast as possible | [Hello World](agent-framework/hello-world/) | Multi-Turn Sessions → Tools | +| Build a chatbot that remembers conversations | [Multi-Turn Sessions](agent-framework/simple-agent/) | Tools, Knowledge Grounding | +| Connect an agent to APIs, MCP servers, or search | [Tools](agent-framework/local-tools/) | MCP Tools, Knowledge Grounding | +| Use client-side or server-side MCP patterns | [MCP Tools](agent-framework/mcp-tools/) | Tools, Knowledge Grounding | +| Answer questions from my own documents | [Knowledge Grounding](agent-framework/text-search-rag/) | Tools | +| Build a multi-agent workflow with routing | [Workflows](agent-framework/workflows/) | Agent-to-Agent | + +#### Learning path + +**New to hosted agents?** Start here and work through in order: + +1. **[Hello World](agent-framework/hello-world/)** — Deploy your first agent, invoke it, see traces in App Insights. +2. **[Multi-Turn Sessions](agent-framework/simple-agent/)** — Adds multi-turn conversation history. +3. **[Tools](agent-framework/local-tools/)** — Add local C# function tools to your agent. + +**Ready for more?** + +4. **[Knowledge Grounding](agent-framework/text-search-rag/)** — Ground answers in your own documents with TextSearchProvider. +5. **[MCP Tools](agent-framework/mcp-tools/)** — Connect to MCP servers using client-side and server-side MCP patterns. +6. **[Workflows](agent-framework/workflows/)** — Compose multiple agents into sequential pipelines. + +### Invocations protocol + +Full control over the HTTP request/response cycle. You define the payload schema, manage session state, and implement polling for long-running operations. 
Use this when you need an arbitrary payload format or async workflows that don't fit the OpenAI `/responses` contract. + +> **Every capability works with both protocols.** Tools, RAG, memory, evaluations, Teams publishing, multi-agent — all of these work with Invocations. The Invocations samples below focus on the protocol mechanics (how you handle requests, streaming, sessions, and long-running tasks). To add a capability like knowledge grounding or tools, learn the Invocations pattern from these samples, then adapt the relevant Responses sample — the capability code is the same, only the HTTP handler differs. + +| Sample | What it shows | +|--------|---------------| +| **[Echo Agent](agent-framework/invocations-echo-agent/)** | Minimal invocations agent that echoes the request back — shows the invocations handler pattern. | + +--- + +## LangGraph samples + +> **LangGraph is Python-only.** See the [Python LangGraph samples](../../python/hosted-agents/) for LangGraph support on Foundry. The deployment, observability, Teams publishing, and evaluation infrastructure is identical — only the agent code differs. + +--- + +## Bring Your Own Framework samples + +Already built an agent with your own .NET code? The protocol SDKs (`Azure.AI.AgentServer.Responses` / `Azure.AI.AgentServer.Invocations`) give you the hosted agent HTTP contract — they host the web server, expose the right endpoint, and handle request parsing — so you just plug in your agent logic. This is the recommended path for BYO to ensure your agent stays aligned with the platform contract as new endpoints are added. For lower-level control, the **Core adapter** (`Azure.AI.AgentServer.Core`) gives you managed hosting, OpenTelemetry tracing, and health endpoints, but you handle the protocol details yourself. + +> **Note:** If you're using AutoGen or Semantic Kernel, you don't need BYO — Agent Framework supports them natively. See the [Agent Framework samples](#agent-framework-samples) instead. 
+ +### Responses protocol + +| Sample | What it shows | +|--------|--------------| +| **[Hello World](bring-your-own/responses/HelloWorld/)** | Minimal agent — calls a Foundry model via the Responses API and returns the reply. The simplest possible BYO starting point. | +| **[Notetaking Agent](bring-your-own/responses/notetaking-agent/)** | Agent that takes and retrieves notes using a custom tool. | +| **[Background Agent](bring-your-own/responses/background-agent/)** | Long-running background processing with async execution. | + +### Invocations protocol + +| Sample | What it shows | +|--------|--------------| +| **[Hello World](bring-your-own/invocations/HelloWorld/)** | Minimal agent — arbitrary JSON in, streaming SSE out. The simplest possible BYO invocations starting point. | +| **[Notetaking Agent](bring-your-own/invocations/notetaking-agent/)** | Note-taking agent with the Invocations protocol. | +| **[Human-in-the-Loop](bring-your-own/invocations/human-in-the-loop/)** | Long-running agent that pauses for human approval before continuing. | + +**Which approach?** Use the protocol SDKs (`Azure.AI.AgentServer.Responses` / `Azure.AI.AgentServer.Invocations`) — they work with any framework and keep you aligned with the platform contract. The **Core adapter** (`Azure.AI.AgentServer.Core`) is available when you need lower-level control without protocol abstractions. The Custom HTTP sample exists as a reference for what the contract looks like under the hood with no SDK at all. + +**What's different from Agent Framework samples:** BYO samples handle their own session state and tool wiring. The protocol SDKs give you the HTTP plumbing, but the tradeoff vs. full Agent Framework is that you manage orchestration, tools, and memory in your own code. The Dockerfile, agent.yaml, and `azd up` deployment are the same. 
+ +--- + +## Quick reference + +| Capability | Sample (Responses) | Sample (Invocations) | +|------------|-------------------|---------------------| +| Deploy and invoke a hosted agent | [Hello World](agent-framework/hello-world/) | [Echo Agent](agent-framework/invocations-echo-agent/) | +| Multi-turn conversation with session persistence | [Multi-Turn Sessions](agent-framework/simple-agent/) | — | +| Streaming | [Hello World](agent-framework/hello-world/) (built-in) | — | +| Local function tools | [Tools](agent-framework/local-tools/) | — | +| RAG / knowledge grounding | [Knowledge Grounding](agent-framework/text-search-rag/) | — | +| Multi-agent workflow | [Workflows](agent-framework/workflows/) | — | +| BYO agent (any framework) | [BYO Hello World](bring-your-own/responses/HelloWorld/) | [BYO Hello World](bring-your-own/invocations/HelloWorld/) | +| Observability (App Insights, OpenTelemetry, traces) | Every sample — enabled by default | Every sample — enabled by default | + +## Deploy any sample + +Every sample deploys the same way. You need the [Azure Developer CLI (azd)](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and a Foundry project with a model deployment. 
+ +```bash +mkdir my-agent && cd my-agent + +# Scaffold from the sample manifest — azd generates all the deployment files +azd ai agent init -m ../agent-framework/hello-world/agent.manifest.yaml + +# Build, push, and deploy +azd up + +# Clean up when done +azd down +``` + +### Other ways to invoke your agent + +| Method | When to use | +|--------|------------| +| `azd ai agent invoke` | Quick CLI test after deploy | +| [VS Code Foundry extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=vscode) | One-click invoke from the editor | +| `curl` | Each sample README includes curl examples | + +## Repo structure + +``` +samples/dotnet/hosted-agents/ +├── agent-framework/ +│ ├── hello-world/ ← Start here (Agent Framework) +│ ├── simple-agent/ +│ ├── local-tools/ +│ ├── mcp-tools/ +│ ├── text-search-rag/ +│ ├── workflows/ +│ └── invocations-echo-agent/ +└── bring-your-own/ + ├── responses/ + │ ├── HelloWorld/ ← Start here (BYO Responses) + │ ├── notetaking-agent/ + │ └── background-agent/ + └── invocations/ + ├── HelloWorld/ ← Start here (BYO Invocations) + ├── notetaking-agent/ + └── human-in-the-loop/ +``` + +### Language and framework coverage + +| Framework | Protocol | C# | Python | +|-----------|----------|----|--------| +| **Agent Framework** | Responses | 5 samples | 3 samples | +| **Agent Framework** | Invocations | 1 sample (echo agent) | — | +| **LangGraph** | — | — (Python-only) | See [Python README](../../python/hosted-agents/) | +| **Bring Your Own** | Responses | 3 samples | 5 samples | +| **Bring Your Own** | Invocations | 3 samples | 7 samples | + +The LangGraph samples are Python-only because LangGraph is a Python-native framework, but the containerized deployment pattern is identical. 
+ +## What's in every sample + +``` +/ +├── README.md # What it does, prerequisites, deploy, invoke, clean up +├── Program.cs # Agent entry point (with OpenTelemetry + App Insights init) +├── .csproj # Project file with NuGet dependencies +├── Dockerfile # Container definition (port 8088, .NET 10 multi-stage build) +├── .dockerignore +├── agent.manifest.yaml # Agent definition — name, protocols, environment variables +└── agent.yaml # Deployed agent config — protocol, resources +``` + +Python samples follow the same layout with `main.py`, `requirements.txt`, and a Python-based Dockerfile. + +> Samples do not include `azure.yaml`. The `azd ai agent init -m agent.manifest.yaml` command generates the project configuration automatically from the agent manifest. + +## Prerequisites + +- **Azure subscription** with access to Microsoft Foundry +- **Azure Developer CLI (azd)** — [install](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=azd) +- **.NET 10** (or **Python 3.12+** for Python samples) + +That's it. `azd ai agent init` and the VS Code Foundry extension will create a Foundry project and deploy a model for you if you don't already have one. Container images are built remotely using ACR Tasks by default — **Docker is not required** unless you want to build locally. 
+ +## Resources + +- [Microsoft Foundry documentation](https://learn.microsoft.com/en-us/azure/foundry/what-is-foundry?view=foundry) +- [Hosted agents overview](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/hosted-agents?view=foundry) +- [Deploy a hosted agent](https://learn.microsoft.com/en-us/azure/foundry/agents/how-to/deploy-hosted-agent) +- **Responses protocol:** [Python SDK (`azure-ai-agentserver-responses`)](https://pypi.org/project/azure-ai-agentserver-responses/) · [C# SDK (`Azure.AI.AgentServer.Responses`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses) +- **Invocations protocol:** [Python SDK (`azure-ai-agentserver-invocations`)](https://pypi.org/project/azure-ai-agentserver-invocations/) · [C# SDK (`Azure.AI.AgentServer.Invocations`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Invocations) +- **Core adapter (BYO):** [Python SDK (`azure-ai-agentserver-core`)](https://pypi.org/project/azure-ai-agentserver-core/) · [C# SDK (`Azure.AI.AgentServer.Core`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Core) +- [Azure Developer CLI (azd)](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) + +## Contributing + +This project welcomes contributions and suggestions. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. 
diff --git a/samples/csharp/hosted-agents/agent-framework/README.md b/samples/csharp/hosted-agents/agent-framework/README.md new file mode 100644 index 000000000..822112da0 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/README.md @@ -0,0 +1,219 @@ +# Agent Framework Samples + +This directory contains samples that demonstrate how to use the [Agent Framework](https://github.com/microsoft/agent-framework) to host agents with different capabilities and configurations. Each sample includes a README with instructions on how to interact with the agent. + +## Samples + +### Responses API + +| # | Sample | Description | +|---|--------|-------------| +| 1 | [hello-world](hello-world/) | A minimal agent demonstrating basic request/response interaction and multi-turn conversations. | +| 2 | [simple-agent](simple-agent/) | A general-purpose AI assistant — the simplest hosted agent using `AsAIAgent(model, instructions)`. | +| 3 | [local-tools](local-tools/) | A hotel search assistant with local C# function tools (`AIFunctionFactory.Create`). | +| 4 | [mcp-tools](mcp-tools/) | An agent demonstrating client-side and server-side MCP tool integration. | +| 5 | [text-search-rag](text-search-rag/) | A support agent with RAG capabilities using `TextSearchProvider`. | +| 6 | [workflows](workflows/) | A multi-agent translation pipeline using `WorkflowBuilder`. | + +### Invocations API + +| # | Sample | Description | +|---|--------|-------------| +| 1 | [invocations-echo-agent](invocations-echo-agent/) | A minimal echo agent demonstrating session state management via `agent_session_id` (no LLM needed). | + +## Running the Agent Host Locally + +### Using `azd` + +#### Prerequisites + +1. **Azure Developer CLI (`azd`)** + + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. 
**Azure Subscription** + +#### Create a new project + +**No cloning required**. Create a new folder, point azd at the manifest on GitHub. + +```bash +mkdir hosted-agent-framework-agent && cd hosted-agent-framework-agent + +# Initialize from the manifest +azd ai agent init -m https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/samples/dotnet/hosted-agents/agent-framework/hello-world/agent.manifest.yaml +``` + +Follow the instructions from `azd ai agent init` to complete the agent initialization. If you don't have an existing Foundry project and a model deployment, `azd ai agent init` will guide you through creating them. + +#### Provision Azure Resources + +> This step is only needed if you don't have an existing Foundry project and model deployment. + +Run the following command to provision the necessary Azure resources: + +```bash +azd provision +``` + +This will create the following Azure resources: + +- A new resource group named `rg-[project_name]-dev`. In this guide, `[project_name]` will be `hosted-agent-framework-agent`. +- Within the resource group, among other resources, the most important ones are: + - A new Foundry instance + - A new Foundry project, within which a new model deployment will be created + - An Application Insights instance + - A container registry, which will be used to store the container images for the hosted agent + +#### Set Environment Variables + +```bash +export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="" +# And any other environment variables required by the sample +``` + +Or in PowerShell: + +```powershell +$env:FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="" +# And any other environment variables required by the sample +``` + +> Note: The environment variables set above are only for the current session. You will need to set them again if you open a new terminal session. 
+ +#### Running the Agent Host + +```bash +azd ai agent run +``` + +Right now, the agent host should be running on `http://localhost:8088` + +#### Invoking the Agent + +Open another terminal, **navigate to the project directory**, and run the following command to invoke the agent: + +```bash +azd ai agent invoke --local "Hello!" +``` + +Or you can in another terminal, without navigating to the project directory, run the following command to invoke the agent: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hello!"}' +``` + +Or in PowerShell: + +```powershell +(Invoke-WebRequest -Uri http://localhost:8088/responses -Method POST -ContentType "application/json" -Body '{"input": "Hello!"}').Content +``` + +### Using `dotnet run` + +#### Prerequisites + +1. An existing Foundry project +2. A deployed model in your Foundry project +3. [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed and authenticated (`az login`) +4. [.NET 10 SDK](https://dotnet.microsoft.com/download/dotnet/10.0) or later + +#### Running the Agent Host with `dotnet` + +Clone the repository containing the sample code: + +```bash +git clone https://github.com/microsoft/hosted-agents-vnext-private-preview.git +cd hosted-agents-vnext-private-preview/samples/dotnet/hosted-agents/agent-framework +``` + +#### Environment setup + +1. Navigate to the sample directory you want to explore: + + ```bash + cd hello-world + ``` + +2. Restore dependencies: + + ```bash + dotnet restore + ``` + +3. Set environment variables: + + ```bash + export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="" + ``` + + Or in PowerShell: + + ```powershell + $env:FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + $env:AZURE_AI_MODEL_DEPLOYMENT_NAME="" + ``` + +4. 
Make sure you are logged in with the Azure CLI: + + ```bash + az login + ``` + +#### Running the Agent Host + +```bash +dotnet run +``` + +Right now, the agent host should be running on `http://localhost:8088` + +#### Invoking the Agent + +On another terminal, run the following command to invoke the agent: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hello!"}' +``` + +Or in PowerShell: + +```powershell +(Invoke-WebRequest -Uri http://localhost:8088/responses -Method POST -ContentType "application/json" -Body '{"input": "Hello!"}').Content +``` + +## Deploying the Agent to Foundry + +Once you've tested locally, deploy to Microsoft Foundry. + +### With an Existing Foundry Project + +If you already have a Foundry project and the necessary Azure resources provisioned, you can skip the setup steps and proceed directly to deploying the agent. + +After running `azd ai agent init -m ` and following the prompts to configure your agent, you will have a project ready for deployment. + +### Setting Up a New Foundry Project + +Follow the steps in [Using `azd`](#using-azd) to set up the project and provision the necessary Azure resources for your Foundry deployment. + +### Deploying the Agent + +Once the project is setup and resources are provisioned, you can deploy the agent to Foundry by running: + +```bash +azd deploy +``` + +> The Foundry hosting infrastructure will inject the following environment variables into your agent at runtime: +> +> - `FOUNDRY_PROJECT_ENDPOINT`: The endpoint URL for the Foundry project where the agent is deployed. +> - `AZURE_AI_MODEL_DEPLOYMENT_NAME`: The name of the model deployment in your Foundry project. This is configured during the agent initialization process with `azd ai agent init`. +> - `APPLICATIONINSIGHTS_CONNECTION_STRING`: The connection string for Application Insights to enable telemetry for your agent. 
+ +This will package your agent and deploy it to the Foundry environment, making it accessible through the Foundry project endpoint. Once it's deployed, you can also access the agent through the Foundry UI. + +For the full deployment guide, see [Azure AI Foundry hosted agents](https://aka.ms/azdaiagent/docs). diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/.dockerignore b/samples/csharp/hosted-agents/agent-framework/hello-world/.dockerignore new file mode 100644 index 000000000..a49232de8 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/.dockerignore @@ -0,0 +1,35 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/.env.example b/samples/csharp/hosted-agents/agent-framework/hello-world/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... 
diff --git a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Dockerfile b/samples/csharp/hosted-agents/agent-framework/hello-world/Dockerfile similarity index 53% rename from samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Dockerfile rename to samples/csharp/hosted-agents/agent-framework/hello-world/Dockerfile index 87dab868b..0a2b0eae9 100644 --- a/samples/csharp/hosted-agents/AgentFramework/AgentWithLocalTools/Dockerfile +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/Dockerfile @@ -1,20 +1,12 @@ -# Build the application FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build WORKDIR /src - -# Copy files from the current directory on the host to the working directory in the container COPY . . - RUN dotnet restore RUN dotnet build -c Release --no-restore RUN dotnet publish -c Release --no-build -o /app -# Run the application FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final WORKDIR /app - -# Copy everything needed to run the app from the "build" stage. COPY --from=build /app . - EXPOSE 8088 -ENTRYPOINT ["dotnet", "AgentWithLocalTools.dll"] +ENTRYPOINT ["dotnet", "hello-world.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/Program.cs b/samples/csharp/hosted-agents/agent-framework/hello-world/Program.cs new file mode 100644 index 000000000..4e74dc607 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/Program.cs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft. All rights reserved. + +/* + * Hello World — Agent Framework Responses agent for C# + * + * Minimal hosted agent that uses the Microsoft Agent Framework (Microsoft.Agents.AI) + * to create an AIAgent backed by a Foundry model, then hosts it using AgentHost.CreateBuilder() + * from Azure.AI.AgentServer.Core with AddFoundryResponses from Microsoft.Agents.AI.Foundry.Hosting. 
+ * + * This sample demonstrates the simplest possible Agent Framework integration: the agent + * framework manages the LLM call, conversation history, and response lifecycle automatically — + * there is no ResponseHandler subclass to implement. AgentHost.CreateBuilder() handles the + * HTTP contract, port binding, health probes, SSE lifecycle, and OpenTelemetry tracing. + * + * Multi-turn conversation works automatically: on each request the framework calls + * GetHistoryAsync() internally to build the conversation history from prior turns. + * Pass previous_response_id from one response as the input to the next call to maintain + * conversation context. Locally, history is stored in-process (lost on restart); when + * hosted by Foundry (FOUNDRY_HOSTING_ENVIRONMENT set), it uses durable server-side storage. + * + * Required environment variables: + * FOUNDRY_PROJECT_ENDPOINT — Foundry project endpoint (auto-injected in hosted containers) + * AZURE_AI_MODEL_DEPLOYMENT_NAME — Model deployment name (declared in agent.manifest.yaml) + * + * Usage: + * dotnet run + * + * # Turn 1 — invoke the agent: + * curl -sS -X POST http://localhost:8088/responses \ + * -H "Content-Type: application/json" \ + * -d '{"input": "What is Microsoft Foundry?", "stream": false}' | jq . + * + * # Turn 2 — follow up using the id from the previous response: + * curl -sS -X POST http://localhost:8088/responses \ + * -H "Content-Type: application/json" \ + * -d '{"input": "Can you summarize that?", "previous_response_id": "", "stream": false}' | jq . + */ + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; + +if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) +{ + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. 
" + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); +} + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); + +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? throw new InvalidOperationException("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set."); + +// Create an AIAgent backed by a Foundry model. +// The agent framework manages the LLM call, conversation sessions, and response lifecycle. +AIAgent agent = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()) + .AsAIAgent( + model: deployment, + instructions: "You are a helpful AI assistant. Be concise and informative.", + name: "hello-world", + description: "A minimal Hello World agent using the Agent Framework"); + +// AgentHost.CreateBuilder() auto-configures: +// - Kestrel on port 8088 (or the PORT environment variable) +// - GET /readiness health probe +// - OpenTelemetry traces and metrics +// - x-platform-server response header +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/README.md b/samples/csharp/hosted-agents/agent-framework/hello-world/README.md new file mode 100644 index 000000000..fe6af031d --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/README.md @@ -0,0 +1,31 @@ +# HelloWorld + +A minimal "hello world" hosted agent using the [Agent Framework](https://github.com/microsoft/agent-framework) with the Responses protocol in C#. This is the recommended starting point for understanding how agents are hosted on Foundry. 
+ +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. + +Send a request to the agent: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What is Microsoft Foundry?", "stream": false}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +### Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Can you summarize that?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID", "stream": false}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. 
diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/hello-world/agent.manifest.yaml new file mode 100644 index 000000000..c12ba469a --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/agent.manifest.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: hello-world-dotnet-agent-framework +displayName: "Hello World (.NET, Agent Framework)" +description: > + Minimal Hello World agent using the Responses protocol with the Agent + Framework approach in C#. Uses Microsoft.Agents.AI to create an AIAgent backed + by a Foundry model, hosted via AgentHost.CreateBuilder() for automatic port, + health, and telemetry configuration. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Agent Framework + - .NET +template: + name: hello-world-dotnet-agent-framework + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/agent.yaml b/samples/csharp/hosted-agents/agent-framework/hello-world/agent.yaml new file mode 100644 index 000000000..180bdef13 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: hello-world-dotnet-agent-framework +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/hello-world/hello-world.csproj b/samples/csharp/hosted-agents/agent-framework/hello-world/hello-world.csproj new file mode 100644 index 000000000..5b536c7f5 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/hello-world/hello-world.csproj @@ -0,0 +1,20 @@ + + + + net10.0 + HelloWorld + enable + enable + + $(NoWarn);NU1903;NU1605 + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Dockerfile b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Dockerfile new file mode 100644 index 000000000..44c4b9dc7 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . 
+ENTRYPOINT ["dotnet", "invocations-echo-agent.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoAIAgent.cs b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoAIAgent.cs new file mode 100644 index 000000000..caebc3181 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoAIAgent.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Runtime.CompilerServices; +using System.Text.Json; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI; + +/// +/// A minimal that echoes the user's input text back as the response. +/// No LLM or external service is required. +/// +public sealed class EchoAIAgent : AIAgent +{ + /// + public override string Name => "echo-agent"; + + /// + public override string Description => "An agent that echoes back the input message."; + + /// + protected override Task RunCoreAsync( + IEnumerable messages, + AgentSession? session = null, + AgentRunOptions? options = null, + CancellationToken cancellationToken = default) + { + var inputText = GetInputText(messages); + var response = new AgentResponse(new ChatMessage(ChatRole.Assistant, $"Echo: {inputText}")); + return Task.FromResult(response); + } + + /// + protected override async IAsyncEnumerable RunCoreStreamingAsync( + IEnumerable messages, + AgentSession? session = null, + AgentRunOptions? options = null, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + var inputText = GetInputText(messages); + yield return new AgentResponseUpdate + { + Role = ChatRole.Assistant, + Contents = [new TextContent($"Echo: {inputText}")], + }; + + await Task.CompletedTask; + } + + /// + protected override ValueTask CreateSessionCoreAsync(CancellationToken cancellationToken = default) + => new(new EchoAgentSession()); + + /// + protected override ValueTask SerializeSessionCoreAsync( + AgentSession session, + JsonSerializerOptions? 
jsonSerializerOptions = null, + CancellationToken cancellationToken = default) + => new(JsonSerializer.SerializeToElement(new { }, jsonSerializerOptions)); + + /// + protected override ValueTask DeserializeSessionCoreAsync( + JsonElement serializedState, + JsonSerializerOptions? jsonSerializerOptions = null, + CancellationToken cancellationToken = default) + => new(new EchoAgentSession()); + + private static string GetInputText(IEnumerable messages) + { + foreach (var message in messages) + { + if (message.Role == ChatRole.User) + { + return message.Text ?? string.Empty; + } + } + + return string.Empty; + } + + /// + /// Minimal session for the echo agent. No state is persisted. + /// + private sealed class EchoAgentSession : AgentSession; +} diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoInvocationHandler.cs b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoInvocationHandler.cs new file mode 100644 index 000000000..06ef84997 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/EchoInvocationHandler.cs @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Invocations; +using Microsoft.Agents.AI; +using Microsoft.AspNetCore.Http; + +namespace InvocationsEchoAgent; + +/// +/// An that reads the request body as plain text, +/// passes it to the , and writes the response back. +/// +public sealed class EchoInvocationHandler(EchoAIAgent agent) : InvocationHandler +{ + /// + public override async Task HandleAsync( + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + // Read the raw text from the request body. + using var reader = new StreamReader(request.Body); + var input = await reader.ReadToEndAsync(cancellationToken); + + // Run the echo agent with the input text. 
+ var agentResponse = await agent.RunAsync(input, cancellationToken: cancellationToken); + + // Write the agent response text back to the HTTP response. + response.ContentType = "text/plain"; + await response.WriteAsync(agentResponse.Text, cancellationToken); + } +} diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Program.cs b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Program.cs new file mode 100644 index 000000000..1c6263de8 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/Program.cs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Core; +using Azure.AI.AgentServer.Invocations; +using InvocationsEchoAgent; +using Microsoft.Agents.AI; + +var builder = AgentHost.CreateBuilder(args); + +// Register the echo agent as a singleton (no LLM needed). +builder.Services.AddSingleton(); + +// Register the Invocations SDK services and wire the handler. +builder.Services.AddInvocationsServer(); +builder.Services.AddScoped(); + +// Map the Invocations protocol endpoints: +// POST /invocations — invoke the agent +// GET /invocations/{id} — get result (not used by this sample) +// POST /invocations/{id}/cancel — cancel (not used by this sample) +builder.RegisterProtocol("invocations", endpoints => endpoints.MapInvocationsServer()); + +var app = builder.Build(); +app.Run(); diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/README.md b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/README.md new file mode 100644 index 000000000..df28c40d6 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/README.md @@ -0,0 +1,41 @@ +# Invocations Echo Agent + +A minimal echo agent hosted as a Foundry Hosted Agent using the **Invocations protocol** and the [Agent Framework](https://github.com/microsoft/agent-framework). 
The agent reads the request body as plain text, passes it through a custom `EchoAIAgent`, and writes the echoed text back in the response. No LLM or Azure credentials are required. + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent: + +```bash +curl -X POST http://localhost:8088/invocations -i -H "Content-Type: application/json" -d '{"message": "Hello, world!"}' +``` + +The server will respond with a JSON object containing the response text. The `-i` flag includes the HTTP response headers in the output, which includes the session ID that can be used for multi-turn conversations. 
Here is an example of the response: + +``` +HTTP/1.1 200 +content-type: application/json +x-agent-invocation-id: ec04d020-a0e7-441e-ae83-db75635a9f83 +x-agent-session-id: 9370b9d4-cd13-4436-a57f-03b843ac0e17 +x-platform-server: azure-ai-agentserver-core/2.0.0 (dotnet/10.0) + +{"response":"Echo: Hello, world!"} +``` + +### Multi-turn conversation + +To have a multi-turn conversation with the agent, take the session ID from the response headers of the previous request and include it in URL parameters for the next request: + +```bash +curl -X POST "http://localhost:8088/invocations?agent_session_id=9370b9d4-cd13-4436-a57f-03b843ac0e17" -i -H "Content-Type: application/json" -d '{"message": "How are you?"}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.manifest.yaml new file mode 100644 index 000000000..53fd8eae2 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.manifest.yaml @@ -0,0 +1,28 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: invocations-echo-agent +displayName: "Invocations Echo Agent" + +description: > + A minimal echo agent hosted as a Foundry Hosted Agent using the Invocations protocol + and the Agent Framework. Reads the request body as plain text, echoes it back in the response. + No LLM or Azure credentials are required. 
+ +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Invocations Protocol + - Agent Framework + +template: + name: invocations-echo-agent + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi +parameters: + properties: [] +resources: [] diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.yaml b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.yaml new file mode 100644 index 000000000..d49aa7cd7 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: invocations-echo-agent +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/invocations-echo-agent.csproj b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/invocations-echo-agent.csproj new file mode 100644 index 000000000..9194ef3ac --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/invocations-echo-agent/invocations-echo-agent.csproj @@ -0,0 +1,17 @@ + + + + net10.0 + InvocationsEchoAgent + invocations-echo-agent + enable + enable + $(NoWarn);NU1903;NU1605 + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/Dockerfile b/samples/csharp/hosted-agents/agent-framework/local-tools/Dockerfile new file mode 100644 index 000000000..8e0f84992 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . 
+RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . +ENTRYPOINT ["dotnet", "local-tools.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/Program.cs b/samples/csharp/hosted-agents/agent-framework/local-tools/Program.cs new file mode 100644 index 000000000..9ee902652 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/Program.cs @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using DotNetEnv; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; +using Microsoft.Extensions.AI; + +Env.TraversePath().Load(); + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? "gpt-4o"; + +AIAgent agent = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()) + .AsAIAgent( + model: deployment, + instructions: """ + You are a helpful Seattle hotel concierge assistant. + Use the available tools to help customers find hotels in Seattle. + Provide detailed information about available hotels when asked. + """, + name: "local-tools", + description: "A hotel concierge assistant with local function tools", + tools: + [ + AIFunctionFactory.Create(GetAvailableHotels, "GetAvailableHotels", + "Gets a list of available hotels in Seattle with details about amenities and pricing.") + ]); + +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); + +static string GetAvailableHotels(string? checkInDate = null, string? 
checkOutDate = null, int? guests = null) +{ + var hotels = new[] + { + new + { + Name = "The Grand Seattle", + Location = "Downtown Seattle", + PricePerNight = 289, + Rating = 4.7, + Amenities = new[] { "Free WiFi", "Pool", "Spa", "Restaurant", "Fitness Center" }, + AvailableRooms = 12 + }, + new + { + Name = "Pike Place Inn", + Location = "Near Pike Place Market", + PricePerNight = 199, + Rating = 4.5, + Amenities = new[] { "Free WiFi", "Breakfast Included", "Rooftop Bar" }, + AvailableRooms = 8 + }, + new + { + Name = "Space Needle View Hotel", + Location = "Queen Anne", + PricePerNight = 349, + Rating = 4.8, + Amenities = new[] { "Free WiFi", "Pool", "Restaurant", "Valet Parking", "Concierge Service" }, + AvailableRooms = 5 + }, + new + { + Name = "Waterfront Lodge", + Location = "Seattle Waterfront", + PricePerNight = 159, + Rating = 4.3, + Amenities = new[] { "Free WiFi", "Pet Friendly", "Free Parking" }, + AvailableRooms = 15 + }, + new + { + Name = "Capitol Hill Boutique", + Location = "Capitol Hill", + PricePerNight = 179, + Rating = 4.6, + Amenities = new[] { "Free WiFi", "Breakfast Included", "Fitness Center", "Local Art Gallery" }, + AvailableRooms = 6 + } + }; + + var result = "Available Hotels in Seattle:\n\n"; + foreach (var hotel in hotels) + { + result += $"🏨 {hotel.Name}\n"; + result += $" 📍 Location: {hotel.Location}\n"; + result += $" 💰 Price: ${hotel.PricePerNight}/night\n"; + result += $" ⭐ Rating: {hotel.Rating}/5.0\n"; + result += $" 🛏️ Available Rooms: {hotel.AvailableRooms}\n"; + result += $" ✨ Amenities: {string.Join(", ", hotel.Amenities)}\n\n"; + } + + if (checkInDate != null) + result += $"Check-in: {checkInDate}\n"; + if (checkOutDate != null) + result += $"Check-out: {checkOutDate}\n"; + if (guests != null) + result += $"Guests: {guests}\n"; + + return result; +} diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/README.md b/samples/csharp/hosted-agents/agent-framework/local-tools/README.md new file mode 100644 
index 000000000..c7c768551 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/README.md @@ -0,0 +1,19 @@ +# Local Tools + +A hotel search assistant with local C# function tools — demonstrates how to define tools that the LLM can invoke, a key advantage of code-based hosted agents over prompt agents. + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Find hotels in Seattle for Dec 20-25 under $200/night", "stream": false}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/local-tools/agent.manifest.yaml new file mode 100644 index 000000000..ee6fd4c1d --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: local-tools +displayName: "Local Tools Agent" + +description: > + A travel assistant agent with local C# function tools for hotel search in Seattle. 
+ +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Agent Framework + - Local Tools + +template: + name: local-tools + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi + environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +parameters: + properties: [] +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/agent.yaml b/samples/csharp/hosted-agents/agent-framework/local-tools/agent.yaml new file mode 100644 index 000000000..2047b8636 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: local-tools +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/local-tools/local-tools.csproj b/samples/csharp/hosted-agents/agent-framework/local-tools/local-tools.csproj new file mode 100644 index 000000000..9cbc2b2d2 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/local-tools/local-tools.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + LocalTools + local-tools + enable + enable + $(NoWarn);NU1903;NU1605 + + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/Dockerfile b/samples/csharp/hosted-agents/agent-framework/mcp-tools/Dockerfile new file mode 100644 index 000000000..793e6c42b --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . 
+RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . +ENTRYPOINT ["dotnet", "mcp-tools.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/Program.cs b/samples/csharp/hosted-agents/agent-framework/mcp-tools/Program.cs new file mode 100644 index 000000000..57db1b6f4 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/Program.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft. All rights reserved. + +// This sample demonstrates a hosted agent with two layers of MCP (Model Context Protocol) tools: +// +// 1. CLIENT-SIDE MCP: The agent connects to the Microsoft Learn MCP server directly via +// McpClient, discovers tools, and handles tool invocations locally within the agent process. +// +// 2. SERVER-SIDE MCP: The agent declares a HostedMcpServerTool for the same MCP server which +// delegates tool discovery and invocation to the LLM provider (Azure OpenAI Responses API). +// The provider calls the MCP server on behalf of the agent — no local connection needed. +// +// Both patterns use the Microsoft Learn MCP server to illustrate the architectural difference: +// client-side tools are resolved and invoked by the agent, while server-side tools are resolved +// and invoked by the LLM provider. + +#pragma warning disable MEAI001 // HostedMcpServerTool is experimental + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using DotNetEnv; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; +using Microsoft.Extensions.AI; +using ModelContextProtocol.Client; + +// Load .env file if present (for local development) +Env.TraversePath().Load(); + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? 
throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? "gpt-4o"; + +// ── Client-side MCP: Microsoft Learn (local resolution) ────────────────────── +// Connect directly to the MCP server. The agent discovers and invokes tools locally. +Console.WriteLine("Connecting to Microsoft Learn MCP server (client-side)..."); + +await using var learnMcp = await McpClient.CreateAsync(new HttpClientTransport(new() +{ + Endpoint = new Uri("https://learn.microsoft.com/api/mcp"), + Name = "Microsoft Learn (client)", +})); + +var clientTools = await learnMcp.ListToolsAsync(); +Console.WriteLine($"Client-side MCP tools: {string.Join(", ", clientTools.Select(t => t.Name))}"); + +// ── Server-side MCP: Microsoft Learn (provider resolution) ─────────────────── +// Declare a HostedMcpServerTool — the LLM provider (Responses API) handles tool +// invocations directly. No local MCP connection needed for this pattern. +AITool serverTool = new HostedMcpServerTool( + serverName: "microsoft_learn_hosted", + serverAddress: "https://learn.microsoft.com/api/mcp") +{ + AllowedTools = ["microsoft_docs_search"], + ApprovalMode = HostedMcpServerToolApprovalMode.NeverRequire +}; +Console.WriteLine("Server-side MCP tool: microsoft_docs_search (via HostedMcpServerTool)"); + +// ── Combine both tool types into a single agent ────────────────────────────── +// The agent has access to tools from both MCP patterns simultaneously. +List allTools = [.. clientTools.Cast(), serverTool]; + +AIAgent agent = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()) + .AsAIAgent( + model: deployment, + instructions: """ + You are a helpful developer assistant with access to Microsoft Learn documentation. + Use the available tools to search and retrieve documentation. + Be concise and provide direct answers with relevant links. 
+ """, + name: "mcp-tools", + description: "Developer assistant with dual-layer MCP tools (client-side and server-side)", + tools: allTools); + +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/README.md b/samples/csharp/hosted-agents/agent-framework/mcp-tools/README.md new file mode 100644 index 000000000..a2dc54a75 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/README.md @@ -0,0 +1,23 @@ +# MCP Tools + +An agent demonstrating two layers of MCP (Model Context Protocol) tool integration: client-side MCP (agent connects directly to an MCP server) and server-side MCP (LLM provider connects to the MCP server on behalf of the agent). + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. 
+ +```bash +# Triggers client-side MCP tools (docs search, code samples, docs fetch) +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Search Microsoft Learn for how to use dependency injection in ASP.NET Core", "stream": false}' + +# Triggers code sample search (client-side only) +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Find a C# code sample for creating an Azure Blob Storage container", "stream": false}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.manifest.yaml new file mode 100644 index 000000000..1c980887e --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.manifest.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: mcp-tools +displayName: "MCP Tools Agent" + +description: > + A developer assistant with remote MCP tools connecting to GitHub and Microsoft Learn + documentation servers. 
+ +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Agent Framework + - MCP + - Model Context Protocol + +template: + name: mcp-tools + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi + environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +parameters: + properties: [] +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.yaml b/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.yaml new file mode 100644 index 000000000..34beb3e2c --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: mcp-tools +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/mcp-tools/mcp-tools.csproj b/samples/csharp/hosted-agents/agent-framework/mcp-tools/mcp-tools.csproj new file mode 100644 index 000000000..eb308bbfd --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/mcp-tools/mcp-tools.csproj @@ -0,0 +1,20 @@ + + + + net10.0 + enable + enable + McpTools + mcp-tools + $(NoWarn);NU1903;NU1605 + + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/Dockerfile b/samples/csharp/hosted-agents/agent-framework/simple-agent/Dockerfile new file mode 100644 index 000000000..d46d5027f --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/simple-agent/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . 
+RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . +ENTRYPOINT ["dotnet", "simple-agent.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/Program.cs b/samples/csharp/hosted-agents/agent-framework/simple-agent/Program.cs new file mode 100644 index 000000000..4c404bdbe --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/simple-agent/Program.cs @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using DotNetEnv; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; + +Env.TraversePath().Load(); + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? "gpt-4o"; + +AIAgent agent = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()) + .AsAIAgent( + model: deployment, + instructions: """ + You are a helpful AI assistant hosted as a Foundry Hosted Agent. + You can help with a wide range of tasks including answering questions, + providing explanations, brainstorming ideas, and offering guidance. + Be concise, clear, and helpful in your responses. 
+ """, + name: "simple-agent", + description: "A simple general-purpose AI assistant"); + +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/README.md b/samples/csharp/hosted-agents/agent-framework/simple-agent/README.md new file mode 100644 index 000000000..3ca0f4b46 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/simple-agent/README.md @@ -0,0 +1,19 @@ +# Simple Agent + +A general-purpose AI assistant — the simplest hosted agent using `AsAIAgent(model, instructions)`. This is the recommended starting point for understanding inline agent creation. + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hello! What can you help me with?", "stream": false}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. 
diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.manifest.yaml new file mode 100644 index 000000000..9ef1732be --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.manifest.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: simple-agent +displayName: "Simple Agent" + +description: > + A simple general-purpose AI assistant hosted as a Foundry Hosted Agent using the Agent Framework. + +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Agent Framework + +template: + name: simple-agent + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi + environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +parameters: + properties: [] +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.yaml b/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.yaml new file mode 100644 index 000000000..dc21f6843 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/simple-agent/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: simple-agent +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/simple-agent/simple-agent.csproj b/samples/csharp/hosted-agents/agent-framework/simple-agent/simple-agent.csproj new file mode 100644 index 000000000..79db99732 --- /dev/null +++ 
b/samples/csharp/hosted-agents/agent-framework/simple-agent/simple-agent.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + SimpleAgent + simple-agent + enable + enable + $(NoWarn);NU1903;NU1605 + + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/Dockerfile b/samples/csharp/hosted-agents/agent-framework/text-search-rag/Dockerfile new file mode 100644 index 000000000..6841af855 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . +ENTRYPOINT ["dotnet", "text-search-rag.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/Program.cs b/samples/csharp/hosted-agents/agent-framework/text-search-rag/Program.cs new file mode 100644 index 000000000..ff5f6b0d5 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/Program.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using DotNetEnv; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; +using Microsoft.Extensions.AI; + +Env.TraversePath().Load(); + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? 
"gpt-4o"; + +AIAgent agent = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()) + .AsAIAgent( + model: deployment, + instructions: """ + You are a helpful customer support assistant for an outdoor equipment company. + Use the available search tools to find relevant information before answering questions. + Always base your answers on the search results provided. + If you cannot find relevant information, let the customer know. + """, + name: "text-search-rag", + description: "A RAG-powered customer support assistant with text search capabilities", + tools: + [ + AIFunctionFactory.Create(SearchKnowledgeBase, "SearchKnowledgeBase", + "Searches the company knowledge base for relevant information about products, policies, and procedures.") + ]); + +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); + +static string SearchKnowledgeBase(string query) +{ + // Mock knowledge base search - in production, this would query a vector store or search index + var knowledgeBase = new Dictionary(StringComparer.OrdinalIgnoreCase) + { + ["return policy"] = """ + Return Policy: Items can be returned within 30 days of purchase with a valid receipt. + Items must be in original condition with tags attached. Sale items are final sale and + cannot be returned. Refunds are processed within 5-7 business days. + """, + ["shipping"] = """ + Shipping Information: Standard shipping takes 5-7 business days. + Express shipping takes 2-3 business days. Free shipping on orders over $50. + International shipping is available to select countries with delivery in 10-14 business days. + """, + ["tent care"] = """ + Tent Care Guide: Always dry your tent completely before storing to prevent mold. + Use a footprint or ground cloth to protect the tent floor. Clean with mild soap and water. 
+ Never machine wash or dry your tent. Store loosely in a cool, dry place. + Apply seam sealer annually for best waterproofing performance. + """, + ["warranty"] = """ + Warranty Information: All products come with a 1-year manufacturer warranty. + Premium products include a lifetime warranty against defects. Warranty does not cover + normal wear and tear or damage from misuse. Contact support with your order number + to file a warranty claim. + """, + ["hiking boots"] = """ + Hiking Boot Guide: Break in new boots gradually before long hikes. + Use waterproofing spray to protect leather boots. Replace insoles every 500 miles. + Clean boots after each hike and allow them to air dry. Store in a cool, dry place + away from direct sunlight. + """ + }; + + var results = knowledgeBase + .Where(kvp => query.Split(' ').Any(word => kvp.Key.Contains(word, StringComparison.OrdinalIgnoreCase) + || kvp.Value.Contains(word, StringComparison.OrdinalIgnoreCase))) + .Select(kvp => kvp.Value) + .ToList(); + + return results.Count > 0 + ? string.Join("\n\n---\n\n", results) + : "No relevant information found in the knowledge base."; +} diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/README.md b/samples/csharp/hosted-agents/agent-framework/text-search-rag/README.md new file mode 100644 index 000000000..f9cf36540 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/README.md @@ -0,0 +1,21 @@ +# Text Search RAG + +A support specialist agent with Retrieval Augmented Generation (RAG) — demonstrates how to ground agent answers in external knowledge using `TextSearchProvider`. + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. 
Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What is your return policy?", "stream": false}' +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How long does shipping take?", "stream": false}' +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How do I clean my tent?", "stream": false}' +``` + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.manifest.yaml new file mode 100644 index 000000000..21b3abd8e --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.manifest.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: text-search-rag +displayName: "Text Search RAG Agent" + +description: > + A support specialist agent with RAG capabilities using TextSearchProvider to ground answers in product documentation. 
+ +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Agent Framework + - RAG + - Text Search + +template: + name: text-search-rag + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi + environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +parameters: + properties: [] +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.yaml b/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.yaml new file mode 100644 index 000000000..169a20746 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: text-search-rag +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/text-search-rag/text-search-rag.csproj b/samples/csharp/hosted-agents/agent-framework/text-search-rag/text-search-rag.csproj new file mode 100644 index 000000000..541d5a3c4 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/text-search-rag/text-search-rag.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + TextSearchRag + text-search-rag + enable + enable + $(NoWarn);NU1903;NU1605 + + + + + + + + + diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/Dockerfile b/samples/csharp/hosted-agents/agent-framework/workflows/Dockerfile new file mode 100644 index 000000000..e55193881 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/Dockerfile @@ -0,0 +1,15 @@ +FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS base +WORKDIR /app +EXPOSE 8088 +ENV ASPNETCORE_URLS=http://+:8088 + +FROM 
mcr.microsoft.com/dotnet/sdk:10.0 AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet publish -c Release -o /app/publish + +FROM base AS final +WORKDIR /app +COPY --from=build /app/publish . +ENTRYPOINT ["dotnet", "workflows.dll"] diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/Program.cs b/samples/csharp/hosted-agents/agent-framework/workflows/Program.cs new file mode 100644 index 000000000..bf5a63047 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/Program.cs @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.AgentServer.Core; +using Azure.AI.Projects; +using Azure.Identity; +using DotNetEnv; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Foundry.Hosting; +using Microsoft.Agents.AI.Workflows; + +Env.TraversePath().Load(); + +var projectEndpoint = new Uri(Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set.")); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") ?? "gpt-4o"; + +var projectClient = new AIProjectClient(projectEndpoint, new DefaultAzureCredential()); + +// Create individual translation agents +AIAgent englishToFrench = projectClient.AsAIAgent( + model: deployment, + instructions: """ + You are a professional translator. Translate the user's input text into French. + Only output the translated text, nothing else. Do not add explanations or notes. + """, + name: "english-to-french", + description: "Translates English text to French"); + +AIAgent frenchToSpanish = projectClient.AsAIAgent( + model: deployment, + instructions: """ + You are a professional translator. Translate the user's input text into Spanish. + Only output the translated text, nothing else. Do not add explanations or notes. 
+ """, + name: "french-to-spanish", + description: "Translates French text to Spanish"); + +AIAgent spanishToEnglish = projectClient.AsAIAgent( + model: deployment, + instructions: """ + You are a professional translator. Translate the user's input text back into English. + Only output the translated text, nothing else. Do not add explanations or notes. + """, + name: "spanish-to-english", + description: "Translates Spanish text to English"); + +// Build a sequential translation chain: English → French → Spanish → English +AIAgent agent = AgentWorkflowBuilder + .BuildSequential("translation-chain", englishToFrench, frenchToSpanish, spanishToEnglish) + .AsAIAgent( + name: "translation-chain", + description: "A translation workflow that chains English → French → Spanish → English"); + +var builder = AgentHost.CreateBuilder(args); +builder.Services.AddFoundryResponses(agent); +builder.RegisterProtocol("responses", endpoints => endpoints.MapFoundryResponses()); + +var app = builder.Build(); +app.Run(); diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/README.md b/samples/csharp/hosted-agents/agent-framework/workflows/README.md new file mode 100644 index 000000000..5341209e1 --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/README.md @@ -0,0 +1,21 @@ +# Workflows + +A multi-agent workflow that chains three translation agents into a sequential pipeline: English → French → Spanish → English. + +## Running the Agent Host + +Follow the instructions in the [Running the Agent Host Locally](../README.md#running-the-agent-host-locally) section of the parent README to run the agent host. + +## Interacting with the agent + +> Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../README.md) for more details. Use this README for sample queries you can send to the agent. 
+ +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "The quick brown fox jumps over the lazy dog", "stream": false}' +``` + +Expected output: three lines showing the text in French, Spanish, then back in English. + +## Deploying the Agent to Foundry + +To deploy the agent to Foundry, follow the instructions in the [Deploying the Agent to Foundry](../README.md#deploying-the-agent-to-foundry) section of the parent README. diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/agent.manifest.yaml b/samples/csharp/hosted-agents/agent-framework/workflows/agent.manifest.yaml new file mode 100644 index 000000000..ac81de8ce --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: workflows +displayName: "Translation Workflow Agent" + +description: > + A workflow agent that performs sequential translation through multiple languages (English to French to Spanish to English). 
+ +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Agent Framework + - Workflows + +template: + name: workflows + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + resources: + cpu: "0.25" + memory: 0.5Gi + environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +parameters: + properties: [] +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/agent.yaml b/samples/csharp/hosted-agents/agent-framework/workflows/agent.yaml new file mode 100644 index 000000000..a890493da --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: workflows +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/agent-framework/workflows/workflows.csproj b/samples/csharp/hosted-agents/agent-framework/workflows/workflows.csproj new file mode 100644 index 000000000..ae86c873b --- /dev/null +++ b/samples/csharp/hosted-agents/agent-framework/workflows/workflows.csproj @@ -0,0 +1,19 @@ + + + + net10.0 + Workflows + workflows + enable + enable + $(NoWarn);NU1903;NU1605 + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.dockerignore new file mode 100644 index 000000000..ffd63fa37 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.dockerignore @@ -0,0 +1,38 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results 
+**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.env.example b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Dockerfile new file mode 100644 index 000000000..efbd4fb6d --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . 
+EXPOSE 8088 +ENTRYPOINT ["dotnet", "HelloWorld.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/HelloWorld.csproj b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/HelloWorld.csproj new file mode 100644 index 000000000..d176ef641 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/HelloWorld.csproj @@ -0,0 +1,24 @@ + + + + net10.0 + enable + enable + + $(NoWarn);OPENAI001 + + + + + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Program.cs b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Program.cs new file mode 100644 index 000000000..183b6c7bc --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/Program.cs @@ -0,0 +1,217 @@ +// Copyright (c) Microsoft. All rights reserved. + +/* + * Hello World — Bring Your Own Invocations agent for C# + * + * Minimal hosted agent that forwards user input to a Foundry model via the + * Responses API and returns the reply through the Invocations protocol + * as a streaming SSE event stream. + * + * This sample demonstrates the simplest possible BYO integration: the protocol + * SDK (Azure.AI.AgentServer.Invocations) handles the HTTP contract and session + * resolution, and you supply the model call using the Foundry SDK + * (Azure.AI.Projects and Azure.AI.Extensions.OpenAI). + * + * Unlike the Responses protocol, the Invocations protocol does NOT provide + * built-in server-side conversation history. This agent maintains an in-memory + * session store keyed by agent_session_id. In production, replace it with + * durable storage (Redis, Cosmos DB, etc.) so history survives restarts. 
+ * + * Required environment variables: + * FOUNDRY_PROJECT_ENDPOINT — Foundry project endpoint (auto-injected in hosted containers) + * AZURE_AI_MODEL_DEPLOYMENT_NAME — Model deployment name (declared in agent.manifest.yaml) + * + * Usage: + * dotnet run + * + * # Turn 1 — start a new conversation: + * curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + * -H "Content-Type: application/json" \ + * -d '{"message": "What is Microsoft Foundry?"}' + * + * # Turn 2 — continue the same conversation: + * curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + * -H "Content-Type: application/json" \ + * -d '{"message": "What hosted agent options does it offer?"}' + */ + +using System.Collections.Concurrent; +using System.Text.Json; +using Azure.AI.AgentServer.Invocations; +using Azure.AI.Extensions.OpenAI; +using Azure.AI.Projects; +using Azure.Identity; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.Logging; +using OpenAI.Responses; + +// One-liner startup — wires up Kestrel on port 8088, OpenTelemetry, health probes, +// and the Invocations API endpoints. Telemetry is configured automatically: +// when APPLICATIONINSIGHTS_CONNECTION_STRING is set, traces and logs are sent to +// Application Insights with no extra code. +InvocationsServer.Run(configure: builder => +{ + if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + + var endpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? 
throw new InvalidOperationException( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set."); + + var model = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? throw new InvalidOperationException( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set."); + + var projectClient = new AIProjectClient(new Uri(endpoint), new DefaultAzureCredential()); + + // Use the Responses API — not GetChatClient() (Chat Completions API is legacy). + var responsesClient = projectClient.ProjectOpenAIClient + .GetProjectResponsesClientForModel(model); + + builder.Services.AddSingleton(responsesClient); +}); + +/// +/// Hello World handler — forwards user input to a Foundry model via the Responses API, +/// streams the reply as SSE token events, and persists conversation history +/// in an in-memory session store keyed by . +/// +/// Foundry Responses API client, injected via DI. +/// Logger injected via DI. Calls are automatically exported to Application Insights. +public sealed class HelloWorldHandler( + ProjectResponsesClient responsesClient, + ILogger logger) : InvocationHandler +{ + private const string SystemPrompt = "You are a helpful AI assistant. Be concise and informative."; + + // In-memory session store keyed by agent_session_id. + // State is lost on restart; use durable storage in production. + // Note: the inner List is not thread-safe — concurrent + // requests on the same session_id are not supported in this sample. + private static readonly ConcurrentDictionary> s_sessions = new(); + + // ── Required override ───────────────────────────────────────────────────── + // HandleAsync is the only method you must override. It receives every + // POST /invocations request. 
+ // + // Three optional overrides exist for long-running operations (LRO): + // GetAsync — handle GET /invocations/{id} status polls + // CancelAsync — handle DELETE /invocations/{id} cancellation + // GetOpenApiAsync — serve an OpenAPI spec at GET /invocations/docs/openapi.json + // For a simple streaming agent like this one, none of them are needed. + // ───────────────────────────────────────────────────────────────────────── + public override async Task HandleAsync( + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + // Parse the incoming message — accepts JSON or plain text (see ParseUserMessage). + var rawBody = await new StreamReader(request.Body).ReadToEndAsync(cancellationToken); + var userMessage = ParseUserMessage(rawBody); + if (string.IsNullOrWhiteSpace(userMessage)) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new + { + error = "invalid_request", + message = "Request body must be a JSON object with a non-empty \"message\" string (e.g. {\"message\": \"What is Microsoft Foundry?\"}) or a non-empty plain-text body.", + }, + cancellationToken); + return; + } + + // InvocationContext is provided by the Invocations SDK. It resolves + // session and invocation identity from the incoming request headers + // so you don't have to parse them yourself. + logger.LogInformation( + "Processing invocation {InvocationId} (session {SessionId})", + context.InvocationId, context.SessionId); + + // Retrieve or create conversation history for this session. + var history = s_sessions.GetOrAdd(context.SessionId, _ => []); + + response.ContentType = "text/event-stream"; + response.Headers.CacheControl = "no-cache"; + + // Build the Responses API input from history + current user message. + // History is stored as SessionMessage records — convert to ResponseItem. + var inputItems = new List(); + foreach (var msg in history) + { + inputItems.Add(msg.Role == "user" + ? 
ResponseItem.CreateUserMessageItem(msg.Content) + : ResponseItem.CreateAssistantMessageItem(msg.Content)); + } + inputItems.Add(ResponseItem.CreateUserMessageItem(userMessage)); + // Record the user message before streaming so history is consistent + // even if the model call fails partway through. + history.Add(new SessionMessage("user", userMessage)); + + // Stream tokens from the model via the Responses API. + var options = new CreateResponseOptions { Instructions = SystemPrompt }; + foreach (var item in inputItems) + { + options.InputItems.Add(item); + } + + var fullText = ""; + await foreach (var update in responsesClient.CreateResponseStreamingAsync( + options, cancellationToken)) + { + if (update is StreamingResponseOutputTextDeltaUpdate delta + && !string.IsNullOrEmpty(delta.Delta)) + { + fullText += delta.Delta; + var tokenEvent = JsonSerializer.Serialize( + new { type = "token", content = delta.Delta }); + await response.WriteAsync($"data: {tokenEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + } + } + + // Send the final done event with the complete reply text. + var doneEvent = JsonSerializer.Serialize(new + { + type = "done", + invocation_id = context.InvocationId, + session_id = context.SessionId, + full_text = fullText, + }); + await response.WriteAsync($"data: {doneEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + + // Persist the assistant reply to session history. + history.Add(new SessionMessage("assistant", fullText)); + } + + /// + /// Extracts the user message from the request body. + /// Accepts {"message": "..."} JSON, {"input": "..."} JSON + /// (alternate Foundry portal format), or a plain text body. + /// + private static string? ParseUserMessage(string rawBody) + { + try + { + using var doc = JsonDocument.Parse(rawBody); + var root = doc.RootElement; + return + (root.TryGetProperty("message", out var m) && m.ValueKind == JsonValueKind.String ? 
m.GetString() : null) + ?? (root.TryGetProperty("input", out var inp) && inp.ValueKind == JsonValueKind.String ? inp.GetString() : null) + ?? rawBody.Trim(); + } + catch (JsonException) + { + // Not JSON — treat the whole body as plain text. + return rawBody.Trim(); + } + } +} + +/// Internal session message record — stores role and content for history tracking. +public record SessionMessage(string Role, string Content); diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/README.md b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/README.md new file mode 100644 index 000000000..aca9e3835 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/README.md @@ -0,0 +1,185 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency note for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. 
+ +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# What this sample demonstrates + +A minimal "hello world" hosted agent using the **Bring Your Own** approach with the **Invocations protocol** in C#. It shows how to use the [`Azure.AI.AgentServer.Invocations`](https://www.nuget.org/packages/Azure.AI.AgentServer.Invocations/) SDK to host a custom agent that calls a Foundry model via the Responses API and returns the reply as a streaming SSE event stream. + +This is the simplest possible BYO integration — the protocol SDK handles the HTTP endpoints, session resolution, client header forwarding, and OpenTelemetry tracing. You supply the model call using the [Foundry SDK (`Azure.AI.Projects` + `Azure.AI.Extensions.OpenAI`)](https://www.nuget.org/packages/Azure.AI.Extensions.OpenAI/). + +> **Invocations vs Responses:** Unlike the Responses protocol, the Invocations protocol does **not** provide built-in server-side conversation history. This agent maintains an in-memory session store keyed by `agent_session_id`. In production, replace it with durable storage (Redis, Cosmos DB, etc.) so history survives restarts. + +## How It Works + +### Model Integration + +The agent uses the Foundry SDK to create a `ProjectResponsesClient` from the project endpoint and model deployment name. When a request arrives, the handler looks up the session history by `SessionId`, appends the new user message, calls the model via the Responses API with streaming, and writes SSE events directly to the response — `token` events during generation, then a final `done` event. + +See [Program.cs](Program.cs) for the full implementation. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Invocations SDK](https://www.nuget.org/packages/Azure.AI.AgentServer.Invocations/), which provisions a REST API endpoint compatible with the Azure AI Invocations protocol. 
+ +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **.NET 10.0 SDK or later** + - Verify your version: `dotnet --version` + - Download from [https://dotnet.microsoft.com/download](https://dotnet.microsoft.com/download) + +> [!NOTE] +> You do **not** need an existing [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry) project or model deployment to get started — `azd provision` creates them for you. If you already have a project, see the [note below](#using-azd-recommended-for-cli-workflows) on how to target it. + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. 
| + +**Local development (without `azd`):** + +```bash +# Set env vars directly — .NET does not natively read .env files +export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="" +``` + +> [!NOTE] +> When using `azd ai agent run`, environment variables are handled automatically — no manual setup needed. + +### Running the Sample + +The recommended way to run and test hosted agents locally is with the Azure Developer CLI (`azd`) or the Foundry VS Code extension. + +#### Using the Foundry VS Code Extension + +The [Foundry VS Code extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) has a built-in sample gallery. You can open this sample directly from the extension without cloning the repository — it scaffolds the project into a new workspace, generates `agent.yaml`, `.env`, and `.vscode/tasks.json` + `launch.json` automatically, and configures a one-click **F5** debug experience. + +Follow the [VS Code quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) for a full step-by-step walkthrough. + +#### Using [`azd`](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd) (recommended for CLI workflows) + +No cloning required. 
Create a new folder, point `azd` at the manifest on GitHub, and it sets up the sample and generates Bicep infrastructure, `agent.yaml`, and env config automatically: + +```bash +# Create a new folder for the agent and navigate into it +mkdir hello-world-agent && cd hello-world-agent + +# Initialize from the manifest — azd reads it, downloads the sample, +# and generates Bicep infrastructure, agent.yaml, and env config +azd ai agent init -m https://github.com/microsoft-foundry/foundry-samples/blob/main/samples/dotnet/hosted-agents/bring-your-own/invocations/HelloWorld/agent.manifest.yaml + +# Provision Azure resources (Foundry project, model deployment, App Insights) +azd provision + +# Run the agent locally (handles env vars, build, and startup) +azd ai agent run +``` + +> [!NOTE] +> If you've already cloned this repository, pass a local path to the manifest instead: +> `azd ai agent init -m /samples/dotnet/hosted-agents/bring-your-own/invocations/HelloWorld/agent.manifest.yaml` + +> [!NOTE] +> If you already have a Foundry project and model deployment, add `-p -d ` to `azd ai agent init` to target existing resources. You can also skip provisioning entirely and configure env vars manually — see [Without `azd`](#without-azd). + +The agent starts on `http://localhost:8088/`. To invoke it: + +```bash +azd ai agent invoke --local "What is Microsoft Foundry?" +``` + +Or use curl directly. The `-N` flag disables output buffering so you see SSE tokens as they arrive: + +> [!NOTE] +> `agent_session_id` is optional. If omitted, the server auto-generates one and returns it in the `done` event (`session_id` field). To continue a conversation across turns, pass the same `agent_session_id` in each request. 
+ +```bash +# Turn 1 — start a new conversation +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "What is Microsoft Foundry?"}' + +# Turn 2 — continue the same conversation +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "What hosted agent options does it offer?"}' +``` + +Each response is a stream of SSE events: `token` events with incremental text, followed by a `done` event with the complete reply. + +#### Without `azd` + +If running without `azd`, set environment variables manually (see [Environment Variables](#environment-variables)), then: + +```bash +dotnet run +``` + +### Deploying the Agent to Microsoft Foundry + +Once you've tested locally, deploy to Microsoft Foundry: + +```bash +# Provision Azure resources (skip if already done during local setup) +azd provision + +# Build, push, and deploy the agent to Foundry +azd deploy +``` + +After deploying, invoke the agent running in Foundry: + +```bash +azd ai agent invoke "What is Microsoft Foundry?" +``` + +To stream logs from the running agent: + +```bash +azd ai agent monitor +``` + +For the full deployment guide, see [Azure AI Foundry hosted agents](https://aka.ms/azdaiagent/docs). + +## Troubleshooting + +### Images built on Apple Silicon or other ARM64 machines do not work on our service + +We **recommend deploying with `azd deploy`**, which uses ACR remote build and always produces images with the correct architecture. + +If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. + +**Fix for local builds:** + +```bash +docker build --platform=linux/amd64 -t image . +``` + +This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.manifest.yaml new file mode 100644 index 000000000..2d6646ce8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.manifest.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: hello-world-dotnet-invocations +displayName: "Hello World (.NET, Invocations)" +description: > + Minimal Hello World agent using the Invocations protocol with a bring-your-own + approach in C#. Calls a Foundry model via the Responses API and returns the + response as a streaming SSE event stream. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - .NET +template: + name: hello-world-dotnet-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.yaml new file mode 100644 index 000000000..31bad1f59 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/HelloWorld/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: hello-world-dotnet-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore new file mode 100644 index 000000000..a49232de8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore @@ -0,0 +1,35 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example 
@@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile new file mode 100644 index 000000000..4cd857ce8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . +EXPOSE 8088 +ENTRYPOINT ["dotnet", "human-in-the-loop.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Program.cs b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Program.cs new file mode 100644 index 000000000..3bc3a2d26 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/Program.cs @@ -0,0 +1,382 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json; +using Azure.AI.AgentServer.Invocations; +using Azure.AI.Extensions.OpenAI; +using Azure.AI.Projects; +using Azure.Identity; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.DependencyInjection; +using OpenAI.Responses; + +// --------------------------------------------------------------------------- +// Foundry project configuration +// --------------------------------------------------------------------------- +if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + +var foundryEndpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is required."); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? throw new InvalidOperationException("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is required."); + +var projectClient = new AIProjectClient(new Uri(foundryEndpoint), new DefaultAzureCredential()); + +// Use the Responses API — not GetChatClient() (Chat Completions API is legacy). +var responsesClient = projectClient.ProjectOpenAIClient + .GetProjectResponsesClientForModel(deployment); + +// Load persisted sessions from disk +SessionStore.LoadAllSessions(); + +InvocationsServer.Run(configure: builder => +{ + builder.Services.AddSingleton(responsesClient); +}); + +// ────────────────────────────────────────────────────────────────── +// Handler +// ────────────────────────────────────────────────────────────────── + +/// +/// Human-in-the-loop agent using the invocations protocol with Azure OpenAI. 
+/// Implements an approval-gate pattern: generates a proposal, pauses for +/// human review, and resumes after the human approves, revises, or rejects. +/// +public class HumanInTheLoopHandler : InvocationHandler +{ + private readonly ProjectResponsesClient _responsesClient; + + private const string SystemPrompt = + "You are a professional assistant. The user will give you a task. " + + "Generate a high-quality draft proposal that the user can review " + + "and approve. Be detailed, well-structured, and ready for review.\n\n" + + "If revision feedback is provided, incorporate it into an improved " + + "version of the proposal."; + + public HumanInTheLoopHandler(ProjectResponsesClient responsesClient) => _responsesClient = responsesClient; + + // ── POST /invocations ── + + public override async Task HandleAsync( + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + JsonElement body; + try + { + body = await request.ReadFromJsonAsync(cancellationToken); + if (body.ValueKind != JsonValueKind.Object) + throw new JsonException("body is not a JSON object"); + } + catch (JsonException) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new + { + error = "invalid_request", + message = "Request body must be a JSON object with either a \"task\" string (e.g. {\"task\": \"draft a project proposal\"}) to start a new proposal, or a \"decision\" of \"approve\" / \"revise\" / \"reject\" (e.g. {\"decision\": \"revise\", \"feedback\": \"make it shorter\"}).", + }, + cancellationToken); + return; + } + + var sessionId = context.SessionId; + var invocationId = context.InvocationId; + + var hasTask = body.TryGetProperty("task", out var taskProp); + var hasDecision = body.TryGetProperty("decision", out var decisionProp); + var task = hasTask ? taskProp.GetString() : null; + var decision = hasDecision ? 
decisionProp.GetString() : null; + + if (!string.IsNullOrEmpty(task) && !string.IsNullOrEmpty(decision)) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = "Cannot provide both 'task' and 'decision' in the same request." }, + cancellationToken); + return; + } + + // --- New task submission --- + if (!string.IsNullOrEmpty(task)) + { + if (string.IsNullOrWhiteSpace(task)) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync(new { error = "task cannot be empty" }, cancellationToken); + return; + } + + var existing = SessionStore.GetBySession(sessionId); + if (existing is not null && existing.Status == "awaiting_approval") + { + response.StatusCode = 409; + await response.WriteAsJsonAsync(new + { + error = $"Session {sessionId} has a pending proposal. Approve, revise, or reject it before submitting a new task." + }, cancellationToken); + return; + } + + var proposal = await GenerateProposalAsync(task, new List(), cancellationToken); + + var session = new HitlSession + { + SessionId = sessionId, + Status = "awaiting_approval", + OriginalTask = task, + Proposal = proposal, + RevisionHistory = new List(), + InvocationId = invocationId, + InvocationIds = new List { invocationId }, + }; + + SessionStore.TrackInvocation(invocationId, sessionId); + SessionStore.Save(sessionId, session); + + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = "awaiting_approval", + proposal, + revision_count = 0, + }, cancellationToken); + return; + } + + // --- Decision on existing proposal --- + if (!string.IsNullOrEmpty(decision)) + { + var sessionLock = SessionStore.GetLock(sessionId); + await sessionLock.WaitAsync(cancellationToken); + try + { + var session = SessionStore.GetBySession(sessionId); + if (session is null) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = $"No pending session found for session_id={sessionId}" }, + cancellationToken); + 
return; + } + + if (session.Status != "awaiting_approval") + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = $"Session is not awaiting approval (status={session.Status})" }, + cancellationToken); + return; + } + + if (decision is not ("approve" or "revise" or "reject")) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = $"Unknown decision: {decision}. Use 'approve', 'revise', or 'reject'." }, + cancellationToken); + return; + } + + var feedback = body.TryGetProperty("feedback", out var fbProp) ? fbProp.GetString() ?? "" : ""; + if (decision == "revise" && string.IsNullOrEmpty(feedback)) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = "feedback is required for 'revise' decision" }, + cancellationToken); + return; + } + + // All validation passed — track the invocation + session.InvocationId = invocationId; + session.InvocationIds.Add(invocationId); + SessionStore.TrackInvocation(invocationId, sessionId); + + if (decision == "approve") + { + session.Status = "completed"; + SessionStore.Save(sessionId, session); + + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = "completed", + final_output = session.Proposal, + revision_count = session.RevisionHistory.Count, + }, cancellationToken); + return; + } + + if (decision == "revise") + { + session.RevisionHistory.Add(new RevisionEntry + { + Proposal = session.Proposal, + Feedback = feedback, + }); + + var newProposal = await GenerateProposalAsync( + session.OriginalTask, session.RevisionHistory, cancellationToken); + session.Proposal = newProposal; + session.Status = "awaiting_approval"; + SessionStore.Save(sessionId, session); + + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = "awaiting_approval", + proposal = newProposal, + revision_count = session.RevisionHistory.Count, + }, cancellationToken); + 
return; + } + + // decision == "reject" + session.Status = "rejected"; + SessionStore.Save(sessionId, session); + + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = "rejected", + revision_count = session.RevisionHistory.Count, + }, cancellationToken); + } + finally + { + sessionLock.Release(); + } + return; + } + + // Neither task nor decision provided + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new { error = "invalid_request", message = "Request body must be a JSON object with either a \"task\" string (e.g. {\"task\": \"draft a project proposal\"}) to start a new proposal, or a \"decision\" of \"approve\" / \"revise\" / \"reject\" (e.g. {\"decision\": \"revise\", \"feedback\": \"make it shorter\"})." }, + cancellationToken); + } + + // ── GET /invocations/{id} ── + + public override async Task GetAsync( + string invocationId, + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + var sessionId = SessionStore.GetSessionIdByInvocation(invocationId); + var session = sessionId is not null ? 
SessionStore.GetBySession(sessionId) : null; + + if (session is null) + { + response.StatusCode = 404; + await response.WriteAsJsonAsync(new { error = "not found" }, cancellationToken); + return; + } + + var responseData = new Dictionary + { + ["session_id"] = session.SessionId, + ["invocation_id"] = session.InvocationId, + ["status"] = session.Status, + ["original_task"] = session.OriginalTask, + ["revision_count"] = session.RevisionHistory.Count, + }; + + if (session.Status == "awaiting_approval") + responseData["proposal"] = session.Proposal; + else if (session.Status == "completed") + responseData["final_output"] = session.Proposal; + + await response.WriteAsJsonAsync(responseData, cancellationToken); + } + + // ── POST /invocations/{id}/cancel ── + + public override async Task CancelAsync( + string invocationId, + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + var sessionId = SessionStore.GetSessionIdByInvocation(invocationId); + var session = sessionId is not null ? 
SessionStore.GetBySession(sessionId) : null; + + if (session is null) + { + response.StatusCode = 404; + await response.WriteAsJsonAsync(new { error = "not found" }, cancellationToken); + return; + } + + var sessionLock = SessionStore.GetLock(sessionId!); + await sessionLock.WaitAsync(cancellationToken); + try + { + // Check terminal state under lock to prevent TOCTOU race + if (session.Status is "completed" or "rejected" or "cancelled") + { + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = session.Status, + error = "session already finalized", + }, cancellationToken); + return; + } + + session.InvocationId = invocationId; + session.InvocationIds.Add(invocationId); + SessionStore.TrackInvocation(invocationId, sessionId!); + session.Status = "cancelled"; + SessionStore.Save(sessionId!, session); + } + finally + { + sessionLock.Release(); + } + + await response.WriteAsJsonAsync(new + { + session_id = sessionId, + invocation_id = invocationId, + status = "cancelled", + }, cancellationToken); + } + + // ── LLM helper ── + + private async Task GenerateProposalAsync( + string task, + List revisionHistory, + CancellationToken cancellationToken) + { + var options = new CreateResponseOptions { Instructions = SystemPrompt }; + options.InputItems.Add(ResponseItem.CreateUserMessageItem($"Task: {task}")); + + foreach (var rev in revisionHistory) + { + options.InputItems.Add(ResponseItem.CreateAssistantMessageItem(rev.Proposal)); + options.InputItems.Add(ResponseItem.CreateUserMessageItem($"Revision feedback: {rev.Feedback}")); + } + + var result = await _responsesClient.CreateResponseAsync(options, cancellationToken); + return result.Value.GetOutputText() ?? 
""; + } +} diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md new file mode 100644 index 000000000..23f17bad8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md @@ -0,0 +1,178 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Human-in-the-Loop Agent (Invocations Protocol) — .NET + +This sample demonstrates a human-in-the-loop agent built with [Azure.AI.AgentServer.Invocations](https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json) that implements an **approval-gate pattern**. The agent generates a proposal using Azure OpenAI, pauses for human review, and resumes execution after the human approves, requests a revision, or rejects. + +Session state is persisted as JSON files in the `$HOME` directory, so proposals survive agent restarts and are accessible via the **Session Files API** when deployed to Azure. + +This pattern is useful for workflows where an AI agent should **not act autonomously** — for example, drafting communications, generating code changes, or proposing decisions that require human sign-off. + +## How It Works + +``` +[new task] ──► AWAITING_APPROVAL ──► (approve) ──► COMPLETED + │ + ├──► (revise + feedback) ──► AWAITING_APPROVAL (loop) + │ + └──► (reject) ──► REJECTED +``` + +1. **Submit a task** via `POST /invocations` — the agent calls Azure OpenAI to generate a proposal and returns it with status `awaiting_approval`. +2. 
**The agent pauses** — the proposal is saved to disk, and the human can return at any time (minutes, hours, or days later).
+3. **Respond with a decision** via another `POST /invocations` using the same `agent_session_id`:
+   - `approve` — the agent marks the proposal as final and returns it.
+   - `revise` (with feedback) — the agent generates an improved proposal incorporating the feedback.
+   - `reject` — the agent marks the session as rejected.
+4. **Poll status** via `GET /invocations/{id}` — useful for checking whether a proposal is still pending after reconnecting.
+5. **Cancel** via `POST /invocations/{id}/cancel` — cancels a pending session.
+
+## Running Locally
+
+### Prerequisites
+
+- [.NET 10.0 SDK](https://dotnet.microsoft.com/download/dotnet/10.0)
+- Azure CLI installed and authenticated (`az login`)
+- An Azure AI Foundry project with a deployed model
+
+### Environment Variables
+
+```bash
+export FOUNDRY_PROJECT_ENDPOINT="https://your-resource.services.ai.azure.com/api/projects/your-project"
+export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini"
+```
+
+### Start the Agent
+
+```bash
+dotnet run
+```
+
+The agent starts on `http://localhost:8088/`.
+
+### Using `azd ai agent run` (Local Development)
+
+```bash
+azd ai agent run
+```
+
+### Test with curl
+
+```bash
+# Step 1: Submit a task — agent generates a proposal
+curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \
+  -H "Content-Type: application/json" \
+  -d '{"task": "Draft a marketing email for our new AI product launch"}'
+# -> {"status": "awaiting_approval", "proposal": "...", "session_id": "session-1", ...}
+
+# Step 2: Check status (e.g., after reconnecting hours later)
+curl http://localhost:8088/invocations/<invocation-id>
+# -> {"status": "awaiting_approval", "proposal": "...", ...}
+
+# Step 3a: Approve the proposal
+curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \
+  -H "Content-Type: application/json" \
+  -d '{"decision": "approve"}'
+# -> {"status": "completed", "final_output": "...", ...}
+
+# Step 3b: Or request a revision with feedback
+curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \
+  -H "Content-Type: application/json" \
+  -d '{"decision": "revise", "feedback": "Make the tone more casual and add a call-to-action"}'
+# -> {"status": "awaiting_approval", "proposal": "...", ...}
+
+# Step 3c: Or reject
+curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \
+  -H "Content-Type: application/json" \
+  -d '{"decision": "reject"}'
+# -> {"status": "rejected", ...}
+
+# Cancel a pending session
+curl -X POST http://localhost:8088/invocations/<invocation-id>/cancel
+# -> {"status": "cancelled", ...}
+```
+
+## Invoke with azd
+
+### Local
+
+**Bash:**
+```bash
+azd ai agent invoke --local '{"task": "Write a product launch announcement for Azure AI Foundry"}'
+```
+
+**PowerShell:**
+```powershell
+azd ai agent invoke --local '{\"task\": \"Write a product launch announcement for Azure AI Foundry\"}'
+```
+
+### Remote (after `azd up`)
+
+**Bash:**
+```bash
+azd ai agent invoke '{"task": "Write a product launch announcement for Azure AI Foundry"}'
+```
+
+**PowerShell:**
+```powershell +azd ai agent invoke '{\"task\": \"Write a product launch announcement for Azure AI Foundry\"}' +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Project Structure + +``` +human-in-the-loop/ +├── Program.cs # Entry point, DI setup, and InvocationHandler implementation +├── SessionStore.cs # Session state persistence (JSON files in $HOME) +├── human-in-the-loop.csproj # Project file with NuGet dependencies +├── Dockerfile # Multi-stage Docker build +├── .dockerignore # Docker build exclusions +├── .env.example # Example environment variables +├── agent.yaml # Agent deployment configuration +├── agent.manifest.yaml # Agent manifest with metadata and resources +├── test-payload.json # Sample request payload for testing +└── README.md # This file +``` + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', ...}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. 
Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/SessionStore.cs b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/SessionStore.cs new file mode 100644 index 000000000..f59d2f3d8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/SessionStore.cs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Concurrent; +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization; + +/// +/// Manages human-in-the-loop session state with JSON file persistence. +/// State is stored in $HOME so files are accessible via the Session Files API. +/// +public static class SessionStore +{ + private static readonly string StateDir = + Environment.GetEnvironmentVariable("HOME") ?? 
Directory.GetCurrentDirectory(); + + private static readonly ConcurrentDictionary Sessions = new(); + private static readonly ConcurrentDictionary InvocationToSession = new(); + private static readonly ConcurrentDictionary SessionLocks = new(); + + /// Load all persisted sessions into memory on startup. + public static void LoadAllSessions() + { + if (!Directory.Exists(StateDir)) + return; + + foreach (var path in Directory.EnumerateFiles(StateDir, "hitl_session_*.json")) + { + try + { + var json = File.ReadAllText(path); + var session = JsonSerializer.Deserialize(json); + if (session is null || string.IsNullOrEmpty(session.SessionId)) + continue; + + var requiredFields = !string.IsNullOrEmpty(session.Status) + && !string.IsNullOrEmpty(session.OriginalTask); + if (!requiredFields) + continue; + + Sessions[session.SessionId] = session; + + foreach (var invId in session.InvocationIds) + InvocationToSession[invId] = session.SessionId; + } + catch + { + // Skip corrupt files + } + } + + if (!Sessions.IsEmpty) + Console.WriteLine($"Loaded {Sessions.Count} session(s) from disk"); + } + + public static HitlSession? GetBySession(string sessionId) => + Sessions.TryGetValue(sessionId, out var s) ? s : null; + + public static HitlSession? GetByInvocation(string invocationId) + { + var sessionId = GetSessionIdByInvocation(invocationId); + return sessionId is not null ? GetBySession(sessionId) : null; + } + + public static string? GetSessionIdByInvocation(string invocationId) => + InvocationToSession.TryGetValue(invocationId, out var sid) ? sid : null; + + /// Persist session state atomically to a JSON file in $HOME. 
+ public static void Save(string sessionId, HitlSession session) + { + Sessions[sessionId] = session; + var target = GetFilePath(sessionId); + var tempPath = target + ".tmp"; + try + { + var json = JsonSerializer.Serialize(session, new JsonSerializerOptions { WriteIndented = true }); + File.WriteAllText(tempPath, json); + File.Move(tempPath, target, overwrite: true); + } + catch + { + try { File.Delete(tempPath); } catch { } + throw; + } + } + + public static void TrackInvocation(string invocationId, string sessionId) => + InvocationToSession[invocationId] = sessionId; + + public static SemaphoreSlim GetLock(string sessionId) => + SessionLocks.GetOrAdd(sessionId, _ => new SemaphoreSlim(1, 1)); + + private static string GetFilePath(string sessionId) + { + var safeId = new string(sessionId.Select(c => char.IsLetterOrDigit(c) || c == '-' || c == '_' ? c : '_').ToArray()); + var hashBytes = SHA256.HashData(Encoding.UTF8.GetBytes(sessionId)); + var hashSuffix = Convert.ToHexString(hashBytes)[..8].ToLowerInvariant(); + return Path.Combine(StateDir, $"hitl_session_{safeId}_{hashSuffix}.json"); + } +} + +// ────────────────────────────────────────────────────────────────── +// Session models +// ────────────────────────────────────────────────────────────────── + +public class HitlSession +{ + [JsonPropertyName("session_id")] + public string SessionId { get; set; } = ""; + + [JsonPropertyName("status")] + public string Status { get; set; } = ""; + + [JsonPropertyName("original_task")] + public string OriginalTask { get; set; } = ""; + + [JsonPropertyName("proposal")] + public string Proposal { get; set; } = ""; + + [JsonPropertyName("revision_history")] + public List RevisionHistory { get; set; } = new(); + + [JsonPropertyName("invocation_id")] + public string InvocationId { get; set; } = ""; + + [JsonPropertyName("invocation_ids")] + public List InvocationIds { get; set; } = new(); +} + +public class RevisionEntry +{ + [JsonPropertyName("proposal")] + public string 
Proposal { get; set; } = ""; + + [JsonPropertyName("feedback")] + public string Feedback { get; set; } = ""; +} diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml new file mode 100644 index 000000000..7d850fad8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: human-in-the-loop-dotnet-invocations +displayName: "Human-in-the-Loop (.NET, Invocations)" +description: > + A human-in-the-loop agent that demonstrates the approval-gate pattern using + the Azure.AI.AgentServer.Invocations SDK with Azure OpenAI. Generates a + proposal, pauses for human review, and resumes after the human approves, + requests a revision, or rejects. State is persisted as JSON files for + durability across restarts. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Human-in-the-Loop + - .NET +template: + name: human-in-the-loop-dotnet-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml new file mode 100644 index 000000000..5133e653a --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: human-in-the-loop-dotnet-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/human-in-the-loop.csproj b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/human-in-the-loop.csproj new file mode 100644 index 000000000..fea9f90bc --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/human-in-the-loop.csproj @@ -0,0 +1,24 @@ + + + + net10.0 + enable + enable + + $(NoWarn);OPENAI001 + + + + + + + + + + + + + + \ No newline at end of file diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json new file mode 100644 index 000000000..c76d401ed --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json @@ -0,0 +1,3 @@ +{ + "task": "analyze dataset" +} diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore new file mode 100644 index 000000000..a49232de8 --- /dev/null +++ 
b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore @@ -0,0 +1,35 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile new file mode 100644 index 000000000..e68a388c4 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . 
+EXPOSE 8088 +ENTRYPOINT ["dotnet", "notetaking-agent.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/NoteStore.cs b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/NoteStore.cs new file mode 100644 index 000000000..888b5f765 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/NoteStore.cs @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; + +public record NoteEntry(string Note, DateTime Timestamp); + +// ────────────────────────────────────────────────────────────────── +// Note storage — JSONL file per session +// ────────────────────────────────────────────────────────────────── + +public static class NoteStore +{ + private static readonly object s_lock = new(); + + private static string GetFilePath(string sessionId) + { + var safeId = string.Join("_", sessionId.Split(Path.GetInvalidFileNameChars())); + // Write to HOME so files are accessible via the Session Files API. + var baseDir = Environment.GetEnvironmentVariable("HOME") + ?? Directory.GetCurrentDirectory(); + return Path.Combine(baseDir, $"notes_{safeId}.jsonl"); + } + + public static NoteEntry SaveNote(string sessionId, string noteText) + { + var entry = new NoteEntry(noteText, DateTime.UtcNow); + var json = JsonSerializer.Serialize(entry); + lock (s_lock) + { + File.AppendAllText(GetFilePath(sessionId), json + Environment.NewLine); + } + return entry; + } + + public static List GetNotes(string sessionId) + { + var path = GetFilePath(sessionId); + lock (s_lock) + { + if (!File.Exists(path)) return new List(); + + return File.ReadAllLines(path) + .Where(line => !string.IsNullOrWhiteSpace(line)) + .Select(line => JsonSerializer.Deserialize(line)) + .Where(entry => entry is not null) + .Select(entry => entry!) 
+ .ToList(); + } + } +} diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Program.cs b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Program.cs new file mode 100644 index 000000000..260f84cce --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/Program.cs @@ -0,0 +1,249 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using Azure.AI.AgentServer.Invocations; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.DependencyInjection; +using OpenAI.Chat; + +// Derive Azure OpenAI endpoint from the auto-injected Foundry project endpoint +if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + +var foundryEndpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set."); +var azureOpenAIEndpoint = new Uri(foundryEndpoint).GetLeftPart(UriPartial.Authority); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? 
throw new InvalidOperationException("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set."); + +var aoaiClient = new AzureOpenAIClient( + new Uri(azureOpenAIEndpoint), + new DefaultAzureCredential()); +var chatClient = aoaiClient.GetChatClient(deployment); + +InvocationsServer.Run(configure: builder => +{ + builder.Services.AddSingleton(chatClient); +}); + +// ────────────────────────────────────────────────────────────────── +// Handler +// ────────────────────────────────────────────────────────────────── + +/// +/// Note-taking agent using the invocations protocol with Azure OpenAI function calling. +/// Streams responses as SSE events with per-session JSONL persistence. +/// +public class NoteTakingHandler : InvocationHandler +{ + private readonly ChatClient _chatClient; + + public NoteTakingHandler(ChatClient chatClient) => _chatClient = chatClient; + + public override async Task HandleAsync( + HttpRequest request, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + string userMessage; + try + { + var input = await request.ReadFromJsonAsync(cancellationToken); + userMessage = input?.Message ?? ""; + if (string.IsNullOrWhiteSpace(userMessage)) + throw new JsonException("missing or empty \"message\" field"); + } + catch (JsonException) + { + response.StatusCode = 400; + await response.WriteAsJsonAsync( + new + { + error = "invalid_request", + message = "Request body must be a JSON object with a non-empty \"message\" string, e.g. {\"message\": \"save a note - book reservation for dinner\"}", + }, + cancellationToken); + return; + } + + var sessionId = context.SessionId; + + // Set up SSE streaming + response.ContentType = "text/event-stream"; + response.Headers.CacheControl = "no-cache"; + + // Define tools for Azure OpenAI function calling + var tools = new ChatTool[] + { + ChatTool.CreateFunctionTool( + "save_note", + "Save a note with the current timestamp. 
Use this when the user asks to save, add, or create a note.", + BinaryData.FromString(""" + { + "type": "object", + "properties": { + "note": { + "type": "string", + "description": "The note text to save" + } + }, + "required": ["note"] + } + """)), + ChatTool.CreateFunctionTool( + "get_notes", + "Retrieve all saved notes. Use this when the user asks to get, list, show, or view their notes.", + BinaryData.FromString(""" + { + "type": "object", + "properties": {}, + "required": [] + } + """)) + }; + + var messages = new List + { + new SystemChatMessage( + "You are a helpful note-taking assistant. You can save notes and retrieve them. " + + "When the user asks to save a note, extract the note content and call save_note. " + + "When the user asks to see their notes, call get_notes. " + + "Always respond in a friendly, concise manner."), + new UserChatMessage(userMessage) + }; + + var options = new ChatCompletionOptions(); + foreach (var tool in tools) + options.Tools.Add(tool); + + // First call — may return tool calls + var completion = await _chatClient.CompleteChatAsync(messages, options, cancellationToken); + + // If tool calls are requested, execute them and send results back + if (completion.Value.FinishReason == ChatFinishReason.ToolCalls) + { + messages.Add(new AssistantChatMessage(completion.Value)); + + foreach (var toolCall in completion.Value.ToolCalls) + { + var result = ExecuteToolCall(toolCall.FunctionName, toolCall.FunctionArguments, sessionId); + messages.Add(new ToolChatMessage(toolCall.Id, result)); + } + + // Second call — stream natural language response + await StreamResponseAsync(messages, options, response, context, cancellationToken); + } + else + { + // Direct text response (no tool calls) — stream it + var text = completion.Value.Content?.FirstOrDefault()?.Text ?? 
""; + await StreamTextAsync(text, response, context, cancellationToken); + } + } + + // ── Streaming helpers ── + + private async Task StreamResponseAsync( + List messages, + ChatCompletionOptions options, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + var fullText = ""; + + await foreach (var update in _chatClient.CompleteChatStreamingAsync(messages, options, cancellationToken)) + { + foreach (var part in update.ContentUpdate) + { + if (!string.IsNullOrEmpty(part.Text)) + { + fullText += part.Text; + var tokenEvent = JsonSerializer.Serialize(new { type = "token", content = part.Text }); + await response.WriteAsync($"data: {tokenEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + } + } + } + + // Send completion event + var doneEvent = JsonSerializer.Serialize(new + { + type = "done", + invocation_id = context.InvocationId, + session_id = context.SessionId, + full_text = fullText + }); + await response.WriteAsync($"data: {doneEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + } + + private static async Task StreamTextAsync( + string text, + HttpResponse response, + InvocationContext context, + CancellationToken cancellationToken) + { + var words = text.Split(' '); + for (int i = 0; i < words.Length; i++) + { + var token = i == 0 ? 
words[i] : $" {words[i]}"; + var tokenEvent = JsonSerializer.Serialize(new { type = "token", content = token }); + await response.WriteAsync($"data: {tokenEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + await Task.Delay(30, cancellationToken); + } + + // Send completion event + var doneEvent = JsonSerializer.Serialize(new + { + type = "done", + invocation_id = context.InvocationId, + session_id = context.SessionId, + full_text = text + }); + await response.WriteAsync($"data: {doneEvent}\n\n", cancellationToken); + await response.Body.FlushAsync(cancellationToken); + } + + // ── Tool execution ── + + private static string ExecuteToolCall(string functionName, BinaryData arguments, string sessionId) + { + try + { + if (functionName == "save_note") + { + var args = JsonSerializer.Deserialize(arguments); + if (!args.TryGetProperty("note", out var noteProp)) + return JsonSerializer.Serialize(new { error = "Missing required 'note' argument" }); + + var noteText = noteProp.GetString() ?? 
""; + var entry = NoteStore.SaveNote(sessionId, noteText); + return JsonSerializer.Serialize(new { status = "saved", note = entry.Note, timestamp = entry.Timestamp }); + } + else if (functionName == "get_notes") + { + var notes = NoteStore.GetNotes(sessionId); + return JsonSerializer.Serialize(new { count = notes.Count, notes = notes.Select(n => new { n.Note, n.Timestamp }) }); + } + return JsonSerializer.Serialize(new { error = $"Unknown function: {functionName}" }); + } + catch (JsonException ex) + { + return JsonSerializer.Serialize(new { error = $"Invalid tool arguments: {ex.Message}" }); + } + } +} + +// ────────────────────────────────────────────────────────────────── +// Input model +// ────────────────────────────────────────────────────────────────── + +public record NoteInput(string Message); diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md new file mode 100644 index 000000000..991486219 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md @@ -0,0 +1,143 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Note-Taking Agent — Invocations Protocol + +This sample demonstrates a note-taking agent built with [Azure.AI.AgentServer.Invocations](https://www.nuget.org/packages/Azure.AI.AgentServer.Invocations) that uses **Azure OpenAI function calling** for intent understanding, **SSE streaming** for real-time token delivery, and **local JSONL file storage** for session-persistent notes. 
+ +## How It Works + +The agent receives natural language messages via `POST /invocations` and uses Azure OpenAI with two tool definitions — `save_note` and `get_notes` — to understand user intent. When the LLM returns a tool call, the agent executes it locally (reads/writes a JSONL file) and streams the LLM's natural language response back as Server-Sent Events. + +Notes are stored per session in `notes_{session_id}.jsonl` files, demonstrating **session persistence** — notes survive across multiple invocations within the same session. The session ID is resolved automatically from the `agent_session_id` query parameter. + +## Running Locally + +### Prerequisites + +- .NET 10.0 SDK +- Azure CLI installed and authenticated (`az login`) +- Foundry project with a deployed model (e.g., `gpt-4.1-mini`) + +### Build + +```bash +dotnet build +``` + +### Start the Agent + +```bash +cp .env.example .env # then edit values +export FOUNDRY_PROJECT_ENDPOINT="https://your-project.services.ai.azure.com/api/projects/your-project" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +dotnet run +``` + +The agent starts on `http://localhost:8088/`. + +### Test + +#### 1. 
Test with curl + +##### Save a note + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "save a note - book reservation for dinner"}' +``` + +##### Save another note + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "save a note - buy groceries"}' +``` + +##### Get all notes + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "get all my notes"}' +``` + +##### Start a new session + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=new-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "get all my notes"}' +``` + +#### 2. Test in Agent Inspector + +Once the agent is running, open **Agent Inspector** in VS Code to interactively send messages and view responses. + +![Agent Inspector](../../../../assets/agent-inspector-invocations.png) + +##### Save a note + +```json +{"message": "save a note - book reservation for dinner"} +``` + +##### Save another note + +```json +{"message": "save a note - buy groceries"} +``` + +##### Get all notes + +```json +{"message": "get all my notes"} +``` + +##### Start a new session + +Click the **Clear Conversation** button at the top-right corner to start a new session. 
+ +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', 'message': 'The principal lacks the required data action Microsoft.CognitiveServices/accounts/OpenAI/deployments/chat/completions/action to perform POST /openai/deployments/{deployment-id}/chat/completions operation.'}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. 
diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml new file mode 100644 index 000000000..86cfc5e41 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: notetaking-agent-dotnet-invocations +displayName: "Notetaking Agent (.NET, Invocations)" +description: > + A note-taking agent using the Invocations protocol with Azure OpenAI function + calling in C#. Demonstrates tool use (save_note, get_notes) with per-session + JSONL persistence and SSE streaming. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Function Calling + - Note Taking + - .NET +template: + name: notetaking-agent-dotnet-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml new file mode 100644 index 000000000..f7df54885 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: notetaking-agent-dotnet-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/notetaking-agent.csproj b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/notetaking-agent.csproj new file mode 100644 index 000000000..996ec1600 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/notetaking-agent.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + enable + enable + + + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json new file mode 100644 index 000000000..d3d80100b --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json @@ -0,0 +1 @@ +{"message": "save a note - book reservation for dinner"} diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.dockerignore new file mode 100644 index 000000000..ffd63fa37 --- /dev/null +++ 
b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.dockerignore @@ -0,0 +1,38 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.env.example b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Dockerfile new file mode 100644 index 000000000..efbd4fb6d --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . 
+RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . +EXPOSE 8088 +ENTRYPOINT ["dotnet", "HelloWorld.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/HelloWorld.csproj b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/HelloWorld.csproj new file mode 100644 index 000000000..f9ffff32b --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/HelloWorld.csproj @@ -0,0 +1,24 @@ + + + + net10.0 + enable + enable + + $(NoWarn);OPENAI001 + + + + + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Program.cs b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Program.cs new file mode 100644 index 000000000..849faa2b1 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/Program.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft. All rights reserved. + +/* + * Hello World — Bring Your Own Responses agent for C# + * + * Minimal hosted agent that forwards user input to a Foundry model via the + * Responses API and returns the reply through the Responses protocol. + * + * This sample demonstrates the simplest possible BYO integration: the protocol + * SDK (Azure.AI.AgentServer.Responses) handles the HTTP contract and SSE + * lifecycle, and you supply the model call using the Foundry SDK + * (Azure.AI.Projects and Azure.AI.Extensions.OpenAI). + * + * Conversation history is retrieved via ResponseContext.GetHistoryAsync() and + * included in each model call so the agent maintains context across turns. 
+ * + * Required environment variables: + * FOUNDRY_PROJECT_ENDPOINT — Foundry project endpoint (auto-injected in hosted containers) + * AZURE_AI_MODEL_DEPLOYMENT_NAME — Model deployment name (declared in agent.manifest.yaml) + * + * Usage: + * dotnet run + * + * # Invoke the agent (in a separate terminal): + * curl -sS -X POST http://localhost:8088/responses \ + * -H "Content-Type: application/json" \ + * -d '{"input": "What is Microsoft Foundry?", "stream": false}' | jq . + */ + +using Azure.AI.AgentServer.Responses; +using Azure.AI.AgentServer.Responses.Models; +using Azure.AI.Extensions.OpenAI; +using Azure.AI.Projects; +using Azure.Identity; +using Microsoft.Extensions.Logging; +using OpenAI.Responses; + +// One-liner startup — wires up Kestrel on port 8088, OpenTelemetry, health probes, +// and the Responses API endpoints. Telemetry is configured automatically: +// when APPLICATIONINSIGHTS_CONNECTION_STRING is set, traces and logs are sent to +// Application Insights with no extra code. +ResponsesServer.Run(configure: builder => +{ + if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + + var endpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set."); + + var model = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? 
throw new InvalidOperationException( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set."); + + var projectClient = new AIProjectClient(new Uri(endpoint), new DefaultAzureCredential()); + + // Use the Responses client — not GetChatClient() (Chat Completions API) + var responsesClient = projectClient.ProjectOpenAIClient + .GetProjectResponsesClientForModel(model); + + builder.Services.AddSingleton(responsesClient); +}); + +/// +/// Hello World handler — forwards user input to a Foundry model via the Responses API. +/// Conversation history is fetched via and +/// included in each model call so the agent maintains context across conversation turns. +/// +/// Foundry Responses API client, injected via DI. +/// Logger injected via DI. Calls are automatically exported to Application Insights. +public sealed class HelloWorldHandler( + ProjectResponsesClient responsesClient, + ILogger logger) : ResponseHandler +{ + private const string SystemPrompt = "You are a helpful AI assistant. Be concise and informative."; + + public override IAsyncEnumerable CreateAsync( + CreateResponse request, + ResponseContext context, + CancellationToken cancellationToken) + { + // TextResponse wraps the result text in the full SSE lifecycle: + // response.created → response.in_progress → content events → response.completed + return new TextResponse(context, request, + createText: ct => GenerateTextAsync(context, ct)); + } + + private async Task GenerateTextAsync( + ResponseContext context, + CancellationToken cancellationToken) + { + var userInput = await context.GetInputTextAsync(cancellationToken: cancellationToken) ?? "Hello!"; + var history = await context.GetHistoryAsync(cancellationToken); + + logger.LogInformation("Processing request {ResponseId}", context.ResponseId); + + var options = new CreateResponseOptions + { + Instructions = SystemPrompt, + }; + + // Reconstruct conversation history for the model. 
+ // GetHistoryAsync walks the previous_response_id chain and returns items oldest-first. + // Each OutputItemMessage can contain both user input content and assistant output content. + foreach (var item in history) + { + if (item is OutputItemMessage { Content: { } contents }) + { + foreach (var content in contents) + { + switch (content) + { + case MessageContentOutputTextContent { Text: { } assistantText }: + // Assistant message from a previous turn + options.InputItems.Add(ResponseItem.CreateAssistantMessageItem(assistantText)); + break; + case MessageContentInputTextContent { Text: { } userText }: + // User message from a previous turn + options.InputItems.Add(ResponseItem.CreateUserMessageItem(userText)); + break; + } + } + } + } + + // Add the current user message + options.InputItems.Add(ResponseItem.CreateUserMessageItem(userInput)); + + var result = await responsesClient.CreateResponseAsync(options); + return result.Value.GetOutputText() ?? string.Empty; + } +} diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/README.md b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/README.md new file mode 100644 index 000000000..e111ce4a7 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/README.md @@ -0,0 +1,183 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency note for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note). 
+ +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# What this sample demonstrates + +A minimal "hello world" hosted agent using the **Bring Your Own** approach with the **Responses protocol** in C#. It shows how to use the [`Azure.AI.AgentServer.Responses`](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses/) SDK to host a custom agent that calls a Foundry model via the Responses API and returns the reply through the standard Responses protocol contract. + +This is the simplest possible BYO integration — the protocol SDK handles the HTTP endpoints, SSE lifecycle, health probes, and OpenTelemetry tracing. You supply the model call using the [Foundry SDK (`Azure.AI.Projects` + `Azure.AI.Extensions.OpenAI`)](https://www.nuget.org/packages/Azure.AI.Extensions.OpenAI/). + +## How It Works + +### Model Integration + +The agent uses the Foundry SDK to create a `ProjectResponsesClient` from the project endpoint and model deployment name. When a request arrives, the handler fetches conversation history via `GetHistoryAsync()`, builds an input list, calls the model via the Responses API, and returns the reply as a `TextResponse` — which the SDK automatically wraps in the correct SSE lifecycle events (`response.created` → `response.in_progress` → content events → `response.completed`). 
+ +See [Program.cs](Program.cs) for the full implementation. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Responses SDK](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses/), which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **.NET 10.0 SDK or later** + - Verify your version: `dotnet --version` + - Download from [https://dotnet.microsoft.com/download](https://dotnet.microsoft.com/download) + +> [!NOTE] +> You do **not** need an existing [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry) project or model deployment to get started — `azd provision` creates them for you. If you already have a project, see the [note below](#using-azd-recommended-for-cli-workflows) on how to target it. + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. 
| +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. | + +**Local development (without `azd`):** + +```bash +# Set env vars directly — .NET does not natively read .env files +export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="" +``` + +> [!NOTE] +> When using `azd ai agent run`, environment variables are handled automatically — no manual setup needed. + +### Installing Dependencies + +> [!NOTE] +> If using `azd ai agent run`, dependencies are restored automatically — skip to [Running the Sample](#running-the-sample). + +Dependencies are restored automatically when building the project: + +```bash +dotnet restore +``` + +### Running the Sample + +The recommended way to run and test hosted agents locally is with the Azure Developer CLI (`azd`) or the Foundry VS Code extension. + +#### Using the Foundry VS Code Extension + +The [Foundry VS Code extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) has a built-in sample gallery. You can open this sample directly from the extension without cloning the repository — it scaffolds the project into a new workspace, generates `agent.yaml`, `.env`, and `.vscode/tasks.json` + `launch.json` automatically, and configures a one-click **F5** debug experience. + +Follow the [VS Code quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) for a full step-by-step walkthrough. + +#### Using [`azd`](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd) (recommended for CLI workflows) + +No cloning required. 
Create a new folder, point `azd` at the manifest on GitHub, and it sets up the sample and generates Bicep infrastructure, `agent.yaml`, and env config automatically: + +```bash +# Create a new folder for the agent and navigate into it +mkdir hello-world-agent && cd hello-world-agent + +# Initialize from the manifest — azd reads it, downloads the sample, +# and generates Bicep infrastructure, agent.yaml, and env config +azd ai agent init -m https://github.com/microsoft-foundry/foundry-samples/blob/main/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.manifest.yaml + +# Provision Azure resources (Foundry project, model deployment, App Insights) +azd provision + +# Run the agent locally (handles env vars, build, and startup) +azd ai agent run +``` + +> [!NOTE] +> If you've already cloned this repository, pass a local path to the manifest instead: +> `azd ai agent init -m <path-to-repo>/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.manifest.yaml` + +> [!NOTE] +> If you already have a Foundry project and model deployment, add `-p <project-endpoint> -d <model-deployment-name>` to `azd ai agent init` to target existing resources. You can also skip provisioning entirely and configure env vars manually — see [Without `azd`](#without-azd). + +The agent starts on `http://localhost:8088/`. To invoke it: + +```bash +azd ai agent invoke --local "What is Microsoft Foundry?" +``` + +Or use curl directly: + +```bash +curl -sS -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "What is Microsoft Foundry?", "stream": false}' | jq . 
+``` + +#### Without `azd` + +If running without `azd`, set environment variables manually (see [Environment Variables](#environment-variables)), then: + +```bash +dotnet run +``` + +### Deploying the Agent to Microsoft Foundry + +Once you've tested locally, deploy to Microsoft Foundry: + +```bash +# Provision Azure resources (skip if already done during local setup) +azd provision + +# Build, push, and deploy the agent to Foundry +azd deploy +``` + +After deploying, invoke the agent running in Foundry: + +```bash +azd ai agent invoke "What is Microsoft Foundry?" +``` + +To stream logs from the running agent: + +```bash +azd ai agent monitor +``` + +For the full deployment guide, see [Azure AI Foundry hosted agents](https://aka.ms/azdaiagent/docs). + +## Troubleshooting + +### Images built on Apple Silicon or other ARM64 machines do not work on our service + +We **recommend deploying with `azd deploy`**, which uses ACR remote build and always produces images with the correct architecture. + +If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. + +**Fix for local builds:** + +```bash +docker build --platform=linux/amd64 -t image . +``` + +This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.manifest.yaml new file mode 100644 index 000000000..16a51bc95 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.manifest.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: hello-world-dotnet-responses +displayName: "Hello World (.NET, Responses)" +description: > + Minimal Hello World agent using the Responses protocol with a bring-your-own + approach in C#. Calls a Foundry model via the Responses API and returns the + response. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - .NET +template: + name: hello-world-dotnet-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.yaml new file mode 100644 index 000000000..85a975528 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/HelloWorld/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: hello-world-dotnet-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.dockerignore new file mode 100644 index 000000000..a49232de8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.dockerignore @@ -0,0 +1,35 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.env.example b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry 
project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Dockerfile new file mode 100644 index 000000000..033cc7532 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . +EXPOSE 8088 +ENTRYPOINT ["dotnet", "background-agent.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Program.cs b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Program.cs new file mode 100644 index 000000000..65d555195 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/Program.cs @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Runtime.CompilerServices; +using Azure.AI.AgentServer.Responses; +using Azure.AI.AgentServer.Responses.Models; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Extensions.DependencyInjection; +using OpenAI.Chat; + +if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + +// Derive Azure OpenAI endpoint from the auto-injected Foundry project endpoint +var foundryEndpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is required."); +var azureOpenAIEndpoint = new Uri(foundryEndpoint).GetLeftPart(UriPartial.Authority); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? throw new InvalidOperationException("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is required."); + +var aoaiClient = new AzureOpenAIClient( + new Uri(azureOpenAIEndpoint), + new DefaultAzureCredential()); +var chatClient = aoaiClient.GetChatClient(deployment); + +ResponsesServer.Run(configure: builder => +{ + builder.Services.AddSingleton(chatClient); +}); + +// ────────────────────────────────────────────────────────────────── +// Handler +// ────────────────────────────────────────────────────────────────── + +/// +/// Background research agent using the responses protocol with Azure OpenAI. +/// Processes requests asynchronously — the SDK handles background mode, +/// polling, and cancellation automatically. +/// +public class BackgroundResearchHandler : ResponseHandler +{ + private const string SystemPrompt = + "You are a research analyst. 
When given a topic, produce a thorough " + + "multi-section analysis report. Include:\n" + + "1. Executive Summary\n" + + "2. Background & Context\n" + + "3. Key Findings (at least 3)\n" + + "4. Implications & Recommendations\n" + + "5. Conclusion\n\n" + + "Be detailed and substantive. Target 500-800 words."; + + private readonly ChatClient _chatClient; + + public BackgroundResearchHandler(ChatClient chatClient) => _chatClient = chatClient; + + public override IAsyncEnumerable CreateAsync( + CreateResponse request, + ResponseContext context, + CancellationToken cancellationToken) + { + return new TextResponse(context, request, + createTextStream: ct => StreamResearchAsync(context, ct)); + } + + private async IAsyncEnumerable StreamResearchAsync( + ResponseContext context, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + var userInput = await context.GetInputTextAsync(cancellationToken: cancellationToken) + ?? "General AI trends analysis"; + + var messages = new List + { + new SystemChatMessage(SystemPrompt), + new UserChatMessage($"Research topic: {userInput}") + }; + + await foreach (var update in _chatClient.CompleteChatStreamingAsync(messages, cancellationToken: cancellationToken)) + { + foreach (var part in update.ContentUpdate) + { + if (!string.IsNullOrEmpty(part.Text)) + { + yield return part.Text; + } + } + } + } +} diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/README.md b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/README.md new file mode 100644 index 000000000..8b5eb772c --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/README.md @@ -0,0 +1,92 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. 
Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Background Agent (Responses Protocol) — .NET + +This sample demonstrates a long-running agent built with [Azure.AI.AgentServer.Responses](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses) that uses the background execution mode for asynchronous processing. It calls Azure OpenAI to generate a multi-section research analysis, streaming LLM tokens as they arrive via the Responses API event lifecycle. + +## How It Works + +The agent receives a request via `POST /responses` with `"background": true`. The server returns immediately while the handler calls Azure OpenAI in the background, streaming response tokens as `text.delta` events. The caller polls `GET /responses/{id}` until the response reaches a terminal status (`completed`, `failed`, or `incomplete`). In-flight requests can be cancelled via `POST /responses/{id}/cancel`. + +The handler itself stays simple — background mode, polling, and cancellation are all managed by the SDK automatically. + +## Running Locally + +### Prerequisites + +- [.NET 10.0 SDK](https://dotnet.microsoft.com/download/dotnet/10.0) +- Azure CLI installed and authenticated (`az login`) +- An Azure AI Foundry project with an Azure OpenAI deployment + +### Environment Variables + +| Variable | Description | +|---|---| +| `FOUNDRY_PROJECT_ENDPOINT` | Azure AI Foundry project endpoint (auto-injected when deployed) | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Azure OpenAI model deployment name (e.g., `gpt-4.1-mini`) | + +### Start the Agent + +```bash +export FOUNDRY_PROJECT_ENDPOINT="https://your-resource.services.ai.azure.com/api/projects/your-project" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +dotnet run +``` + +The agent starts on `http://localhost:8088/`. 
+ +### Test — Background Mode + +```bash +# Submit a background research analysis +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "research", "input": "Analyze the impact of AI on healthcare", "background": true, "store": true}' + +# Poll for result (use the id from the response) +curl http://localhost:8088/responses/ + +# Cancel an in-flight request +curl -X POST http://localhost:8088/responses//cancel +``` + +### Test — Default Mode (Synchronous) + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "research", "input": "Analyze the impact of AI on healthcare"}' +``` + +## Invoke with azd + +### Local + +```bash +azd ai agent invoke --local "Analyze the impact of AI on healthcare" +``` + +### Remote (after `azd up`) + +```bash +azd ai agent invoke "Analyze the impact of AI on healthcare" +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Project Structure + +``` +background-agent/ +├── Program.cs # Agent entry point and handler implementation +├── background-agent.csproj # .NET project file with dependencies +├── Dockerfile # Container build definition +├── agent.yaml # Agent deployment configuration +├── agent.manifest.yaml # Agent manifest for Foundry +├── .dockerignore # Docker build exclusions +├── .env.example # Example environment variables +├── test-payload.json # Sample request payload for testing +└── README.md # This file +``` diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml new file mode 100644 index 000000000..409107790 --- /dev/null +++ 
b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: background-agent-dotnet-responses +displayName: "Background Agent (.NET, Responses)" +description: > + A long-running research agent that demonstrates the background execution + pattern using the Azure.AI.AgentServer.Responses SDK with Azure OpenAI. + Processes requests asynchronously via background=true, streaming LLM + tokens as they arrive. Supports polling and cancellation. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Background Mode + - .NET +template: + name: background-agent-dotnet-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.yaml new file mode 100644 index 000000000..f8411dab3 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: background-agent-dotnet-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/background-agent.csproj b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/background-agent.csproj new file mode 100644 index 000000000..2c6f27536 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/background-agent/background-agent.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + enable + enable + + + + + + + + + + + + diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore new file mode 100644 index 000000000..a49232de8 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore @@ -0,0 +1,35 @@ +# Build artifacts +**/bin/ +**/obj/ + +# .NET user secrets +**/.secrets/ + +# Local settings +**/*.user +**/*.suo + +# IDE settings +.vs/ +.vscode/ +**/.idea/ + +# Test results +**/TestResults/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env + +# NuGet +.nuget/ diff 
--git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile new file mode 100644 index 000000000..e68a388c4 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src +COPY . . +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app +COPY --from=build /app . +EXPOSE 8088 +ENTRYPOINT ["dotnet", "notetaking-agent.dll"] diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/NoteStore.cs b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/NoteStore.cs new file mode 100644 index 000000000..7265415c9 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/NoteStore.cs @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json; + +public record NoteEntry(string Note, DateTime Timestamp); + +// ────────────────────────────────────────────────────────────────── +// Note storage — JSONL file per session +// ────────────────────────────────────────────────────────────────── + +public static class NoteStore +{ + private static readonly object s_lock = new(); + + private static string GetFilePath(string sessionId) + { + var safeId = string.Join("_", sessionId.Split(Path.GetInvalidFileNameChars())); + // Write to HOME so files are accessible via the Session Files API. + var baseDir = Environment.GetEnvironmentVariable("HOME") + ?? Directory.GetCurrentDirectory(); + return Path.Combine(baseDir, $"notes_{safeId}.jsonl"); + } + + public static NoteEntry SaveNote(string sessionId, string noteText) + { + var entry = new NoteEntry(noteText, DateTime.UtcNow); + var json = JsonSerializer.Serialize(entry); + lock (s_lock) + { + File.AppendAllText(GetFilePath(sessionId), json + Environment.NewLine); + } + return entry; + } + + public static List GetNotes(string sessionId) + { + var path = GetFilePath(sessionId); + if (!File.Exists(path)) return new List(); + + lock (s_lock) + { + return File.ReadAllLines(path) + .Where(line => !string.IsNullOrWhiteSpace(line)) + .Select(line => JsonSerializer.Deserialize(line)) + .Where(entry => entry is not null) + .Select(entry => entry!) + .ToList(); + } + } +} diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Program.cs b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Program.cs new file mode 100644 index 000000000..ba8c0abe5 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/Program.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Runtime.CompilerServices; +using System.Text.Json; +using Azure.AI.AgentServer.Responses; +using Azure.AI.AgentServer.Responses.Models; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Extensions.DependencyInjection; +using OpenAI.Chat; + +// Derive Azure OpenAI endpoint from the auto-injected Foundry project endpoint +if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"))) + Console.Error.WriteLine( + "[WARNING] APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent " + + "to Application Insights. Set it to enable local telemetry. " + + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"); + +var foundryEndpoint = Environment.GetEnvironmentVariable("FOUNDRY_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("FOUNDRY_PROJECT_ENDPOINT environment variable is not set."); +var azureOpenAIEndpoint = new Uri(foundryEndpoint).GetLeftPart(UriPartial.Authority); +var deployment = Environment.GetEnvironmentVariable("AZURE_AI_MODEL_DEPLOYMENT_NAME") + ?? 
throw new InvalidOperationException("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set."); + +var aoaiClient = new AzureOpenAIClient( + new Uri(azureOpenAIEndpoint), + new DefaultAzureCredential()); +var chatClient = aoaiClient.GetChatClient(deployment); + +ResponsesServer.Run(configure: builder => +{ + builder.Services.AddSingleton(new LlmConfig(chatClient)); +}); + +// ────────────────────────────────────────────────────────────────── +// Handler +// ────────────────────────────────────────────────────────────────── + +public class NoteTakingHandler : ResponseHandler +{ + private readonly LlmConfig _llm; + + public NoteTakingHandler(LlmConfig llm) => _llm = llm; + + public override IAsyncEnumerable CreateAsync( + CreateResponse request, + ResponseContext context, + CancellationToken cancellationToken) + { + return new TextResponse(context, request, + createTextStream: ct => ProcessAsync(request, context, ct)); + } + + private async IAsyncEnumerable ProcessAsync( + CreateResponse request, + ResponseContext context, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + var userMessage = await context.GetInputTextAsync(cancellationToken: cancellationToken) ?? ""; + var sessionId = request.AgentSessionId ?? "default"; + + await foreach (var token in ProcessWithLlmAsync(userMessage, sessionId, cancellationToken)) + { + yield return token; + } + } + + // ── LLM mode: Azure OpenAI with function calling ── + + private async IAsyncEnumerable ProcessWithLlmAsync( + string userMessage, + string sessionId, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + var tools = new ChatTool[] + { + ChatTool.CreateFunctionTool( + "save_note", + "Save a note with the current timestamp. 
Use this when the user asks to save, add, or create a note.", + BinaryData.FromString(""" + { + "type": "object", + "properties": { + "note": { + "type": "string", + "description": "The note text to save" + } + }, + "required": ["note"] + } + """)), + ChatTool.CreateFunctionTool( + "get_notes", + "Retrieve all saved notes. Use this when the user asks to get, list, show, or view their notes.", + BinaryData.FromString(""" + { + "type": "object", + "properties": {}, + "required": [] + } + """)) + }; + + var messages = new List + { + new SystemChatMessage( + "You are a helpful note-taking assistant. You can save notes and retrieve them. " + + "When the user asks to save a note, extract the note content and call save_note. " + + "When the user asks to see their notes, call get_notes. " + + "Always respond in a friendly, concise manner."), + new UserChatMessage(userMessage) + }; + + var options = new ChatCompletionOptions(); + foreach (var tool in tools) + options.Tools.Add(tool); + + // First call — may return tool calls + var completion = await _llm.ChatClient.CompleteChatAsync(messages, options, cancellationToken); + + // If tool calls are requested, execute them and send results back + if (completion.Value.FinishReason == ChatFinishReason.ToolCalls) + { + messages.Add(new AssistantChatMessage(completion.Value)); + + foreach (var toolCall in completion.Value.ToolCalls) + { + var result = ExecuteToolCall(toolCall.FunctionName, toolCall.FunctionArguments, sessionId); + messages.Add(new ToolChatMessage(toolCall.Id, result)); + } + + // Second call — get natural language response + var finalCompletion = await _llm.ChatClient.CompleteChatAsync(messages, options, cancellationToken); + + var response = finalCompletion.Value.Content[0].Text ?? ""; + foreach (var word in SplitIntoTokens(response)) + { + yield return word; + await Task.Delay(30, cancellationToken); + } + } + else + { + // Direct text response (no tool calls) + var response = completion.Value.Content[0].Text ?? 
""; + foreach (var word in SplitIntoTokens(response)) + { + yield return word; + await Task.Delay(30, cancellationToken); + } + } + } + + // ── Helpers ── + + private static string ExecuteToolCall(string functionName, BinaryData arguments, string sessionId) + { + if (functionName == "save_note") + { + var args = JsonSerializer.Deserialize(arguments); + var noteText = args.GetProperty("note").GetString() ?? ""; + var entry = NoteStore.SaveNote(sessionId, noteText); + return JsonSerializer.Serialize(new { status = "saved", note = entry.Note, timestamp = entry.Timestamp }); + } + else if (functionName == "get_notes") + { + var notes = NoteStore.GetNotes(sessionId); + return JsonSerializer.Serialize(new { count = notes.Count, notes = notes.Select(n => new { n.Note, n.Timestamp }) }); + } + return JsonSerializer.Serialize(new { error = $"Unknown function: {functionName}" }); + } + + private static IEnumerable SplitIntoTokens(string text) + { + var words = text.Split(' '); + for (int i = 0; i < words.Length; i++) + { + yield return i == 0 ? words[i] : $" {words[i]}"; + } + } +} + +// ────────────────────────────────────────────────────────────────── +// Config record for DI +// ────────────────────────────────────────────────────────────────── + +public record LlmConfig(ChatClient ChatClient); diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/README.md b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/README.md new file mode 100644 index 000000000..06bd67dd5 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/README.md @@ -0,0 +1,153 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. 
AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Note-Taking Agent — Responses Protocol + +This sample demonstrates a note-taking agent built with [Azure.AI.AgentServer.Responses](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses) that uses **Azure OpenAI function calling** for intent understanding and **local JSONL file storage** for session-persistent notes. + +## How It Works + +The agent receives natural language messages via `POST /responses` and uses Azure OpenAI with two tool definitions — `save_note` and `get_notes` — to understand user intent. When the LLM returns a tool call, the agent executes it locally (reads/writes a JSONL file) and sends the result back to the LLM for a natural language response. + +Notes are stored per session in `notes_{session_id}.jsonl` files, demonstrating **session persistence** — notes survive across multiple invocations within the same session. + +## Running Locally + +### Prerequisites + +- .NET 10.0 SDK +- Azure CLI installed and authenticated (`az login`) +- Foundry project with a deployed model (e.g., `gpt-4.1-mini`) + +### Build + +```bash +dotnet build +``` + +### Start the Agent + +```bash +cp .env.example .env # then edit values +export FOUNDRY_PROJECT_ENDPOINT="https://your-project.services.ai.azure.com/api/projects/your-project" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +dotnet run +``` + +The agent starts on `http://localhost:8088/`. + +### Test + +#### 1. Test with curl + +##### Save a note + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "save a note - book reservation for dinner", "agent_session_id": "my-session"}' \ + --no-buffer +``` + +##### Get all notes + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "get all my notes", "agent_session_id": "my-session"}' \ + --no-buffer +``` + +#### 2. 
Test in Agent Inspector + +Once the agent is running, open **Agent Inspector** in VS Code to interactively send messages and view responses. + +![Agent Inspector](../../../../assets/agent-inspector-responses.png) + +##### Save a note + +``` +save a note - book reservation for dinner +``` + +##### Save another note + +``` +save a note - buy groceries +``` + +##### Get all notes + +``` +get all my notes +``` + +#### 3. Test with azd + +> **Note:** Run all `azd` commands from this sample directory (where `azure.yaml` is located) so that session state is persisted across calls. + +##### Save a note + +```bash +azd ai agent invoke --local "save a note - book reservation for dinner" +``` + +##### Save another note + +```bash +azd ai agent invoke --local "save a note - buy groceries" +``` + +##### Get all notes + +```bash +azd ai agent invoke --local "get all my notes" +``` + +##### Start a new session + +```bash +azd ai agent invoke --local --new-session "get all my notes" +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', 'message': 'The principal lacks the required data action Microsoft.CognitiveServices/accounts/OpenAI/deployments/chat/completions/action to perform POST /openai/deployments/{deployment-id}/chat/completions operation.'}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. 
Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml new file mode 100644 index 000000000..fbc0f1341 --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: notetaking-agent-dotnet-responses +displayName: "Notetaking Agent (.NET, Responses)" +description: > + A note-taking agent using the Responses protocol with Azure OpenAI function + calling in C#. Demonstrates tool use (save_note, get_notes) with per-session + JSONL persistence. 
+metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Function Calling + - Note Taking + - .NET +template: + name: notetaking-agent-dotnet-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml new file mode 100644 index 000000000..a446337ea --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: notetaking-agent-dotnet-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/notetaking-agent.csproj b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/notetaking-agent.csproj new file mode 100644 index 000000000..62994e59e --- /dev/null +++ b/samples/csharp/hosted-agents/bring-your-own/responses/notetaking-agent/notetaking-agent.csproj @@ -0,0 +1,18 @@ + + + + net10.0 + enable + enable + + + + + + + + + + + + diff --git a/samples/csharp/toolbox/README.md b/samples/csharp/toolbox/README.md new file mode 100644 index 000000000..4b7136fd7 --- /dev/null +++ b/samples/csharp/toolbox/README.md @@ -0,0 +1,88 @@ + +**IMPORTANT!** All 
samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples is solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# .NET Toolbox Samples + +.NET samples for running Microsoft Foundry agents connected to a **Toolbox** via the +MCP Streamable HTTP protocol. Two agent framework options are provided — pick the one +that matches your stack. + +## Which sample should I use? 
+ +| I want to… | Use | +|-------------|-----| +| Build a .NET agent with a custom ReAct loop | [`maf/`](./maf/) | +| Create, list, and delete toolbox resources from code | [`crud-sample/`](./crud-sample/) | + +## Sample Comparison + +| Capability | `maf/` | +|-----------|:---:| +| Multi-turn conversation | ✅ | +| Streaming (SSE) | ✅ | +| Tool schema sanitization | ✅ | +| SDK | Agent Framework | + +All agent samples: +- Serve the **Responses Protocol** on port `8088` +- Authenticate to the Toolbox endpoint using `DefaultAzureCredential` (bearer token, auto-refreshed) +- Send the `Foundry-Features: Toolboxes=V1Preview` header on every MCP request (required) + +## Prerequisites (all samples) + +- [.NET 10 SDK](https://dotnet.microsoft.com/download) +- A [Microsoft Foundry](https://ai.azure.com) account and project +- A toolbox already created in that project (see [`crud-sample/`](./crud-sample/)) +- Azure CLI installed and logged in: + + ```bash + az login + ``` + +## What is a Toolbox? + +A **Toolbox** is a named collection of tools (MCP, OpenAPI, Azure AI Search, Web Search, +File Search, Code Interpreter, A2A) hosted in your Microsoft Foundry project. Agents +connect to a toolbox via its MCP endpoint and dynamically discover available tools at startup. + +The toolbox MCP endpoint URL supports two forms: + +``` +# Latest version: +https://.services.ai.azure.com/api/projects//toolboxes//mcp?api-version=v1 + +# Pinned to a specific version: +https://.services.ai.azure.com/api/projects//toolboxes//versions//mcp?api-version=v1 +``` + +> **Note:** The `?api-version=v1` query parameter is **required**. Requests without it return HTTP 400. + +## How to Get Your Project Endpoint + +1. Go to [ai.azure.com](https://ai.azure.com) and open your project. +2. Navigate to **Settings** → **Project details**. +3. 
Copy the **Project endpoint** value — it looks like: + + ``` + https://.services.ai.azure.com/api/projects/ + ``` + +## Related Python Samples + +The equivalent Python samples are in [`../../python/toolbox/`](../../python/toolbox/). +For toolbox creation SDK examples, see [`../../python/toolbox/sample_toolboxes_crud.py`](../../python/toolbox/sample_toolboxes_crud.py). + +## Contributing + +This project welcomes contributions and suggestions. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties' policies. diff --git a/samples/python/hosted-agents/README.md b/samples/python/hosted-agents/README.md new file mode 100644 index 000000000..8fb98117e --- /dev/null +++ b/samples/python/hosted-agents/README.md @@ -0,0 +1,193 @@ +# Microsoft Foundry — Hosted Agent Samples + +Samples for building, deploying, and managing hosted agents on [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/hosted-agents). Each sample is a starter template — fork it, change the system prompt and tools, deploy with `azd up`. + +> **Every sample includes Application Insights and OpenTelemetry tracing out of the box.** You get production-ready logging, distributed traces, and metrics from the first sample you run. + +### Quickstart + +> **Prerequisites:** Install the Azure Developer CLI with the Foundry AI extension. See [Set up azd for hosted agents](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=azd) if you haven't already. 
+ +```bash +mkdir my-agent && cd my-agent +azd ai agent init -m ../agent-framework/responses/01-basic/agent.manifest.yaml +azd up +``` + +You'll have a running agent in minutes. Or, if you prefer VS Code, use the [Foundry extension quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=vscode) to build and deploy directly from the editor. + +Read on to pick the right sample for your scenario, or jump to the [learning path](#learning-path) for a guided walkthrough. + +--- + +## Two protocols: Responses and Invocations + +Hosted agents support two protocols. Pick the one that matches your scenario. + +| Scenario | Protocol | Why | +|----------|----------|-----| +| Conversational chatbot or assistant | **Responses** | The platform manages conversation history, streaming events, and session lifecycle — use any OpenAI-compatible SDK as the client. | +| Agent published to Teams or M365 | **Responses** + **Activity** | The Responses protocol powers the agent logic; the Activity protocol handles the Teams channel integration. | +| Multi-turn Q&A with RAG or tools | **Responses** | Built-in `conversation_id` threading and tool result handling. | +| Background / async processing | **Responses** | `background: true` with platform-managed polling and cancellation — no custom code needed. | +| Webhook receiver (GitHub, Stripe, Jira, etc.) | **Invocations** | The external system sends its own payload format — you can't change it to match `/responses`. | +| Non-conversational processing (classification, extraction, batch) | **Invocations** | The input is structured data, not a chat message. Arbitrary JSON in, arbitrary JSON out. | +| Custom streaming protocol (AG-UI, etc.) | **Invocations** | AG-UI and other agent-UI protocols aren't OpenAI-compatible — you need raw SSE control. 
| +| Async job with custom progress, polling, or non-OpenAI callers | **Invocations** | Custom progress reporting, intermediate results, and polling semantics beyond what Responses `background: true` provides. | +| Protocol bridge (GitHub Copilot, proprietary systems) | **Invocations** | The caller has its own protocol that doesn't map to `/responses`. | +| Inter-service orchestration (Durable Functions, Logic Apps) | **Invocations** | The caller sends structured task payloads, not chat messages. | + +> **Still not sure?** Start with **Responses**. You can always add an Invocations endpoint later — a hosted agent can support both protocols simultaneously by listing both in `agent.yaml`. + +> **Other protocols:** Hosted agents can also expose the **Activity** protocol (for Teams and M365 integration) and the **A2A** protocol (for agent-to-agent delegation). + +
+Protocol comparison details + +| | **Responses** | **Invocations** | +|---|---|---| +| **Best for** | Most agents — the platform manages conversation history, streaming lifecycle, and background polling | Agents that need full HTTP control, custom payloads, or custom async workflows | +| **Payload** | OpenAI-compatible `/responses` contract | Arbitrary JSON via `/invocations` — you define the schema | +| **Client SDK** | Any OpenAI-compatible SDK (Python, JS, C#) works out of the box | Custom client — you define the contract | +| **Session history** | Framework-managed via `conversation_id` | You manage sessions (in-memory, Cosmos DB, etc.) | +| **Streaming** | Framework-managed `ResponseEventStream` with lifecycle events (`created`, `in_progress`, `delta`, `completed`) | Raw SSE — you format and write events directly | +| **Background / long-running** | Built-in (`background: true` + platform-managed polling) | Manual task tracking and custom polling endpoints | +| **Server SDK** | `azure-ai-agentserver-responses` | `azure-ai-agentserver-invocations` | +| **agent.yaml** | `protocol: responses`, `version: v0.1.0` | `protocol: invocations`, `version: v0.0.1` | + +
+ +--- + +## Pick your framework + +Hosted agents run any code you can put in a container. These samples cover three frameworks — pick the one that matches where you are. + +| | **Agent Framework** | **LangGraph** | **Bring Your Own** | +|---|---|---|---| +| **Best for** | Starting fresh on Foundry — also supports AutoGen and Semantic Kernel | Already using LangChain / LangGraph | Already built with CrewAI or your own stack | +| **SDK** | `agent-framework-foundry-hosting` (includes core, openai, foundry, orchestrations) | `azure-ai-agentserver-responses` / `azure-ai-agentserver-invocations` | `azure-ai-agentserver-responses` / `azure-ai-agentserver-invocations`, or `azure-ai-agentserver-core` for fully custom HTTP | +| **Foundry integration** | Native — sessions, tools, memory, streaming all built in | Adapter — sessions and tools wired through LangGraph adapter | Core adapter hosts the web server and exposes `/invocations` and `/responses` endpoints; you supply the agent logic | +| **Protocols** | Responses and Invocations | Responses and Invocations | Responses and Invocations | +| **Language support** | Python and C# | Python only | Any language (Python and C# samples provided) | +| **Start here** | [Basic Agent →](agent-framework/responses/01-basic/) | [LangGraph Chat →](bring-your-own/responses/langgraph-chat/) | [Hello World →](bring-your-own/responses/hello-world/) | + +> **Which should I choose?** If you're building a new agent — or already using AutoGen or Semantic Kernel — start with **Agent Framework**. It has the tightest Foundry integration, supports those orchestrators natively, and has the most samples to learn from. If you already have LangGraph code, use the **LangGraph** adapter to bring it to Foundry. If you have an existing agent in another framework (e.g., CrewAI), **Bring Your Own** shows how to containerize and deploy it unchanged. + +--- + +## Agent Framework samples + +The recommended path for building hosted agents. 
Agent Framework gives you native session management, built-in tool wiring, streaming, and the full Foundry feature set. + +Samples are split by protocol. Start with **Responses** (the common path) — then explore **Invocations** when you need full HTTP control or long-running workflows. + +### Responses protocol + +The platform manages conversation history, streaming lifecycle, and background execution. This is the default for most agents. + +#### Learning path + +**New to hosted agents?** Start here and work through in order: + +1. **[Basic agent & Multi-Turn Sessions](./agent-framework/responses/01-basic/)** — Deploy your first agent, have a conversation with it. +2. **[Tools](./agent-framework/responses/02-tools/)** — Add local tools to your agent. +3. **[MCP Tools](./agent-framework/responses/03-mcp/)** — Connect your agent to a remote MCP server to access tools, retrieval, and more. +4. **[Foundry Toolbox](./agent-framework/responses/04-foundry-toolbox/)** — Wire your agent to a Foundry Toolbox for managed tool access. +5. **[Workflows](./agent-framework/responses/05-workflows/)** — Compose multiple agents into sequential pipelines. + +### Invocations protocol + +Full control over the HTTP request/response cycle. You define the payload schema, manage session state, and implement polling for long-running operations. Use this when you need an arbitrary payload format or async workflows that don't fit the OpenAI `/responses` contract. + +> **Every capability works with both protocols.** Tools, RAG, memory, evaluations, Teams publishing, multi-agent — all of these work with Invocations. The Invocations samples below focus on the protocol mechanics (how you handle requests, streaming, sessions, and long-running tasks). To add a capability like knowledge grounding or tools, learn the Invocations pattern from these samples, then adapt the relevant Responses sample — the capability code is the same, only the HTTP handler differs. 
+ +| Sample | What it shows | +|--------|---------------| +| **[Basic Invocations Agent](./agent-framework/invocations/01-basic/)** | Minimal invocations agent — shows the invocations handler pattern with Agent Framework. | + +--- + +## LangGraph samples + +LangGraph samples are included in the **Bring Your Own** section below — see [`bring-your-own/responses/langgraph-chat/`](bring-your-own/responses/langgraph-chat/) and [`bring-your-own/invocations/langgraph-chat/`](bring-your-own/invocations/langgraph-chat/). + +--- +## Bring Your Own Framework samples + +Already built an agent with CrewAI or your own code? The protocol SDKs (`azure-ai-agentserver-responses` / `azure-ai-agentserver-invocations`) give you the hosted agent HTTP contract — they host the web server, expose the right endpoint, and handle request parsing — so you just plug in your agent logic. This is the recommended path for BYO to ensure your agent stays aligned with the platform contract as new endpoints are added. For lower-level control, the **Core adapter** (`azure-ai-agentserver-core`) gives you managed hosting, OpenTelemetry tracing, and health endpoints, but you handle the protocol details yourself. + +> **Note:** If you're using AutoGen or Semantic Kernel, you don't need BYO — Agent Framework supports them natively. See the [Agent Framework samples](#agent-framework-samples) instead. + +### Responses protocol + +| Sample | What it shows | +|--------|--------------| +| **[Hello World](bring-your-own/responses/hello-world/)** | Minimal agent — calls a Foundry model via the Responses API and returns the reply. The simplest possible BYO starting point. | +| **[LangGraph Chat](bring-your-own/responses/langgraph-chat/)** | LangGraph conversational agent hosted on Foundry with multi-turn history via the Responses protocol. | +| **[Notetaking Agent](bring-your-own/responses/notetaking-agent/)** | Agent that takes and retrieves notes using a custom tool. 
| +| **[Toolbox](bring-your-own/responses/toolbox/)** | BYO agent wired to a Foundry Toolbox MCP endpoint for tool access. | +| **[Background Agent](bring-your-own/responses/background-agent/)** | Long-running background processing with async execution. | + +### Invocations protocol + +| Sample | What it shows | +|--------|--------------| +| **[Hello World](bring-your-own/invocations/hello-world/)** | Minimal agent — arbitrary JSON in, streaming SSE out. The simplest possible BYO invocations starting point. | +| **[LangGraph Chat](bring-your-own/invocations/langgraph-chat/)** | LangGraph conversational agent over the Invocations protocol with client-managed sessions. | +| **[Notetaking Agent](bring-your-own/invocations/notetaking-agent/)** | Note-taking agent with the Invocations protocol. | +| **[Toolbox](bring-your-own/invocations/toolbox/)** | BYO invocations agent wired to a Foundry Toolbox MCP endpoint. | +| **[AG-UI](bring-your-own/invocations/ag-ui/)** | Agent using the AG-UI streaming protocol via the Invocations endpoint. | +| **[GitHub Copilot](bring-your-own/invocations/github-copilot/)** | Agent that integrates with GitHub Copilot as the AI backbone. | +| **[Human-in-the-Loop](bring-your-own/invocations/human-in-the-loop/)** | Long-running agent that pauses for human approval before continuing. | + +## Deploy any sample + +Every sample deploys the same way. You need the [Azure Developer CLI (azd)](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and a Foundry project with a model deployment. 
+ +```bash +mkdir my-agent && cd my-agent + +# Scaffold from the sample manifest — azd generates all the deployment files +azd ai agent init -m ../agent-framework/responses/01-basic/agent.manifest.yaml + +# Build, push, and deploy +azd up + +# Clean up when done +azd down +``` + +### Other ways to invoke your agent + +| Method | When to use | +|--------|------------| +| `azd ai agent invoke` | Quick CLI test after deploy | +| [VS Code Foundry extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=vscode) | One-click invoke from the editor | +| `curl` | Each sample README includes curl examples | + +## Prerequisites + +- **Azure subscription** with access to Microsoft Foundry +- **Azure Developer CLI (azd)** — [install](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?pivots=azd) +- **Python 3.12+** + +That's it. `azd ai agent init` and the VS Code Foundry extension will create a Foundry project and deploy a model for you if you don't already have one. Container images are built remotely using ACR Tasks by default — **Docker is not required** unless you want to build locally. 
+ +## Resources + +- [Microsoft Foundry documentation](https://learn.microsoft.com/en-us/azure/foundry/what-is-foundry?view=foundry) +- [Hosted agents overview](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/hosted-agents?view=foundry) +- [Deploy a hosted agent](https://learn.microsoft.com/en-us/azure/foundry/agents/how-to/deploy-hosted-agent) +- **Responses protocol:** [Python SDK (`azure-ai-agentserver-responses`)](https://pypi.org/project/azure-ai-agentserver-responses/) · [C# SDK (`Azure.AI.AgentServer.Responses`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Responses) +- **Invocations protocol:** [Python SDK (`azure-ai-agentserver-invocations`)](https://pypi.org/project/azure-ai-agentserver-invocations/) · [C# SDK (`Azure.AI.AgentServer.Invocations`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Invocations) +- **Core adapter (BYO):** [Python SDK (`azure-ai-agentserver-core`)](https://pypi.org/project/azure-ai-agentserver-core/) · [C# SDK (`Azure.AI.AgentServer.Core`)](https://www.nuget.org/packages/Azure.AI.AgentServer.Core) +- [Azure Developer CLI (azd)](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) + +## Contributing + +This project welcomes contributions and suggestions. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. 
diff --git a/samples/python/hosted-agents/agent-framework/README.md b/samples/python/hosted-agents/agent-framework/README.md new file mode 100644 index 000000000..05fe66fd5 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/README.md @@ -0,0 +1,31 @@ +# Agent Framework Samples + +This directory contains samples that demonstrate how to use the Agent Framework to host agents with different capabilities and configurations. Each sample includes a README with instructions on how to set up, run, and interact with the agent. + +## Environment setup + +1. Navigate to the sample directory you want to run. For example: + + ```bash + python -m venv .venv + + # Windows + .venv\Scripts\Activate + + # macOS/Linux + source .venv/bin/activate + ``` + +2. Install dependencies: + + ```bash + pip install -r requirements.txt + ``` + +3. Create a `.env` file with your Foundry configuration following the `env.example` file in the sample. + +4. Make sure you are logged in with the Azure CLI: + + ```bash + az login + ``` diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/README.md b/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/README.md deleted file mode 100644 index 6eecdc908..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/README.md +++ /dev/null @@ -1,146 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. 
Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a Microsoft Agent Framework chat agent that can use **Foundry tools** -(for example, web search and MCP tools), host it using the -[Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), -and deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. - -## How It Works - -### Foundry tools integration - -In [main.py](main.py), the agent is created using `AzureOpenAIChatClient` and is configured with -`FoundryToolsChatMiddleware`. 
The middleware enables tool usage via Foundry-supported tool types: - -- `web_search_preview` (foundry configured tools) -- `mcp` (connected mcp tool, configured with a Foundry project connection id) - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. -The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure OpenAI Service** - - Endpoint configured - - Chat model deployed (e.g., `gpt-4o-mini` or `gpt-4`) - - Note your endpoint URL and deployment name - -2. **Azure AI Foundry Project** - - Project created in [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry#microsoft-foundry-portals) - - Add 'Microsoft Learn' MCP from foundry tool catalog. - ![microsoft_learn](microsoft_learn.png) - -3. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -4. 
**Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` - The deployment name for your chat model (required) -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint (required) -- `AZURE_AI_PROJECT_TOOL_CONNECTION_ID` - Foundry project connection id used to configure the `mcp` tool (optional) - -This sample loads environment variables from a local `.env` file if present. - -**Finding your tool connection id** (portal names may vary): -1. Go to [Azure AI Foundry portal](https://ai.azure.com) -2. Navigate to your project -> Build -> Tools -3. Find your connected MCP tool (e.g., "Microsoft Learn") -4. Copy your tool's name and set it as `AZURE_AI_PROJECT_TOOL_CONNECTION_ID` - -```powershell -# Replace with your actual values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini" -$env:AZURE_AI_PROJECT_ENDPOINT="https://{resource}.services.ai.azure.com/api/projects/{project-name}" -$env:AZURE_AI_PROJECT_TOOL_CONNECTION_ID="" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "How to deploy foundry hosted agents?" 
- stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "How to deploy foundry hosted agents?","stream":false}' -``` - -The agent may use Foundry tools (for example `web_search_preview` and/or `mcp`) as needed to answer. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/agent.yaml b/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/agent.yaml deleted file mode 100644 index 304b5fcbe..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Unique identifier/name for this agent -name: af-agent-with-foundry-tools -# Brief description of what this agent does -description: > - An AI agent that uses Azure OpenAI with a Hosted Model Context Protocol (MCP) server. 
- The agent answers questions by searching Microsoft Learn documentation using MCP tools. -metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Model Context Protocol - - MCP -template: - name: af-agent-with-foundry-tools - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME - value: "{{chat}}" - - name: AZURE_AI_PROJECT_TOOL_CONNECTION_ID - value: MicrosoftLearn -resources: - - kind: model - id: gpt-5 - name: chat diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/main.py b/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/main.py deleted file mode 100644 index dc54b4d5d..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/main.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -from dotenv import load_dotenv -from agent_framework.azure import AzureOpenAIChatClient - -from azure.ai.agentserver.agentframework import from_agent_framework, FoundryToolsChatMiddleware -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -# Load environment variables from .env file for local development -# load_dotenv() - -# Create a token provider that refreshes tokens automatically for long-running servers -# This avoids 401 errors when the initial token expires (typically after 1 hour) -_credential = DefaultAzureCredential() -_token_provider = get_bearer_token_provider(_credential, "https://cognitiveservices.azure.com/.default") - - -def main(): - required_env_vars = [ - "AZURE_OPENAI_ENDPOINT", - "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", - "AZURE_AI_PROJECT_ENDPOINT", - ] - for env_var in required_env_vars: - assert env_var in os.environ and os.environ[env_var], ( - f"{env_var} environment variable must be set." 
- ) - - tools=[{"type": "web_search_preview"}] - if project_tool_connection_id := os.environ.get("AZURE_AI_PROJECT_TOOL_CONNECTION_ID"): - tools.append({"type": "mcp", "project_connection_id": project_tool_connection_id}) - - # Use token provider for automatic token refresh in long-running servers - chat_client = AzureOpenAIChatClient(ad_token_provider=_token_provider, - middleware=FoundryToolsChatMiddleware(tools)) - agent = chat_client.create_agent( - name="FoundryToolAgent", - instructions="You are a helpful assistant with access to various tools." - ) - - from_agent_framework(agent).run() - -if __name__ == "__main__": - main() diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/microsoft_learn.png b/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/microsoft_learn.png deleted file mode 100644 index 0a8ebaafb..000000000 Binary files a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/microsoft_learn.png and /dev/null differ diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/requirements.txt b/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/requirements.txt deleted file mode 100644 index 1e93c1a80..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.dockerignore b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.dockerignore deleted file mode 100644 index 779bc67aa..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.dockerignore +++ /dev/null @@ -1,66 +0,0 @@ -# Virtual environments -.venv/ -venv/ -env/ -.python-version - -# Environment files with secrets -.env -.env.* -*.local - -# Python build artifacts -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python 
-build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Testing -.tox/ -.nox/ -.coverage -.coverage.* -htmlcov/ -.pytest_cache/ -.mypy_cache/ - -# IDE and OS files -.DS_Store -.idea/ -.vscode/ -*.swp -*.swo -*~ - -# Foundry config -.foundry/ -build-source-*/ - -# Git -.git/ -.gitignore - -# Docker -.dockerignore - -# Documentation -docs/ -*.md -!README.md -LICENSE diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.vscode/launch.json b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.vscode/launch.json deleted file mode 100644 index 341fcbe80..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/.vscode/launch.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Python: Seattle Hotel Agent", - "type": "debugpy", - "request": "launch", - "program": "${workspaceFolder}/main.py", - "console": "integratedTerminal", - "cwd": "${workspaceFolder}", - "envFile": "${workspaceFolder}/.env", - "justMyCode": true - } - ] -} diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/Dockerfile b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/Dockerfile deleted file mode 100644 index abec2e20e..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY requirements.txt . - -RUN pip install --no-cache-dir -r requirements.txt - -COPY main.py . 
- -EXPOSE 8088 - -CMD ["python", "main.py"] \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/README.md b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/README.md deleted file mode 100644 index c9e550fac..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/README.md +++ /dev/null @@ -1,166 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. 
- -# What this sample demonstrates - -This sample demonstrates a **key advantage of code-based hosted agents**: - -- **Local Python tool execution** - Run custom Python functions as agent tools - -Code-based agents can execute **any Python code** you write. This sample includes a Seattle Hotel Agent with a `get_available_hotels` tool that searches for available hotels based on check-in/check-out dates and budget preferences. - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and can be deployed to Microsoft Foundry using the Azure Developer CLI. - -## How It Works - -### Local Tools Integration - -In [main.py](main.py), the agent uses a local Python function (`get_available_hotels`) that simulates a hotel availability API. This demonstrates how code-based agents can execute custom server-side logic that prompt agents cannot access. - -The tool accepts: -- **check_in_date** - Check-in date in YYYY-MM-DD format -- **check_out_date** - Check-out date in YYYY-MM-DD format -- **max_price** - Maximum price per night in USD (optional, defaults to $500) - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. - -### Agent Deployment - -The hosted agent can be deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. 
**Azure AI Foundry Project** - - Project created in [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry#microsoft-foundry-portals) - - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`) - - Note your project endpoint URL and model deployment name - -2. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -3. **Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables (matching `agent.yaml`): - -- `PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint URL (required) -- `MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (defaults to `gpt-4.1-mini`) - -This sample loads environment variables from a local `.env` file if present. 
- -Create a `.env` file in this directory with the following content: - -``` -PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ -MODEL_DEPLOYMENT_NAME=gpt-4.1-mini -``` - -Or set them via PowerShell: - -```powershell -# Replace with your actual values -$env:PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" -``` - -### Setting Up a Virtual Environment - -It's recommended to use a virtual environment to isolate project dependencies: - -**macOS/Linux:** -```bash -python -m venv .venv -source .venv/bin/activate -``` - -**Windows (PowerShell):** -```powershell -python -m venv .venv -.\.venv\Scripts\Activate.ps1 -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```bash -pip install -r requirements.txt -``` - -The required packages are: -- `azure-ai-agentserver-agentframework` - Agent Framework and AgentServer SDK -- `python-dotenv` - Load environment variables from `.env` file -- `azure-identity` - Azure authentication -- `azure-monitor-opentelemetry-exporter` - Azure Monitor telemetry export -- `opentelemetry-sdk` / `opentelemetry-api` - OpenTelemetry for tracing - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. 
- -### Interacting with the Agent - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under $200 per night" - stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "Find me hotels in Seattle for March 20-23, 2025 under $200 per night","stream":false}' -``` - -The agent will use the `get_available_hotels` tool to search for available hotels matching your criteria. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
\ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/agent.yaml b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/agent.yaml deleted file mode 100644 index 728fc1629..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Unique identifier/name for this agent -name: seattle-hotel-agent -# Brief description of what this agent does -description: > - A travel assistant agent that helps users find hotels in Seattle. - Demonstrates local Python tool execution - a key advantage of code-based - hosted agents over prompt agents. -metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Local Tools - - Travel Assistant - - Hotel Search -template: - name: seattle-hotel-agent - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} - - name: MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4.1-mini - name: chat diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/main.py b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/main.py deleted file mode 100644 index e9d5d34bf..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/main.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Seattle Hotel Agent - A simple agent with a tool to find hotels in Seattle. -Uses Microsoft Agent Framework with Azure AI Foundry. -Ready for deployment to Foundry Hosted Agent service. 
-""" - -import asyncio -import os -from typing import Annotated -from datetime import datetime -from dotenv import load_dotenv - -load_dotenv(override=True) - -from agent_framework import Agent -from agent_framework.azure import AzureAIAgentClient -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import DefaultAzureCredential - -# Configure these for your Foundry project -# Read the explicit variables present in the .env file -PROJECT_ENDPOINT = os.getenv("PROJECT_ENDPOINT") # e.g., "https://.services.ai.azure.com" -MODEL_DEPLOYMENT_NAME = os.getenv("MODEL_DEPLOYMENT_NAME", "gpt-4.1-mini") # Your model deployment name e.g., "gpt-4.1-mini" - - -# Simulated hotel data for Seattle -SEATTLE_HOTELS = [ - {"name": "Contoso Suites", "price_per_night": 189, "rating": 4.5, "location": "Downtown"}, - {"name": "Fabrikam Residences", "price_per_night": 159, "rating": 4.2, "location": "Pike Place Market"}, - {"name": "Alpine Ski House", "price_per_night": 249, "rating": 4.7, "location": "Seattle Center"}, - {"name": "Margie's Travel Lodge", "price_per_night": 219, "rating": 4.4, "location": "Waterfront"}, - {"name": "Northwind Inn", "price_per_night": 139, "rating": 4.0, "location": "Capitol Hill"}, - {"name": "Relecloud Hotel", "price_per_night": 99, "rating": 3.8, "location": "University District"}, -] - - -def get_available_hotels( - check_in_date: Annotated[str, "Check-in date in YYYY-MM-DD format"], - check_out_date: Annotated[str, "Check-out date in YYYY-MM-DD format"], - max_price: Annotated[int, "Maximum price per night in USD (optional)"] = 500, -) -> str: - """ - Get available hotels in Seattle for the specified dates. - This simulates a call to a fake hotel availability API. 
- """ - try: - # Parse dates - check_in = datetime.strptime(check_in_date, "%Y-%m-%d") - check_out = datetime.strptime(check_out_date, "%Y-%m-%d") - - # Validate dates - if check_out <= check_in: - return "Error: Check-out date must be after check-in date." - - nights = (check_out - check_in).days - - # Filter hotels by price - available_hotels = [ - hotel for hotel in SEATTLE_HOTELS - if hotel["price_per_night"] <= max_price - ] - - if not available_hotels: - return f"No hotels found in Seattle within your budget of ${max_price}/night." - - # Build response - result = f"Available hotels in Seattle from {check_in_date} to {check_out_date} ({nights} nights):\n\n" - - for hotel in available_hotels: - total_cost = hotel["price_per_night"] * nights - result += f"**{hotel['name']}**\n" - result += f" Location: {hotel['location']}\n" - result += f" Rating: {hotel['rating']}/5\n" - result += f" ${hotel['price_per_night']}/night (Total: ${total_cost})\n\n" - - return result - - except ValueError as e: - return f"Error parsing dates. Please use YYYY-MM-DD format. Details: {str(e)}" - - -async def main(): - """Main function to run the agent as a web server.""" - async with ( - DefaultAzureCredential() as credential, - AzureAIAgentClient( - project_endpoint=PROJECT_ENDPOINT, - model_deployment_name=MODEL_DEPLOYMENT_NAME, - credential=credential, - ) as client, - ): - agent = Agent( - client, - name="SeattleHotelAgent", - instructions="""You are a helpful travel assistant specializing in finding hotels in Seattle, Washington. - -When a user asks about hotels in Seattle: -1. Ask for their check-in and check-out dates if not provided -2. Ask about their budget preferences if not mentioned -3. Use the get_available_hotels tool to find available options -4. Present the results in a friendly, informative way -5. Offer to help with additional questions about the hotels or Seattle - -Be conversational and helpful. 
If users ask about things outside of Seattle hotels, -politely let them know you specialize in Seattle hotel recommendations.""", - tools=[get_available_hotels], - ) - - print("Seattle Hotel Agent Server running on http://localhost:8088") - server = from_agent_framework(agent) - await server.run_async() - - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/requirements.txt b/samples/python/hosted-agents/agent-framework/agent-with-local-tools/requirements.txt deleted file mode 100644 index 8b4d18535..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-local-tools/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b16 -python-dotenv -azure-identity - -# Azure Monitor / OpenTelemetry -azure-monitor-opentelemetry-exporter>=1.0.0b46 -opentelemetry-sdk>=1.39.0 -opentelemetry-api>=1.39.0 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/README.md b/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/README.md deleted file mode 100644 index d01fd0ecd..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/README.md +++ /dev/null @@ -1,112 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). 
- -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use the TextSearchProvider to add retrieval augmented generation (RAG) capabilities to a -[Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview#ai-agents) AI agent and -host it using [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Retrieval Augmented Generation (RAG) with `TextSearchContextProvider` - -This sample uses a `TextSearchContextProvider` to demonstrate the RAG pattern. The RAG workflow operates as follows: - -1. When the user asks a question, the `TextSearchContextProvider` intercepts it -2. The search function looks for relevant documents based on the query -3. Retrieved documents are injected into the model's context -4. The AI responds using both its training and the provided context -5. The agent can cite specific source documents in its answers - -**Note**: The `TextSearchContextProvider` returns pre-defined snippets for demonstration purposes. 
In a production scenario, replace this with actual searches against your knowledge base (e.g., Azure AI Search, vector database, or other data sources). - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure OpenAI endpoint configured -2. A deployment of a chat model (e.g., `gpt-4o-mini`) -3. Azure CLI installed and authenticated (`az login`) -4. Python 3.10+ installed - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` - The deployment name for your chat model (required) - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. 
- -### Interacting with the Agent - -You can interact with the agent using: - -```powershell -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses -d '{"input": "What is the return policy?","stream":false}' -``` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/agent.yaml b/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/agent.yaml deleted file mode 100644 index 1e23818b0..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/agent.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Unique identifier/name for this agent -name: agent-with-text-search-rag -# Brief description of what this agent does -description: > - An AI agent that uses a ContextProvider for retrieval augmented generation (RAG) capabilities. - The agent runs searches against an external knowledge base before each model invocation and - injects the results into the model context. It can answer questions about Contoso Outdoors - policies and products, including return policies, refunds, shipping options, and product care - instructions such as tent maintenance. 
-metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Retrieval-Augmented Generation - - RAG -template: - name: agent-with-text-search-rag - # The type of agent - "hosted" for HOBO, "container" for COBO - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o-mini - name: chat diff --git a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/main.py b/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/main.py deleted file mode 100644 index 0ed5e61f4..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/main.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import json -import sys -from collections.abc import MutableSequence -from dataclasses import dataclass -from typing import Any - -from agent_framework import ChatMessage, Context, ContextProvider, Role -from agent_framework.azure import AzureOpenAIChatClient -from azure.ai.agentserver.agentframework import from_agent_framework # pyright: ignore[reportUnknownVariableType] -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -if sys.version_info >= (3, 12): - from typing import override -else: - from typing_extensions import override - -# Create a token provider that refreshes tokens automatically for long-running servers -# This avoids 401 errors when the initial token expires (typically after 1 hour) -_credential = DefaultAzureCredential() -_token_provider = get_bearer_token_provider(_credential, "https://cognitiveservices.azure.com/.default") - - -@dataclass -class TextSearchResult: - source_name: str - source_link: str - text: str - - -class 
TextSearchContextProvider(ContextProvider): - """A simple context provider that simulates text search results based on keywords in the user's message.""" - - def _get_most_recent_message(self, messages: ChatMessage | MutableSequence[ChatMessage]) -> ChatMessage: - """Helper method to extract the most recent message from the input.""" - if isinstance(messages, ChatMessage): - return messages - if messages: - return messages[-1] - raise ValueError("No messages provided") - - @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: - message = self._get_most_recent_message(messages) - query = message.text.lower() - - results: list[TextSearchResult] = [] - if "return" in query and "refund" in query: - results.append( - TextSearchResult( - source_name="Contoso Outdoors Return Policy", - source_link="https://contoso.com/policies/returns", - text=( - "Customers may return any item within 30 days of delivery. " - "Items should be unused and include original packaging. " - "Refunds are issued to the original payment method within 5 business days of inspection." - ), - ) - ) - - if "shipping" in query: - results.append( - TextSearchResult( - source_name="Contoso Outdoors Shipping Guide", - source_link="https://contoso.com/help/shipping", - text=( - "Standard shipping is free on orders over $50 and typically arrives in 3-5 business days " - "within the continental United States. Expedited options are available at checkout." - ), - ) - ) - - if "tent" in query or "fabric" in query: - results.append( - TextSearchResult( - source_name="TrailRunner Tent Care Instructions", - source_link="https://contoso.com/manuals/trailrunner-tent", - text=( - "Clean the tent fabric with lukewarm water and a non-detergent soap. " - "Allow it to air dry completely before storage and avoid prolonged UV " - "exposure to extend the lifespan of the waterproof coating." 
- ), - ) - ) - - if not results: - return Context() - - return Context( - messages=[ - ChatMessage( - role=Role.USER, text="\n\n".join(json.dumps(result.__dict__, indent=2) for result in results) - ) - ] - ) - - -def create_agent(): - # Create an Agent using the Azure OpenAI Chat Client with token provider - # for automatic token refresh in long-running servers - agent = AzureOpenAIChatClient(ad_token_provider=_token_provider).create_agent( - name="SupportSpecialist", - instructions=( - "You are a helpful support specialist for Contoso Outdoors. " - "Answer questions using the provided context and cite the source document when available." - ), - context_providers=TextSearchContextProvider(), - ) - return agent - - -def main(): - # Run the agent as a hosted agent - from_agent_framework(create_agent()).run() - - -if __name__ == "__main__": - main() diff --git a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/requirements.txt b/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/requirements.txt deleted file mode 100644 index 1e93c1a80..000000000 --- a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agents-in-workflow/README.md b/samples/python/hosted-agents/agent-framework/agents-in-workflow/README.md deleted file mode 100644 index 6964703a7..000000000 --- a/samples/python/hosted-agents/agent-framework/agents-in-workflow/README.md +++ /dev/null @@ -1,156 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. 
AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to use AI agents as executors within a workflow, hosted using -[Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Agents in Workflows - -This sample demonstrates the integration of AI agents within a workflow pipeline. The workflow operates as follows: - -1. **Research Agent** - Research market and product -2. **Market Agent** - Create market strategy -3. 
**Legal Agent** - Review legal considerations for the market strategy - -The agents will work concurrently in a workflow, creating a comprehensive report that demonstrates: - -- How AI-powered agents can be seamlessly integrated into workflow pipelines -- concurrent execution of multiple agents within a workflow - -### Agent Hosting - -The agent workflow is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent workflow using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent workflow can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure OpenAI Service** - - Endpoint configured - - Chat model deployed (e.g., `gpt-4o-mini` or `gpt-4`) - - Note your endpoint URL and deployment name - -2. **Azure AI Foundry Project** - - Project created in [Azure AI Foundry](https://ai.azure.com) - - Note your project endpoint URL - -3. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -4. 
**Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` - The deployment name for your chat model (required) -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint (required) - -**Finding your Azure AI Project Endpoint:** -1. Go to [Azure AI Foundry portal](https://ai.azure.com) -2. Navigate to your project -3. Find the endpoint under **Project Settings** > **Properties** -4. Format: `https://{project-name}.{region}.api.azureml.ms` or `https://{resource}.services.ai.azure.com/api/projects/{project-name}` - -```powershell -# Replace with your actual values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini" -$env:AZURE_AI_PROJECT_ENDPOINT="https://your-project.region.api.azureml.ms" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent workflow locally on `http://localhost:8088/`. - -**Expected Output:** -``` -2026-01-22 11:27:02,086 - azure.ai.agentserver - INFO - Starting FoundryCBAgent server on port 8088 -INFO: Uvicorn running on http://0.0.0.0:8088 (Press CTRL+C to quit) -``` - -### Interacting with the Agent - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "We are launching a new budget-friendly electric bike for urban commuters." 
- stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "We are launching a new budget-friendly electric bike for urban commuters.","stream":false}' -``` - -**Expected Response:** - -You'll receive a comprehensive response from all three agents running concurrently: -- **Research Agent**: Market insights, opportunities, and risks -- **Marketing Agent**: Value propositions and targeted messaging -- **Legal Agent**: Compliance and policy considerations - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/python/hosted-agents/agent-framework/agents-in-workflow/agent.yaml b/samples/python/hosted-agents/agent-framework/agents-in-workflow/agent.yaml deleted file mode 100644 index 584b462a4..000000000 --- a/samples/python/hosted-agents/agent-framework/agents-in-workflow/agent.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Unique identifier/name for this agent -name: agents-in-workflow -# Brief description of what this agent does -description: > - A workflow agent that responds to product launch strategy inquiries by concurrently leveraging insights from three specialized agents. -metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Workflows -template: - name: agents-in-workflow - # The type of agent - "hosted" for HOBO, "container" for COBO - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o-mini - name: chat diff --git a/samples/python/hosted-agents/agent-framework/agents-in-workflow/main.py b/samples/python/hosted-agents/agent-framework/agents-in-workflow/main.py deleted file mode 100644 index c3ecabfc6..000000000 --- a/samples/python/hosted-agents/agent-framework/agents-in-workflow/main.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from agent_framework import ConcurrentBuilder -from agent_framework.azure import AzureOpenAIChatClient -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity import DefaultAzureCredential, get_bearer_token_provider # pyright: ignore[reportUnknownVariableType] - -# Create a token provider that refreshes tokens automatically for long-running servers -# This avoids 401 errors when the initial token expires (typically after 1 hour) -_credential = DefaultAzureCredential() -_token_provider = get_bearer_token_provider(_credential, "https://cognitiveservices.azure.com/.default") - - -def create_workflow_builder(): - # Create agents using token provider for automatic token refresh - researcher = AzureOpenAIChatClient(ad_token_provider=_token_provider).create_agent( - instructions=( - "You're an expert market and product researcher. " - "Given a prompt, provide concise, factual insights, opportunities, and risks." - ), - name="researcher", - ) - marketer = AzureOpenAIChatClient(ad_token_provider=_token_provider).create_agent( - instructions=( - "You're a creative marketing strategist. " - "Craft compelling value propositions and target messaging aligned to the prompt." - ), - name="marketer", - ) - legal = AzureOpenAIChatClient(ad_token_provider=_token_provider).create_agent( - instructions=( - "You're a cautious legal/compliance reviewer. " - "Highlight constraints, disclaimers, and policy concerns based on the prompt." 
- ), - name="legal", - ) - - # Build a concurrent workflow - workflow_builder = ConcurrentBuilder().participants([researcher, marketer, legal]) - - return workflow_builder - -def main(): - # Run the agent as a hosted agent - from_agent_framework(create_workflow_builder().build).run() - - -if __name__ == "__main__": - main() diff --git a/samples/python/hosted-agents/agent-framework/agents-in-workflow/requirements.txt b/samples/python/hosted-agents/agent-framework/agents-in-workflow/requirements.txt deleted file mode 100644 index 1e93c1a80..000000000 --- a/samples/python/hosted-agents/agent-framework/agents-in-workflow/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.dockerignore b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.dockerignore deleted file mode 100644 index 79cc80773..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.dockerignore +++ /dev/null @@ -1,51 +0,0 @@ -# Build artifacts -bin/ -obj/ - -# IDE and editor files -.vs/ -.vscode/ -*.user -*.suo -.foundry/ - -# Source control -.git/ - -# Documentation -README.md - -# Ignore files -.gitignore -.dockerignore - -# Logs -*.log - -# Temporary files -*.tmp -*.temp - -# OS files -.DS_Store -Thumbs.db - -# Package manager directories -node_modules/ -packages/ - -# Test results -TestResults/ -*.trx - -# Coverage reports -coverage/ -*.coverage -*.coveragexml - -# Local development config -appsettings.Development.json -.env - -.venv/ -__pycache__/ diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.env.sample b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.env.sample deleted file mode 100644 index 7a7d4d5ec..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/.env.sample +++ /dev/null @@ -1,3 
+0,0 @@ -# IMPORTANT: Never commit .env to version control - add it to .gitignore -PROJECT_ENDPOINT= -MODEL_DEPLOYMENT_NAME= \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/Dockerfile b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/Dockerfile deleted file mode 100644 index 0cc939d9b..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY . user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/agent.yaml b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/agent.yaml deleted file mode 100644 index 5734170d8..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/agent.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Unique identifier/name for this agent -name: azure-ai-agents-in-workflow -# Brief description of what this agent does -description: > - A multi-agent workflow featuring a Writer and Reviewer that collaborate - to create and refine content. 
-metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft Agent Framework Team - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Multi-Agent Workflow - - Writer-Reviewer - - Content Creation -template: - name: azure-ai-agents-in-workflow - # The type of agent - "hosted" for HOBO, "container" for COBO - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: PROJECT_ENDPOINT - value: ${PROJECT_ENDPOINT} - - name: MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4.1-mini - name: chat diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/main.py b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/main.py deleted file mode 100644 index 0876b4604..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/main.py +++ /dev/null @@ -1,88 +0,0 @@ -import asyncio -import os -from contextlib import asynccontextmanager - -from agent_framework import Agent, WorkflowBuilder -from agent_framework.azure import AzureAIAgentClient -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import DefaultAzureCredential, ManagedIdentityCredential -from dotenv import load_dotenv - -load_dotenv(override=True) - -# Configure these for your Foundry project -# Read the explicit variables present in the .env file -PROJECT_ENDPOINT = os.getenv( - "PROJECT_ENDPOINT" -) # e.g., "https://.services.ai.azure.com/api/projects/" -MODEL_DEPLOYMENT_NAME = os.getenv( - "MODEL_DEPLOYMENT_NAME", "gpt-4.1-mini" -) # Your model deployment name e.g., "gpt-4.1-mini" - - -def get_credential(): - """Will use Managed Identity when running in Azure, otherwise falls back to DefaultAzureCredential.""" - return ( - ManagedIdentityCredential() - if os.getenv("MSI_ENDPOINT") - else DefaultAzureCredential() - ) - - -@asynccontextmanager -async def create_agents(): - async with ( - 
get_credential() as credential, - AzureAIAgentClient( - project_endpoint=PROJECT_ENDPOINT, - model_deployment_name=MODEL_DEPLOYMENT_NAME, - credential=credential, - ) as writer_client, - AzureAIAgentClient( - project_endpoint=PROJECT_ENDPOINT, - model_deployment_name=MODEL_DEPLOYMENT_NAME, - credential=credential, - ) as reviewer_client, - ): - writer = Agent( - writer_client, - name="Writer", - instructions="You are an excellent content writer. You create new content and edit contents based on the feedback.", - ) - reviewer = Agent( - reviewer_client, - name="Reviewer", - instructions="You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content in the most concise manner possible.", - ) - yield writer, reviewer - - -def create_workflow(writer, reviewer): - workflow = ( - WorkflowBuilder( - name="Writer-Reviewer", - start_executor=writer, - output_executors=[writer, reviewer], - ) - .add_edge(writer, reviewer) - .build() - ) - return workflow.as_agent() - - -async def main() -> None: - """ - The writer and reviewer multi-agent workflow. 
- - Environment variables required: - - PROJECT_ENDPOINT: Your Microsoft Foundry project endpoint - - MODEL_DEPLOYMENT_NAME: Your Microsoft Foundry model deployment name - """ - - async with create_agents() as (writer, reviewer): - agent = create_workflow(writer, reviewer) - await from_agent_framework(agent).run_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/requirements.txt b/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/requirements.txt deleted file mode 100644 index 0f530be02..000000000 --- a/samples/python/hosted-agents/agent-framework/azure-ai-agents-in-workflow/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b16 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/echo-agent/Dockerfile b/samples/python/hosted-agents/agent-framework/echo-agent/Dockerfile deleted file mode 100644 index 0cc939d9b..000000000 --- a/samples/python/hosted-agents/agent-framework/echo-agent/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY . user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/agent-framework/echo-agent/README.md b/samples/python/hosted-agents/agent-framework/echo-agent/README.md deleted file mode 100644 index 7b90f4dd8..000000000 --- a/samples/python/hosted-agents/agent-framework/echo-agent/README.md +++ /dev/null @@ -1,104 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. 
Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a web search agent using Bing Grounding, hosted using -[Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Web Search Agent - -The agent uses Bing Grounding to search the web for current information and provide accurate, well-sourced answers. 
This demonstrates: - -- How to integrate Bing Grounding as a tool in an AI agent -- How to use the `HostedWebSearchTool` from the Agent Framework - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. -The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure AI Foundry project configured -2. A deployment of a chat model (e.g., `gpt-4.1-mini`) -3. A Bing Grounding connection in your project -4. Azure CLI installed and authenticated (`az login`) -5. Python 3.10+ installed - -### Environment Variables - -Create a `.env` file with the following environment variables: - -> **Note:** The `.env` file is for local development only. When deploying to Azure AI Foundry, remove the `.env` file and configure environment variables in `agent.yaml` instead. 
- -```bash -AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ -AZURE_AI_MODEL_DEPLOYMENT_NAME= # e.g., gpt-4.1-mini -BING_GROUNDING_CONNECTION_ID=/subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/ -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```bash -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```bash -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent - -```bash -curl -X POST http://localhost:8088/responses \ - -H "Content-Type: application/json" \ - -d '{"input": "What is the latest news in AI?"}' | jq . -``` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/python/hosted-agents/agent-framework/echo-agent/agent.yaml b/samples/python/hosted-agents/agent-framework/echo-agent/agent.yaml deleted file mode 100644 index 96f5b39c4..000000000 --- a/samples/python/hosted-agents/agent-framework/echo-agent/agent.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Unique identifier/name for this agent -name: echo-agent -# Brief description of what this agent does -description: > - This sample demonstrates how to create a custom AI agent that echoes user input. - It is useful for testing, debugging, and learning how to build custom agents. -metadata: - # Categorization tags for organizing and discovering agents - tags: - - AI Agent Hosting - - Azure AI AgentServer - - Custom Agent Implementation - - Microsoft Agent Framework -template: - name: echo-agent - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} diff --git a/samples/python/hosted-agents/agent-framework/echo-agent/main.py b/samples/python/hosted-agents/agent-framework/echo-agent/main.py deleted file mode 100644 index 8a556e836..000000000 --- a/samples/python/hosted-agents/agent-framework/echo-agent/main.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -from collections.abc import AsyncIterable -from typing import Any - -from agent_framework import ( - AgentRunResponse, - AgentRunResponseUpdate, - AgentThread, - BaseAgent, - ChatMessage, - Role, - TextContent, -) -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import DefaultAzureCredential - -""" -Custom Agent Implementation Example - -This sample demonstrates implementing a custom agent by extending BaseAgent class, -showing the minimal requirements for both streaming and non-streaming responses. -""" - - -class EchoAgent(BaseAgent): - """A simple custom agent that echoes user messages with a prefix. 
- - This demonstrates how to create a fully custom agent by extending BaseAgent - and implementing the required run() and run_stream() methods. - """ - - def __init__( - self, - *, - name: str | None = None, - description: str | None = None, - echo_prefix: str = "Echo: ", - **kwargs: Any, - ) -> None: - """Initialize the EchoAgent. - - Args: - name: The name of the agent. - description: The description of the agent. - echo_prefix: The prefix to add to echoed messages. - **kwargs: Additional keyword arguments passed to BaseAgent. - """ - self.echo_prefix = echo_prefix - super().__init__( - name=name, - description=description, - **kwargs, - ) - - async def run( - self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentRunResponse: - """Execute the agent and return a complete response. - - Args: - messages: The message(s) to process. - thread: The conversation thread (optional). - **kwargs: Additional keyword arguments. - - Returns: - An AgentRunResponse containing the agent's reply. - """ - # Normalize input messages to a list - normalized_messages = self._normalize_messages(messages) - - if not normalized_messages: - response_message = ChatMessage( - role=Role.ASSISTANT, - contents=[TextContent(text="Hello! I'm a custom echo agent. 
Send me a message and I'll echo it back.")], - ) - else: - # For simplicity, echo the last user message - last_message = normalized_messages[-1] - if last_message.text: - echo_text = f"{self.echo_prefix}{last_message.text}" - else: - echo_text = f"{self.echo_prefix}[Non-text message received]" - - response_message = ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text=echo_text)]) - - # Notify the thread of new messages if provided - if thread is not None: - await self._notify_thread_of_new_messages(thread, normalized_messages, response_message) - - return AgentRunResponse(messages=[response_message]) - - async def run_stream( - self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterable[AgentRunResponseUpdate]: - """Execute the agent and yield streaming response updates. - - Args: - messages: The message(s) to process. - thread: The conversation thread (optional). - **kwargs: Additional keyword arguments. - - Yields: - AgentRunResponseUpdate objects containing chunks of the response. - """ - # Normalize input messages to a list - normalized_messages = self._normalize_messages(messages) - - if not normalized_messages: - response_text = "Hello! I'm a custom echo agent. Send me a message and I'll echo it back." 
- else: - # For simplicity, echo the last user message - last_message = normalized_messages[-1] - if last_message.text: - response_text = f"{self.echo_prefix}{last_message.text}" - else: - response_text = f"{self.echo_prefix}[Non-text message received]" - - # Simulate streaming by yielding the response word by word - words = response_text.split() - for i, word in enumerate(words): - # Add space before word except for the first one - chunk_text = f" {word}" if i > 0 else word - - yield AgentRunResponseUpdate( - contents=[TextContent(text=chunk_text)], - role=Role.ASSISTANT, - ) - - # Small delay to simulate streaming - await asyncio.sleep(0.1) - - # Notify the thread of the complete response if provided - if thread is not None: - complete_response = ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text=response_text)]) - await self._notify_thread_of_new_messages(thread, normalized_messages, complete_response) - - -def create_agent() -> EchoAgent: - agent = EchoAgent( - name="EchoBot", description="A simple agent that echoes messages with a prefix", echo_prefix="🔊 Echo: " - ) - return agent - -if __name__ == "__main__": - from_agent_framework(create_agent()).run() diff --git a/samples/python/hosted-agents/agent-framework/echo-agent/requirements.txt b/samples/python/hosted-agents/agent-framework/echo-agent/requirements.txt deleted file mode 100644 index 9dac56e37..000000000 --- a/samples/python/hosted-agents/agent-framework/echo-agent/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 -pytest==8.4.2 -python-dotenv==1.1.1 -azure-monitor-opentelemetry==1.8.1 diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/README.md b/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/README.md deleted file mode 100644 index 667a282dc..000000000 --- 
a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/README.md +++ /dev/null @@ -1,281 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a Microsoft Agent Framework chat agent with **human-in-the-loop** approval workflows, host it using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), and deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. 
- -This sample is adapted from the [agent-framework sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/tools/ai_function_with_approval_and_threads.py). - -## How It Works - -### Human-in-the-loop approval - -In [main.py](main.py), the agent is created using `AzureOpenAIChatClient` and includes an `@ai_function` decorated with `approval_mode="always_require"`. This means any call to the function (e.g., `add_to_calendar`) will escalate to a human reviewer before execution. - -When the agent determines it needs to call an approval-required function, the response includes a `function_call` with name `__hosted_agent_adapter_hitl__`. The caller must then provide feedback (`approve`, `reject`, or additional guidance) to continue the workflow. - -### Thread persistence - -- The sample uses `JsonLocalFileAgentThreadRepository` for `AgentThread` persistence, creating a JSON file per conversation ID under `./thread_storage`. - -- An in-memory alternative, `InMemoryAgentThreadRepository`, lives in the `azure.ai.agentserver.agentframework.persistence` module. - -- To store thread messages elsewhere, inherit from `SerializedAgentThreadRepository` and override the following methods: -```python -class SerializedAgentThreadRepository(AgentThreadRepository): - async def read_from_storage(self, conversation_id: str) -> Optional[Any]: - """Read the serialized thread from storage. - - :param conversation_id: The conversation ID. - :type conversation_id: str - - :return: The serialized thread if available, None otherwise. - :rtype: Optional[Any] - """ - ... - - async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: - """Write the serialized thread to storage. - - :param conversation_id: The conversation ID. - :type conversation_id: str - :param serialized_thread: The serialized thread to save. - :type serialized_thread: Any - :return: None - :rtype: None - """ - ... 
-``` - -These hooks let you plug in any backing store (blob storage, databases, etc.) without changing the rest of the sample. - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Validate the deployed Agent -```python -# Before running the sample: -# pip install --pre azure-ai-projects>=2.0.0b4 - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -import json - -foundry_account = "" -foundry_project = "" -agent_name = "" - -project_endpoint = f"https://{foundry_account}.services.ai.azure.com/api/projects/{foundry_project}" - -project_client = AIProjectClient( - endpoint=project_endpoint, - credential=DefaultAzureCredential(), -) - -# Get an existing agent -agent = project_client.agents.get(agent_name=agent_name) -print(f"Retrieved agent: {agent.name}") - -openai_client = project_client.get_openai_client() -conversation = openai_client.conversations.create() - -response = openai_client.responses.create( - input="Add a dentist appointment on March 15th", - conversation=conversation.id, - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, -) - -call_id = "" -for item in response.output: - if item.type == "function_call" and item.name == "__hosted_agent_adapter_hitl__": - args = json.loads(item.arguments) - print(f"Agent 
will add {args['event_name']} on {args['date']}") - call_id = item.call_id - -if not call_id: - print(f"No human input is required, output: {response.output_text}") -else: - human_response = "approve" - response = openai_client.responses.create( - input=[ - { - "type": "function_call_output", - "call_id": call_id, - "output": human_response - }], - conversation=conversation.id, - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, - ) - print(f"Human response: {human_response}") - print(f"Agent response: {response.output_text}") -``` - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure OpenAI Service** - - Endpoint configured - - Chat model deployed (e.g., `gpt-4o-mini` or `gpt-4`) - - Note your endpoint URL and deployment name - -2. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -3. **Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` - The deployment name for your chat model (required) -- `OPENAI_API_VERSION` - The API version (e.g., `2025-03-01-preview`) - -This sample loads environment variables from a local `.env` file if present. 
Copy `.envtemplate` to `.env` and fill in your Azure OpenAI details: - -``` -AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ -OPENAI_API_VERSION=2025-03-01-preview -AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= -``` - -```powershell -# Replace with your actual values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini" -$env:OPENAI_API_VERSION="2025-03-01-preview" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent locally - -**Step 1: Send a user request** - -**PowerShell (Windows):** -```powershell -$body = @{ - agent = @{ name = "local_agent"; type = "agent_reference" } - stream = $false - input = "Add a dentist appointment on March 15th" -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"agent":{"name":"local_agent","type":"agent_reference"},"stream":false,"input":"Add a dentist appointment on March 15th"}' -``` - -A response that requires a human decision looks like this (formatted for clarity): - -```json -{ - "conversation": {"id": ""}, - "output": [ - {...}, - { - "type": "function_call", - "id": "func_xxx", - "name": "__hosted_agent_adapter_hitl__", - "call_id": "", - "arguments": "{\"event_name\":\"Dentist Appointment\",\"date\":\"2024-03-15\"}" - } - ] -} -``` - -Capture these values from the response; you will need them to provide feedback: - -- `conversation.id` -- The `call_id` associated with `__hosted_agent_adapter_hitl__` 
- -**Step 2: Provide human feedback** - -Send a `CreateResponse` request with a `function_call_output` message that contains your decision (`approve`, `reject`, or additional guidance). Replace the placeholders before running the command: - -**PowerShell (Windows):** -```powershell -$body = @{ - agent = @{ name = "local_agent"; type = "agent_reference" } - stream = $false - conversation = @{ id = "" } - input = @( - @{ - call_id = "" - output = "approve" - type = "function_call_output" - } - ) -} | ConvertTo-Json -Depth 3 - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"agent":{"name":"local_agent","type":"agent_reference"},"stream":false,"conversation":{"id":""},"input":[{"call_id":"","output":"approve","type":"function_call_output"}]}' -``` - -When the reviewer response is accepted, the agent executes the approved function and returns the final output. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/agent.yaml b/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/agent.yaml deleted file mode 100644 index 992c3a73d..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/agent.yaml +++ /dev/null @@ -1,30 +0,0 @@ -name: calendar-agent-with-human-in-the-loop -description: This AgentFramework agent demonstrates how to integrate human-in-the-loop functionality using AI Functions. -metadata: - example: - - role: user - content: |- - Add a dentist appointment on March 15th - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Human in the Loop - authors: - - junanchen -template: - name: calendar-agent-with-human-in-the-loop - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o - name: chat diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/main.py b/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/main.py deleted file mode 100644 index c88b3ed56..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/main.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-import asyncio -from typing import Annotated, Any, Collection - -from agent_framework import ChatAgent, ChatMessage, ChatMessageStoreProtocol, ai_function -from agent_framework._threads import ChatMessageStoreState -from agent_framework.azure import AzureOpenAIChatClient - -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.ai.agentserver.agentframework.persistence.agent_thread_repository import JsonLocalFileAgentThreadRepository -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -""" -Tool Approvals with Threads - -This sample demonstrates using tool approvals with threads. -With threads, you don't need to manually pass previous messages - -the thread stores and retrieves them automatically. -""" - -# Create a token provider that refreshes tokens automatically for long-running servers -# This avoids 401 errors when the initial token expires (typically after 1 hour) -_credential = DefaultAzureCredential() -_token_provider = get_bearer_token_provider(_credential, "https://cognitiveservices.azure.com/.default") - -class CustomChatMessageStore(ChatMessageStoreProtocol): - """Implementation of custom chat message store. 
- In real applications, this can be an implementation of relational database or vector store.""" - - def __init__(self, messages: Collection[ChatMessage] | None = None) -> None: - self._messages: list[ChatMessage] = [] - if messages: - self._messages.extend(messages) - - async def add_messages(self, messages: Collection[ChatMessage]) -> None: - self._messages.extend(messages) - - async def list_messages(self) -> list[ChatMessage]: - return self._messages - - @classmethod - async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "CustomChatMessageStore": - """Create a new instance from serialized state.""" - store = cls() - await store.update_from_state(serialized_store_state, **kwargs) - return store - - async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> None: - """Update this instance from serialized state.""" - if serialized_store_state: - state = ChatMessageStoreState.from_dict(serialized_store_state, **kwargs) - if state.messages: - self._messages.extend(state.messages) - - async def serialize(self, **kwargs: Any) -> Any: - """Serialize this store's state.""" - state = ChatMessageStoreState(messages=self._messages) - return state.to_dict(**kwargs) - - -@ai_function(approval_mode="always_require") -def add_to_calendar( - event_name: Annotated[str, "Name of the event"], date: Annotated[str, "Date of the event"] -) -> str: - """Add an event to the calendar (requires approval).""" - print(f">>> EXECUTING: add_to_calendar(event_name='{event_name}', date='{date}')") - return f"Added '{event_name}' to calendar on {date}" - - -def build_agent(): - # Use token provider for automatic token refresh in long-running servers - return ChatAgent( - chat_client=AzureOpenAIChatClient(ad_token_provider=_token_provider), - name="CalendarAgent", - instructions="You are a helpful calendar assistant.", - tools=[add_to_calendar], - chat_message_store_factory=CustomChatMessageStore, - ) - - -async def main() -> None: - agent = 
build_agent() - thread_repository = JsonLocalFileAgentThreadRepository(agent=agent, storage_path="./thread_storage") - await from_agent_framework(agent, thread_repository=thread_repository).run_async() - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/requirements.txt b/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/requirements.txt deleted file mode 100644 index 1e93c1a80..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/README.md b/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/README.md deleted file mode 100644 index 0450c1dc0..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/README.md +++ /dev/null @@ -1,242 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). 
- -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a Microsoft Agent Framework workflow that persists checkpoints and pauses for human-in-the-loop (HITL) review before completing a response. The workflow is hosted with the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and can be deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Checkpoints - -Agent-framework workflow can resume by loading a checkpoint. Hosted agent provides a CheckpointRepository API for users to manage their checkpoints. It defines as below: - -```py -class CheckpointRepository(ABC): - """ - Repository interface for storing and retrieving checkpoints. - - :meta private: - """ - @abstractmethod - async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: - """Retrieve or create a checkpoint storage by conversation ID. - - :param conversation_id: The unique identifier for the checkpoint. - :type conversation_id: str - :return: The CheckpointStorage if found or created, None otherwise. 
- :rtype: Optional[CheckpointStorage] - """ -``` - -An in-memory checkpoint repository `azure.ai.agentserver.agentframework.persistence.InMemoryCheckpointRepository` and a local file based `azure.ai.agentserver.agentframework.persistence.FileCheckpointRepository(storage_path: str)` are provided. - -If checkpoint repository is provided, hosted agent adapter will search for previous checkpoints by `conversation_id`, load the latest checkpoint to `WorkflowAgent`, and then invoke the workflow agent with `CheckpointStorage` instance. Thus, the checkpoint will be updated by agent framework. - -In this sample, the workflow persists checkpoints through `FileCheckpointRepository(storage_path="./checkpoints")`, ensuring the pending review queue survives restarts. - - -### Workflow with HITL - -`workflow_as_agent_reflection_pattern.py` defines two executors: - -- `Worker` – Generates answers with `AzureOpenAIChatClient`, tracks pending review requests, emits final responses, and implements `on_checkpoint_save` / `on_checkpoint_restore` so pending work can be resumed in multiturn conversions. -- `ReviewerWithHumanInTheLoop` – Always escalates to a human. The `HumanReviewRequest` payload captures the entire conversation so the reviewer can approve or reject the draft. When the reviewer responds, the workflow either emits the answer or regenerates it with the supplied feedback. Hosted agent adapter converts the HITL request to a `function_call` item with `HumanReviewRequest` information as argument. `HumanReviewRequest.convert_to_payload` is used for conversion. -- Human feedback should be provided as a `function_call_output` item with `conversation_id` and `call_id` matching with feedback request. Hosted agent adapter convert the feedback to targeted data instance by calling `ReviewResponse.convert_from_payload`. 
- - -### Agent hosting - -`main.py` builds the workflow, adapts it with `from_agent_framework`, and starts a local OpenAI Responses-compatible endpoint on `http://localhost:8088`. The endpoint supports both streaming and non-streaming modes and emits `function_call` items whenever the workflow pauses for human feedback. - -### Agent deployment - -The same container image can be deployed to Microsoft Foundry with the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension, which pushes the image to Azure Container Registry and creates hosted agent versions and deployments. - -## Validate the deployed Agent -```python -# Before running the sample: -# pip install --pre azure-ai-projects>=2.0.0b4 - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -import json - -foundry_account = "" -foundry_project = "" -agent_name = "" - -project_endpoint = f"https://{foundry_account}.services.ai.azure.com/api/projects/{foundry_project}" - -project_client = AIProjectClient( - endpoint=project_endpoint, - credential=DefaultAzureCredential(), -) - -# Get an existing agent -agent = project_client.agents.get(agent_name=agent_name) -print(f"Retrieved agent: {agent.name}") - -openai_client = project_client.get_openai_client() -conversation = openai_client.conversations.create() - -response = openai_client.responses.create( - input="Draft a launch plan for a sustainable backpack brand", - conversation=conversation.id, - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, -) - -call_id = "" -request_id = "" -for item in response.output: - if item.type == "function_call" and item.name == "__hosted_agent_adapter_hitl__": - agent_request = json.loads(item.arguments).get("agent_request", {}) - request_id = agent_request.get("request_id", "") - - agent_messages = agent_request.get("agent_messages", []) - agent_messages_str = "\n".join(json.dumps(msg, indent=4) for msg in agent_messages) - print(f"Agent 
requests: {agent_messages_str}") - call_id = item.call_id - -if not call_id or not request_id: - print(f"No human input is required, output: {response.output_text}") -else: - human_response = { - "request_id": request_id, - "feedback": "approve", - "approved": True, - } - response = openai_client.responses.create( - input=[ - { - "type": "function_call_output", - "call_id": call_id, - "output": json.dumps(human_response) - }], - conversation=conversation.id, - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, - ) - print(f"Human response: {human_response['feedback']}") - print(f"Agent response: {response.output_text}") -``` - -## Running the Agent Locally - -### Prerequisites - -1. **Azure OpenAI Service** – An endpoint with a deployed chat model (for example `gpt-4o-mini`). Record the endpoint URL and deployment name. -2. **Azure CLI** – Installed and signed in (`az login`). The workflow uses `AzureCliCredential` for Azure OpenAI authentication. -3. **Python 3.10 or higher** – Verify with `python --version`. Install a newer version if required. -4. **pip** – To install the sample dependencies. - -### Environment variables - -Set the following variables before running the sample (use a `.env` file or your shell environment): - -- `AZURE_OPENAI_ENDPOINT` – Azure OpenAI endpoint URL (required). -- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` – Deployment name for your chat model (required). - -```powershell -# Replace the placeholder values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Installing dependencies - -From the `workflow-agent-with-checkpoint-and-hitl` folder: - -```powershell -pip install -r requirements.txt -``` - -### Running the sample - -Start the hosted workflow locally: - -```powershell -python main.py -``` - -The server listens on `http://localhost:8088/` and writes checkpoints to the `./checkpoints` directory. 
- -### Interacting with the agent locally - -Send a `POST` request to `http://0.0.0.0:8088/responses` - -```json -{ - "agent": {"name": "local_agent", "type": "agent_reference"}, - "stream": false, - "input": "Draft a launch plan for a sustainable backpack brand", -} -``` - -A response with human-review request looks like this (formatted for clarity): - -```json -{ - "conversation": {"id": ""}, - "output": [ - {...}, - { - "type": "function_call", - "id": "func_xxx", - "name": "__hosted_agent_adapter_hitl__", - "call_id": "", - "arguments": "{\"agent_request\":{\"request_id\":\"\",...}}" - } - ] -} -``` - -Capture three values from the response: - -- `conversation.id` -- The `call_id` of the `__hosted_agent_adapter_hitl__` function call -- The `request_id` inside the serialized `agent_request` - -Respond by sending a `CreateResponse` request with `function_call_output` message that carries your review decision. Replace the placeholders before running the command: - -```json -{ - "agent": {"name": "local_agent", "type": "agent_reference"}, - "stream": false, - "convseration": {"id": ""}, - "input": [ - { - "call_id": "", - "output": "{\"request_id\":\"\",\"approved\":true,\"feedback\":\"approve\"}", - "type": "function_call_output", - } - ] -} -``` - -## Deploying the agent to Microsoft Foundry - -Follow the hosted agent deployment guide at https://aka.ms/azdaiagent/docs to: - -1. Configure the Azure Developer CLI and authenticate with your Azure subscription. -2. Build the container image (use `azd` cloud build or `docker build --platform=linux/amd64 ...`). -3. Publish the image to Azure Container Registry. -4. Create a hosted agent version and deployment inside your Azure AI Foundry project. - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on the service - -We **recommend using `azd` cloud build**, which always produces a `linux/amd64` image. 
- -If you must build locally on non-`amd64` hardware (for example, Apple Silicon), force the correct architecture: - -```bash -docker build --platform=linux/amd64 -t image . -``` - -This ensures the hosted agent runs correctly in Microsoft Foundry. diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/agent.yaml b/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/agent.yaml deleted file mode 100644 index 99c14d598..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/agent.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Unique identifier/name for this agent -name: af-worfklow-agent-with-checkpoint-and-hitl -# Brief description of what this agent does -description: > - A workflow agent built using the Microsoft Agent Framework that includes checkpointing and human-in-the-loop (HITL) capabilities. -metadata: - # Categorization tags for organizing and discovering agents - authors: - - Microsoft - tags: - - Azure AI AgentServer - - Microsoft Agent Framework - - Human in the Loop -template: - name: af-worfklow-agent-with-checkpoint-and-hitl - kind: hosted - protocols: - - protocol: responses - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o - name: chat diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/main.py b/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/main.py deleted file mode 100644 index 487a36b14..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/main.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 
Microsoft. All rights reserved. -import asyncio -import json -from dataclasses import dataclass -from typing import Any - -from agent_framework.azure import AzureOpenAIChatClient -from azure.identity import AzureCliCredential, get_bearer_token_provider -from dotenv import load_dotenv - -from agent_framework import ( # noqa: E402 - Executor, - WorkflowBuilder, - WorkflowContext, - handler, - response_handler, -) -from workflow_as_agent_reflection_pattern import ( # noqa: E402 - ReviewRequest, - ReviewResponse, - Worker, -) - -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.ai.agentserver.agentframework.persistence import FileCheckpointRepository - -# Create a token provider that refreshes tokens automatically for long-running servers -# This avoids 401 errors when the initial token expires (typically after 1 hour) -_credential = AzureCliCredential() -_token_provider = get_bearer_token_provider(_credential, "https://cognitiveservices.azure.com/.default") - -@dataclass -class HumanReviewRequest: - """A request message type for escalation to a human reviewer.""" - - agent_request: ReviewRequest | None = None - - def convert_to_payload(self) -> str: - """Convert the HumanReviewRequest to a payload string.""" - request = self.agent_request - payload: dict[str, Any] = {"agent_request": None} - - if request: - payload["agent_request"] = { - "request_id": request.request_id, - "user_messages": [msg.to_dict() for msg in request.user_messages], - "agent_messages": [msg.to_dict() for msg in request.agent_messages], - } - - return json.dumps(payload, indent=2) - - -class ReviewerWithHumanInTheLoop(Executor): - """Executor that always escalates reviews to a human manager.""" - - def __init__(self, worker_id: str, reviewer_id: str | None = None) -> None: - unique_id = reviewer_id or f"{worker_id}-reviewer" - super().__init__(id=unique_id) - self._worker_id = worker_id - - @handler - async def review(self, request: ReviewRequest, ctx: 
WorkflowContext) -> None: - # In this simplified example, we always escalate to a human manager. - # See workflow_as_agent_reflection.py for an implementation - # using an automated agent to make the review decision. - print(f"Reviewer: Evaluating response for request {request.request_id[:8]}...") - print("Reviewer: Escalating to human manager...") - - # Forward the request to a human manager by sending a HumanReviewRequest. - await ctx.request_info( - request_data=HumanReviewRequest(agent_request=request), - response_type=ReviewResponse, - ) - - @response_handler - async def accept_human_review( - self, - original_request: HumanReviewRequest, - response: ReviewResponse, - ctx: WorkflowContext[ReviewResponse], - ) -> None: - # Accept the human review response and forward it back to the Worker. - print(f"Reviewer: Accepting human review for request {response.request_id[:8]}...") - print(f"Reviewer: Human feedback: {response.feedback}") - print(f"Reviewer: Human approved: {response.approved}") - print("Reviewer: Forwarding human review back to worker...") - await ctx.send_message(response, target_id=self._worker_id) - -def create_builder(): - # Build a workflow with bidirectional communication between Worker and Reviewer, - # and escalation paths for human review. 
- # Use token provider for automatic token refresh in long-running servers - builder = ( - WorkflowBuilder() - .register_executor( - lambda: Worker( - id="sub-worker", - chat_client=AzureOpenAIChatClient(ad_token_provider=_token_provider), - ), - name="worker", - ) - .register_executor( - lambda: ReviewerWithHumanInTheLoop(worker_id="sub-worker"), - name="reviewer", - ) - .add_edge("worker", "reviewer") # Worker sends requests to Reviewer - .add_edge("reviewer", "worker") # Reviewer sends feedback to Worker - .set_start_executor("worker") - ) - return builder - - -async def run_agent() -> None: - """Run the workflow inside the agent server adapter.""" - builder = create_builder() - await from_agent_framework( - builder, # pass workflow builder to adapter - checkpoint_repository=FileCheckpointRepository(storage_path="./checkpoints"), # for checkpoint storage - ).run_async() - -if __name__ == "__main__": - asyncio.run(run_agent()) diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/requirements.txt b/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/requirements.txt deleted file mode 100644 index 1e93c1a80..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/workflow_as_agent_reflection_pattern.py b/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/workflow_as_agent_reflection_pattern.py deleted file mode 100644 index a0b0709ea..000000000 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/workflow_as_agent_reflection_pattern.py +++ /dev/null @@ 
-1,139 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from dataclasses import dataclass -import json -from uuid import uuid4 - -from agent_framework import ( - AgentRunResponseUpdate, - AgentRunUpdateEvent, - ChatClientProtocol, - ChatMessage, - Contents, - Executor, - Role, - WorkflowContext, - handler, -) - -@dataclass -class ReviewRequest: - """Structured request passed from Worker to Reviewer for evaluation.""" - - request_id: str - user_messages: list[ChatMessage] - agent_messages: list[ChatMessage] - - -@dataclass -class ReviewResponse: - """Structured response from Reviewer back to Worker.""" - - request_id: str - feedback: str - approved: bool - - @staticmethod - def convert_from_payload(payload: str) -> "ReviewResponse": - """Convert a JSON payload string to a ReviewResponse instance.""" - data = json.loads(payload) - return ReviewResponse( - request_id=data["request_id"], - feedback=data["feedback"], - approved=data["approved"], - ) - - -PendingReviewState = tuple[ReviewRequest, list[ChatMessage]] - - -class Worker(Executor): - """Executor that generates responses and incorporates feedback when necessary.""" - - def __init__(self, id: str, chat_client: ChatClientProtocol) -> None: - super().__init__(id=id) - self._chat_client = chat_client - self._pending_requests: dict[str, PendingReviewState] = {} - - @handler - async def handle_user_messages(self, user_messages: list[ChatMessage], ctx: WorkflowContext[ReviewRequest]) -> None: - print("Worker: Received user messages, generating response...") - - # Initialize chat with system prompt. - messages = [ChatMessage(role=Role.SYSTEM, text="You are a helpful assistant.")] - messages.extend(user_messages) - - print("Worker: Calling LLM to generate response...") - response = await self._chat_client.get_response(messages=messages) - print(f"Worker: Response generated: {response.messages[-1].text}") - - # Add agent messages to context. 
- messages.extend(response.messages) - - # Create review request and send to Reviewer. - request = ReviewRequest(request_id=str(uuid4()), user_messages=user_messages, agent_messages=response.messages) - print(f"Worker: Sending response for review (ID: {request.request_id[:8]})") - await ctx.send_message(request) - - # Track request for possible retry. - self._pending_requests[request.request_id] = (request, messages) - - @handler - async def handle_review_response(self, review: ReviewResponse, ctx: WorkflowContext[ReviewRequest]) -> None: - print(f"Worker: Received review for request {review.request_id[:8]} - Approved: {review.approved}") - - if review.request_id not in self._pending_requests: - raise ValueError(f"Unknown request ID in review: {review.request_id}") - - request, messages = self._pending_requests.pop(review.request_id) - - if review.approved: - print("Worker: Response approved. Emitting to external consumer...") - contents: list[Contents] = [] - for message in request.agent_messages: - contents.extend(message.contents) - - # Emit approved result to external consumer via AgentRunUpdateEvent. - await ctx.add_event( - AgentRunUpdateEvent(self.id, data=AgentRunResponseUpdate(contents=contents, role=Role.ASSISTANT)) - ) - return - - print(f"Worker: Response not approved. Feedback: {review.feedback}") - print("Worker: Regenerating response with feedback...") - - # Incorporate review feedback. - messages.append(ChatMessage(role=Role.SYSTEM, text=review.feedback)) - messages.append( - ChatMessage(role=Role.SYSTEM, text="Please incorporate the feedback and regenerate the response.") - ) - messages.extend(request.user_messages) - - # Retry with updated prompt. - response = await self._chat_client.get_response(messages=messages) - print(f"Worker: New response generated: {response.messages[-1].text}") - - messages.extend(response.messages) - - # Send updated request for re-review. 
- new_request = ReviewRequest( - request_id=review.request_id, user_messages=request.user_messages, agent_messages=response.messages - ) - await ctx.send_message(new_request) - - # Track new request for further evaluation. - self._pending_requests[new_request.request_id] = (new_request, messages) - - async def on_checkpoint_save(self) -> dict: - """ - Persist pending requests during checkpointing. - In memory implementation for demonstration purposes. - """ - return {"pending_requests": self._pending_requests} - - async def on_checkpoint_restore(self, data: dict) -> None: - """ - Load pending requests from checkpoint data. - In memory implementation for demonstration purposes. - """ - self._pending_requests = data.get("pending_requests", {}) diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/.dockerignore b/samples/python/hosted-agents/agent-framework/invocations/01-basic/.dockerignore new file mode 100644 index 000000000..008e6e661 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/.env.example b/samples/python/hosted-agents/agent-framework/invocations/01-basic/.env.example new file mode 100644 index 000000000..fe302a8ad --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/.env.example @@ -0,0 +1,2 @@ +FOUNDRY_PROJECT_ENDPOINT="..." +MODEL_DEPLOYMENT_NAME="..." 
\ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/Dockerfile b/samples/python/hosted-agents/agent-framework/invocations/01-basic/Dockerfile similarity index 100% rename from samples/python/hosted-agents/agent-framework/agent-with-foundry-tools/Dockerfile rename to samples/python/hosted-agents/agent-framework/invocations/01-basic/Dockerfile diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/README.md b/samples/python/hosted-agents/agent-framework/invocations/01-basic/README.md new file mode 100644 index 000000000..78f5e1d26 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/README.md @@ -0,0 +1,44 @@ +# Basic example of hosting an agent with the `invocations` API + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. + +Run the following command to start the server: + +```bash +python main.py +``` + +### Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/invocations -i -H "Content-Type: application/json" -d '{"message": "Hi"}' +``` + +The server will respond with a JSON object containing the response text. The `-i` flag in the `curl` command includes the HTTP response headers in the output, which includes the session ID that can be used for multi-turn conversations. 
Here is an example of the response: + +``` +HTTP/1.1 200 +content-length: 34 +content-type: application/json +x-agent-invocation-id: ec04d020-a0e7-441e-ae83-db75635a9f83 +x-agent-session-id: 9370b9d4-cd13-4436-a57f-03b843ac0e17 +x-platform-server: azure-ai-agentserver-core/2.0.0a20260410006 (python/3.12) +date: Fri, 17 Apr 2026 23:46:44 GMT +server: hypercorn-h11 + +{"response":"Hi! How can I help?"} +``` + +### Multi-turn conversation + +To have a multi-turn conversation with the agent, take the session ID from the response headers of the previous request and include it in URL parameters for the next request. For example: + +```bash +curl -X POST http://localhost:8088/invocations?agent_session_id=9370b9d4-cd13-4436-a57f-03b843ac0e17 -i -H "Content-Type: application/json" -d '{"message": "How are you?"}' +``` diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.manifest.yaml new file mode 100644 index 000000000..9ef34e546 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.manifest.yaml @@ -0,0 +1,23 @@ +name: agent-framework-agent-basic-invocations +description: > + A basic Agent Framework agent hosted by Foundry. 
+metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Invocations Protocol + - Streaming +template: + name: agent-framework-agent-basic-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.yaml b/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.yaml new file mode 100644 index 000000000..152179a8e --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: agent-framework-agent-basic-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: '0.25' + memory: '0.5Gi' diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/main.py b/samples/python/hosted-agents/agent-framework/invocations/01-basic/main.py new file mode 100644 index 000000000..3d63ac211 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/main.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os +from collections.abc import AsyncGenerator + +from agent_framework import Agent, AgentSession +from agent_framework.foundry import FoundryChatClient +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from azure.identity import DefaultAzureCredential +from dotenv import load_dotenv +from starlette.requests import Request +from starlette.responses import JSONResponse, Response, StreamingResponse + +# Load environment variables from .env file +load_dotenv() + + +# In-memory session store — keyed by session ID. +# WARNING: This is lost on restart. Use durable storage in production. +_sessions: dict[str, AgentSession] = {} + +# Create the agent +client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], + credential=DefaultAzureCredential(), +) + +agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, +) + +app = InvocationAgentServerHost() + + +@app.invoke_handler +async def handle_invoke(request: Request): + """Handle streaming multi-turn chat with Azure OpenAI via SSE.""" + data = await request.json() + session_id = request.state.session_id + + stream = data.get("stream", False) + user_message = data.get("message", None) + if user_message is None: + error = "Missing 'message' in request" + if stream: + return StreamingResponse(content=error, status_code=400) + return Response(content=error, status_code=400) + + session = _sessions.setdefault(session_id, AgentSession(session_id=session_id)) + + if stream: + + async def stream_response() -> AsyncGenerator[str]: + async for update in agent.run(user_message, session=session, stream=True): + yield update.text + + return StreamingResponse( + stream_response(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) + + response = await agent.run([user_message], session=session, stream=stream) + return JSONResponse({"response": response.text}) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/requirements.txt b/samples/python/hosted-agents/agent-framework/invocations/01-basic/requirements.txt new file mode 100644 index 000000000..313da4a83 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/invocations/01-basic/requirements.txt @@ -0,0 +1,2 @@ +agent-framework>=1.1.0 +agent-framework-foundry-hosting \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/invocations/01-basic/test-payload.json b/samples/python/hosted-agents/agent-framework/invocations/01-basic/test-payload.json new file mode 100644 index 000000000..abbefb171 --- /dev/null +++ 
b/samples/python/hosted-agents/agent-framework/invocations/01-basic/test-payload.json @@ -0,0 +1 @@ +{ "message": "Hi!" } \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/.dockerignore b/samples/python/hosted-agents/agent-framework/responses/01-basic/.dockerignore new file mode 100644 index 000000000..008e6e661 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/.env.example b/samples/python/hosted-agents/agent-framework/responses/01-basic/.env.example new file mode 100644 index 000000000..fe302a8ad --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/.env.example @@ -0,0 +1,2 @@ +FOUNDRY_PROJECT_ENDPOINT="..." +MODEL_DEPLOYMENT_NAME="..." \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/Dockerfile b/samples/python/hosted-agents/agent-framework/responses/01-basic/Dockerfile similarity index 100% rename from samples/python/hosted-agents/agent-framework/agent-with-text-search-rag/Dockerfile rename to samples/python/hosted-agents/agent-framework/responses/01-basic/Dockerfile diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/README.md b/samples/python/hosted-agents/agent-framework/responses/01-basic/README.md new file mode 100644 index 000000000..98bd2d119 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/README.md @@ -0,0 +1,31 @@ +# Basic example of hosting an agent with the `responses` API + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. 
+ +Run the following command to start the server: + +```bash +python main.py +``` + +### Interacting with the agent + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +### Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.manifest.yaml new file mode 100644 index 000000000..dd65eca1f --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.manifest.yaml @@ -0,0 +1,23 @@ +name: agent-framework-agent-basic-responses +description: > + A basic Agent Framework agent hosted by Foundry. 
+metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-basic-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.yaml b/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.yaml new file mode 100644 index 000000000..6f506d1f7 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: agent-framework-agent-basic-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: '0.25' + memory: '0.5Gi' diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/main.py b/samples/python/hosted-agents/agent-framework/responses/01-basic/main.py new file mode 100644 index 000000000..7d21b83e1 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/main.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.identity import DefaultAzureCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], + credential=DefaultAzureCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. 
Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent) + server.run() + + +if __name__ == "__main__": + main() diff --git a/samples/python/hosted-agents/agent-framework/responses/01-basic/requirements.txt b/samples/python/hosted-agents/agent-framework/responses/01-basic/requirements.txt new file mode 100644 index 000000000..313da4a83 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/01-basic/requirements.txt @@ -0,0 +1,2 @@ +agent-framework>=1.1.0 +agent-framework-foundry-hosting \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/.dockerignore b/samples/python/hosted-agents/agent-framework/responses/02-tools/.dockerignore new file mode 100644 index 000000000..008e6e661 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/.env.example b/samples/python/hosted-agents/agent-framework/responses/02-tools/.env.example new file mode 100644 index 000000000..fe302a8ad --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/.env.example @@ -0,0 +1,2 @@ +FOUNDRY_PROJECT_ENDPOINT="..." +MODEL_DEPLOYMENT_NAME="..." 
\ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/agents-in-workflow/Dockerfile b/samples/python/hosted-agents/agent-framework/responses/02-tools/Dockerfile similarity index 100% rename from samples/python/hosted-agents/agent-framework/agents-in-workflow/Dockerfile rename to samples/python/hosted-agents/agent-framework/responses/02-tools/Dockerfile diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/README.md b/samples/python/hosted-agents/agent-framework/responses/02-tools/README.md new file mode 100644 index 000000000..1906cf27f --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/README.md @@ -0,0 +1,21 @@ +# Basic example of hosting an agent with the `responses` API and local tools + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. + +Run the following command to start the server: + +```bash +python main.py +``` + +### Interacting with the agent + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What is the weather in Seattle?"}' +``` diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.manifest.yaml new file mode 100644 index 000000000..6d90eec00 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.manifest.yaml @@ -0,0 +1,23 @@ +name: agent-framework-agent-with-local-tools-responses +description: > + An Agent Framework agent with local tools hosted by Foundry. 
+metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-local-tools-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.yaml b/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.yaml new file mode 100644 index 000000000..70c3fea75 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-local-tools-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/main.py b/samples/python/hosted-agents/agent-framework/responses/02-tools/main.py new file mode 100644 index 000000000..b4df5ab8b --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/main.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os +from random import randint + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.identity import DefaultAzureCredential +from dotenv import load_dotenv +from pydantic import Field +from typing_extensions import Annotated + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], + credential=DefaultAzureCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[get_weather], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent) + server.run() + + +if __name__ == "__main__": + main() diff --git a/samples/python/hosted-agents/agent-framework/responses/02-tools/requirements.txt b/samples/python/hosted-agents/agent-framework/responses/02-tools/requirements.txt new file mode 100644 index 000000000..313da4a83 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/02-tools/requirements.txt @@ -0,0 +1,2 @@ +agent-framework>=1.1.0 +agent-framework-foundry-hosting \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/.dockerignore b/samples/python/hosted-agents/agent-framework/responses/03-mcp/.dockerignore new file mode 100644 index 000000000..008e6e661 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/03-mcp/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/.env.example b/samples/python/hosted-agents/agent-framework/responses/03-mcp/.env.example new file mode 100644 index 000000000..187ec07aa --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/03-mcp/.env.example @@ -0,0 +1,3 @@ +FOUNDRY_PROJECT_ENDPOINT="..." +MODEL_DEPLOYMENT_NAME="..." +GITHUB_PAT="..." 
diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/Dockerfile b/samples/python/hosted-agents/agent-framework/responses/03-mcp/Dockerfile similarity index 100% rename from samples/python/hosted-agents/agent-framework/human-in-the-loop/workflow-agent-with-checkpoint-and-hitl/Dockerfile rename to samples/python/hosted-agents/agent-framework/responses/03-mcp/Dockerfile diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/README.md b/samples/python/hosted-agents/agent-framework/responses/03-mcp/README.md new file mode 100644 index 000000000..790423a55 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/03-mcp/README.md @@ -0,0 +1,23 @@ +# Basic example of hosting an agent with the `responses` API and a remote MCP + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. + +Follow the instructions here to get a GitHub Personal Access Token (PAT): [Creating a personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) + +Run the following command to start the server: + +```bash +python main.py +``` + +### Interacting with the agent + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "List all the repositories I own on GitHub."}' +``` diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.manifest.yaml new file mode 100644 index 000000000..b26605c1d --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.manifest.yaml @@ -0,0 +1,25 @@ +name: agent-framework-agent-with-remote-mcp-tools-responses +description: > + An Agent Framework agent with remote MCP tools hosted by Foundry. +metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-remote-mcp-tools-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" + - name: GITHUB_PAT + value: ${GITHUB_PAT} +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.yaml b/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.yaml new file mode 100644 index 000000000..a1edfa8c7 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/03-mcp/agent.yaml @@ -0,0 +1,11 @@ +kind: hosted +name: agent-framework-agent-with-remote-mcp-tools-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: GITHUB_PAT + value: ${GITHUB_PAT} \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/03-mcp/main.py b/samples/python/hosted-agents/agent-framework/responses/03-mcp/main.py new file mode 100644 index 000000000..d649de631 --- /dev/null +++ 
# Copyright (c) Microsoft. All rights reserved.

"""Hosted Agent Framework agent exposing the remote GitHub MCP tools over the responses protocol."""

import logging
import os

from agent_framework import Agent
from agent_framework.foundry import FoundryChatClient
from agent_framework_foundry_hosting import ResponsesHostServer
from azure.identity import DefaultAzureCredential
from dotenv import load_dotenv

# Load environment variables from .env file (local development only).
load_dotenv()

logger = logging.getLogger(__name__)


def main():
    """Build the agent (optionally wired to the GitHub MCP server) and serve it.

    Reads FOUNDRY_PROJECT_ENDPOINT and MODEL_DEPLOYMENT_NAME (required) and
    GITHUB_PAT (optional) from the environment, then blocks serving the
    responses protocol via ResponsesHostServer.
    """
    client = FoundryChatClient(
        project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"],
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        credential=DefaultAzureCredential(),
    )

    # Use .get() so a missing GITHUB_PAT degrades gracefully: with
    # os.environ["GITHUB_PAT"] an unset variable raised KeyError before the
    # warning branch below could ever run, defeating its purpose.
    github_pat = os.environ.get("GITHUB_PAT")
    tools = []
    if not github_pat:
        logger.warning(
            "GITHUB_PAT environment variable is not set. The GitHub MCP tool will not get registered.")
    else:
        tools.append(client.get_mcp_tool(
            name="GitHub",
            url="https://api.githubcopilot.com/mcp/",
            headers={
                "Authorization": f"Bearer {github_pat}",
            },
            approval_mode="never_require",
        ))

    agent = Agent(
        client=client,
        instructions="You are a friendly assistant. Keep your answers brief.",
        tools=tools,
        # History will be managed by the hosting infrastructure, thus there
        # is no need to store history by the service. Learn more at:
        # https://developers.openai.com/api/reference/resources/responses/methods/create
        default_options={"store": False},
    )

    server = ResponsesHostServer(agent)
    server.run()


if __name__ == "__main__":
    main()
\ No newline at end of file diff --git a/samples/python/hosted-agents/custom/system-utility-agent/Dockerfile b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/Dockerfile similarity index 100% rename from samples/python/hosted-agents/custom/system-utility-agent/Dockerfile rename to samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/Dockerfile diff --git a/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/README.md b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/README.md new file mode 100644 index 000000000..3134b382c --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/README.md @@ -0,0 +1,51 @@ +# Basic example of hosting an agent with the `responses` API and Foundry Toolbox + +## Creating a Foundry Toolbox + +You can create a Foundry Toolbox by code. Refer to this sample for an example: [Foundry Toolbox CRUD Sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/hosted_agents/sample_toolboxes_crud.py). + +You can also create a Foundry Toolbox in the Foundry portal. Read more [here](https://learn.microsoft.com/en-us/azure/foundry/agents/how-to/tools/toolbox). + +## Deploying to Foundry + +Create a new directory and initialize a Foundry Agent project in it: + +```bash +mkdir my-foundry-agent +cd my-foundry-agent +azd ai agent init -m https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.manifest.yaml +``` + +Follow the prompts to complete the initialization. Then create the necessary resources by running: + +```bash +azd provision +``` + +The above will create the toolbox with the specified tools in `agent.manifest.yaml`. 
+ +Then deploy the agent by running: + +```bash +azd deploy +``` + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. + +Run the following command to start the server: + +```bash +python main.py +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What tools do you have?"}' +``` diff --git a/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.manifest.yaml new file mode 100644 index 000000000..4d6c067c6 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.manifest.yaml @@ -0,0 +1,32 @@ +name: agent-framework-agent-with-foundry-toolbox-responses +description: > + An Agent Framework agent with Foundry Toolbox integration. 
+metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-foundry-toolbox-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" + - name: TOOLBOX_NAME + value: "agent-tools" +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME + - kind: toolbox + name: agent-tools + tools: + - type: web_search + name: web_search + - type: code_interpreter + name: code_interpreter diff --git a/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.yaml b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.yaml new file mode 100644 index 000000000..f4a3bd9a8 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-foundry-toolbox-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/main.py b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/main.py new file mode 100644 index 000000000..4981e76e3 --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/04-foundry-toolbox/main.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft. All rights reserved. 
# Copyright (c) Microsoft. All rights reserved.

"""Hosted Agent Framework agent that exposes a Foundry Toolbox as an MCP tool."""

import os

import httpx
from agent_framework import Agent, MCPStreamableHTTPTool
from agent_framework.foundry import FoundryChatClient
from agent_framework_foundry_hosting import ResponsesHostServer
from azure.identity import DefaultAzureCredential
from dotenv import load_dotenv

# Load environment variables from .env file (local development only).
load_dotenv()


class ToolboxAuth(httpx.Auth):
    """httpx Auth that injects a fresh bearer token on every request."""

    def __init__(self):
        # Create the credential once and reuse it: DefaultAzureCredential
        # construction is expensive and the instance caches tokens internally,
        # so building a new credential per request (as the previous version
        # did inside auth_flow) wasted a full credential-chain probe each time.
        self._credential = DefaultAzureCredential()

    def auth_flow(self, request: httpx.Request):
        # get_token returns a cached token until it nears expiry, so this
        # still yields a valid bearer token on every request.
        token = self._credential.get_token("https://ai.azure.com/.default").token
        request.headers["Authorization"] = f"Bearer {token}"
        yield request


def main():
    """Build the toolbox-backed agent and serve it over the responses protocol.

    Reads FOUNDRY_PROJECT_ENDPOINT, MODEL_DEPLOYMENT_NAME and TOOLBOX_NAME
    (all required) from the environment.
    """
    # Read the endpoint once; it is needed both for the chat client and for
    # composing the toolbox MCP URL below.
    project_endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"]

    client = FoundryChatClient(
        project_endpoint=project_endpoint,
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        credential=DefaultAzureCredential(),
    )

    # Foundry Toolbox exposed as an MCP tool.
    toolbox_name = os.environ["TOOLBOX_NAME"]
    toolbox_endpoint = f"{project_endpoint.rstrip('/')}/toolboxes/{toolbox_name}/mcp?api-version=v1"
    http_client = httpx.AsyncClient(auth=ToolboxAuth(), headers={"Foundry-Features": "Toolboxes=V1Preview"})
    foundry_mcp_tool = MCPStreamableHTTPTool(
        name="toolbox",
        url=toolbox_endpoint,
        http_client=http_client,
        load_prompts=False,
    )

    agent = Agent(
        client=client,
        instructions="You are a friendly assistant. Keep your answers brief.",
        tools=[foundry_mcp_tool],
        # History will be managed by the hosting infrastructure, thus there
        # is no need to store history by the service. Learn more at:
        # https://developers.openai.com/api/reference/resources/responses/methods/create
        default_options={"store": False},
    )

    server = ResponsesHostServer(agent)
    server.run()


if __name__ == "__main__":
    main()
\ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/Dockerfile b/samples/python/hosted-agents/agent-framework/responses/05-workflows/Dockerfile similarity index 90% rename from samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/Dockerfile rename to samples/python/hosted-agents/agent-framework/responses/05-workflows/Dockerfile index 0cc939d9b..eaffb94f1 100644 --- a/samples/python/hosted-agents/agent-framework/human-in-the-loop/agent-with-thread-and-hitl/Dockerfile +++ b/samples/python/hosted-agents/agent-framework/responses/05-workflows/Dockerfile @@ -13,4 +13,4 @@ RUN if [ -f requirements.txt ]; then \ EXPOSE 8088 -CMD ["python", "main.py"] +CMD ["python", "main.py"] \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/05-workflows/README.md b/samples/python/hosted-agents/agent-framework/responses/05-workflows/README.md new file mode 100644 index 000000000..589c3426f --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/05-workflows/README.md @@ -0,0 +1,27 @@ +# Basic example of hosting an agent with the `responses` API and a workflow + +## Running the server locally + +### Environment setup + +Follow the instructions in the [Environment setup](../../README.md#environment-setup) section of the README in the parent directory to set up your environment and install dependencies. + +Run the following command to start the server: + +```bash +python main.py +``` + +### Interacting with the agent + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Create a slogan for a new electric SUV that is affordable and fun to drive."}' +``` + +Invoke with `azd`: + +```bash +azd ai agent invoke --local "Create a slogan for a new electric SUV that is affordable and fun to drive." +``` diff --git a/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.manifest.yaml b/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.manifest.yaml new file mode 100644 index 000000000..4f307ebdd --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.manifest.yaml @@ -0,0 +1,23 @@ +name: agent-framework-workflows-responses +description: > + An Agent Framework workflow hosted by Foundry. +metadata: + tags: + - Agent Framework + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-workflows-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + - name: MODEL_DEPLOYMENT_NAME + value: "{{MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: MODEL_DEPLOYMENT_NAME \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.yaml b/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.yaml new file mode 100644 index 000000000..a58893ddf --- /dev/null +++ b/samples/python/hosted-agents/agent-framework/responses/05-workflows/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-workflows-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/responses/05-workflows/main.py b/samples/python/hosted-agents/agent-framework/responses/05-workflows/main.py new file mode 100644 index 000000000..7ddc93f0f 
# Copyright (c) Microsoft. All rights reserved.

"""Hosted Agent Framework workflow (writer -> legal review -> formatter) served over the responses protocol."""

import os

from agent_framework import Agent, AgentExecutor, WorkflowBuilder
from agent_framework.foundry import FoundryChatClient
from agent_framework_foundry_hosting import ResponsesHostServer
from azure.identity import DefaultAzureCredential
from dotenv import load_dotenv

# Load environment variables from .env file (local development only).
load_dotenv()


def main():
    """Compose three agents into a sequential workflow and serve it as one agent."""
    chat_client = FoundryChatClient(
        project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"],
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        credential=DefaultAzureCredential(),
    )

    # The three pipeline stages: draft a slogan, review it, format it.
    writer = Agent(
        client=chat_client,
        instructions="You are an excellent slogan writer. You create new slogans based on the given topic.",
        name="writer",
    )
    reviewer = Agent(
        client=chat_client,
        instructions=(
            "You are an excellent legal reviewer. "
            "Make necessary corrections to the slogan so that it is legally compliant."
        ),
        name="legal_reviewer",
    )
    formatter = Agent(
        client=chat_client,
        instructions=(
            "You are an excellent content formatter. "
            "You take the slogan and format it in a cool retro style when printing to a terminal."
        ),
        name="formatter",
    )

    # Set the context mode to `last_agent` so that each agent only sees the
    # output of the previous agent instead of the full conversation history.
    first, second, third = (
        AgentExecutor(stage, context_mode="last_agent")
        for stage in (writer, reviewer, formatter)
    )

    workflow_agent = (
        WorkflowBuilder(
            start_executor=first,
            # Limiting the output to only the final formatted result.
            # If this is not set, all intermediate results will be included in the output.
            output_executors=[third],
        )
        .add_edge(first, second)
        .add_edge(second, third)
        .build()
        .as_agent()
    )

    ResponsesHostServer(workflow_agent).run()


if __name__ == "__main__":
    main()
AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a web search agent using Bing Grounding, hosted using -[Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and -deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### Web Search Agent - -The agent uses Bing Grounding to search the web for current information and provide accurate, well-sourced answers. This demonstrates: - -- How to integrate Bing Grounding as a tool in an AI agent -- How to use the `HostedWebSearchTool` from the Agent Framework - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. 
This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. -The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. An Azure AI Foundry project configured -2. A deployment of a chat model (e.g., `gpt-4.1-mini`) -3. A Bing Grounding connection in your project -4. Azure CLI installed and authenticated (`az login`) -5. Python 3.10+ installed - -### Environment Variables - -Create a `.env` file with the following environment variables: - -> **Note:** The `.env` file is for local development only. When deploying to Azure AI Foundry, remove the `.env` file and configure environment variables in `agent.yaml` instead. - -```bash -AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ -AZURE_AI_MODEL_DEPLOYMENT_NAME= # e.g., gpt-4.1-mini -BING_GROUNDING_CONNECTION_ID=/subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/ -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```bash -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```bash -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent - -```bash -curl -X POST http://localhost:8088/responses \ - -H "Content-Type: application/json" \ - -d '{"input": "What is the latest news in AI?"}' | jq . 
-``` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. \ No newline at end of file diff --git a/samples/python/hosted-agents/agent-framework/web-search-agent/agent.yaml b/samples/python/hosted-agents/agent-framework/web-search-agent/agent.yaml deleted file mode 100644 index 5322808fb..000000000 --- a/samples/python/hosted-agents/agent-framework/web-search-agent/agent.yaml +++ /dev/null @@ -1,34 +0,0 @@ -name: WebSearchAgent -description: This Agent can perform web searches and retrieve the latest information from Bing. -metadata: - example: - - role: user - content: |- - What's the latest news in AI? 
- tags: - - AI Agent Grounding - - AI Agent Hosting - - Azure AI AgentServer - - Bing Grounding - - Microsoft Agent Framework - authors: - - jeomhove -template: - name: WebSearchAgent - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} - - name: BING_GROUNDING_CONNECTION_ID - value: ${BING_GROUNDING_CONNECTION_ID} - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o-mini - name: chat - - kind: tool - id: bing_grounding diff --git a/samples/python/hosted-agents/agent-framework/web-search-agent/main.py b/samples/python/hosted-agents/agent-framework/web-search-agent/main.py deleted file mode 100644 index eb5a8fcc4..000000000 --- a/samples/python/hosted-agents/agent-framework/web-search-agent/main.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -from dotenv import load_dotenv -from agent_framework import ChatAgent, HostedWebSearchTool -from agent_framework_azure_ai import AzureAIAgentClient -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import DefaultAzureCredential - -# Load environment variables from .env file for local development -load_dotenv() - -def create_agent() -> ChatAgent: - """Create and return a ChatAgent with Bing Grounding search tool.""" - assert "AZURE_AI_PROJECT_ENDPOINT" in os.environ, ( - "AZURE_AI_PROJECT_ENDPOINT environment variable must be set." - ) - assert "AZURE_AI_MODEL_DEPLOYMENT_NAME" in os.environ, ( - "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable must be set." - ) - assert "BING_GROUNDING_CONNECTION_ID" in os.environ, ( - "BING_GROUNDING_CONNECTION_ID environment variable must be set to use HostedWebSearchTool." 
- ) - - chat_client = AzureAIAgentClient( - project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), - ) - - bing_search_tool = HostedWebSearchTool( - name="Bing Grounding Search", - description="Search the web for current information using Bing", - connection_id=os.environ["BING_GROUNDING_CONNECTION_ID"], - ) - - agent = ChatAgent( - chat_client=chat_client, - name="BingSearchAgent", - instructions=( - "You are a helpful assistant that can search the web for current information. " - "Use the Bing search tool to find up-to-date information and provide accurate, " - "well-sourced answers. Always cite your sources when possible." - ), - tools=bing_search_tool, - ) - return agent - -if __name__ == "__main__": - from_agent_framework(create_agent()).run() diff --git a/samples/python/hosted-agents/agent-framework/web-search-agent/requirements.txt b/samples/python/hosted-agents/agent-framework/web-search-agent/requirements.txt deleted file mode 100644 index 9dac56e37..000000000 --- a/samples/python/hosted-agents/agent-framework/web-search-agent/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -azure-ai-agentserver-agentframework==1.0.0b12 -pytest==8.4.2 -python-dotenv==1.1.1 -azure-monitor-opentelemetry==1.8.1 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.env.example 
b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/README.md b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/README.md new file mode 100644 index 000000000..7e2003d57 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/README.md @@ -0,0 +1,143 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. 
+ +# AG-UI Protocol — Invocations (Pydantic AI) + +A minimal getting-started agent implementing the [AG-UI protocol](https://docs.ag-ui.com/introduction) over the Foundry invocations protocol, using [Pydantic AI](https://ai.pydantic.dev/) with Azure OpenAI. **Zero manual event translation** — Pydantic AI's built-in `handle_ag_ui_request` handles the full AG-UI lifecycle automatically. + +## How It Works + +1. Receives an AG-UI `RunAgentInput` payload via `POST /invocations` +2. Pydantic AI's `handle_ag_ui_request` runs the agent and streams AG-UI events (`RUN_STARTED`, `TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`) as SSE — no manual translation needed +3. The agent uses Azure OpenAI (Foundry models) via `AzureProvider` + +## Environment Variables + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint (auto-injected in hosted containers) | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name declared in `agent.manifest.yaml` | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | No | Application Insights connection string (auto-injected in hosted containers) | + +> **Note:** Authentication uses `DefaultAzureCredential` (managed identity, Azure CLI, etc.) — no API key needed. + +## Running Locally + +### Prerequisites + +- Python 3.10+ +- A Foundry project with a deployed model + +### Install & Run + +```bash +pip install -r requirements.txt +cp .env.example .env # then fill in values +python main.py +``` + +The agent starts on `http://localhost:8088/`. 
+ +### Test + +```bash +curl -N -X POST http://localhost:8088/invocations \ + -H "Content-Type: application/json" \ + -H "Accept: text/event-stream" \ + -d '{ + "threadId": "thread-123", + "runId": "run-456", + "state": {}, + "messages": [{"id": "msg-1", "role": "user", "content": "Hello!"}], + "tools": [], + "context": [], + "forwardedProps": {} + }' +``` + +### SSE Event Format + +Standard AG-UI events are streamed automatically: + +``` +data: {"type":"RUN_STARTED","threadId":"thread-123","runId":"run-456"} +data: {"type":"TEXT_MESSAGE_START","messageId":"...","role":"assistant"} +data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"...","delta":"Hello"} +data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"...","delta":"! How"} +data: {"type":"TEXT_MESSAGE_END","messageId":"..."} +data: {"type":"RUN_FINISHED","threadId":"thread-123","runId":"run-456"} +``` + +## Invoke with azd + +### Local + +**Bash:** +```bash +azd ai agent invoke --local '{"messages": [{"role": "user", "content": "Hello"}]}' +``` + +**PowerShell:** +```powershell +azd ai agent invoke --local '{\"messages\": [{\"role\": \"user\", \"content\": \"Hello\"}]}' +``` + +### Remote (after `azd up`) + +**Bash:** +```bash +azd ai agent invoke '{"messages": [{"role": "user", "content": "Hello"}]}' +``` + +**PowerShell:** +```powershell +azd ai agent invoke '{\"messages\": [{\"role\": \"user\", \"content\": \"Hello\"}]}' +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Learn More + +- [AG-UI Protocol](https://docs.ag-ui.com/introduction) — event types, lifecycle, tools +- [Pydantic AI AG-UI docs](https://ai.pydantic.dev/ag-ui/) — `to_ag_ui()`, `handle_ag_ui_request` +- [AG-UI Dojo](https://dojo.ag-ui.com/) — interactive playground for testing AG-UI agents + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error 
like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', 'message': 'The principal lacks the required data action Microsoft.CognitiveServices/accounts/OpenAI/deployments/chat/completions/action to perform POST /openai/deployments/{deployment-id}/chat/completions operation.'}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.manifest.yaml new file mode 100644 index 000000000..a8d2453e4 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.manifest.yaml @@ -0,0 +1,30 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: ag-ui-invocations +displayName: "AG-UI (Invocations)" +description: > + AG-UI protocol over Foundry invocations using Pydantic AI with Azure OpenAI. 
+ Streams standard AG-UI events (RUN_STARTED, TEXT_MESSAGE_CONTENT, + RUN_FINISHED, etc.) with zero manual event translation. +metadata: + tags: + - AI Agent Hosting + - AG-UI Protocol + - Pydantic AI + - Invocations Protocol + - Bring Your Own +template: + name: ag-ui-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.yaml new file mode 100644 index 000000000..615be91d1 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/agent.yaml @@ -0,0 +1,9 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: ag-ui-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/main.py b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/main.py new file mode 100644 index 000000000..5c3230ee2 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/main.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Getting-started: AG-UI protocol over Foundry invocations using Pydantic AI.""" + +import logging +import os +from urllib.parse import urlparse as _urlparse + +from starlette.requests import Request +from starlette.responses import Response + +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from openai import AsyncAzureOpenAI + +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from pydantic_ai import Agent +from pydantic_ai.models.openai import OpenAIResponsesModel +from pydantic_ai.providers.openai import OpenAIProvider +from pydantic_ai.ag_ui import handle_ag_ui_request + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# FOUNDRY_PROJECT_ENDPOINT is auto-injected in hosted Foundry containers. +# Locally, set it manually or use 'azd ai agent run' which sets it automatically. +_endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not _endpoint: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run' " + "which sets it automatically." + ) + +_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not _deployment: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." 
+ ) + +# Azure OpenAI via managed identity (DefaultAzureCredential) +_credential = DefaultAzureCredential() +_token_provider = get_bearer_token_provider( + _credential, "https://ai.azure.com/.default") + +# Derive the Azure OpenAI endpoint from the Foundry project endpoint by +# stripping the path (e.g. /api/projects/proj) to get the base URL. +_parsed = _urlparse(_endpoint) +_azure_openai_endpoint = f"{_parsed.scheme}://{_parsed.netloc}" + +_client = AsyncAzureOpenAI( + azure_endpoint=_azure_openai_endpoint, + azure_deployment=_deployment, + azure_ad_token_provider=_token_provider, + api_version="2025-04-01-preview", +) + +model = OpenAIResponsesModel( + _deployment, provider=OpenAIProvider(openai_client=_client)) + +agent = Agent(model, instructions="You are a helpful assistant.") + +app = InvocationAgentServerHost() + + +@app.invoke_handler +async def handle_invoke(request: Request) -> Response: + return await handle_ag_ui_request(agent, request) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/requirements.txt b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/requirements.txt new file mode 100644 index 000000000..90888c144 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/requirements.txt @@ -0,0 +1,5 @@ +pydantic-ai-slim[openai]==1.75.0 +openai==2.32.0 +azure-identity==1.25.3 +ag-ui-protocol==0.1.16 +azure-ai-agentserver-invocations==1.0.0b2 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/test-payload.json new file mode 100644 index 000000000..73fd0b1f9 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/ag-ui/test-payload.json @@ -0,0 +1,9 @@ +{ + "threadId": "thread-ci", + "runId": "run-ci", + "state": {}, + "messages": [{"id": "msg-1", "role": "user", "content": "Hello!"}], + "tools": [], + "context": [], + 
"forwardedProps": {} +} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.env.example new file mode 100644 index 000000000..5b471a522 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/.env.example @@ -0,0 +1,12 @@ +# GitHub fine-grained PAT with "Copilot Requests → Read-only" permission. +# Create one at: https://github.com/settings/personal-access-tokens/new +# Classic tokens (ghp_) are not supported — use github_pat_, gho_, or ghu_ tokens. +GITHUB_TOKEN= + +# Session ID for persistence/resume across restarts (optional). +# If unset, a random UUID is generated per process. +# FOUNDRY_AGENT_SESSION_ID= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... 
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/Dockerfile new file mode 100644 index 000000000..c273b7171 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN pip install --no-input --upgrade pip && \ + if [ -f requirements.txt ]; then \ + pip install --no-input -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] + diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/README.md b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/README.md new file mode 100644 index 000000000..3bb9dc711 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/README.md @@ -0,0 +1,124 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# GitHub Copilot SDK — Invocations Protocol (Streaming) + +A minimal getting-started agent using the [GitHub Copilot SDK](https://pypi.org/project/github-copilot-sdk/) (`CopilotClient`) with the [azure-ai-agentserver-invocations](https://pypi.org/project/azure-ai-agentserver-invocations/) protocol. Streams raw Copilot SDK session events as SSE with multi-turn support. + +## How It Works + +1. Receives `{"input": "..."}` via `POST /invocations` +2. On first request, tries to resume a persisted Copilot session (by `FOUNDRY_AGENT_SESSION_ID`); if none exists, creates a new one +3. 
Each `SessionEvent` from the Copilot SDK is streamed back as an SSE `data:` event using `event.to_dict()` +4. A final `event: done` signal marks the end of the response +5. The session is cached in memory and reused across requests for multi-turn conversation +6. Skills in the `skills/` directory are auto-loaded — e.g. the included `joke` skill makes Copilot respond in pirate tone + +## Environment Variables + +| Variable | Required | Description | +|----------|----------|-------------| +| `GITHUB_TOKEN` | Yes | GitHub fine-grained PAT with **Copilot Requests → Read-only** permission | +| `FOUNDRY_AGENT_SESSION_ID` | No | Session ID for persistence/resume. If unset, a UUID is generated | + +## Running Locally + +### Prerequisites + +- Python 3.10+ +- A GitHub fine-grained PAT (`github_pat_` prefix) + +Create one at [github.com/settings/personal-access-tokens/new](https://github.com/settings/personal-access-tokens/new) with **Account permissions → Copilot Requests → Read-only**. + +> **Note:** Classic tokens (`ghp_`) are not supported. Use a fine-grained PAT, OAuth token (`gho_`), or GitHub App user token (`ghu_`). + +### Install & Run + +```bash +pip install -r requirements.txt +cp .env.example .env # then set GITHUB_TOKEN +python main.py +``` + +The agent starts on `http://localhost:8088/`. + +### Test + +```bash +# First message +curl -N -X POST http://localhost:8088/invocations \ + -H "Content-Type: application/json" \ + -d '{"input": "What is Python?"}' + +# Follow-up (multi-turn — same session remembers context) +curl -N -X POST http://localhost:8088/invocations \ + -H "Content-Type: application/json" \ + -d '{"input": "Give me a code example"}' +``` + +### SSE Event Format + +Each Copilot SDK event is streamed via `event.to_dict()`: + +``` +data: {"type": "assistant.message_delta", "data": {"delta_content": "Python is"}}\n\n +data: {"type": "assistant.message_delta", "data": {"delta_content": " a programming"}}\n\n +... 
+event: done +data: {"invocation_id": "...", "session_id": "..."} +``` + +## Invoke with azd + +### Local + +**Bash:** +```bash +azd ai agent invoke --local '{"input": "What can you help me with?"}' +``` + +**PowerShell:** +```powershell +azd ai agent invoke --local '{\"input\": \"What can you help me with?\"}' +``` + +### Remote (after `azd up`) + +**Bash:** +```bash +azd ai agent invoke '{"input": "What can you help me with?"}' +``` + +**PowerShell:** +```powershell +azd ai agent invoke '{\"input\": \"What can you help me with?\"}' +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Adding Skills + +Any subdirectory under `skills/` containing a `SKILL.md` file is automatically loaded by the Copilot SDK. The included `joke` skill demonstrates this: + +``` +skills/ +└── joke/ + └── SKILL.md ← tells Copilot to respond like a pirate +``` + +To add your own skill, create a new folder under `skills/` with a `SKILL.md`: + +```bash +mkdir skills/my-skill +cat > skills/my-skill/SKILL.md << 'EOF' +--- +name: my-skill +description: What this skill does. +--- + +# My Skill + +Instructions for Copilot when this skill is active. 
+EOF +``` \ No newline at end of file diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.manifest.yaml new file mode 100644 index 000000000..93e438429 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.manifest.yaml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: github-copilot-invocations +displayName: "GitHub Copilot (Invocations)" +description: > + A getting-started agent that uses the GitHub Copilot SDK (CopilotClient) with + the azure-ai-agentserver-invocations protocol, streaming raw session events + as Server-Sent Events (SSE). +metadata: + tags: + - AI Agent Hosting + - GitHub Copilot SDK + - Invocations Protocol + - Bring Your Own +template: + name: github-copilot-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # APPLICATIONINSIGHTS_CONNECTION_STRING is auto-injected by the platform — + # do NOT declare it here. + # + # GITHUB_TOKEN is a user-supplied secret (fine-grained PAT). Supply via + # .env (local) or the hosted agent secret store (production). 
+ - name: GITHUB_TOKEN + value: ${GITHUB_TOKEN} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.yaml new file mode 100644 index 000000000..24c10c089 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: github-copilot-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: GITHUB_TOKEN + value: ${GITHUB_TOKEN} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/main.py b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/main.py new file mode 100644 index 000000000..55348efd7 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/main.py @@ -0,0 +1,146 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Getting-started: GitHub Copilot SDK with the Foundry invocations protocol.""" + +import asyncio +import json +import logging +import os +import pathlib +import uuid + +from starlette.requests import Request +from starlette.responses import JSONResponse, Response, StreamingResponse + + +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from copilot import CopilotClient, SubprocessConfig +from copilot.session import PermissionHandler + +from copilot.generated.session_events import SessionEventType + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. 
" + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +if not os.environ.get("GITHUB_TOKEN"): + raise EnvironmentError( + "GITHUB_TOKEN environment variable is not set. " + "Supply a GitHub fine-grained PAT with 'Copilot Requests → Read-only' permission. " + "Create one at https://github.com/settings/personal-access-tokens/new" + ) + +app = InvocationAgentServerHost() + +_client: CopilotClient | None = None +_session = None +_session_id: str | None = None +_skills_dir = str(pathlib.Path(__file__).parent / "skills") + + +async def _ensure_session(): + """Resume a persisted session or create a new one (lazy, runs once).""" + global _client, _session, _session_id + if _session is not None: + return + + _session_id = os.environ.get("FOUNDRY_AGENT_SESSION_ID") + if not _session_id: + _session_id = str(uuid.uuid4()) + logger.warning( + "FOUNDRY_AGENT_SESSION_ID not set, using: %s", _session_id) + + _client = CopilotClient( + SubprocessConfig(github_token=os.environ["GITHUB_TOKEN"]), + auto_start=False, + ) + await _client.start() + + working_dir = os.environ.get("HOME", "/home") + + try: + _session = await _client.resume_session( + _session_id, + on_permission_request=PermissionHandler.approve_all, + streaming=True, + skill_directories=[_skills_dir], + working_directory=working_dir, + ) + logger.info("Resumed session: %s", _session_id) + except Exception: + _session = await _client.create_session( + session_id=_session_id, + on_permission_request=PermissionHandler.approve_all, + streaming=True, + skill_directories=[_skills_dir], + working_directory=working_dir, + ) + logger.info("Created session: %s", _session_id) + + +async def _stream_response(invocation_id: str, input_text: str): + """Forward Copilot SDK session events as SSE.""" + await _ensure_session() + queue: asyncio.Queue = asyncio.Queue() + + def on_event(event): + if event.type == SessionEventType.SESSION_IDLE: + queue.put_nowait(None) + elif 
event.type == SessionEventType.SESSION_ERROR: + queue.put_nowait(RuntimeError( + getattr(event.data, "message", "error"))) + else: + queue.put_nowait(event) + + unsubscribe = _session.on(on_event) + try: + await _session.send(input_text) + while True: + item = await queue.get() + if item is None: + break + if isinstance(item, Exception): + yield f"data: {json.dumps({'type': 'error', 'message': str(item)})}\n\n".encode() + break + yield f"data: {json.dumps(item.to_dict())}\n\n".encode() + + yield f"event: done\ndata: {json.dumps({'invocation_id': invocation_id, 'session_id': _session_id})}\n\n".encode() + finally: + unsubscribe() + + +@app.invoke_handler +async def handle_invoke(request: Request) -> Response: + try: + data = await request.json() + if not isinstance(data, dict): + raise ValueError("body is not a JSON object") + input_text = data.get("input") + if not isinstance(input_text, str) or not input_text.strip(): + raise ValueError('missing or empty "input" field') + except (json.JSONDecodeError, ValueError): + return JSONResponse( + status_code=400, + content={ + "error": "invalid_request", + "message": ( + 'Request body must be a JSON object with a non-empty "input" string, ' + 'e.g. 
{"input": "What can you help me with?"}' + ), + }, + ) + return StreamingResponse( + _stream_response(request.state.invocation_id, input_text), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/requirements.txt b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/requirements.txt new file mode 100644 index 000000000..e4cfc88e2 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/requirements.txt @@ -0,0 +1,3 @@ +github-copilot-sdk>=0.2.0 + +azure-ai-agentserver-invocations==1.0.0b2 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/skills/joke/SKILL.md b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/skills/joke/SKILL.md new file mode 100644 index 000000000..1d1a6b80c --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/skills/joke/SKILL.md @@ -0,0 +1,14 @@ +--- +name: joke +description: Tell a joke in the tone of a pirate. +--- + +# Joke Skill + +You are a pirate comedian. When asked to tell a joke, respond in the voice of a swashbuckling pirate. + +## Instructions + +- Always speak like a pirate — use "Arrr!", "matey", "ye", "plunder", "seas", etc. +- Keep jokes short and punchy. +- If the user asks for a specific topic, make the joke about that topic but still in pirate tone. 
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/test-payload.json new file mode 100644 index 000000000..bdc3f936e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/github-copilot/test-payload.json @@ -0,0 +1 @@ +{"input": "What can you help me with?"} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... 
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/README.md b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/README.md new file mode 100644 index 000000000..c7e8bb86e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/README.md @@ -0,0 +1,195 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency note for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. 
+ +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# What this sample demonstrates + +A minimal "hello world" hosted agent using the **Bring Your Own** approach with the **Invocations protocol** in Python. It shows how to use the [`azure-ai-agentserver-invocations`](https://pypi.org/project/azure-ai-agentserver-invocations/) SDK to host a custom agent that calls a Foundry model via the Responses API and returns the reply as a streaming SSE event stream. + +This is the simplest possible BYO integration — the protocol SDK handles the HTTP endpoints, session resolution, client header forwarding, and OpenTelemetry tracing. You supply the model call using the [Foundry SDK (`azure-ai-projects`)](https://pypi.org/project/azure-ai-projects/). + +> **Invocations vs Responses:** Unlike the Responses protocol, the Invocations protocol does **not** provide built-in server-side conversation history. This agent maintains an in-memory session store keyed by `agent_session_id`. In production, replace it with durable storage (Redis, Cosmos DB, etc.) so history survives restarts. + +## How It Works + +### Model Integration + +The agent uses the Foundry SDK to create a Responses client from the project endpoint and model deployment name. When a request arrives, the handler looks up the session history by `session_id`, appends the new user message, calls the model via the Responses API with streaming, and returns a `StreamingResponse` of SSE events — `token` events during generation, then a final `done` event. + +See [main.py](main.py) for the full implementation. 
+ +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Invocations SDK](https://pypi.org/project/azure-ai-agentserver-invocations/), which provisions a REST API endpoint compatible with the Azure AI Invocations protocol. + +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **Python 3.10 or later** + - Verify your version: `python --version` + +> [!NOTE] +> You do **not** need an existing [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry) project or model deployment to get started — `azd provision` creates them for you. If you already have a project, see the [note below](#using-azd-recommended-for-cli-workflows) on how to target it. + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. 
| + +**Local development (without `azd`):** + +```bash +cp .env.example .env +# Edit .env and fill in your values, then: +export $(grep -v '^#' .env | xargs) +``` + +> [!NOTE] +> When using `azd ai agent run`, environment variables are handled automatically — no manual setup needed. + +### Installing Dependencies + +> [!NOTE] +> If using `azd ai agent run`, dependencies are installed automatically — skip to [Running the Sample](#running-the-sample). + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +### Running the Sample + +The recommended way to run and test hosted agents locally is with the Azure Developer CLI (`azd`) or the Foundry VS Code extension. + +#### Using the Foundry VS Code Extension + +The [Foundry VS Code extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) has a built-in sample gallery. You can open this sample directly from the extension without cloning the repository — it scaffolds the project into a new workspace, generates `agent.yaml`, `.env`, and `.vscode/tasks.json` + `launch.json` automatically, and configures a one-click **F5** debug experience. + +Follow the [VS Code quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) for a full step-by-step walkthrough. + +#### Using [`azd`](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd) (recommended for CLI workflows) + +No cloning required. 
Create a new folder, point `azd` at the manifest on GitHub, and it sets up the sample and generates Bicep infrastructure, `agent.yaml`, and env config automatically: + +```bash +# Create a new folder for the agent and navigate into it +mkdir hello-world-agent && cd hello-world-agent + +# Initialize from the manifest — azd reads it, downloads the sample, +# and generates Bicep infrastructure, agent.yaml, and env config +azd ai agent init -m https://github.com/microsoft-foundry/foundry-samples/blob/main/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.manifest.yaml + +# Provision Azure resources (Foundry project, model deployment, App Insights) +azd provision + +# Run the agent locally (handles env vars, dependency install, and startup) +azd ai agent run +``` + +> [!NOTE] +> If you've already cloned this repository, pass a local path to the manifest instead: +> `azd ai agent init -m /samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.manifest.yaml` + +> [!NOTE] +> If you already have a Foundry project and model deployment, add `-p -d ` to `azd ai agent init` to target existing resources. You can also skip provisioning entirely and configure env vars manually — see [Without `azd`](#without-azd). + +The agent starts on `http://localhost:8088/`. To invoke it: + +```bash +azd ai agent invoke --local "What is Microsoft Foundry?" +``` + +Or use curl directly. The `-N` flag disables output buffering so you see SSE tokens as they arrive: + +> [!NOTE] +> `agent_session_id` is optional. If omitted, the server auto-generates one and returns it in the `done` event (`session_id` field). To continue a conversation across turns, pass the same `agent_session_id` in each request. 
+ +```bash +# Turn 1 — start a new conversation +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "What is Microsoft Foundry?"}' + +# Turn 2 — continue the same conversation +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "What hosted agent options does it offer?"}' +``` + +Each response is a stream of SSE events: `token` events with incremental text, followed by a `done` event with the complete reply. + +#### Without `azd` + +If running without `azd`, set environment variables manually (see [Environment Variables](#environment-variables)), then: + +```bash +python main.py +``` + +### Deploying the Agent to Microsoft Foundry + +Once you've tested locally, deploy to Microsoft Foundry: + +```bash +# Provision Azure resources (skip if already done during local setup) +azd provision + +# Build, push, and deploy the agent to Foundry +azd deploy +``` + +After deploying, invoke the agent running in Foundry: + +```bash +azd ai agent invoke "What is Microsoft Foundry?" +``` + +To stream logs from the running agent: + +```bash +azd ai agent monitor +``` + +For the full deployment guide, see [Azure AI Foundry hosted agents](https://aka.ms/azdaiagent/docs). + +## Troubleshooting + +### Images built on Apple Silicon or other ARM64 machines do not work on our service + +We **recommend deploying with `azd deploy`**, which uses ACR remote build and always produces images with the correct architecture. + +If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. + +**Fix for local builds:** + +```bash +docker build --platform=linux/amd64 -t image . +``` + +This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.manifest.yaml new file mode 100644 index 000000000..2e74ed1af --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.manifest.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: hello-world-python-invocations +displayName: "Hello World (Python, Invocations)" +description: > + Minimal Hello World agent using the Invocations protocol with a bring-your-own + approach. Calls a Foundry model via the Responses API and returns the response + as a streaming SSE event stream. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Python +template: + name: hello-world-python-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.yaml new file mode 100644 index 000000000..4ec537837 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: hello-world-python-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/hello-world/main.py b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/main.py new file mode 100644 index 000000000..837c5a66d --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/hello-world/main.py @@ -0,0 +1,217 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Hello World — Bring Your Own Invocations agent. + +Minimal hosted agent that forwards user input to a Foundry model via the +Responses API and returns the reply through the Invocations protocol. + +This sample demonstrates the simplest possible BYO integration: the protocol +SDK (``azure-ai-agentserver-invocations``) handles the HTTP contract and +session resolution, and you supply the model call using the Foundry SDK. + +Unlike the Responses protocol, the Invocations protocol does **not** provide +built-in server-side conversation history. This agent maintains an in-memory +session store keyed by ``agent_session_id``. In production, replace it with +durable storage (Redis, Cosmos DB, etc.) 
so history survives restarts. + +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected in hosted containers) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (declared in agent.manifest.yaml) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" + + # Start the agent + python main.py + + # Turn 1 — start a new conversation + curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "What is Microsoft Foundry?"}' + + # Turn 2 — continue the same conversation + curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "What hosted agent options does it offer?"}' +""" + +import asyncio +import json +import logging +import os + +from starlette.requests import Request +from starlette.responses import JSONResponse, StreamingResponse + +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.agentserver.invocations import InvocationAgentServerHost + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# Initialize Foundry project client — reads FOUNDRY_PROJECT_ENDPOINT. +# FOUNDRY_PROJECT_ENDPOINT is auto-injected in hosted Foundry containers. +# Locally, set it manually or use 'azd ai agent run' which sets it automatically. 
async def _stream_reply(input_items: list[dict[str, str]]):
    """Stream text deltas from the Foundry model as they are produced.

    The Responses SDK exposes a *synchronous* streaming iterator. To avoid
    blocking the event loop, a worker thread consumes that iterator and
    forwards each text delta to the loop through an ``asyncio.Queue``; a
    ``None`` sentinel marks end-of-stream.

    :param input_items: Conversation history as ``{role, content}`` dicts,
        passed straight through as the Responses API ``input``.
    :yields: Incremental text fragments (``response.output_text.delta`` events).
    """
    loop = asyncio.get_running_loop()
    deltas: asyncio.Queue[str | None] = asyncio.Queue()

    def _pump() -> None:
        """Thread worker: iterate the sync stream and enqueue each text delta."""
        try:
            stream = _responses_client.create(
                model=_model,
                instructions=_SYSTEM_PROMPT,
                input=input_items,
                store=False,  # history is owned by this agent — no server-side storage
                stream=True,
            )
            for event in stream:
                if event.type == "response.output_text.delta":
                    loop.call_soon_threadsafe(deltas.put_nowait, event.delta)
        finally:
            # Always enqueue the sentinel, even on error, so the consumer wakes up.
            loop.call_soon_threadsafe(deltas.put_nowait, None)

    # Run the blocking producer in the default thread pool; consume here.
    worker = loop.run_in_executor(None, _pump)
    while True:
        delta = await deltas.get()
        if delta is None:
            break
        yield delta
    # Surface any exception that escaped inside the worker thread.
    await worker
def _parse_user_message(raw: bytes) -> str:
    """Extract the user's message text from a raw request body.

    Accepts either a JSON object with a ``"message"`` (or ``"input"``) string,
    or a plain-text body (e.g. sent directly from the Foundry portal chat UI).

    :raises ValueError: if the body is empty or contains no usable text.
    """
    if not raw:
        raise ValueError("empty body")
    try:
        data = json.loads(raw)
    except json.JSONDecodeError:
        # Not JSON — treat the whole body as plain text.
        text = raw.decode("utf-8", errors="replace").strip()
    else:
        if isinstance(data, dict):
            text = data.get("message") or data.get("input") or ""
        else:
            # Valid JSON but not an object (e.g. a bare string) — use raw text.
            text = raw.decode("utf-8", errors="replace").strip()
    if not isinstance(text, str) or not text.strip():
        raise ValueError("missing message text")
    return text


# ── Required handler ──────────────────────────────────────────────────────────
# @app.invoke_handler is the only handler you must implement. It receives every
# POST /invocations request. The function name below is arbitrary.
#
# Two optional handlers exist for long-running operations (LRO):
#   @app.get_invocation_handler    — handle GET /invocations/{id} status polls
#   @app.cancel_invocation_handler — handle DELETE /invocations/{id} cancellation
# For a simple streaming agent like this one, neither is needed.
#
# To serve an OpenAPI spec at GET /invocations/docs/openapi.json, pass it to
# the host constructor: InvocationAgentServerHost(openapi_spec={...})
# ─────────────────────────────────────────────────────────────────────────────
@app.invoke_handler
async def handle_invoke(request: Request):
    """Handle a streaming multi-turn chat request.

    Parses the user's message, appends it to the in-memory session history,
    streams the model reply as SSE ``token`` events, and finishes with a
    ``done`` event carrying the complete text plus session/invocation ids.

    Fix over the naive version: on a model failure the error text is streamed
    to the caller but is NOT persisted as an assistant turn (it is not real
    model output and would corrupt every later call for this session), and the
    failed user turn is rolled back so a retry starts from a clean history.
    """
    try:
        user_message = _parse_user_message(await request.body())
    except ValueError:
        return JSONResponse(
            status_code=400,
            content={
                "error": "invalid_request",
                "message": (
                    'Request body must be a non-empty JSON object with a "message" (or "input") '
                    'string, or a plain-text body, e.g. {"message": "What is Microsoft Foundry?"}'
                ),
            },
        )

    # The Invocations SDK resolves session and invocation identity from the
    # incoming request headers and exposes them via request.state.
    session_id = request.state.session_id
    invocation_id = request.state.invocation_id

    logger.info(
        "Processing invocation %s (session %s)", invocation_id, session_id
    )

    # Retrieve or create conversation history for this session, and record
    # the user turn. Keep a reference so it can be rolled back on failure.
    history = _sessions.setdefault(session_id, [])
    user_entry = {"role": "user", "content": user_message}
    history.append(user_entry)

    # Snapshot history as the Responses API input list.
    # History is stored as {role, content} dicts — the same format the API accepts.
    input_items = list(history)

    async def event_generator():
        full_reply = ""
        succeeded = True
        try:
            async for delta in _stream_reply(input_items):
                full_reply += delta
                yield f"data: {json.dumps({'type': 'token', 'content': delta})}\n\n"
        except Exception as exc:
            succeeded = False
            msg = f"Error calling model: {exc}"
            # logger.exception preserves the traceback (logger.error would not).
            logger.exception("Model call failed for invocation %s", invocation_id)
            full_reply = msg
            yield f"data: {json.dumps({'type': 'token', 'content': msg})}\n\n"

        # Final event carries the complete text so the caller can use it
        # without having to reassemble the token stream.
        yield f"data: {json.dumps({'type': 'done', 'invocation_id': invocation_id, 'session_id': session_id, 'full_text': full_reply})}\n\n"

        if succeeded and full_reply:
            # Persist the assistant reply to history after streaming completes.
            history.append({"role": "assistant", "content": full_reply})
        elif not succeeded:
            # Roll back the failed user turn: never store error text as if it
            # were model output, and let a retry start from a clean history.
            try:
                history.remove(user_entry)
            except ValueError:
                pass  # already gone (e.g. concurrent mutation) — nothing to undo

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"},
    )


app.run()
} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . 
user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md new file mode 100644 index 000000000..ab1922cc6 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/README.md @@ -0,0 +1,168 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Human-in-the-Loop Agent — Invocations Protocol + +This sample demonstrates a human-in-the-loop agent built with [azure-ai-agentserver-invocations](https://pypi.org/project/azure-ai-agentserver-invocations/) that implements an **approval-gate pattern**. The agent generates a proposal using Azure OpenAI, pauses for human review, and resumes execution after the human approves, requests a revision, or rejects. + +Session state is persisted as JSON files in the `$HOME` directory, so proposals survive agent restarts and are accessible via the **Session Files API** when deployed to Azure. + +This pattern is useful for workflows where an AI agent should **not act autonomously** — for example, drafting communications, generating code changes, or proposing decisions that require human sign-off. + +## How It Works + +``` +[new task] ──► AWAITING_APPROVAL ──► (approve) ──► COMPLETED + │ + ├──► (revise + feedback) ──► AWAITING_APPROVAL (loop) + │ + └──► (reject) ──► REJECTED +``` + +1. 
**Submit a task** via `POST /invocations` — the agent calls Azure OpenAI to generate a proposal and returns it with status `awaiting_approval`. +2. **The agent pauses** — the proposal is saved in memory, and the human can return at any time (minutes, hours, or days later). +3. **Respond with a decision** via another `POST /invocations` using the same `agent_session_id`: + - `approve` — the agent marks the proposal as final and returns it. + - `revise` (with feedback) — the agent generates an improved proposal incorporating the feedback. + - `reject` — the agent marks the session as rejected. +4. **Poll status** via `GET /invocations/{id}` — useful for checking whether a proposal is still pending after reconnecting. + +## OpenAPI Spec + +The agent includes an inline OpenAPI 3.0 specification that documents the request/response contract. It is served at: + +``` +GET http://localhost:8088/invocations/docs/openapi.json +``` + +## Running Locally + +### Prerequisites + +- Python 3.10+ +- Azure CLI installed and authenticated (`az login`) +- Azure OpenAI resource with a deployed model + +### Install Dependencies + +```bash +pip install -r requirements.txt +``` + +### Start the Agent + +```bash +cp .env.example .env # then edit values +export FOUNDRY_PROJECT_ENDPOINT="https://your-project.services.ai.azure.com/api/projects/your-project" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +python main.py +``` + +The agent starts on `http://localhost:8088/`. 
+ +### Test + +```bash +# Fetch the OpenAPI spec +curl http://localhost:8088/invocations/docs/openapi.json + +# Step 1: Submit a task — agent generates a proposal +curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \ + -H "Content-Type: application/json" \ + -d '{"task": "Draft a marketing email for our new AI product launch"}' +# -> {"status": "awaiting_approval", "proposal": "...", "session_id": "session-1", ...} + +# Step 2: Check status (e.g., after reconnecting hours later) +curl http://localhost:8088/invocations/ +# -> {"status": "awaiting_approval", "proposal": "...", ...} + +# Step 3a: Approve the proposal +curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \ + -H "Content-Type: application/json" \ + -d '{"decision": "approve"}' +# -> {"status": "completed", "final_output": "...", ...} + +# Step 3b: Or request a revision with feedback +curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \ + -H "Content-Type: application/json" \ + -d '{"decision": "revise", "feedback": "Make the tone more casual and add a call-to-action"}' +# -> {"status": "awaiting_approval", "proposal": "", ...} + +# Step 3c: Or reject +curl -X POST "http://localhost:8088/invocations?agent_session_id=session-1" \ + -H "Content-Type: application/json" \ + -d '{"decision": "reject"}' +# -> {"status": "rejected", ...} + +# Cancel a pending session +curl -X POST http://localhost:8088/invocations//cancel +# -> {"status": "cancelled", ...} +``` + +## Invoke with azd + +### Local + +**Bash:** +```bash +azd ai agent invoke --local '{"task": "Write a product launch announcement for Azure AI Foundry"}' +``` + +**PowerShell:** +```powershell +azd ai agent invoke --local '{\"task\": \"Write a product launch announcement for Azure AI Foundry\"}' +``` + +### Remote (after `azd up`) + +**Bash:** +```bash +azd ai agent invoke '{"task": "Write a product launch announcement for Azure AI Foundry"}' +``` + +**PowerShell:** 
+```powershell +azd ai agent invoke '{\"task\": \"Write a product launch announcement for Azure AI Foundry\"}' +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', 'message': 'The principal lacks the required data action Microsoft.CognitiveServices/accounts/OpenAI/deployments/chat/completions/action to perform POST /openai/deployments/{deployment-id}/chat/completions operation.'}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. 
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml new file mode 100644 index 000000000..197f4561f --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.manifest.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: human-in-the-loop-invocations +displayName: "Human In The Loop (Invocations)" +description: > + A human-in-the-loop agent that demonstrates the approval-gate pattern using + the azure-ai-agentserver-invocations SDK. Generates a proposal, pauses for + human review, and resumes after the human approves, requests a revision, or + rejects. Includes an inline OpenAPI spec for contract discovery. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Human-in-the-Loop + - OpenAPI + - Python +template: + name: human-in-the-loop-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml new file mode 100644 index 000000000..bf393246d --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: human-in-the-loop-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/main.py b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/main.py new file mode 100644 index 000000000..fcf7bd4a0 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/main.py @@ -0,0 +1,555 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Human-in-the-loop (HITL) agent using azure-ai-agentserver-invocations with Azure OpenAI. + +Demonstrates an approval-gate pattern where the agent: + 1. Receives a task and generates a proposal using Azure OpenAI. + 2. Pauses execution and returns the proposal for human review. + 3. Resumes after the human approves, requests a revision, or rejects. + +State machine:: + + [new task] ──► AWAITING_APPROVAL ──► (approve) ──► COMPLETED + │ + ├──► (revise + feedback) ──► AWAITING_APPROVAL (loop) + │ + └──► (reject) ──► REJECTED + +.. 
note:: + + Session state is persisted as JSON files in the ``$HOME`` directory, + so state survives restarts and files are accessible via the + Session Files API when deployed to Azure. + +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (e.g., https://your-resource.openai.azure.com/api/projects/proj) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (e.g., gpt-4o) + +Uses DefaultAzureCredential for authentication - works with: +- Azure CLI login (az login) +- Managed Identity in Azure +- Environment variables (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET) + +Usage:: + + export FOUNDRY_PROJECT_ENDPOINT="https://your-resource.openai.azure.com/api/projects/proj" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o" + python main.py + + # Submit a task + curl -X POST "http://localhost:8088/invocations?agent_session_id=s1" \\ + -H "Content-Type: application/json" \\ + -d '{"task": "Draft a marketing email for our new AI product launch"}' + + # Approve the proposal + curl -X POST "http://localhost:8088/invocations?agent_session_id=s1" \\ + -H "Content-Type: application/json" \\ + -d '{"decision": "approve"}' + + # Or revise with feedback + curl -X POST "http://localhost:8088/invocations?agent_session_id=s1" \\ + -H "Content-Type: application/json" \\ + -d '{"decision": "revise", "feedback": "Make the tone more casual"}' + + # Or reject + curl -X POST "http://localhost:8088/invocations?agent_session_id=s1" \\ + -H "Content-Type: application/json" \\ + -d '{"decision": "reject"}' + + # Check status (e.g., after reconnecting hours later) + curl http://localhost:8088/invocations/ +""" + +import asyncio +import hashlib +import json +import logging +import os +import tempfile +from pathlib import Path +from typing import Any + +from starlette.requests import Request +from starlette.responses import JSONResponse, Response + +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from 
azure.ai.agentserver.invocations import InvocationAgentServerHost + +logger = logging.getLogger("human-in-the-loop") + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# --------------------------------------------------------------------------- +# Foundry project client — reads FOUNDRY_PROJECT_ENDPOINT. +# FOUNDRY_PROJECT_ENDPOINT is auto-injected in hosted Foundry containers. +# Locally, set it manually or use 'azd ai agent run' which sets it automatically. +# --------------------------------------------------------------------------- +FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not FOUNDRY_PROJECT_ENDPOINT: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run'." + ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient( + endpoint=FOUNDRY_PROJECT_ENDPOINT, credential=_credential) + +# Use the Responses API — not chat.completions (Chat Completions API is legacy). 
+_openai_client = _project_client.get_openai_client() + +# --------------------------------------------------------------------------- +# OpenAPI 3.0 spec -- served at GET /invocations/docs/openapi.json +# --------------------------------------------------------------------------- +OPENAPI_SPEC: dict[str, Any] = { + "openapi": "3.0.0", + "info": { + "title": "Human-in-the-Loop Agent", + "version": "1.0.0", + "description": ( + "An agent that generates proposals via Azure OpenAI, pauses for " + "human approval, and resumes after the human responds." + ), + }, + "paths": { + "/invocations": { + "post": { + "summary": "Submit a new task or respond to a pending proposal", + "parameters": [ + { + "name": "agent_session_id", + "in": "query", + "required": False, + "schema": {"type": "string"}, + } + ], + "requestBody": { + "required": True, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "task": {"type": "string"}, + "decision": { + "type": "string", + "enum": ["approve", "revise", "reject"], + }, + "feedback": {"type": "string"}, + }, + } + } + }, + }, + "responses": { + "200": { + "description": "Agent response with current status and data.", + }, + }, + } + }, + "/invocations/{invocation_id}": { + "get": { + "summary": "Check the status of a session", + "parameters": [ + { + "name": "invocation_id", + "in": "path", + "required": True, + "schema": {"type": "string"}, + } + ], + "responses": { + "200": {"description": "Current session state."}, + "404": {"description": "Session not found."}, + }, + } + }, + "/invocations/{invocation_id}/cancel": { + "post": { + "summary": "Cancel a pending session", + "parameters": [ + { + "name": "invocation_id", + "in": "path", + "required": True, + "schema": {"type": "string"}, + } + ], + "responses": { + "200": {"description": "Cancellation result."}, + "404": {"description": "Session not found."}, + }, + } + }, + }, +} + +# 
def _session_file_path(session_id: str) -> Path:
    """Map a session id to its JSON state file under the state directory."""
    sanitized = "".join(
        ch if (ch.isalnum() or ch in "-_") else "_" for ch in session_id)
    # Hash suffix avoids collisions from sanitization (e.g. "a@b" vs "a#b")
    digest = hashlib.sha256(session_id.encode()).hexdigest()[:8]
    filename = "hitl_session_{}_{}.json".format(sanitized, digest)
    return _STATE_DIR / filename


def _save_session(session_id: str, ctx: dict[str, Any]) -> None:
    """Atomically persist session state as a JSON file in $HOME.

    Writes to a temp file in the same directory, then renames over the
    target — so readers never observe a partially written file.
    """
    payload = {"session_id": session_id, **ctx}
    destination = _session_file_path(session_id)
    fd, scratch = tempfile.mkstemp(dir=str(_STATE_DIR), suffix=".tmp")
    try:
        with os.fdopen(fd, "w") as handle:
            json.dump(payload, handle, indent=2)
        os.replace(scratch, str(destination))
    except BaseException:
        # Clean up the orphaned temp file, then re-raise the original error.
        try:
            os.unlink(scratch)
        except OSError:
            pass
        raise
    logger.debug("[%s] State saved to disk", session_id)
"session_id"} + inv_ids = ctx.get("invocation_ids", []) + if not isinstance(inv_ids, list): + inv_ids = [] + ctx["invocation_ids"] = inv_ids + _contexts[session_id] = ctx + for inv_id in inv_ids: + _invocation_to_session[inv_id] = session_id + except Exception: + logger.warning("Failed to load session file: %s", path.name) + if _contexts: + logger.info("Loaded %d session(s) from disk", len(_contexts)) + + +_load_all_sessions() + +# --------------------------------------------------------------------------- +# Server +# --------------------------------------------------------------------------- +app = InvocationAgentServerHost(openapi_spec=OPENAPI_SPEC) + +# --------------------------------------------------------------------------- +# LLM helper +# --------------------------------------------------------------------------- +_SYSTEM_PROMPT = ( + "You are a professional assistant. The user will give you a task. " + "Generate a high-quality draft proposal that the user can review " + "and approve. Be detailed, well-structured, and ready for review.\n\n" + "If revision feedback is provided, incorporate it into an improved " + "version of the proposal." +) + + +async def _call_llm(instructions: str, input_items: list[dict[str, str]]) -> str: + """Call the Foundry Responses API and return the response text. + + The Foundry OpenAI client is synchronous; run it in a thread so we don't + block the event loop. + """ + loop = asyncio.get_running_loop() + response = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=instructions, + input=input_items, + ), + ) + for item in response.output: + if item.type == "message": + for part in item.content: + if part.type == "output_text": + return part.text + return "" + + +async def _generate_proposal( + task: str, + revision_history: list[dict[str, str]], +) -> str: + """Generate or revise a proposal using Azure OpenAI Responses API. 
+ + Builds input items with the original task and any prior + revision rounds, then calls the LLM for a new proposal. + """ + input_items: list[dict[str, str]] = [ + {"role": "user", "content": f"Task: {task}"}, + ] + for rev in revision_history: + input_items.append( + {"role": "assistant", "content": rev["proposal"]}) + input_items.append( + {"role": "user", "content": f"Revision feedback: {rev['feedback']}"}) + + return await _call_llm(_SYSTEM_PROMPT, input_items) + + +# --------------------------------------------------------------------------- +# Handlers +# --------------------------------------------------------------------------- + +@app.invoke_handler +async def handle_invoke(request: Request) -> Response: + """Handle a new task submission or a decision on a pending proposal.""" + try: + data = await request.json() + if not isinstance(data, dict): + raise ValueError("body is not a JSON object") + except (json.JSONDecodeError, ValueError): + return JSONResponse( + status_code=400, + content={ + "error": "invalid_request", + "message": ( + 'Request body must be a JSON object with either a "task" ' + '(to start a new proposal) or a "decision" ' + '(approve/revise/reject), e.g. {"task": "analyze dataset"} or ' + '{"decision": "approve"}' + ), + }, + ) + + session_id = request.state.session_id + invocation_id = request.state.invocation_id + + task = data.get("task") + decision = data.get("decision") + + if task and decision: + return JSONResponse( + {"error": "Cannot provide both 'task' and 'decision' in the same request."}, + status_code=400, + ) + + # --- New task submission --- + if task: + if not task.strip(): + return JSONResponse( + {"error": "task cannot be empty"}, + status_code=400, + ) + + existing = _contexts.get(session_id) + if existing and existing["status"] == "awaiting_approval": + return JSONResponse( + {"error": ( + f"Session {session_id} has a pending proposal. " + "Approve, revise, or reject it before submitting a new task." 
+ )}, + status_code=409, + ) + + logger.info("[%s] New task received: %s", session_id, task[:100]) + proposal = await _generate_proposal(task, []) + + _contexts[session_id] = { + "status": "awaiting_approval", # -> AWAITING_APPROVAL (paused) + "original_task": task, + "proposal": proposal, + "revision_history": [], + "invocation_id": invocation_id, + "invocation_ids": [invocation_id], + } + _invocation_to_session[invocation_id] = session_id + _save_session(session_id, _contexts[session_id]) + + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": "awaiting_approval", + "proposal": proposal, + "revision_count": 0, + }) + + # --- Decision on existing proposal --- + if decision: + ctx = _contexts.get(session_id) + if not ctx: + return JSONResponse( + {"error": f"No pending session found for session_id={session_id}"}, + status_code=400, + ) + if ctx["status"] != "awaiting_approval": + return JSONResponse( + {"error": f"Session is not awaiting approval (status={ctx['status']})"}, + status_code=400, + ) + + # Validate decision and required fields before any state mutation + if decision not in ("approve", "revise", "reject"): + return JSONResponse( + {"error": f"Unknown decision: {decision}. 
Use 'approve', 'revise', or 'reject'."}, + status_code=400, + ) + feedback = data.get("feedback", "") + if decision == "revise" and not feedback: + return JSONResponse( + {"error": "feedback is required for 'revise' decision"}, + status_code=400, + ) + + # All validation passed — track the invocation + ctx["invocation_id"] = invocation_id + ctx.setdefault("invocation_ids", []).append(invocation_id) + _invocation_to_session[invocation_id] = session_id + + if decision == "approve": + logger.info("[%s] Proposal approved", session_id) + ctx["status"] = "completed" # -> COMPLETED (terminal) + _save_session(session_id, ctx) + + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": "completed", + "final_output": ctx["proposal"], + "revision_count": len(ctx["revision_history"]), + }) + + if decision == "revise": + logger.info("[%s] Revision requested: %s", + session_id, feedback[:100]) + ctx["revision_history"].append({ + "proposal": ctx["proposal"], + "feedback": feedback, + }) + + new_proposal = await _generate_proposal( + ctx["original_task"], ctx["revision_history"]) + ctx["proposal"] = new_proposal + ctx["status"] = "awaiting_approval" # -> AWAITING_APPROVAL (loop) + _save_session(session_id, ctx) + + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": "awaiting_approval", + "proposal": new_proposal, + "revision_count": len(ctx["revision_history"]), + }) + + # decision == "reject" + logger.info("[%s] Proposal rejected", session_id) + ctx["status"] = "rejected" # -> REJECTED (terminal) + _save_session(session_id, ctx) + + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": "rejected", + "revision_count": len(ctx["revision_history"]), + }) + + return JSONResponse( + {"error": "Request must include either 'task' (new task) or 'decision' (approve/revise/reject)."}, + status_code=400, + ) + + +@app.get_invocation_handler +async def 
handle_get_invocation(request: Request) -> Response: + """Retrieve the current status and data for a session.""" + invocation_id = request.state.invocation_id + session_id = _invocation_to_session.get(invocation_id) + + if not session_id or session_id not in _contexts: + return JSONResponse({"error": "not found"}, status_code=404) + + ctx = _contexts[session_id] + response_data: dict[str, Any] = { + "session_id": session_id, + "invocation_id": ctx["invocation_id"], + "status": ctx["status"], + "original_task": ctx["original_task"], + "revision_count": len(ctx["revision_history"]), + } + + if ctx["status"] == "awaiting_approval": + response_data["proposal"] = ctx["proposal"] + elif ctx["status"] == "completed": + response_data["final_output"] = ctx["proposal"] + + return JSONResponse(response_data) + + +@app.cancel_invocation_handler +async def handle_cancel_invocation(request: Request) -> Response: + """Cancel a pending session.""" + invocation_id = request.state.invocation_id + session_id = _invocation_to_session.get(invocation_id) + + if not session_id or session_id not in _contexts: + return JSONResponse({"error": "not found"}, status_code=404) + + ctx = _contexts[session_id] + if ctx["status"] in ("completed", "rejected"): + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": ctx["status"], + "error": "session already finalized", + }) + + ctx["invocation_id"] = invocation_id + ctx.setdefault("invocation_ids", []).append(invocation_id) + _invocation_to_session[invocation_id] = session_id + ctx["status"] = "cancelled" # -> CANCELLED (terminal) + _save_session(session_id, ctx) + logger.info("[%s] Session cancelled", session_id) + + return JSONResponse({ + "session_id": session_id, + "invocation_id": invocation_id, + "status": "cancelled", + }) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/requirements.txt 
b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/requirements.txt new file mode 100644 index 000000000..8c85ffcbc --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-invocations==1.0.0b2 +azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json new file mode 100644 index 000000000..62440aebb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/human-in-the-loop/test-payload.json @@ -0,0 +1 @@ +{ "task": "Draft a marketing email for our new AI product launch" } diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. 
+# FOUNDRY_PROJECT_ENDPOINT=https://<your-resource>.services.ai.azure.com/api/projects/<your-project>
+
+# Model deployment name — must match a deployment in your Foundry project.
+AZURE_AI_MODEL_DEPLOYMENT_NAME=
+
+# Application Insights — auto-injected in hosted containers.
+# Set for local telemetry (optional but recommended).
+# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=...
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.gitignore b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.gitignore
new file mode 100644
index 000000000..8e8438024
--- /dev/null
+++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/.gitignore
@@ -0,0 +1 @@
+.azure
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/Dockerfile
new file mode 100644
index 000000000..b89292edb
--- /dev/null
+++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/Dockerfile
@@ -0,0 +1,7 @@
+FROM python:3.12-slim
+WORKDIR /app
+COPY . user_agent/
+WORKDIR /app/user_agent
+RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+EXPOSE 8088
+CMD ["python", "main.py"]
diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/README.md b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/README.md
new file mode 100644
index 000000000..0bf4a4d90
--- /dev/null
+++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/README.md
@@ -0,0 +1,74 @@
+# LangGraph Multi-turn Chat Agent
+
+A multi-turn conversational agent built with [LangGraph](https://langchain-ai.github.io/langgraph/)
+and Azure OpenAI, hosted via the **invocations** protocol. 
+ +## What it demonstrates + +- **LangGraph agent graph** with conditional tool-calling routing +- **Two built-in tools**: `get_current_time` and `calculator` +- **Multi-turn conversations** via `agent_session_id` (in-memory session store) +- **SSE streaming** output over the invocations protocol +- **Azure OpenAI** with `DefaultAzureCredential` authentication + +## Architecture + +``` +┌───────┐ ┌─────────┐ ┌───────┐ +│ START │───▶│ chatbot │───▶│ END │ +└───────┘ └────┬─────┘ └───────┘ + │ tool_calls? + ▼ + ┌─────────┐ + │ tools │ + └────┬─────┘ + │ + └──▶ chatbot (loop) +``` + +## Prerequisites + +- Python 3.12+ +- Azure OpenAI resource with a deployed model (e.g., `gpt-4.1-mini`) +- Azure CLI login (`az login`) or other `DefaultAzureCredential` source + +## Environment variables + +| Variable | Required | Default | Description | +|---|---|---|---| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | — | Foundry project endpoint URL | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | — | Model deployment name declared in `agent.manifest.yaml` | + +## Running locally + +```bash +cp .env.example .env # then edit values +pip install -r requirements.txt +python main.py +``` + +## Testing with curl + +```bash +# Turn 1 — ask for the time (triggers tool call) +curl -N -X POST 'http://localhost:8088/invocations?agent_session_id=s1' \ + -H 'Content-Type: application/json' \ + -d '{"message": "What time is it right now?"}' + +# Turn 2 — ask a math question (triggers calculator tool) +curl -N -X POST 'http://localhost:8088/invocations?agent_session_id=s1' \ + -H 'Content-Type: application/json' \ + -d '{"message": "What is 42 * 17?"}' + +# Turn 3 — follow-up (uses conversation context, no tools) +curl -N -X POST 'http://localhost:8088/invocations?agent_session_id=s1' \ + -H 'Content-Type: application/json' \ + -d '{"message": "Add 100 to that result"}' +``` + +## Deploying to Azure AI Agent Hosting + +```bash +azd ai agent init -m agent.manifest.yaml +azd up +``` diff --git 
a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.manifest.yaml new file mode 100644 index 000000000..c6c4da9a1 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: langgraph-chat-invocations +displayName: "LangGraph Chat (Invocations)" +description: > + Multi-turn chat agent built with LangGraph and the Invocations protocol. + Demonstrates a tool-calling agent graph (chatbot ↔ tools) with + get_current_time and calculator tools. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - LangGraph + - Tool Calling + - Python +template: + name: langgraph-chat-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.yaml new file mode 100644 index 000000000..840f0aff5 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/agent.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: langgraph-chat-invocations +description: | + Multi-turn chat agent built with LangGraph and the Invocations protocol. Demonstrates a tool-calling agent graph (chatbot ↔ tools) with get_current_time and calculator tools. +metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - LangGraph + - Tool Calling + - Python +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: gpt-4.1-mini diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/azure.yaml b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/azure.yaml new file mode 100644 index 000000000..db27c2297 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/azure.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json + +requiredVersions: + extensions: + azure.ai.agents: '>=0.1.0-preview' +name: ai-foundry-starter-basic +services: + langgraph-chat-invocations: + project: . 
+ host: azure.ai.agent + language: docker + docker: + remoteBuild: true + config: + container: + resources: + cpu: "0.25" + memory: 0.5Gi + scale: + maxReplicas: 1 + deployments: + - model: + format: OpenAI + name: gpt-4.1-mini + version: "2025-04-14" + name: gpt-4.1-mini + sku: + capacity: 5719 + name: GlobalStandard + startupCommand: python main.py +infra: + provider: bicep + path: ./infra diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/abbreviations.json b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/abbreviations.json new file mode 100644 index 000000000..00cef3fc9 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/abbreviations.json @@ -0,0 +1,137 @@ +{ + "aiFoundryAccounts": "aif", + "analysisServicesServers": "as", + "apiManagementService": "apim-", + "appConfigurationStores": "appcs-", + "appManagedEnvironments": "cae-", + "appContainerApps": "ca-", + "authorizationPolicyDefinitions": "policy-", + "automationAutomationAccounts": "aa-", + "blueprintBlueprints": "bp-", + "blueprintBlueprintsArtifacts": "bpa-", + "cacheRedis": "redis-", + "cdnProfiles": "cdnp-", + "cdnProfilesEndpoints": "cdne-", + "cognitiveServicesAccounts": "cog-", + "cognitiveServicesFormRecognizer": "cog-fr-", + "cognitiveServicesTextAnalytics": "cog-ta-", + "computeAvailabilitySets": "avail-", + "computeCloudServices": "cld-", + "computeDiskEncryptionSets": "des", + "computeDisks": "disk", + "computeDisksOs": "osdisk", + "computeGalleries": "gal", + "computeSnapshots": "snap-", + "computeVirtualMachines": "vm", + "computeVirtualMachineScaleSets": "vmss-", + "containerInstanceContainerGroups": "ci", + "containerRegistryRegistries": "cr", + "containerServiceManagedClusters": "aks-", + "databricksWorkspaces": "dbw-", + "dataFactoryFactories": "adf-", + "dataLakeAnalyticsAccounts": "dla", + "dataLakeStoreAccounts": "dls", + "dataMigrationServices": "dms-", + 
"dBforMySQLServers": "mysql-", + "dBforPostgreSQLServers": "psql-", + "devicesIotHubs": "iot-", + "devicesProvisioningServices": "provs-", + "devicesProvisioningServicesCertificates": "pcert-", + "documentDBDatabaseAccounts": "cosmos-", + "documentDBMongoDatabaseAccounts": "cosmon-", + "eventGridDomains": "evgd-", + "eventGridDomainsTopics": "evgt-", + "eventGridEventSubscriptions": "evgs-", + "eventHubNamespaces": "evhns-", + "eventHubNamespacesEventHubs": "evh-", + "hdInsightClustersHadoop": "hadoop-", + "hdInsightClustersHbase": "hbase-", + "hdInsightClustersKafka": "kafka-", + "hdInsightClustersMl": "mls-", + "hdInsightClustersSpark": "spark-", + "hdInsightClustersStorm": "storm-", + "hybridComputeMachines": "arcs-", + "insightsActionGroups": "ag-", + "insightsComponents": "appi-", + "keyVaultVaults": "kv-", + "kubernetesConnectedClusters": "arck", + "kustoClusters": "dec", + "kustoClustersDatabases": "dedb", + "logicIntegrationAccounts": "ia-", + "logicWorkflows": "logic-", + "machineLearningServicesWorkspaces": "mlw-", + "managedIdentityUserAssignedIdentities": "id-", + "managementManagementGroups": "mg-", + "migrateAssessmentProjects": "migr-", + "networkApplicationGateways": "agw-", + "networkApplicationSecurityGroups": "asg-", + "networkAzureFirewalls": "afw-", + "networkBastionHosts": "bas-", + "networkConnections": "con-", + "networkDnsZones": "dnsz-", + "networkExpressRouteCircuits": "erc-", + "networkFirewallPolicies": "afwp-", + "networkFirewallPoliciesWebApplication": "waf", + "networkFirewallPoliciesRuleGroups": "wafrg", + "networkFrontDoors": "fd-", + "networkFrontdoorWebApplicationFirewallPolicies": "fdfp-", + "networkLoadBalancersExternal": "lbe-", + "networkLoadBalancersInternal": "lbi-", + "networkLoadBalancersInboundNatRules": "rule-", + "networkLocalNetworkGateways": "lgw-", + "networkNatGateways": "ng-", + "networkNetworkInterfaces": "nic-", + "networkNetworkSecurityGroups": "nsg-", + "networkNetworkSecurityGroupsSecurityRules": "nsgsr-", + 
"networkNetworkWatchers": "nw-", + "networkPrivateDnsZones": "pdnsz-", + "networkPrivateLinkServices": "pl-", + "networkPublicIPAddresses": "pip-", + "networkPublicIPPrefixes": "ippre-", + "networkRouteFilters": "rf-", + "networkRouteTables": "rt-", + "networkRouteTablesRoutes": "udr-", + "networkTrafficManagerProfiles": "traf-", + "networkVirtualNetworkGateways": "vgw-", + "networkVirtualNetworks": "vnet-", + "networkVirtualNetworksSubnets": "snet-", + "networkVirtualNetworksVirtualNetworkPeerings": "peer-", + "networkVirtualWans": "vwan-", + "networkVpnGateways": "vpng-", + "networkVpnGatewaysVpnConnections": "vcn-", + "networkVpnGatewaysVpnSites": "vst-", + "notificationHubsNamespaces": "ntfns-", + "notificationHubsNamespacesNotificationHubs": "ntf-", + "operationalInsightsWorkspaces": "log-", + "portalDashboards": "dash-", + "powerBIDedicatedCapacities": "pbi-", + "purviewAccounts": "pview-", + "recoveryServicesVaults": "rsv-", + "resourcesResourceGroups": "rg-", + "searchSearchServices": "srch-", + "serviceBusNamespaces": "sb-", + "serviceBusNamespacesQueues": "sbq-", + "serviceBusNamespacesTopics": "sbt-", + "serviceEndPointPolicies": "se-", + "serviceFabricClusters": "sf-", + "signalRServiceSignalR": "sigr", + "sqlManagedInstances": "sqlmi-", + "sqlServers": "sql-", + "sqlServersDataWarehouse": "sqldw-", + "sqlServersDatabases": "sqldb-", + "sqlServersDatabasesStretch": "sqlstrdb-", + "storageStorageAccounts": "st", + "storageStorageAccountsVm": "stvm", + "storSimpleManagers": "ssimp", + "streamAnalyticsCluster": "asa-", + "synapseWorkspaces": "syn", + "synapseWorkspacesAnalyticsWorkspaces": "synw", + "synapseWorkspacesSqlPoolsDedicated": "syndp", + "synapseWorkspacesSqlPoolsSpark": "synsp", + "timeSeriesInsightsEnvironments": "tsi-", + "webServerFarms": "plan-", + "webSitesAppService": "app-", + "webSitesAppServiceEnvironment": "ase-", + "webSitesFunctions": "func-", + "webStaticSites": "stapp-" +} diff --git 
a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/acr-role-assignment.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/acr-role-assignment.bicep new file mode 100644 index 000000000..3e0c2b218 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/acr-role-assignment.bicep @@ -0,0 +1,27 @@ +targetScope = 'resourceGroup' + +@description('Name of the existing container registry') +param acrName string + +@description('Principal ID to grant AcrPull role') +param principalId string + +@description('Full resource ID of the ACR (for generating unique GUID)') +param acrResourceId string + +// Reference the existing ACR in this resource group +resource acr 'Microsoft.ContainerRegistry/registries@2023-07-01' existing = { + name: acrName +} + +// Grant AcrPull role to the AI project's managed identity +resource acrPullRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: acr + name: guid(acrResourceId, principalId, '7f951dda-4ed3-4680-a7ca-43fe172d538d') + properties: { + principalId: principalId + principalType: 'ServicePrincipal' + // AcrPull role + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } +} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/ai-project.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/ai-project.bicep new file mode 100644 index 000000000..130a03781 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/ai-project.bicep @@ -0,0 +1,430 @@ +targetScope = 'resourceGroup' + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Main location for the resources') +param location string + +var resourceToken = uniqueString(subscription().id, 
resourceGroup().id, location) + +@description('Name of the project') +param aiFoundryProjectName string + +param deployments deploymentsType + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('Optional. Name of an existing AI Services account in the current resource group. If not provided, a new one will be created.') +param existingAiAccountName string = '' + +@description('List of connections to provision') +param connections array = [] + +@secure() +@description('Map of connection name to credentials object. Kept as @secure to prevent secrets from appearing in deployment logs. Example: { "my-conn": { "key": "secret" } }') +param connectionCredentials object = {} + +@description('Also provision dependent resources and connect to the project') +param additionalDependentResources dependentResourcesType + +@description('Enable monitoring via appinsights and log analytics') +param enableMonitoring bool = true + +@description('Enable hosted agent deployment') +param enableHostedAgents bool = false + +@description('Enable the capability host for agent conversations. When false and hosted agents are enabled, the capability host is not created (v2 hosted agents handle storage automatically).') +param enableCapabilityHost bool = true + +@description('Optional. Existing container registry resource ID. If provided, a connection will be created to this ACR instead of creating a new one.') +param existingContainerRegistryResourceId string = '' + +@description('Optional. Existing container registry login server (e.g., myregistry.azurecr.io). Required if existingContainerRegistryResourceId is provided.') +param existingContainerRegistryEndpoint string = '' + +@description('Optional. Name of an existing ACR connection on the Foundry project. 
If provided, no new ACR or connection will be created.') +param existingAcrConnectionName string = '' + +@description('Optional. Existing Application Insights connection string. If provided, a connection will be created but no new App Insights resource.') +param existingApplicationInsightsConnectionString string = '' + +@description('Optional. Existing Application Insights resource ID. Used for connection metadata when providing an existing App Insights.') +param existingApplicationInsightsResourceId string = '' + +@description('Optional. Name of an existing Application Insights connection on the Foundry project. If provided, no new App Insights or connection will be created.') +param existingAppInsightsConnectionName string = '' + +// Load abbreviations +var abbrs = loadJsonContent('../../abbreviations.json') + +// Determine which resources to create based on connections +var hasStorageConnection = length(filter(additionalDependentResources, conn => conn.resource == 'storage')) > 0 +var hasAcrConnection = length(filter(additionalDependentResources, conn => conn.resource == 'registry')) > 0 +var hasExistingAcr = !empty(existingContainerRegistryResourceId) +var hasExistingAcrConnection = !empty(existingAcrConnectionName) +var hasExistingAppInsightsConnection = !empty(existingAppInsightsConnectionName) +var hasExistingAppInsightsConnectionString = !empty(existingApplicationInsightsConnectionString) +// Only create new App Insights resources if monitoring enabled and no existing connection/connection string +var shouldCreateAppInsights = enableMonitoring && !hasExistingAppInsightsConnection && !hasExistingAppInsightsConnectionString +var hasSearchConnection = length(filter(additionalDependentResources, conn => conn.resource == 'azure_ai_search')) > 0 +var hasBingConnection = length(filter(additionalDependentResources, conn => conn.resource == 'bing_grounding')) > 0 +var hasBingCustomConnection = length(filter(additionalDependentResources, conn => conn.resource == 
'bing_custom_grounding')) > 0 + +// Extract connection names from ai.yaml for each resource type +var storageConnectionName = hasStorageConnection ? filter(additionalDependentResources, conn => conn.resource == 'storage')[0].connectionName : '' +var acrConnectionName = hasAcrConnection ? filter(additionalDependentResources, conn => conn.resource == 'registry')[0].connectionName : '' +var searchConnectionName = hasSearchConnection ? filter(additionalDependentResources, conn => conn.resource == 'azure_ai_search')[0].connectionName : '' +var bingConnectionName = hasBingConnection ? filter(additionalDependentResources, conn => conn.resource == 'bing_grounding')[0].connectionName : '' +var bingCustomConnectionName = hasBingCustomConnection ? filter(additionalDependentResources, conn => conn.resource == 'bing_custom_grounding')[0].connectionName : '' + +// Enable monitoring via Log Analytics and Application Insights +module logAnalytics '../monitor/loganalytics.bicep' = if (shouldCreateAppInsights) { + name: 'logAnalytics' + params: { + location: location + tags: tags + name: 'logs-${resourceToken}' + } +} + +module applicationInsights '../monitor/applicationinsights.bicep' = if (shouldCreateAppInsights) { + name: 'applicationInsights' + params: { + location: location + tags: tags + name: 'appi-${resourceToken}' + logAnalyticsWorkspaceId: logAnalytics.outputs.id + } +} + +// Always create a new AI Account for now (simplified approach) +// TODO: Add support for existing accounts in a future version +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-06-01' = { + name: !empty(existingAiAccountName) ? existingAiAccountName : 'ai-account-${resourceToken}' + location: location + tags: tags + sku: { + name: 'S0' + } + kind: 'AIServices' + identity: { + type: 'SystemAssigned' + } + properties: { + allowProjectManagement: true + customSubDomainName: !empty(existingAiAccountName) ? 
existingAiAccountName : 'ai-account-${resourceToken}' + networkAcls: { + defaultAction: 'Allow' + virtualNetworkRules: [] + ipRules: [] + } + publicNetworkAccess: 'Enabled' + disableLocalAuth: true + } + + @batchSize(1) + resource seqDeployments 'deployments' = [ + for dep in (deployments??[]): { + name: dep.name + properties: { + model: dep.model + } + sku: dep.sku + } + ] + + resource project 'projects' = { + name: aiFoundryProjectName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + description: '${aiFoundryProjectName} Project' + displayName: '${aiFoundryProjectName}Project' + } + dependsOn: [ + seqDeployments + ] + } + + resource aiFoundryAccountCapabilityHost 'capabilityHosts@2025-10-01-preview' = if (enableHostedAgents && enableCapabilityHost) { + name: 'agents' + properties: { + capabilityHostKind: 'Agents' + // IMPORTANT: this is required to enable hosted agents deployment + // if no BYO Net is provided + enablePublicHostingEnvironment: true + } + } +} + + +// Create connection towards appinsights: +// - when we create a new App Insights resource, OR +// - when the user provided an existing App Insights connection string + resource ID but no existing connection name +// Both cases are merged into a single resource to avoid duplicate ARM resource definitions (which fail deployment). +var shouldCreateExistingAppInsightsConnection = enableMonitoring && hasExistingAppInsightsConnectionString && !hasExistingAppInsightsConnection && !empty(existingApplicationInsightsResourceId) +var shouldCreateAppInsightsConnection = shouldCreateAppInsights || shouldCreateExistingAppInsightsConnection + +resource appInsightConnection 'Microsoft.CognitiveServices/accounts/projects/connections@2025-04-01-preview' = if (shouldCreateAppInsightsConnection) { + parent: aiAccount::project + name: 'appi-connection' + properties: { + category: 'AppInsights' + target: shouldCreateAppInsights ? 
applicationInsights.outputs.id : existingApplicationInsightsResourceId + authType: 'ApiKey' + isSharedToAll: true + credentials: { + key: shouldCreateAppInsights ? applicationInsights.outputs.connectionString : existingApplicationInsightsConnectionString + } + metadata: { + ApiType: 'Azure' + ResourceId: shouldCreateAppInsights ? applicationInsights.outputs.id : existingApplicationInsightsResourceId + } + } +} + +// Create additional connections from ai.yaml configuration +module aiConnections './connection.bicep' = [for (connection, index) in connections: { + name: 'connection-${connection.name}' + params: { + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + connectionConfig: connection + credentials: connectionCredentials[?connection.name] ?? {} + } +}] + +resource localUserAiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: resourceGroup() + name: guid(subscription().id, resourceGroup().id, principalId, '64702f94-c441-49e6-a78b-ef80e0188fee') + properties: { + principalId: principalId + principalType: principalType + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', '64702f94-c441-49e6-a78b-ef80e0188fee') + } +} + +resource localUserCognitiveServicesUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: resourceGroup() + name: guid(subscription().id, resourceGroup().id, principalId, 'a97b65f3-24c7-4388-baec-2e87135dc908') + properties: { + principalId: principalId + principalType: principalType + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') + } +} + +resource projectCognitiveServicesUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: aiAccount + name: guid(subscription().id, resourceGroup().id, aiAccount::project.name, '53ca6127-db72-4b80-b1b0-d745d6d5456d') + properties: { + principalId: aiAccount::project.identity.principalId + 
principalType: 'ServicePrincipal' + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d') + } +} + + +// All connections are now created directly within their respective resource modules +// using the centralized ./connection.bicep module + +// Storage module - deploy if storage connection is defined in ai.yaml +module storage '../storage/storage.bicep' = if (hasStorageConnection) { + name: 'storage' + params: { + location: location + tags: tags + resourceName: 'st${resourceToken}' + connectionName: storageConnectionName + principalId: principalId + principalType: principalType + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Azure Container Registry module - deploy if ACR connection is defined in ai.yaml +module acr '../host/acr.bicep' = if (hasAcrConnection) { + name: 'acr' + params: { + location: location + tags: tags + resourceName: '${abbrs.containerRegistryRegistries}${resourceToken}' + connectionName: acrConnectionName + principalId: principalId + principalType: principalType + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Connection for existing ACR - create if user provided an existing ACR resource ID but no existing connection +module existingAcrConnection './connection.bicep' = if (hasExistingAcr && !hasExistingAcrConnection) { + name: 'existing-acr-connection' + params: { + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + connectionConfig: { + name: 'acr-connection' + category: 'ContainerRegistry' + target: existingContainerRegistryEndpoint + authType: 'ManagedIdentity' + isSharedToAll: true + metadata: { + ResourceId: existingContainerRegistryResourceId + } + } + credentials: { + clientId: aiAccount::project.identity.principalId + resourceId: existingContainerRegistryResourceId + } + } +} + +// Extract resource group name from the existing ACR resource ID +// Resource ID 
format: /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerRegistry/registries/{name} +var existingAcrResourceGroup = hasExistingAcr ? split(existingContainerRegistryResourceId, '/')[4] : '' +var existingAcrName = hasExistingAcr ? last(split(existingContainerRegistryResourceId, '/')) : '' + +// Grant AcrPull role to the AI project's managed identity on the existing ACR +// This allows the hosted agents to pull images from the user-provided registry +// Note: User must have permission to assign roles on the existing ACR (Owner or User Access Administrator) +// Using a module allows scoping to a different resource group if the ACR isn't in the same RG +// Skip if connection already exists (role assignment should already be in place) +module existingAcrRoleAssignment './acr-role-assignment.bicep' = if (hasExistingAcr && !hasExistingAcrConnection) { + name: 'existing-acr-role-assignment' + scope: resourceGroup(existingAcrResourceGroup) + params: { + acrName: existingAcrName + acrResourceId: existingContainerRegistryResourceId + principalId: aiAccount::project.identity.principalId + } +} + +// Bing Search grounding module - deploy if Bing connection is defined in ai.yaml or parameter is enabled +module bingGrounding '../search/bing_grounding.bicep' = if (hasBingConnection) { + name: 'bing-grounding' + params: { + tags: tags + resourceName: 'bing-${resourceToken}' + connectionName: bingConnectionName + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Bing Custom Search grounding module - deploy if custom Bing connection is defined in ai.yaml or parameter is enabled +module bingCustomGrounding '../search/bing_custom_grounding.bicep' = if (hasBingCustomConnection) { + name: 'bing-custom-grounding' + params: { + tags: tags + resourceName: 'bingcustom-${resourceToken}' + connectionName: bingCustomConnectionName + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Azure AI 
Search module - deploy if search connection is defined in ai.yaml +module azureAiSearch '../search/azure_ai_search.bicep' = if (hasSearchConnection) { + name: 'azure-ai-search' + params: { + tags: tags + resourceName: 'search-${resourceToken}' + connectionName: searchConnectionName + storageAccountResourceId: hasStorageConnection ? storage!.outputs.storageAccountId : '' + containerName: 'knowledge' + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + principalId: principalId + principalType: principalType + location: location + } +} + +// Outputs +output AZURE_AI_PROJECT_ENDPOINT string = aiAccount::project.properties.endpoints['AI Foundry API'] +output AZURE_OPENAI_ENDPOINT string = aiAccount.properties.endpoints['OpenAI Language Model Instance API'] +output aiServicesEndpoint string = aiAccount.properties.endpoint +output accountId string = aiAccount.id +output projectId string = aiAccount::project.id +output aiServicesAccountName string = aiAccount.name +output aiServicesProjectName string = aiAccount::project.name +output aiServicesPrincipalId string = aiAccount.identity.principalId +output projectName string = aiAccount::project.name +output APPLICATIONINSIGHTS_CONNECTION_STRING string = shouldCreateAppInsights ? applicationInsights.outputs.connectionString : (hasExistingAppInsightsConnectionString ? existingApplicationInsightsConnectionString : '') +output APPLICATIONINSIGHTS_RESOURCE_ID string = shouldCreateAppInsights ? applicationInsights.outputs.id : (hasExistingAppInsightsConnectionString ? existingApplicationInsightsResourceId : '') + +// Connection outputs from the connections array +output connectionIds array = [for (connection, index) in (connections ?? []): { + name: aiConnections[index].outputs.connectionName + id: aiConnections[index].outputs.connectionId +}] + +// Grouped dependent resources outputs +output dependentResources object = { + registry: { + name: hasAcrConnection ? 
acr!.outputs.containerRegistryName : '' + loginServer: hasAcrConnection ? acr!.outputs.containerRegistryLoginServer : ((hasExistingAcr || hasExistingAcrConnection) ? existingContainerRegistryEndpoint : '') + connectionName: hasAcrConnection ? acr!.outputs.containerRegistryConnectionName : (hasExistingAcrConnection ? existingAcrConnectionName : (hasExistingAcr ? 'acr-connection' : '')) + } + bing_grounding: { + name: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingName : '' + connectionName: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingConnectionName : '' + connectionId: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingConnectionId : '' + } + bing_custom_grounding: { + name: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingName : '' + connectionName: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingConnectionName : '' + connectionId: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingConnectionId : '' + } + search: { + serviceName: hasSearchConnection ? azureAiSearch!.outputs.searchServiceName : '' + connectionName: hasSearchConnection ? azureAiSearch!.outputs.searchConnectionName : '' + } + storage: { + accountName: hasStorageConnection ? storage!.outputs.storageAccountName : '' + connectionName: hasStorageConnection ? storage!.outputs.storageConnectionName : '' + } +} + +type deploymentsType = { + @description('Specify the name of cognitive service account deployment.') + name: string + + @description('Required. Properties of Cognitive Services account deployment model.') + model: { + @description('Required. The name of Cognitive Services account deployment model.') + name: string + + @description('Required. The format of Cognitive Services account deployment model.') + format: string + + @description('Required. 
The version of Cognitive Services account deployment model.') + version: string + } + + @description('The resource model definition representing SKU.') + sku: { + @description('Required. The name of the resource model definition representing SKU.') + name: string + + @description('The capacity of the resource model definition representing SKU.') + capacity: int + } +}[]? + +type dependentResourcesType = { + @description('The type of dependent resource to create') + resource: 'storage' | 'registry' | 'azure_ai_search' | 'bing_grounding' | 'bing_custom_grounding' + + @description('The connection name for this resource') + connectionName: string +}[] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/connection.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/connection.bicep new file mode 100644 index 000000000..a08726645 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/ai/connection.bicep @@ -0,0 +1,112 @@ +targetScope = 'resourceGroup' + +@description('AI Services account name') +param aiServicesAccountName string + +@description('AI project name') +param aiProjectName string + +// Connection configuration type definition +type ConnectionConfig = { + @description('Name of the connection') + name: string + + @description('Category of the connection (e.g., ContainerRegistry, AzureStorageAccount, CognitiveSearch, AzureOpenAI)') + category: string + + @description('Target endpoint or URL for the connection') + target: string + + @description('Authentication type') + authType: 'AAD' | 'AccessKey' | 'AccountKey' | 'AgenticIdentity' | 'ApiKey' | 'CustomKeys' | 'ManagedIdentity' | 'None' | 'OAuth2' | 'PAT' | 'SAS' | 'ServicePrincipal' | 'UsernamePassword' | 'UserEntraToken' | 'ProjectManagedIdentity' + + @description('Whether the connection is shared to all users (optional, defaults to true)') + isSharedToAll: bool? 
+ + @description('Additional metadata for the connection (optional)') + metadata: object? + + @description('Error message if the connection fails (optional)') + error: string? + + @description('Expiry time for the connection (optional)') + expiryTime: string? + + @description('Private endpoint requirement: Required, NotRequired, or NotApplicable (optional)') + peRequirement: ('NotApplicable' | 'NotRequired' | 'Required')? + + @description('Private endpoint status: Active, Inactive, or NotApplicable (optional)') + peStatus: ('Active' | 'Inactive' | 'NotApplicable')? + + @description('List of users to share the connection with (optional, alternative to isSharedToAll)') + sharedUserList: string[]? + + @description('Whether to use workspace managed identity (optional)') + useWorkspaceManagedIdentity: bool? + + @description('OAuth2 authorization endpoint URL (optional, OAuth2 authType only)') + authorizationUrl: string? + + @description('OAuth2 token endpoint URL (optional, OAuth2 authType only)') + tokenUrl: string? + + @description('OAuth2 refresh token endpoint URL (optional, OAuth2 authType only)') + refreshUrl: string? + + @description('OAuth2 scopes to request (optional, OAuth2 authType only)') + scopes: string[]? + + @description('Token audience for UserEntraToken / AgenticIdentity auth types (optional)') + audience: string? + + @description('Managed connector name for OAuth2 managed connectors (optional)') + connectorName: string? +} + +@description('Connection configuration') +param connectionConfig ConnectionConfig + +@secure() +@description('Credentials for the connection. Kept as a separate @secure parameter to prevent secrets from appearing in deployment logs. Shape depends on authType — e.g. { key: "..." } for ApiKey, { clientId: "...", clientSecret: "..." 
} for OAuth2/ServicePrincipal.') +param credentials object = {} + + +// Get reference to the AI Services account and project +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = { + name: aiServicesAccountName + + resource project 'projects' existing = { + name: aiProjectName + } +} + +// Create the connection +resource connection 'Microsoft.CognitiveServices/accounts/projects/connections@2025-04-01-preview' = { + parent: aiAccount::project + name: connectionConfig.name + properties: { + category: connectionConfig.category + target: connectionConfig.target + authType: connectionConfig.authType + isSharedToAll: connectionConfig.?isSharedToAll ?? true + credentials: !empty(credentials) ? credentials : null + metadata: connectionConfig.?metadata + // Only include if they appear in the connectionConfig + ...connectionConfig.?error != null ? { error: connectionConfig.?error } : {} + ...connectionConfig.?expiryTime != null ? { expiryTime: connectionConfig.?expiryTime } : {} + ...connectionConfig.?peRequirement != null ? { peRequirement: connectionConfig.?peRequirement } : {} + ...connectionConfig.?peStatus != null ? { peStatus: connectionConfig.?peStatus } : {} + ...connectionConfig.?sharedUserList != null ? { sharedUserList: connectionConfig.?sharedUserList } : {} + ...connectionConfig.?useWorkspaceManagedIdentity != null ? { useWorkspaceManagedIdentity: connectionConfig.?useWorkspaceManagedIdentity } : {} + ...connectionConfig.?authorizationUrl != null ? { authorizationUrl: connectionConfig.?authorizationUrl } : {} + ...connectionConfig.?tokenUrl != null ? { tokenUrl: connectionConfig.?tokenUrl } : {} + ...connectionConfig.?refreshUrl != null ? { refreshUrl: connectionConfig.?refreshUrl } : {} + ...connectionConfig.?scopes != null ? { scopes: connectionConfig.?scopes } : {} + ...connectionConfig.?audience != null ? { audience: connectionConfig.?audience } : {} + ...connectionConfig.?connectorName != null ? 
{ connectorName: connectionConfig.?connectorName } : {} + } +} + +// Outputs +output connectionName string = connection.name +output connectionId string = connection.id diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/host/acr.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/host/acr.bicep new file mode 100644 index 000000000..360bf2298 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/host/acr.bicep @@ -0,0 +1,87 @@ +targetScope = 'resourceGroup' + +@description('The location used for all deployed resources') +param location string = resourceGroup().location + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Resource name for the container registry') +param resourceName string + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Name for the AI Foundry ACR connection') +param connectionName string = 'acr-connection' + +// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Create the Container Registry +module containerRegistry 'br/public:avm/res/container-registry/registry:0.1.1' = { + name: 'registry' + params: { + name: resourceName + location: location + tags: tags + publicNetworkAccess: 'Enabled' + roleAssignments:[ + { + principalId: 
principalId + principalType: principalType + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } + // TODO SEPARATELY + { + // the foundry project itself can pull from the ACR + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } + ] + } +} + +// Create the ACR connection using the centralized connection module +module acrConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'acr-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 'ContainerRegistry' + target: containerRegistry.outputs.loginServer + authType: 'ManagedIdentity' + isSharedToAll: true + metadata: { + ResourceId: containerRegistry.outputs.resourceId + } + } + credentials: { + clientId: aiAccount::aiProject.identity.principalId + resourceId: containerRegistry.outputs.resourceId + } + } +} + +output containerRegistryName string = containerRegistry.outputs.name +output containerRegistryLoginServer string = containerRegistry.outputs.loginServer +output containerRegistryResourceId string = containerRegistry.outputs.resourceId +output containerRegistryConnectionName string = acrConnection.outputs.connectionName diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights-dashboard.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights-dashboard.bicep new file mode 100644 index 000000000..f3e0952b4 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights-dashboard.bicep @@ -0,0 +1,1236 @@ 
+metadata description = 'Creates a dashboard for an Application Insights instance.' +param name string +param applicationInsightsName string +param location string = resourceGroup().location +param tags object = {} + +// 2020-09-01-preview because that is the latest valid version +resource applicationInsightsDashboard 'Microsoft.Portal/dashboards@2020-09-01-preview' = { + name: name + location: location + tags: tags + properties: { + lenses: [ + { + order: 0 + parts: [ + { + position: { + x: 0 + y: 0 + colSpan: 2 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'id' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AspNetOverviewPinnedPart' + asset: { + idInputName: 'id' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'overview' + } + } + { + position: { + x: 2 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/ProactiveDetectionAsyncPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'ProactiveDetection' + } + } + { + position: { + x: 3 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'ResourceId' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + ] + 
#disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/QuickPulseButtonSmallPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 4 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-04T01:20:33.345Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AvailabilityNavButtonPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 5 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-08T18:47:35.237Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'ConfigurationId' + value: '78ce933e-e864-4b05-a27b-71fd55a6afad' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AppMapButtonPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 0 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Usage' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 3 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: 
subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-04T01:22:35.782Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/UsageUsersOverviewPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 4 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Reliability' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 7 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ResourceId' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'DataModel' + value: { + version: '1.0.0' + timeContext: { + durationMs: 86400000 + createdTime: '2018-05-04T23:42:40.072Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + isOptional: true + } + { + name: 'ConfigurationId' + value: '8a02f7bf-ac0f-40e1-afe9-f0e72cfee77f' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/CuratedBladeFailuresPinnedPart' + isAdapter: true + asset: { + idInputName: 'ResourceId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'failures' + } + } + { + position: { + x: 8 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Responsiveness\r\n' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 11 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ResourceId' + value: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'DataModel' + value: { + version: '1.0.0' + timeContext: { + durationMs: 86400000 + createdTime: '2018-05-04T23:43:37.804Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + isOptional: true + } + { + name: 'ConfigurationId' + value: '2a8ede4f-2bee-4b9c-aed9-2db0e8a01865' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/CuratedBladePerformancePinnedPart' + isAdapter: true + asset: { + idInputName: 'ResourceId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'performance' + } + } + { + position: { + x: 12 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Browser' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 15 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'MetricsExplorerJsonDefinitionId' + value: 'BrowserPerformanceTimelineMetrics' + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + createdTime: '2018-05-08T12:16:27.534Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'CurrentFilter' + value: { + eventTypes: [ + 4 + 1 + 3 + 5 + 2 + 6 + 13 + ] + typeFacets: {} + isPermissive: false + } + } + { + name: 'id' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/MetricsExplorerBladePinnedPart' + asset: { + idInputName: 
'ComponentId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'browser' + } + } + { + position: { + x: 0 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'sessions/count' + aggregationType: 5 + namespace: 'microsoft.insights/components/kusto' + metricVisualization: { + displayName: 'Sessions' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'users/count' + aggregationType: 5 + namespace: 'microsoft.insights/components/kusto' + metricVisualization: { + displayName: 'Users' + color: '#7E58FF' + } + } + ] + title: 'Unique sessions and users' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'segmentationUsers' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 4 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'requests/failed' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Failed requests' + color: '#EC008C' + } + } + ] + title: 'Failed requests' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'failures' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'requests/duration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Server response time' + color: '#00BCF2' + } + } + ] + title: 'Server response time' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 
'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'performance' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 12 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/networkDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Page load network connect time' + color: '#7E58FF' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/processingDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Client processing time' + color: '#44F1C8' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/sendDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Send request time' + color: '#EB9371' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 
'browserTimings/receiveDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Receiving response time' + color: '#0672F1' + } + } + ] + title: 'Average page load time breakdown' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 0 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'availabilityResults/availabilityPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Availability' + color: '#47BDF5' + } + } + ] + title: 'Average availability' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'availability' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + 
position: { + x: 4 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'exceptions/server' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Server exceptions' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'dependencies/failed' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Dependency failures' + color: '#7E58FF' + } + } + ] + title: 'Server exceptions and Dependency failures' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processorCpuPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Processor time' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processCpuPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Process CPU' + color: '#7E58FF' + } + } + ] + title: 'Average processor and process CPU utilization' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 12 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'exceptions/browser' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Browser exceptions' + color: '#47BDF5' + } + } + ] + title: 'Browser exceptions' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 0 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'availabilityResults/count' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Availability test results count' + color: '#47BDF5' + } + } + ] + title: 'Availability test results count' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 4 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processIOBytesPerSecond' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Process IO rate' + color: '#47BDF5' + } + } + ] + title: 'Average process I/O rate' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/memoryAvailableBytes' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Available memory' + color: '#47BDF5' + } + } + ] + title: 'Average available memory' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + ] + } + ] + } +} + +resource applicationInsights 'Microsoft.Insights/components@2020-02-02' existing = { + name: applicationInsightsName +} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights.bicep b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights.bicep new file mode 100644 index 000000000..f8c1e8ad9 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/core/monitor/applicationinsights.bicep @@ -0,0 +1,31 @@ +metadata description = 'Creates an Application Insights instance based on an existing Log Analytics workspace.' 
// ==== file: infra/core/monitor/applicationinsights.bicep (body; metadata header is on the previous chunk) ====

// Name of the Application Insights component to create.
param name string
// Optional dashboard name; when empty the dashboard module below is skipped entirely.
param dashboardName string = ''
param location string = resourceGroup().location
param tags object = {}
// Resource ID of the Log Analytics workspace that backs this (workspace-based) App Insights instance.
param logAnalyticsWorkspaceId string

// Workspace-based Application Insights component ('web' kind).
resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = {
  name: name
  location: location
  tags: tags
  kind: 'web'
  properties: {
    Application_Type: 'web'
    WorkspaceResourceId: logAnalyticsWorkspaceId
  }
}

// Companion portal dashboard — deployed only when a dashboard name was supplied.
module applicationInsightsDashboard 'applicationinsights-dashboard.bicep' = if (!empty(dashboardName)) {
  name: 'application-insights-dashboard'
  params: {
    name: dashboardName
    location: location
    applicationInsightsName: applicationInsights.name
  }
}

output connectionString string = applicationInsights.properties.ConnectionString
output id string = applicationInsights.id
output instrumentationKey string = applicationInsights.properties.InstrumentationKey
output name string = applicationInsights.name

// ==== file: infra/core/monitor/loganalytics.bicep (starts here) ====
metadata description = 'Creates a Log Analytics workspace.'
// ==== file: infra/core/monitor/loganalytics.bicep (body) ====

// Name of the Log Analytics workspace to create.
param name string
param location string = resourceGroup().location
param tags object = {}

// Pay-as-you-go workspace with 30-day retention.
resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2021-12-01-preview' = {
  name: name
  location: location
  tags: tags
  // any() silences type warnings for properties not in the compile-time schema (e.g. searchVersion).
  properties: any({
    retentionInDays: 30
    features: {
      searchVersion: 1
    }
    sku: {
      name: 'PerGB2018'
    }
  })
}

output id string = logAnalytics.id
output name string = logAnalytics.name

// ==== file: infra/core/search/azure_ai_search.bicep (starts here) ====
targetScope = 'resourceGroup'

@description('Tags that will be applied to all resources')
param tags object = {}

@description('Azure Search resource name')
param resourceName string

@description('Azure Search SKU name')
param azureSearchSkuName string = 'basic'

@description('Azure storage account resource ID')
param storageAccountResourceId string

@description('container name')
param containerName string = 'knowledgebase'

@description('AI Services account name for the project parent')
param aiServicesAccountName string = ''

@description('AI project name for creating the connection')
param aiProjectName string = ''

@description('Id of the user or app to assign application roles')
param principalId string

@description('Principal type of user or app')
param principalType string

@description('Name for the AI Foundry search connection')
param connectionName string = 'azure-ai-search-connection'

@description('Location for all resources')
param location string = resourceGroup().location

// Get reference to the AI Services account and project to access their managed identities
// Existing AI Services account + nested project, declared only when both names were supplied.
// The project's managed identity is what receives RBAC on the search service below.
resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: aiServicesAccountName

  resource aiProject 'projects' existing = {
    name: aiProjectName
  }
}

// Azure AI Search service with a system-assigned identity; allows both AAD and API-key auth
// (disableLocalAuth: false), failing AAD-less calls with a 401 + bearer challenge.
resource searchService 'Microsoft.Search/searchServices@2024-06-01-preview' = {
  name: resourceName
  location: location
  tags: tags
  sku: {
    name: azureSearchSkuName
  }
  identity: {
    type: 'SystemAssigned'
  }
  properties: {
    replicaCount: 1
    partitionCount: 1
    hostingMode: 'default'
    authOptions: {
      aadOrApiKey: {
        aadAuthFailureMode: 'http401WithBearerChallenge'
      }
    }
    disableLocalAuth: false
    encryptionWithCmk: {
      enforcement: 'Unspecified'
    }
    publicNetworkAccess: 'enabled'
  }
}

// Reference to existing Storage Account.
// NOTE(review): only the account *name* is taken from the resource ID, so this assumes the storage
// account lives in the deployment's own resource group/subscription — confirm with callers.
resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = {
  name: last(split(storageAccountResourceId, '/'))
}

// Reference to existing Blob Service
resource blobService 'Microsoft.Storage/storageAccounts/blobServices@2023-05-01' existing = {
  parent: storageAccount
  name: 'default'
}

// Storage Container (create if it doesn't exist) — private access only.
resource storageContainer 'Microsoft.Storage/storageAccounts/blobServices/containers@2023-05-01' = {
  parent: blobService
  name: containerName
  properties: {
    publicAccess: 'None'
  }
}

// RBAC: Search's managed identity gets read access to the storage account (indexer data source).
resource searchToStorageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
  name: guid(storageAccount.id, searchService.id, 'Storage Blob Data Reader', uniqueString(deployment().name))
  scope: storageAccount
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') // Storage Blob Data Reader
    principalId: searchService.identity.principalId
    principalType: 'ServicePrincipal'
  }
}
// Search needs OpenAI access (AI Services account) — e.g. for vectorization during indexing.
// NOTE(review): no `scope:` is set, so this assignment lands at resource-group scope rather than on
// the AI Services account itself. That is broader than the comment implies — confirm it is intended.
resource searchToAIServicesRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName)) {
  name: guid(aiServicesAccountName, searchService.id, 'Cognitive Services OpenAI User', uniqueString(deployment().name))
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '5e0bd9bd-7b93-4f28-af87-19fc36ad61bd') // Cognitive Services OpenAI User
    principalId: searchService.identity.principalId
    principalType: 'ServicePrincipal'
  }
}

// AI project identity -> Search Service Contributor (manage the search service).
resource aiServicesToSearchServiceRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: guid(searchService.id, aiServicesAccountName, aiProjectName, 'Search Service Contributor', uniqueString(deployment().name))
  scope: searchService
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7ca78c08-252a-4471-8644-bb5ff32d4ba0') // Search Service Contributor
    principalId: aiAccount::aiProject.identity.principalId
    principalType: 'ServicePrincipal'
  }
}

// AI project identity -> Search Index Data Contributor (read/write index data).
resource aiServicesToSearchDataRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: guid(searchService.id, aiServicesAccountName, aiProjectName, 'Search Index Data Contributor', uniqueString(deployment().name))
  scope: searchService
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '8ebe5a00-799e-43f5-93ac-243d3dce84a7') // Search Index Data Contributor
    principalId: aiAccount::aiProject.identity.principalId
    principalType: 'ServicePrincipal'
  }
}

// Deploying user/app -> Search Index Data Contributor.
resource userToSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
  name: guid(searchService.id, principalId, 'Search Index Data Contributor', uniqueString(deployment().name))
  scope: searchService
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '8ebe5a00-799e-43f5-93ac-243d3dce84a7') // Search Index Data Contributor
    principalId: principalId
    principalType: principalType
  }
}

// (Upstream also carried commented-out user->storage and project->search role assignments;
// removed here as dead code — restore from history if they are ever needed.)

// Create the AI Search connection on the Foundry project via the shared connection module.
// dependsOn ensures index-data RBAC is in place before the connection is usable.
module aiSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: 'ai-search-connection-creation'
  params: {
    aiServicesAccountName: aiServicesAccountName
    aiProjectName: aiProjectName
    connectionConfig: {
      name: connectionName
      category: 'CognitiveSearch'
      target: 'https://${searchService.name}.search.windows.net'
      authType: 'AAD'
      isSharedToAll: true
      metadata: {
        ApiVersion: '2024-07-01'
        ResourceId: searchService.id
        ApiType: 'Azure'
        type: 'azure_ai_search'
      }
    }
  }
  dependsOn: [
    aiServicesToSearchDataRoleAssignment
  ]
}

// Outputs — connection outputs are guarded because the module above is conditional.
output searchServiceName string = searchService.name
output searchServiceId string = searchService.id
output searchServicePrincipalId string = searchService.identity.principalId
output storageAccountName string = storageAccount.name
output storageAccountId string = storageAccount.id
output containerName string = storageContainer.name
// NOTE(review): the storage account is an *existing* reference; if it has no managed identity this
// output will fail at deployment time — confirm the referenced account has one.
output storageAccountPrincipalId string = storageAccount.identity.principalId
output searchConnectionName string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionName : ''
output searchConnectionId string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionId : ''

// ==== file: infra/core/search/bing_custom_grounding.bicep (starts here) ====
targetScope = 'resourceGroup'

@description('Tags that will be applied to all resources')
param tags object = {}

@description('Bing custom grounding resource name')
param resourceName string

@description('AI Services account name for the project parent')
param aiServicesAccountName string = ''

@description('AI project name for creating the connection')
param aiProjectName string = ''

@description('Name for the AI Foundry Bing Custom Search connection')
param connectionName string = 'bing-custom-grounding-connection'

// Existing AI Services account + nested project — provides the project managed identity for RBAC.
resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: aiServicesAccountName

  resource aiProject 'projects' existing = {
    name: aiProjectName
  }
}

// Bing Search resource for grounding capability (Bing resources are always 'global').
resource bingCustomSearch 'Microsoft.Bing/accounts@2020-06-10' = {
  name: resourceName
  location: 'global'
  tags: tags
  sku: {
    name: 'G1'
  }
  properties: {
    statisticsEnabled: false
  }
  kind: 'Bing.CustomGrounding'
}

// Role assignment to allow AI project to use Bing Search.
// NOTE(review): role definitions are subscription-level; sibling modules use subscriptionResourceId()
// here instead of resourceId() — consider aligning.
resource bingCustomSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  scope: bingCustomSearch
  name: guid(subscription().id, resourceGroup().id, 'bing-search-role', aiServicesAccountName, aiProjectName)
  properties: {
    principalId: aiAccount::aiProject.identity.principalId
    principalType: 'ServicePrincipal'
    roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') // Cognitive Services User
  }
}

// Create the Bing Custom Search connection using the centralized connection module.
module aiSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: 'bing-custom-search-connection-creation'
  params: {
    aiServicesAccountName: aiServicesAccountName
    aiProjectName: aiProjectName
    connectionConfig: {
      name: connectionName
      category: 'GroundingWithCustomSearch'
      target: bingCustomSearch.properties.endpoint
      authType: 'ApiKey'
      isSharedToAll: true
      metadata: {
        Location: 'global'
        ResourceId: bingCustomSearch.id
        ApiType: 'Azure'
        type: 'bing_custom_search'
      }
    }
    credentials: {
      key: bingCustomSearch.listKeys().key1
    }
  }
  dependsOn: [
    bingCustomSearchRoleAssignment
  ]
}

// Outputs.
// FIX: the connection module is conditional, so its outputs must be guarded with the same condition
// and the safe-dereference operator (!), matching azure_ai_search.bicep — the previous unguarded
// `aiSearchConnection.outputs.*` fails when the account/project names are empty.
output bingCustomGroundingName string = bingCustomSearch.name
output bingCustomGroundingConnectionName string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionName : ''
output bingCustomGroundingResourceId string = bingCustomSearch.id
output bingCustomGroundingConnectionId string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionId : ''

// ==== file: infra/core/search/bing_grounding.bicep (starts here) ====
targetScope = 'resourceGroup'

@description('Tags that will be applied to all resources')
param tags object = {}

@description('Bing grounding resource name')
param resourceName string

@description('AI Services account name for the project parent')
param aiServicesAccountName string = ''

@description('AI project name for creating the connection')
param aiProjectName string = ''

@description('Name for the AI Foundry Bing Search connection')
param connectionName string = 'bing-grounding-connection'

// Existing AI Services account + nested project — provides the project managed identity for RBAC.
resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: aiServicesAccountName

  resource aiProject 'projects' existing = {
    name: aiProjectName
  }
}

// Bing Search resource for grounding capability (always 'global').
resource bingSearch 'Microsoft.Bing/accounts@2020-06-10' = {
  name: resourceName
  location: 'global'
  tags: tags
  sku: {
    name: 'G1'
  }
  properties: {
    statisticsEnabled: false
  }
  kind: 'Bing.Grounding'
}
// Role assignment to allow AI project to use Bing Search.
resource bingSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  scope: bingSearch
  name: guid(subscription().id, resourceGroup().id, 'bing-search-role', aiServicesAccountName, aiProjectName)
  properties: {
    principalId: aiAccount::aiProject.identity.principalId
    principalType: 'ServicePrincipal'
    roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') // Cognitive Services User
  }
}

// Create the Bing Search connection using the centralized connection module.
module bingSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: 'bing-search-connection-creation'
  params: {
    aiServicesAccountName: aiServicesAccountName
    aiProjectName: aiProjectName
    connectionConfig: {
      name: connectionName
      category: 'GroundingWithBingSearch'
      target: bingSearch.properties.endpoint
      authType: 'ApiKey'
      isSharedToAll: true
      metadata: {
        Location: 'global'
        ResourceId: bingSearch.id
        ApiType: 'Azure'
        type: 'bing_grounding'
      }
    }
    credentials: {
      key: bingSearch.listKeys().key1
    }
  }
  dependsOn: [
    bingSearchRoleAssignment
  ]
}

// FIX: the connection module is conditional, so its outputs must be guarded with the same condition
// and the safe-dereference operator (!), matching azure_ai_search.bicep — the previous unguarded
// `bingSearchConnection.outputs.*` fails when the account/project names are empty.
output bingGroundingName string = bingSearch.name
output bingGroundingConnectionName string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? bingSearchConnection!.outputs.connectionName : ''
output bingGroundingResourceId string = bingSearch.id
output bingGroundingConnectionId string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? bingSearchConnection!.outputs.connectionId : ''

// ==== file: infra/core/storage/storage.bicep (starts here) ====
targetScope = 'resourceGroup'

@description('The location used for all deployed resources')
param location string = resourceGroup().location

@description('Tags that will be applied to all resources')
param tags object = {}

@description('Storage account resource name')
param resourceName string

@description('Id of the user or app to assign application roles')
param principalId string

@description('Principal type of user or app')
param principalType string

@description('AI Services account name for the project parent')
param aiServicesAccountName string = ''

@description('AI project name for creating the connection')
param aiProjectName string = ''

@description('Name for the AI Foundry storage connection')
param connectionName string = 'storage-connection'

// Storage Account for the AI Services account: HTTPS-only, TLS 1.2 minimum, no public blob access.
resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' = {
  name: resourceName
  location: location
  tags: tags
  sku: {
    name: 'Standard_LRS'
  }
  kind: 'StorageV2'
  identity: {
    type: 'SystemAssigned'
  }
  properties: {
    supportsHttpsTrafficOnly: true
    allowBlobPublicAccess: false
    minimumTlsVersion: 'TLS1_2'
    accessTier: 'Hot'
    encryption: {
      services: {
        blob: {
          enabled: true
        }
        file: {
          enabled: true
        }
      }
      keySource: 'Microsoft.Storage'
    }
  }
}

// Existing AI Services account + nested project — provides the project managed identity for RBAC.
resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: aiServicesAccountName

  resource aiProject 'projects' existing = {
    name: aiProjectName
  }
}

// AI project identity -> Storage Blob Data Contributor on the new storage account.
resource storageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: guid(storageAccount.id, aiAccount.id, 'ai-storage-contributor')
  scope: storageAccount
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe') // Storage Blob Data Contributor
    principalId: aiAccount::aiProject.identity.principalId
    principalType: 'ServicePrincipal'
  }
}

// Deploying user/app -> Storage Blob Data Contributor.
resource userStorageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
  name: guid(storageAccount.id, principalId, 'Storage Blob Data Contributor')
  scope: storageAccount
  properties: {
    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe') // Storage Blob Data Contributor
    principalId: principalId
    principalType: principalType
  }
}

// Create the storage connection using the centralized connection module.
module storageConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) {
  name: 'storage-connection-creation'
  params: {
    aiServicesAccountName: aiServicesAccountName
    aiProjectName: aiProjectName
    connectionConfig: {
      name: connectionName
      category: 'AzureStorageAccount'
      target: storageAccount.properties.primaryEndpoints.blob
      authType: 'AAD'
      isSharedToAll: true
      metadata: {
        ApiType: 'Azure'
        ResourceId: storageAccount.id
        location: storageAccount.location
      }
    }
  }
}

output storageAccountName string = storageAccount.name
output storageAccountId string = storageAccount.id
output storageAccountPrincipalId string = storageAccount.identity.principalId
// FIX: storageConnection is a conditional module — guard its output like the other modules do;
// the previous unguarded `storageConnection.outputs.connectionName` fails when no connection is made.
output storageConnectionName string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? storageConnection!.outputs.connectionName : ''
+targetScope = 'subscription' +// targetScope = 'resourceGroup' + +@minLength(1) +@maxLength(64) +@description('Name of the environment that can be used as part of naming resource convention') +param environmentName string + +@minLength(1) +@maxLength(90) +@description('Name of the resource group to use or create') +param resourceGroupName string = 'rg-${environmentName}' + +// Restricted locations to match list from +// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#region-availability +@minLength(1) +@description('Primary location for all resources') +@allowed([ + 'australiaeast' + 'brazilsouth' + 'canadacentral' + 'canadaeast' + 'eastus' + 'eastus2' + 'francecentral' + 'germanywestcentral' + 'italynorth' + 'japaneast' + 'koreacentral' + 'northcentralus' + 'norwayeast' + 'polandcentral' + 'southafricanorth' + 'southcentralus' + 'southeastasia' + 'southindia' + 'spaincentral' + 'swedencentral' + 'switzerlandnorth' + 'uaenorth' + 'uksouth' + 'westus' + 'westus2' + 'westus3' +]) +param location string + +param aiDeploymentsLocation string + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('Optional. Name of an existing AI Services account within the resource group. If not provided, a new one will be created.') +param aiFoundryResourceName string = '' + +@description('Optional. Name of the AI Foundry project. If not provided, a default name will be used.') +param aiFoundryProjectName string = 'ai-project-${environmentName}' + +@description('List of model deployments') +param aiProjectDeploymentsJson string = '[]' + +@description('List of connections') +param aiProjectConnectionsJson string = '[]' + +@secure() +@description('JSON map of connection name to credentials object. 
Example: {"my-conn":{"key":"secret"}}') +param aiProjectConnectionCredentialsJson string = '{}' + +@description('List of resources to create and connect to the AI project') +param aiProjectDependentResourcesJson string = '[]' + +var aiProjectDeployments = json(aiProjectDeploymentsJson) +var aiProjectConnections = json(aiProjectConnectionsJson) +var aiProjectConnectionCreds = json(aiProjectConnectionCredentialsJson) +var aiProjectDependentResources = json(aiProjectDependentResourcesJson) + +@description('Enable hosted agent deployment') +param enableHostedAgents bool + +@description('Enable the capability host for supporting BYO storage of agent conversations. When false and hosted agents are enabled, the capability host is not created.') +param enableCapabilityHost bool + +@description('Enable monitoring for the AI project') +param enableMonitoring bool + +@description('Optional. Existing container registry resource ID. If provided, no new ACR will be created and a connection to this ACR will be established.') +param existingContainerRegistryResourceId string = '' + +@description('Optional. Existing container registry endpoint (login server). Required if existingContainerRegistryResourceId is provided.') +param existingContainerRegistryEndpoint string = '' + +@description('Optional. Name of an existing ACR connection on the Foundry project. If provided, no new ACR or connection will be created.') +param existingAcrConnectionName string = '' + +@description('Optional. Existing Application Insights connection string. If provided, a connection will be created but no new App Insights resource.') +param existingApplicationInsightsConnectionString string = '' + +@description('Optional. Existing Application Insights resource ID. Used for connection metadata when providing an existing App Insights.') +param existingApplicationInsightsResourceId string = '' + +@description('Optional. Name of an existing Application Insights connection on the Foundry project. 
If provided, no new App Insights or connection will be created.') +param existingAppInsightsConnectionName string = '' + +// Tags that should be applied to all resources. +// +// Note that 'azd-service-name' tags should be applied separately to service host resources. +// Example usage: +// tags: union(tags, { 'azd-service-name': }) +var tags = { + 'azd-env-name': environmentName +} + +// Check if resource group exists and create it if it doesn't +resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: resourceGroupName + location: location + tags: tags +} + +// Build dependent resources array conditionally +// Check if ACR already exists in the user-provided array to avoid duplicates +// Also skip if user provided an existing container registry endpoint or connection name +var hasAcr = contains(map(aiProjectDependentResources, r => r.resource), 'registry') +var shouldCreateAcr = enableHostedAgents && !hasAcr && empty(existingContainerRegistryResourceId) && empty(existingAcrConnectionName) +var dependentResources = shouldCreateAcr ? 
union(aiProjectDependentResources, [ + { + resource: 'registry' + connectionName: 'acr-connection' + } +]) : aiProjectDependentResources + +// AI Project module +module aiProject 'core/ai/ai-project.bicep' = { + scope: rg + name: 'ai-project' + params: { + tags: tags + location: aiDeploymentsLocation + aiFoundryProjectName: aiFoundryProjectName + principalId: principalId + principalType: principalType + existingAiAccountName: aiFoundryResourceName + deployments: aiProjectDeployments + connections: aiProjectConnections + connectionCredentials: aiProjectConnectionCreds + additionalDependentResources: dependentResources + enableMonitoring: enableMonitoring + enableHostedAgents: enableHostedAgents + enableCapabilityHost: enableCapabilityHost + existingContainerRegistryResourceId: existingContainerRegistryResourceId + existingContainerRegistryEndpoint: existingContainerRegistryEndpoint + existingAcrConnectionName: existingAcrConnectionName + existingApplicationInsightsConnectionString: existingApplicationInsightsConnectionString + existingApplicationInsightsResourceId: existingApplicationInsightsResourceId + existingAppInsightsConnectionName: existingAppInsightsConnectionName + } +} + +// Resources +output AZURE_RESOURCE_GROUP string = resourceGroupName +output AZURE_AI_ACCOUNT_ID string = aiProject.outputs.accountId +output AZURE_AI_PROJECT_ID string = aiProject.outputs.projectId +output AZURE_AI_FOUNDRY_PROJECT_ID string = aiProject.outputs.projectId +output AZURE_AI_ACCOUNT_NAME string = aiProject.outputs.aiServicesAccountName +output AZURE_AI_PROJECT_NAME string = aiProject.outputs.projectName + +// Endpoints +output AZURE_AI_PROJECT_ENDPOINT string = aiProject.outputs.AZURE_AI_PROJECT_ENDPOINT +output AZURE_OPENAI_ENDPOINT string = aiProject.outputs.AZURE_OPENAI_ENDPOINT +output APPLICATIONINSIGHTS_CONNECTION_STRING string = aiProject.outputs.APPLICATIONINSIGHTS_CONNECTION_STRING +output APPLICATIONINSIGHTS_RESOURCE_ID string = 
aiProject.outputs.APPLICATIONINSIGHTS_RESOURCE_ID + +// Dependent Resources and Connections + +// ACR +output AZURE_AI_PROJECT_ACR_CONNECTION_NAME string = aiProject.outputs.dependentResources.registry.connectionName +output AZURE_CONTAINER_REGISTRY_ENDPOINT string = aiProject.outputs.dependentResources.registry.loginServer + +// Bing Search +output BING_GROUNDING_CONNECTION_NAME string = aiProject.outputs.dependentResources.bing_grounding.connectionName +output BING_GROUNDING_RESOURCE_NAME string = aiProject.outputs.dependentResources.bing_grounding.name +output BING_GROUNDING_CONNECTION_ID string = aiProject.outputs.dependentResources.bing_grounding.connectionId + +// Bing Custom Search +output BING_CUSTOM_GROUNDING_CONNECTION_NAME string = aiProject.outputs.dependentResources.bing_custom_grounding.connectionName +output BING_CUSTOM_GROUNDING_NAME string = aiProject.outputs.dependentResources.bing_custom_grounding.name +output BING_CUSTOM_GROUNDING_CONNECTION_ID string = aiProject.outputs.dependentResources.bing_custom_grounding.connectionId + +// Azure AI Search +output AZURE_AI_SEARCH_CONNECTION_NAME string = aiProject.outputs.dependentResources.search.connectionName +output AZURE_AI_SEARCH_SERVICE_NAME string = aiProject.outputs.dependentResources.search.serviceName + +// Azure Storage +output AZURE_STORAGE_CONNECTION_NAME string = aiProject.outputs.dependentResources.storage.connectionName +output AZURE_STORAGE_ACCOUNT_NAME string = aiProject.outputs.dependentResources.storage.accountName + +// Connections +output AI_PROJECT_CONNECTION_IDS_JSON string = string(aiProject.outputs.connectionIds) diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/main.parameters.json b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/main.parameters.json new file mode 100644 index 000000000..681875d5e --- /dev/null +++ 
b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/infra/main.parameters.json @@ -0,0 +1,69 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "resourceGroupName": { + "value": "${AZURE_RESOURCE_GROUP}" + }, + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "aiFoundryResourceName": { + "value": "${AZURE_AI_ACCOUNT_NAME}" + }, + "aiFoundryProjectName": { + "value": "${AZURE_AI_PROJECT_NAME}" + }, + "aiDeploymentsLocation": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + }, + "principalType": { + "value": "${AZURE_PRINCIPAL_TYPE}" + }, + "aiProjectDeploymentsJson": { + "value": "${AI_PROJECT_DEPLOYMENTS=[]}" + }, + "aiProjectConnectionsJson": { + "value": "${AI_PROJECT_CONNECTIONS=[]}" + }, + "aiProjectConnectionCredentialsJson": { + "value": "${AI_PROJECT_CONNECTION_CREDENTIALS}" + }, + "aiProjectDependentResourcesJson": { + "value": "${AI_PROJECT_DEPENDENT_RESOURCES=[]}" + }, + "enableMonitoring": { + "value": "${ENABLE_MONITORING=true}" + }, + "enableHostedAgents": { + "value": "${ENABLE_HOSTED_AGENTS=false}" + }, + "enableCapabilityHost": { + "value": "${ENABLE_CAPABILITY_HOST=true}" + }, + "existingContainerRegistryResourceId": { + "value": "${AZURE_CONTAINER_REGISTRY_RESOURCE_ID=}" + }, + "existingContainerRegistryEndpoint": { + "value": "${AZURE_CONTAINER_REGISTRY_ENDPOINT=}" + }, + "existingAcrConnectionName": { + "value": "${AZURE_AI_PROJECT_ACR_CONNECTION_NAME=}" + }, + "existingApplicationInsightsConnectionString": { + "value": "${APPLICATIONINSIGHTS_CONNECTION_STRING=}" + }, + "existingApplicationInsightsResourceId": { + "value": "${APPLICATIONINSIGHTS_RESOURCE_ID=}" + }, + "existingAppInsightsConnectionName": { + "value": "${APPLICATIONINSIGHTS_CONNECTION_NAME=}" + } + } +} diff --git 
a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/main.py b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/main.py new file mode 100644 index 000000000..1cfd7d9b5 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/main.py @@ -0,0 +1,262 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Multi-turn chat agent using LangGraph with Azure OpenAI. + +Demonstrates how to integrate a LangGraph agent (with tool-calling) into +the Azure AI Agent Hosting invocations protocol. The graph has two nodes: + + 1. **chatbot** — calls Azure OpenAI (with tools bound) + 2. **tools** — executes any tool calls the model makes + +Tracing: All LangGraph node, LLM, and tool spans are auto-traced via +``langchain-azure-ai`` and exported to Application Insights. + 2. **tools** — executes any tool calls the model makes + +A conditional edge routes back to the chatbot after tool execution so the +model can incorporate the tool results. + +Conversation state: Uses an in-memory session store keyed by +``agent_session_id``. In production, replace with durable storage. 
+ +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (default: gpt-4o) + +Usage:: + + export FOUNDRY_PROJECT_ENDPOINT="https://your-resource.openai.azure.com/api/projects/proj" + python main.py + + # Turn 1 + curl -N -X POST 'http://localhost:8088/invocations?agent_session_id=s1' \\ + -H 'Content-Type: application/json' \\ + -d '{"message": "What time is it right now?"}' + + # Turn 2 — remembers context + curl -N -X POST 'http://localhost:8088/invocations?agent_session_id=s1' \\ + -H 'Content-Type: application/json' \\ + -d '{"message": "And what is 42 * 17?"}' +""" + +import asyncio +import json +import logging +import os +from datetime import datetime, timezone +from typing import Annotated + +import httpx +from starlette.requests import Request +from starlette.responses import JSONResponse, StreamingResponse + +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from langchain_openai import ChatOpenAI +from langgraph.graph import StateGraph, START, END +from langgraph.graph.message import add_messages +from langgraph.prebuilt import ToolNode +from langchain_core.tools import tool +from langchain_core.messages import HumanMessage, AIMessageChunk +from typing_extensions import TypedDict + +from azure.ai.agentserver.invocations import InvocationAgentServerHost + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. 
" + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + + +# ── Azure OpenAI config ───────────────────────────────────────────── +FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not FOUNDRY_PROJECT_ENDPOINT: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run'." + ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_token_provider = get_bearer_token_provider( + _credential, "https://ai.azure.com/.default" +) + + +# httpx Auth hook that injects a fresh Azure AD token on every request. +class _AzureTokenAuth(httpx.Auth): + def __init__(self, provider): + self._provider = provider + + def auth_flow(self, request): + request.headers["Authorization"] = f"Bearer {self._provider()}" + yield request + + +_http_client = httpx.Client(auth=_AzureTokenAuth(_token_provider)) + + +# ── Tools ──────────────────────────────────────────────────────────── +@tool +def get_current_time() -> str: + """Return the current UTC date and time.""" + return datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC") + + +@tool +def calculator(expression: str) -> str: + """Evaluate a simple math expression and return the result.""" + try: + result = eval(expression, {"__builtins__": {}}) # noqa: S307 + return str(result) + except Exception as e: + return f"Error: {e}" + + +TOOLS = [get_current_time, calculator] + + +# ── LangGraph definition ──────────────────────────────────────────── +class State(TypedDict): + messages: Annotated[list, add_messages] + + +def _build_graph() -> StateGraph: + """Build and 
compile the LangGraph agent graph.""" + llm = ChatOpenAI( + base_url=f"{FOUNDRY_PROJECT_ENDPOINT}/openai/v1", + api_key="placeholder", # overridden by _AzureTokenAuth + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + use_responses_api=True, + streaming=True, + http_client=_http_client, + ) + llm_with_tools = llm.bind_tools(TOOLS) + + def chatbot(state: State): + return {"messages": [llm_with_tools.invoke(state["messages"])]} + + def route_tools(state: State): + last = state["messages"][-1] + if hasattr(last, "tool_calls") and last.tool_calls: + return "tools" + return END + + graph = StateGraph(State) + graph.add_node("chatbot", chatbot) + graph.add_node("tools", ToolNode(tools=TOOLS)) + graph.add_edge(START, "chatbot") + graph.add_conditional_edges("chatbot", route_tools, { + "tools": "tools", END: END}) + graph.add_edge("tools", "chatbot") + return graph.compile() + + +GRAPH = _build_graph() + + +# ── Agent server wiring ───────────────────────────────────────────── +app = InvocationAgentServerHost() + + +# In-memory session store +_sessions: dict[str, list] = {} + + +@app.invoke_handler +async def handle_invoke(request: Request): + """Run the LangGraph agent and stream tokens back via SSE.""" + # Accept either a JSON object ({"message": "..."} or {"input": "..."}) or a + # plain-text body (e.g. sent directly from the Foundry portal chat UI). 
+ try: + body = await request.body() + if not body: + raise ValueError("empty body") + try: + data = json.loads(body) + except json.JSONDecodeError: + user_message = body.decode("utf-8", errors="replace").strip() + else: + if isinstance(data, dict): + user_message = data.get("message") or data.get("input") or "" + else: + user_message = body.decode("utf-8", errors="replace").strip() + if not isinstance(user_message, str) or not user_message.strip(): + raise ValueError("missing message text") + except ValueError: + return JSONResponse( + status_code=400, + content={ + "error": "invalid_request", + "message": ( + 'Request body must be a non-empty JSON object with a "message" (or "input") ' + 'string, or a plain-text body, e.g. {"message": "What time is it right now?"}' + ), + }, + ) + + session_id = request.state.session_id + invocation_id = request.state.invocation_id + + # Retrieve or create session history + history = _sessions.setdefault(session_id, []) + history.append(HumanMessage(content=user_message)) + + async def event_generator(): + full_text = "" + + # Run graph with full conversation history + result = await GRAPH.ainvoke({"messages": list(history)}) + + # The last message is the AI response + ai_message = result["messages"][-1] + # With use_responses_api, content may be a list of content blocks + # rather than a plain string. 
+ raw = ai_message.content + if isinstance(raw, list): + full_text = "".join( + block.get("text", "") if isinstance(block, dict) else str(block) + for block in raw + ) + else: + full_text = raw or "" + + # Stream the response word-by-word for SSE effect + words = full_text.split(" ") + for i, word in enumerate(words): + token = word if i == 0 else " " + word + event = {"type": "token", "content": token} + yield f"data: {json.dumps(event)}\n\n" + + # Save to history + history.append(ai_message) + + # Send completion event + turn = len([m for m in history if isinstance(m, HumanMessage)]) + done_event = { + "type": "done", + "invocation_id": invocation_id, + "session_id": session_id, + "turn": turn, + "full_text": full_text, + } + yield f"data: {json.dumps(done_event)}\n\n" + + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/requirements.txt b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/requirements.txt new file mode 100644 index 000000000..ed6e08286 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/requirements.txt @@ -0,0 +1,7 @@ +azure-ai-agentserver-invocations==1.0.0b2 +azure-ai-agentserver-core==2.0.0b2 +azure-identity==1.25.3 +langgraph==1.1.8 +langgraph-prebuilt==1.0.10 +langchain-core==1.3.0 +langchain-openai==1.1.14 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/test-payload.json new file mode 100644 index 000000000..796cb6660 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/langgraph-chat/test-payload.json @@ -0,0 +1 @@ +{ "message": "What time is it right now?" 
} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . 
user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md new file mode 100644 index 000000000..95efe0fee --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/README.md @@ -0,0 +1,127 @@ +# Note-Taking Agent — Python (Invocations Protocol) + +A note-taking agent built with `azure-ai-agentserver-invocations` and Azure OpenAI. Uses function calling to save and retrieve notes, with per-session JSONL persistence accessible via the Session Files API. + +## Features + +- **Save notes** — natural language commands like "save a note - buy groceries" +- **Retrieve notes** — "show me my notes" returns all saved entries with timestamps +- **Per-session isolation** — each session gets its own note file +- **Streaming responses** — real-time SSE streaming via the Invocations protocol +- **Session Files API** — notes stored at `$HOME` are accessible via the platform file API + +## Prerequisites + +- Python 3.12+ +- Azure OpenAI resource with a deployed model (e.g., `gpt-4.1-mini`) +- Azure credentials configured (e.g., `az login`) + +## Environment Variables + +| Variable | Description | Example | +|---|---|---| +| `FOUNDRY_PROJECT_ENDPOINT` | Foundry project endpoint (auto-injected when deployed) | `https://account.services.ai.azure.com/api/projects/proj` | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Model deployment name | `gpt-4.1-mini` | + +## Run Locally + +```bash +# Install dependencies +pip install -r requirements.txt + +# Set environment variables +export FOUNDRY_PROJECT_ENDPOINT="https://account.services.ai.azure.com/api/projects/proj" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" + +# Start the agent +python main.py +``` + +## Test with curl + +### Save a 
note + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "save a note - book reservation for dinner"}' +``` + +### Save another note + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "save a note - buy groceries for the weekend"}' +``` + +### Get all notes + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "show me all my notes"}' +``` + +### New session (isolated) + +```bash +curl -N -X POST "http://localhost:8088/invocations?agent_session_id=another-session" \ + -H "Content-Type: application/json" \ + -d '{"message": "show me my notes"}' +``` + +## Deploy + +See the [Azure AI Agent Hosting documentation](../../README.md) for deployment instructions. + +## File Structure + +| File | Description | +|---|---| +| `main.py` | Agent entry point with Invocations handler and OpenAI function calling | +| `note_store.py` | Thread-safe per-session JSONL note persistence | +| `requirements.txt` | Python dependencies | +| `Dockerfile` | Container image definition with SSL cert support | +| `agent.yaml` | Agent hosting configuration | +| `agent.manifest.yaml` | Agent metadata and template | +| `.dockerignore` | Docker build exclusions | + +## Troubleshooting + +### Azure OpenAI Permission Denied (401) + +If you see an error like: + +``` +Error calling Azure OpenAI: Error code: 401 - {'error': {'code': 'PermissionDenied', 'message': 'The principal lacks the required data action Microsoft.CognitiveServices/accounts/OpenAI/deployments/chat/completions/action to perform POST /openai/deployments/{deployment-id}/chat/completions operation.'}} +``` + +The identity running the agent does not have the required RBAC roles on the Azure AI Foundry project. 
Assign the following roles: + +- **Cognitive Services OpenAI User** +- **Azure AI User** + +Use the Azure CLI to assign them: + +```bash +# Set your variables +SUBSCRIPTION_ID="" +RESOURCE_GROUP="" +PROJECT_NAME="" +PRINCIPAL_ID="" + +# Assign "Cognitive Services OpenAI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Cognitive Services OpenAI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" + +# Assign "Azure AI User" role +az role assignment create \ + --assignee "$PRINCIPAL_ID" \ + --role "Azure AI User" \ + --scope "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.MachineLearningServices/workspaces/$PROJECT_NAME" +``` + +> **Note:** It may take a few minutes for role assignments to propagate. Retry the request after waiting. \ No newline at end of file diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml new file mode 100644 index 000000000..3eb549dfe --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.manifest.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: notetaking-agent-invocations +displayName: "Notetaking Agent (Invocations)" +description: > + Note-taking agent using the Invocations protocol. Demonstrates function + calling (save_note/get_notes tools) with per-session JSONL persistence and SSE + streaming. 
+metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Function Calling + - Python +template: + name: notetaking-agent-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml new file mode 100644 index 000000000..1e04ebfa2 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/agent.yaml @@ -0,0 +1,11 @@ +kind: hosted +name: notetaking-agent-invocations-python +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/main.py b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/main.py new file mode 100644 index 000000000..70170d26e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/main.py @@ -0,0 +1,276 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Note-taking agent using azure-ai-agentserver-invocations with Azure OpenAI. + +Uses the Azure OpenAI Responses API with function calling to understand user +intent (save/get notes) and streams responses as SSE via the Invocations +protocol. 
Notes are persisted per session in JSONL files accessible via the +Session Files API. + +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected by the + platform, e.g., https://account.services.ai.azure.com/api/projects/proj) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (e.g., gpt-4o) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://account.services.ai.azure.com/api/projects/proj" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o" + + # Start the agent + python main.py + + # Save a note + curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "save a note - book reservation for dinner"}' + + # Get all notes + curl -N -X POST "http://localhost:8088/invocations?agent_session_id=my-session" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "get all my notes"}' +""" + +import asyncio +import json +import logging +import os + +from starlette.requests import Request +from starlette.responses import JSONResponse, StreamingResponse + +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.agentserver.invocations import InvocationAgentServerHost + +import note_store + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# ── Configuration ───────────────────────────────────────────────────────────── + +FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not FOUNDRY_PROJECT_ENDPOINT: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. 
" + "Set it to your Foundry project endpoint, or use 'azd ai agent run'." + ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient( + endpoint=FOUNDRY_PROJECT_ENDPOINT, credential=_credential) + +# Use the Responses API — not chat.completions (Chat Completions API is legacy). +_openai_client = _project_client.get_openai_client() + +# Tool definitions for Azure OpenAI Responses API +TOOLS = [ + { + "type": "function", + "name": "save_note", + "description": "Save a note with the current timestamp. Use this when the user asks to save, add, or create a note.", + "parameters": { + "type": "object", + "properties": { + "note": { + "type": "string", + "description": "The note text to save", + } + }, + "required": ["note"], + }, + }, + { + "type": "function", + "name": "get_notes", + "description": "Retrieve all saved notes. Use this when the user asks to get, list, show, or view their notes.", + "parameters": {"type": "object", "properties": {}, "required": []}, + }, +] + +SYSTEM_PROMPT = ( + "You are a helpful note-taking assistant. You can save notes and retrieve them. " + "When the user asks to save a note, extract the note content and call save_note. " + "When the user asks to see their notes, call get_notes. " + "Always respond in a friendly, concise manner." 
+) + + +def _execute_tool_call(function_name: str, arguments: str, session_id: str) -> str: + """Execute a tool call and return the result as JSON.""" + try: + args = json.loads(arguments) + except json.JSONDecodeError as e: + return json.dumps({"error": f"Invalid tool arguments: {e}"}) + + if function_name == "save_note": + note_text = args.get("note") + if not note_text: + return json.dumps({"error": "Missing required 'note' argument"}) + entry = note_store.save_note(session_id, note_text) + return json.dumps({"status": "saved", "note": entry.note, "timestamp": entry.timestamp}) + elif function_name == "get_notes": + notes = note_store.get_notes(session_id) + return json.dumps({ + "count": len(notes), + "notes": [{"note": n.note, "timestamp": n.timestamp} for n in notes], + }) + return json.dumps({"error": f"Unknown function: {function_name}"}) + + +async def _stream_response(follow_up_input: list, session_id: str, invocation_id: str): + """Stream the final LLM response as SSE events (after tool calls resolved).""" + full_text = "" + + try: + loop = asyncio.get_event_loop() + openai_stream = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=SYSTEM_PROMPT, + input=follow_up_input, + stream=True, + ), + ) + + for event in openai_stream: + if event.type == "response.output_text.delta": + full_text += event.delta + sse = json.dumps({"type": "token", "content": event.delta}) + yield f"data: {sse}\n\n" + + except Exception as e: + error_msg = f"Error calling Azure OpenAI: {e}" + full_text = error_msg + sse = json.dumps({"type": "token", "content": error_msg}) + yield f"data: {sse}\n\n" + + done_event = json.dumps({ + "type": "done", + "invocation_id": invocation_id, + "session_id": session_id, + "full_text": full_text, + }) + yield f"data: {done_event}\n\n" + + +async def _stream_direct(text: str, session_id: str, invocation_id: str): + """Stream a pre-computed response (no tool calls 
needed).""" + if text: + event = json.dumps({"type": "token", "content": text}) + yield f"data: {event}\n\n" + + done_event = json.dumps({ + "type": "done", + "invocation_id": invocation_id, + "session_id": session_id, + "full_text": text, + }) + yield f"data: {done_event}\n\n" + + +app = InvocationAgentServerHost() + + +@app.invoke_handler +async def handle_invoke(request: Request): + """Handle note-taking requests with Azure OpenAI Responses API.""" + try: + data = await request.json() + if not isinstance(data, dict): + raise ValueError("body is not a JSON object") + user_message = data.get("message") + if not isinstance(user_message, str) or not user_message.strip(): + raise ValueError('missing or empty "message" field') + except (json.JSONDecodeError, ValueError): + return JSONResponse( + status_code=400, + content={ + "error": "invalid_request", + "message": ( + 'Request body must be a JSON object with a non-empty "message" string, ' + 'e.g. {"message": "save a note - book reservation for dinner"}' + ), + }, + ) + + session_id = request.state.session_id + invocation_id = request.state.invocation_id + + try: + loop = asyncio.get_event_loop() + response = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=SYSTEM_PROMPT, + input=user_message, + tools=TOOLS, + ), + ) + except Exception as e: + return StreamingResponse( + _stream_direct( + f"Error calling Azure OpenAI: {e}", session_id, invocation_id), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + + # Check if there are function_call output items + function_calls = [ + item for item in response.output + if item.type == "function_call" + ] + + if function_calls: + # Execute tool calls, then stream follow-up response + follow_up_input = [] + for fc in function_calls: + follow_up_input.append(fc) + result = _execute_tool_call(fc.name, fc.arguments, session_id) + 
follow_up_input.append({ + "type": "function_call_output", + "call_id": fc.call_id, + "output": result, + }) + + return StreamingResponse( + _stream_response(follow_up_input, session_id, invocation_id), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + else: + # No tool calls — extract text from the first response directly + direct_text = "" + for item in response.output: + if item.type == "message": + for part in item.content: + if part.type == "output_text": + direct_text += part.text + return StreamingResponse( + _stream_direct(direct_text, session_id, invocation_id), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/note_store.py b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/note_store.py new file mode 100644 index 000000000..f59db8d4a --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/note_store.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Thread-safe per-session note storage using JSONL files.""" + +import json +import os +import threading +from dataclasses import dataclass +from datetime import datetime, timezone + + +@dataclass +class NoteEntry: + """A single note with its creation timestamp.""" + + note: str + timestamp: str + + +_lock = threading.Lock() + + +def _get_file_path(session_id: str) -> str: + """Return the JSONL file path for a given session. + + Files are stored under $HOME so they are accessible via the Session Files API. 
+ """ + safe_id = "".join(c if c.isalnum() or c in "-_" else "_" for c in session_id) + base_dir = os.environ.get("HOME", os.getcwd()) + return os.path.join(base_dir, f"notes_{safe_id}.jsonl") + + +def save_note(session_id: str, note_text: str) -> NoteEntry: + """Append a note to the session's JSONL file.""" + entry = NoteEntry(note=note_text, timestamp=datetime.now(timezone.utc).isoformat()) + line = json.dumps({"note": entry.note, "timestamp": entry.timestamp}) + with _lock: + with open(_get_file_path(session_id), "a") as f: + f.write(line + "\n") + return entry + + +def get_notes(session_id: str) -> list[NoteEntry]: + """Read all notes from the session's JSONL file.""" + path = _get_file_path(session_id) + with _lock: + if not os.path.exists(path): + return [] + with open(path) as f: + entries = [] + for line in f: + line = line.strip() + if line: + try: + data = json.loads(line) + entries.append(NoteEntry(note=data["note"], timestamp=data["timestamp"])) + except (json.JSONDecodeError, KeyError): + continue + return entries diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/requirements.txt b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/requirements.txt new file mode 100644 index 000000000..8c85ffcbc --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-invocations==1.0.0b2 +azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json new file mode 100644 index 000000000..77b882b23 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/notetaking-agent/test-payload.json @@ -0,0 +1 @@ +{ "message": "save a note - book reservation for dinner" } diff --git 
a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.dockerignore b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.env.example b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.env.example new file mode 100644 index 000000000..a2afcc99d --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/.env.example @@ -0,0 +1,13 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Toolbox MCP endpoint — full URL including toolbox name and api-version. +TOOLBOX_ENDPOINT= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/Dockerfile b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/Dockerfile new file mode 100644 index 000000000..cd962d026 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/Dockerfile @@ -0,0 +1,7 @@ +FROM mcr.microsoft.com/azurelinux/base/python:3.12 +WORKDIR /app +COPY . 
user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python3", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/README.md b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/README.md new file mode 100644 index 000000000..d5e73c2d0 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/README.md @@ -0,0 +1,116 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. 
+ + +# What this sample demonstrates + +A **Bring Your Own** hosted agent using the **Invocations protocol** with **Azure AI Foundry Toolbox MCP** integration in Python. It shows how to connect to a Foundry toolbox at startup, discover available tools via MCP, and let the model call them during conversation through an agentic tool-calling loop. + +This sample combines: +- The [`azure-ai-agentserver-invocations`](https://pypi.org/project/azure-ai-agentserver-invocations/) SDK for the Invocations protocol +- The [Foundry SDK (`azure-ai-projects`)](https://pypi.org/project/azure-ai-projects/) for model access via the Responses API +- Direct MCP (JSON-RPC over HTTP) for toolbox tool discovery and invocation + +> **Invocations vs Responses:** Unlike the Responses protocol, the Invocations protocol does **not** provide built-in server-side conversation history. This agent maintains an in-memory session store keyed by `agent_session_id`. In production, replace it with durable storage (Redis, Cosmos DB, etc.) so history survives restarts. + +## How It Works + +### Toolbox Integration + +At startup, the agent connects to the toolbox MCP endpoint, runs `initialize` + `tools/list`, and converts the discovered tools into function definitions for the Responses API. When the model requests a tool call, the agent executes it via MCP `tools/call` and feeds the result back to the model. + +### Model Integration + +The agent uses the Foundry SDK Responses API with tool definitions. The agentic loop handles multi-step tool calling — the model can call tools multiple times before producing a final text answer. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Invocations SDK](https://pypi.org/project/azure-ai-agentserver-invocations/), which provisions a REST API endpoint compatible with the Azure AI Invocations protocol. 
+ +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **Python 3.10 or later** + - Verify your version: `python --version` + +4. **A Foundry Toolbox** + - Create a toolbox in your Foundry project (e.g. with web search, Azure AI Search, or custom MCP tools) + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. | +| `TOOLBOX_ENDPOINT` | Yes | Full toolbox MCP endpoint URL including toolbox name and `?api-version=v1`. Declared in `agent.manifest.yaml`. | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. | + +`TOOLBOX_ENDPOINT` must be the complete MCP URL for your toolbox. 
Two forms are supported: +``` +# Latest version: +https://<resource>.services.ai.azure.com/api/projects/<project>/toolboxes/<toolbox>/mcp?api-version=v1 + +# Pinned to a specific version: +https://<resource>.services.ai.azure.com/api/projects/<project>/toolboxes/<toolbox>/versions/<version>/mcp?api-version=v1 +``` +Set it as an environment variable in `.env` for local dev, or via `azd env set TOOLBOX_ENDPOINT "<your-toolbox-mcp-url>"` for deployed agents. + +**Local development (without `azd`):** + +```bash +cp .env.example .env +# Edit .env and fill in your values, then: +export $(grep -v '^#' .env | xargs) +``` + +### Installing Dependencies + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +### Running the Sample + +```bash +python main.py +``` + +The agent starts on `http://localhost:8088`. + +### Testing + +```bash +# Turn 1 +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "Search the web for Azure AI Foundry news"}' + +# Turn 2 (same session) +curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \ + -H "Content-Type: application/json" \ + -d '{"message": "Tell me more about the first result"}' +``` diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.manifest.yaml new file mode 100644 index 000000000..967e47b82 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.manifest.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: toolbox-python-invocations +displayName: "Toolbox (Python, Invocations)" +description: > + Bring-your-own agent using the Invocations protocol with Foundry Toolbox MCP + integration. Connects to a toolbox at startup, discovers tools, and lets the + model call them during conversation.
+metadata: + tags: + - AI Agent Hosting + - Invocations Protocol + - Bring Your Own + - Toolbox + - Python +template: + name: toolbox-python-invocations + kind: hosted + protocols: + - protocol: invocations + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" + - name: TOOLBOX_ENDPOINT + value: "{{TOOLBOX_ENDPOINT}}" +resources: + - kind: model + id: gpt-4.1 + name: AZURE_AI_MODEL_DEPLOYMENT_NAME + - kind: toolbox + name: web-search-tools + tools: + - type: web_search diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.yaml b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.yaml new file mode 100644 index 000000000..f2756ccaf --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/agent.yaml @@ -0,0 +1,16 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: toolbox-python-invocations +protocols: + - protocol: invocations + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + # FOUNDRY_PROJECT_ENDPOINT is injected by the platform — do NOT declare it here. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME=gpt-4.1} + - name: TOOLBOX_ENDPOINT + value: ${TOOLBOX_ENDPOINT} +dockerfile_path: Dockerfile diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/main.py b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/main.py new file mode 100644 index 000000000..bd0b68de0 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/main.py @@ -0,0 +1,403 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Toolbox — Bring Your Own Invocations agent with Foundry Toolbox MCP. + +Hosted agent that connects to an Azure AI Foundry toolbox via MCP, +discovers tools at startup, and lets the model call them during +conversation. Uses the Invocations protocol for request/response handling. + +The agent: +1. Connects to the toolbox MCP endpoint and discovers available tools +2. On each request, sends the conversation + tool definitions to the model +3. If the model requests a tool call, executes it via MCP and loops +4. Returns the final text response as a streaming SSE event stream + +Unlike the Responses protocol, the Invocations protocol does **not** provide +built-in server-side conversation history. This agent maintains an in-memory +session store keyed by ``agent_session_id``. In production, replace it with +durable storage (Redis, Cosmos DB, etc.) so history survives restarts. 
+ +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected in hosted containers) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (declared in agent.manifest.yaml) + TOOLBOX_ENDPOINT: Full toolbox MCP endpoint URL (declared in agent.manifest.yaml) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://<resource>.services.ai.azure.com/api/projects/<project>" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1" + export TOOLBOX_ENDPOINT="https://<resource>.services.ai.azure.com/api/projects/<project>/toolboxes/<toolbox>/mcp?api-version=v1" + + # Start the agent + python main.py + + # Turn 1 — start a new conversation + curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "Search the web for Azure AI Foundry news"}' + + # Turn 2 — continue the same conversation + curl -sS -N -X POST "http://localhost:8088/invocations?agent_session_id=chat-001" \\ + -H "Content-Type: application/json" \\ + -d '{"message": "Tell me more about the first result"}' +""" + +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.ai.projects import AIProjectClient +from starlette.responses import JSONResponse, StreamingResponse +from starlette.requests import Request +import asyncio +import json +import logging +import os + +import httpx +from dotenv import load_dotenv + +load_dotenv(override=False) + + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. 
" + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# ── Configuration ───────────────────────────────────────────────────────────── + +_endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not _endpoint: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run' " + "which sets it automatically." + ) + +_model = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not _model: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +# Platform injects TOOLBOX_{NAME}_MCP_ENDPOINT for declared toolbox resources. +# Fall back to TOOLBOX_ENDPOINT for local dev (.env). +TOOLBOX_ENDPOINT = ( + os.environ.get("TOOLBOX_WEB_SEARCH_TOOLS_MCP_ENDPOINT") + or os.environ.get("TOOLBOX_ENDPOINT", "") +) +if not TOOLBOX_ENDPOINT: + raise EnvironmentError( + "TOOLBOX_ENDPOINT environment variable is not set. " + "Set it to your toolbox MCP endpoint URL, or declare the toolbox " + "in agent.manifest.yaml resources." + ) +# Ensure api-version query param is present. +if "api-version=" not in TOOLBOX_ENDPOINT: + sep = "&" if "?" in TOOLBOX_ENDPOINT else "?" + TOOLBOX_ENDPOINT += f"{sep}api-version=v1" + +# Feature-flag header value (e.g. "Toolboxes=V1Preview"). +_TOOLBOX_FEATURES = os.getenv( + "FOUNDRY_AGENT_TOOLBOX_FEATURES", "Toolboxes=V1Preview") + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient(endpoint=_endpoint, credential=_credential) +_responses_client = _project_client.get_openai_client().responses +_token_provider = get_bearer_token_provider( + _credential, "https://ai.azure.com/.default") + +_SYSTEM_PROMPT = ( + "You are a helpful AI assistant with access to tools via Azure AI Foundry toolbox. 
" + "Use the available tools when appropriate to answer user questions. " + "Be concise and informative." +) + +# ── Toolbox MCP client ──────────────────────────────────────────────────────── + + +class _McpToolboxClient: + """Lightweight MCP client for toolbox tool discovery and invocation.""" + + def __init__(self, endpoint: str, token_provider): + self.endpoint = endpoint + self._get_token = token_provider + self._session_id: str | None = None + self._req_id = 0 + + def _headers(self) -> dict: + h = { + "Content-Type": "application/json", + "Authorization": f"Bearer {self._get_token()}", + } + if _TOOLBOX_FEATURES: + h["Foundry-Features"] = _TOOLBOX_FEATURES + if self._session_id: + h["mcp-session-id"] = self._session_id + return h + + def _next_id(self) -> int: + self._req_id += 1 + return self._req_id + + def initialize(self) -> str: + """Send MCP initialize + initialized notification.""" + with httpx.Client(timeout=60) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={ + "jsonrpc": "2.0", + "id": self._next_id(), + "method": "initialize", + "params": { + "protocolVersion": "2024-11-05", + "capabilities": {}, + "clientInfo": {"name": "byo-invocations-toolbox", "version": "1.0.0"}, + }, + }, + ) + resp.raise_for_status() + self._session_id = resp.headers.get("mcp-session-id") + data = resp.json() + + # Send initialized notification + client.post( + self.endpoint, + headers=self._headers(), + json={"jsonrpc": "2.0", "method": "notifications/initialized"}, + ) + return data.get("result", {}).get("serverInfo", {}).get("name", "unknown") + + def list_tools(self) -> list[dict]: + """Call tools/list and return tool definitions.""" + with httpx.Client(timeout=60) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={"jsonrpc": "2.0", "id": self._next_id( + ), "method": "tools/list", "params": {}}, + ) + resp.raise_for_status() + return resp.json().get("result", {}).get("tools", []) + + def 
call_tool(self, name: str, arguments: dict) -> str: + """Call a tool and return the text result.""" + with httpx.Client(timeout=120) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={ + "jsonrpc": "2.0", + "id": self._next_id(), + "method": "tools/call", + "params": {"name": name, "arguments": arguments}, + }, + ) + resp.raise_for_status() + result = resp.json().get("result", {}) + content = result.get("content", []) + texts = [] + for c in content: + if isinstance(c, dict): + if c.get("type") == "text" and c.get("text"): + texts.append(c["text"]) + elif c.get("type") == "resource": + resource = c.get("resource", {}) + if resource.get("text"): + texts.append(resource["text"]) + return "\n".join(texts) if texts else json.dumps(result) + + +# ── Lazy tool discovery ─────────────────────────────────────────────────────── +# Defer MCP connection to first request so the container can start and pass +# health checks before the toolbox endpoint is reachable. + +_mcp_client: _McpToolboxClient | None = None +_tool_definitions: list[dict] = [] +_tools_initialized = False + + +def _ensure_tools(): + global _mcp_client, _tool_definitions, _tools_initialized + if _tools_initialized: + return + _mcp_client = _McpToolboxClient(TOOLBOX_ENDPOINT, _token_provider) + server_name = _mcp_client.initialize() + mcp_tools = _mcp_client.list_tools() + logger.info("Toolbox '%s' connected: %d tool(s) discovered", + server_name, len(mcp_tools)) + for t in mcp_tools: + _tool_definitions.append({ + "type": "function", + "name": t["name"], + "description": t.get("description", ""), + "parameters": t.get("inputSchema", {"type": "object", "properties": {}}), + }) + _tools_initialized = True + +# ── Agentic loop ────────────────────────────────────────────────────────────── + + +app = InvocationAgentServerHost() + +# In-memory session store — keyed by agent_session_id. +# WARNING: state is lost on restart. Use durable storage in production. 
+_sessions: dict[str, list[dict]] = {} + +_MAX_TOOL_ROUNDS = 10 + + +def _call_model(input_items: list[dict]) -> object: + """Call the model with tool definitions and return the response.""" + _ensure_tools() + return _responses_client.create( + model=_model, + instructions=_SYSTEM_PROMPT, + input=input_items, + tools=_tool_definitions if _tool_definitions else None, + store=False, + ) + + +def _run_agent_loop(input_items: list[dict]) -> str: + """Execute the agentic tool-calling loop synchronously. + + Calls the model, checks for tool calls, executes them, feeds results + back, and repeats until the model produces a text response or we hit + the max rounds limit. + """ + for _ in range(_MAX_TOOL_ROUNDS): + response = _call_model(input_items) + + # Check if the model wants to call tools + tool_calls = [ + item for item in response.output + if getattr(item, "type", None) == "function_call" + ] + + if not tool_calls: + # No tool calls — return the text response + return response.output_text or "(No response)" + + # Execute each tool call and build result items + for tc in tool_calls: + try: + arguments = json.loads(tc.arguments) if isinstance( + tc.arguments, str) else tc.arguments + result_text = _mcp_client.call_tool(tc.name, arguments) + logger.info("Tool '%s' returned %d chars", + tc.name, len(result_text)) + except Exception as e: + logger.error("Tool '%s' failed: %s", tc.name, e) + result_text = f"Error calling tool: {e}" + + # Append the function call and its result to input for the next round + input_items.append({ + "type": "function_call", + "id": tc.id, + "call_id": tc.call_id, + "name": tc.name, + "arguments": tc.arguments if isinstance(tc.arguments, str) else json.dumps(tc.arguments), + }) + input_items.append({ + "type": "function_call_output", + "call_id": tc.call_id, + "output": result_text, + }) + + return "(Reached maximum tool call rounds)" + + +async def _stream_agent_reply(input_items: list[dict]): + """Run the agent loop in a thread and yield 
the result as SSE events.""" + loop = asyncio.get_running_loop() + result = await loop.run_in_executor(None, _run_agent_loop, input_items) + yield f"data: {json.dumps({'type': 'token', 'content': result})}\n\n" + + +@app.invoke_handler +async def handle_invoke(request: Request): + """Handle a streaming multi-turn chat request with toolbox tools.""" + # Accept either a JSON object ({"message": "..."}, {"input": "..."}, or + # {"query": "..."}) or a plain-text body. + try: + body = await request.body() + if not body: + raise ValueError("empty body") + try: + data = json.loads(body) + except json.JSONDecodeError: + user_message = body.decode("utf-8", errors="replace").strip() + else: + if isinstance(data, dict): + user_message = ( + data.get("message") + or data.get("input") + or data.get("query") + or "" + ) + else: + user_message = body.decode("utf-8", errors="replace").strip() + if not isinstance(user_message, str) or not user_message.strip(): + raise ValueError("missing message text") + except ValueError: + return JSONResponse( + status_code=400, + content={ + "error": "invalid_request", + "message": ( + 'Request body must be a non-empty JSON object with a "message" ' + '(or "input" / "query") string, or a plain-text body, ' + 'e.g. {"message": "What is Microsoft Foundry?"}' + ), + }, + ) + + session_id = request.state.session_id + invocation_id = request.state.invocation_id + + logger.info("Processing invocation %s (session %s)", + invocation_id, session_id) + + # Retrieve or create conversation history for this session. 
+ history = _sessions.setdefault(session_id, []) + history.append({"role": "user", "content": user_message}) + input_items = list(history) + + async def event_generator(): + full_reply = "" + try: + async for delta in _stream_agent_reply(input_items): + # Parse the SSE data to extract the content for history + try: + event_data = json.loads(delta.split( + "data: ", 1)[1].split("\n")[0]) + full_reply += event_data.get("content", "") + except (IndexError, json.JSONDecodeError): + pass + yield delta + except Exception as exc: + msg = f"Error calling model: {exc}" + logger.error(msg) + full_reply = msg + yield f"data: {json.dumps({'type': 'token', 'content': msg})}\n\n" + + yield f"data: {json.dumps({'type': 'done', 'invocation_id': invocation_id, 'session_id': session_id, 'full_text': full_reply})}\n\n" + + if full_reply: + history.append({"role": "assistant", "content": full_reply}) + + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + + +app.run() diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/requirements.txt b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/requirements.txt new file mode 100644 index 000000000..ab63287bf --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/requirements.txt @@ -0,0 +1,6 @@ +azure-ai-agentserver-invocations==1.0.0b2 +azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 +httpx +python-dotenv==1.1.1 diff --git a/samples/python/hosted-agents/bring-your-own/invocations/toolbox/test-payload.json b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/test-payload.json new file mode 100644 index 000000000..aa025a2d1 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/invocations/toolbox/test-payload.json @@ -0,0 +1 @@ +{ "message": "Search the web for Azure AI Foundry news" } diff --git 
a/samples/python/hosted-agents/bring-your-own/responses/background-agent/.dockerignore b/samples/python/hosted-agents/bring-your-own/responses/background-agent/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/.env.example b/samples/python/hosted-agents/bring-your-own/responses/background-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/Dockerfile b/samples/python/hosted-agents/bring-your-own/responses/background-agent/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . 
user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/README.md b/samples/python/hosted-agents/bring-your-own/responses/background-agent/README.md new file mode 100644 index 000000000..4975d5028 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/README.md @@ -0,0 +1,87 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. + +# Background Agent — Responses Protocol (Long-Running) + +This sample demonstrates a long-running agent built with [azure-ai-agentserver-responses](https://pypi.org/project/azure-ai-agentserver-responses/) that uses the background execution mode for asynchronous processing. It calls Azure OpenAI to generate a multi-section research analysis, streaming LLM tokens as they arrive via the Responses API event lifecycle. + +## How It Works + +The agent receives a request via `POST /responses` with `"background": true`. The server returns immediately while the handler calls Azure OpenAI in the background, streaming response tokens as `text.delta` events. The caller polls `GET /responses/{id}` until the response reaches a terminal status (`completed`, `failed`, or `incomplete`). In-flight requests can be cancelled via `POST /responses/{id}/cancel`. 
+ +## Running Locally + +### Prerequisites + +- Python 3.12+ +- Azure CLI installed and authenticated (`az login`) +- Foundry project with a deployed model + +### Install Dependencies + +```bash +pip install -r requirements.txt +``` + +### Start the Agent + +```bash +cp .env.example .env # then edit values +export FOUNDRY_PROJECT_ENDPOINT="https://your-project.services.ai.azure.com/api/projects/your-project" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +python main.py +``` + +The agent starts on `http://localhost:8088/`. + +### Test — Background Mode + +```bash +# Submit a background research analysis +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "research", "input": "Analyze the impact of AI on healthcare", "background": true, "store": true}' + +# Poll for result (use the id from the response) +curl http://localhost:8088/responses/ + +# Cancel an in-flight request +curl -X POST http://localhost:8088/responses//cancel +``` + +### Test — Default Mode (Synchronous) + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "research", "input": "Analyze the impact of AI on healthcare"}' +``` + +## Invoke with azd + +### Local + +**Bash:** +```bash +azd ai agent invoke --local "Analyze the impact of AI on healthcare" +``` + +**PowerShell:** +```powershell +azd ai agent invoke --local "Analyze the impact of AI on healthcare" +``` + +### Remote (after `azd up`) + +**Bash:** +```bash +azd ai agent invoke "Analyze the impact of AI on healthcare" +``` + +**PowerShell:** +```powershell +azd ai agent invoke "Analyze the impact of AI on healthcare" +``` + +## Deploying to Microsoft Foundry + +To deploy your agent to Microsoft Foundry, follow the deployment guide at https://github.com/microsoft/hosted-agents-vnext-private-preview/blob/main/azd-quickstart.md diff --git 
a/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml new file mode 100644 index 000000000..c72560436 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.manifest.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: background-agent-responses +displayName: "Background Agent (Responses)" +description: > + A long-running agent that demonstrates the background execution pattern using + the azure-ai-agentserver-responses SDK. Processes requests asynchronously via + background=true, streaming LLM tokens as they arrive. Supports polling and + cancellation. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Background Mode + - Python +template: + name: background-agent-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.yaml b/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.yaml new file mode 100644 index 000000000..49a6a4c86 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: background-agent-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/main.py b/samples/python/hosted-agents/bring-your-own/responses/background-agent/main.py new file mode 100644 index 000000000..81aa20171 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/main.py @@ -0,0 +1,146 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Background (long-running) agent using azure-ai-agentserver-responses with Azure OpenAI. + +Demonstrates the background execution pattern where: + - POST /responses with ``background: true`` returns immediately. + - GET /responses/{id} polls for the completed result. + - POST /responses/{id}/cancel cancels in-flight work. + +The agent calls Azure OpenAI to generate a detailed research analysis, +streaming LLM tokens as they arrive via ``text.emit_delta()``. 
+ +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected by the platform) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (e.g., gpt-4.1-mini) + +Uses DefaultAzureCredential for authentication - works with: +- Azure CLI login (az login) +- Managed Identity in Azure +- Environment variables (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" + + # Start the agent + python main.py + + # Submit a background request + curl -X POST http://localhost:8088/responses \\ + -H "Content-Type: application/json" \\ + -d '{"model": "research", "input": "Analyze the impact of AI on healthcare", "background": true, "store": true}' + + # Poll for result (use the id from the response above) + curl http://localhost:8088/responses/ + + # Cancel an in-flight request + curl -X POST http://localhost:8088/responses//cancel +""" + +import asyncio +import logging +import os + +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.agentserver.responses import ( + CreateResponse, + ResponseContext, + ResponsesAgentServerHost, + TextResponse, +) + +logger = logging.getLogger("background-agent") + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# ── Configuration ───────────────────────────────────────────────────────────── + +FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not FOUNDRY_PROJECT_ENDPOINT: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. 
" + "Set it to your Foundry project endpoint, or use 'azd ai agent run'." + ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient( + endpoint=FOUNDRY_PROJECT_ENDPOINT, credential=_credential) + +# Use the Responses API — not chat.completions (Chat Completions API is legacy). +_openai_client = _project_client.get_openai_client() + +_RESEARCH_SYSTEM_PROMPT = ( + "You are a research analyst. When given a topic, produce a thorough " + "multi-section analysis report. Include:\n" + "1. Executive Summary\n" + "2. Background & Context\n" + "3. Key Findings (at least 3)\n" + "4. Implications & Recommendations\n" + "5. Conclusion\n\n" + "Be detailed and substantive. Target 500-800 words." +) + +app = ResponsesAgentServerHost() + + +@app.response_handler +async def background_handler( + request: CreateResponse, + context: ResponseContext, + cancellation_signal: asyncio.Event, +): + """Process a request with Azure OpenAI streaming. + + Works in all modes (default, streaming, background, background+streaming) + — the SDK handles mode negotiation automatically. 
+ """ + + async def stream_openai(): + """Yield tokens from Azure OpenAI as an async iterable.""" + user_input = await context.get_input_text() or "General AI trends analysis" + logger.info("Starting LLM research analysis for: %s", user_input[:100]) + try: + loop = asyncio.get_event_loop() + openai_stream = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=_RESEARCH_SYSTEM_PROMPT, + input=f"Research topic: {user_input}", + stream=True, + ), + ) + + for event in openai_stream: + if event.type == "response.output_text.delta": + yield event.delta + + except Exception as exc: + logger.error("Azure OpenAI error: %s", exc) + yield f"Error calling Azure OpenAI: {exc}" + + logger.info("Analysis complete") + + return TextResponse(context, request, text=stream_openai()) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/responses/background-agent/requirements.txt b/samples/python/hosted-agents/bring-your-own/responses/background-agent/requirements.txt new file mode 100644 index 000000000..e55ca548c --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/background-agent/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-responses==1.0.0b4 +azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/.dockerignore b/samples/python/hosted-agents/bring-your-own/responses/hello-world/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment 
(never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/.env.example b/samples/python/hosted-agents/bring-your-own/responses/hello-world/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/Dockerfile b/samples/python/hosted-agents/bring-your-own/responses/hello-world/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/README.md b/samples/python/hosted-agents/bring-your-own/responses/hello-world/README.md new file mode 100644 index 000000000..06bcde813 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/README.md @@ -0,0 +1,185 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. 
Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency note for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# What this sample demonstrates + +A minimal "hello world" hosted agent using the **Bring Your Own** approach with the **Responses protocol**. It shows how to use the [`azure-ai-agentserver-responses`](https://pypi.org/project/azure-ai-agentserver-responses/) SDK to host a custom agent that calls a Foundry model via the Responses API and returns the reply through the standard Responses protocol contract. + +This is the simplest possible BYO integration — the protocol SDK handles the HTTP endpoints, SSE lifecycle, health probes, and OpenTelemetry tracing. You supply the model call using the [Foundry SDK (`azure-ai-projects`)](https://pypi.org/project/azure-ai-projects/). + +## How It Works + +### Model Integration + +The agent uses the Foundry SDK to create an OpenAI-compatible Responses client from the project endpoint. 
When a request arrives, the handler extracts the user's input text, calls the model via the Responses API, and returns the reply as a `TextResponse` — which the SDK automatically wraps in the correct SSE lifecycle events (`response.created` → `response.in_progress` → content events → `response.completed`). + +See [main.py](main.py) for the full implementation. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Responses SDK](https://pypi.org/project/azure-ai-agentserver-responses/), which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **Python 3.10 or higher** + - Verify your version: `python --version` + +> [!NOTE] +> You do **not** need an existing [Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry) project or model deployment to get started — `azd provision` creates them for you. If you already have a project, see the [note below](#using-azd-recommended-for-cli-workflows) on how to target it. + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. 
Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. | + +**Local development (without `azd`):** + +```bash +# Copy and fill in values, then source +cp .env.example .env +# Edit .env with your values +source .env +``` + +> [!NOTE] +> When using `azd ai agent run`, environment variables are handled automatically — no manual setup needed. + +### Installing Dependencies + +> [!NOTE] +> If using `azd ai agent run`, dependencies are installed automatically — skip to [Running the Sample](#running-the-sample). + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +### Running the Sample + +The recommended way to run and test hosted agents locally is with the Azure Developer CLI (`azd`) or the Foundry VS Code extension. + +#### Using the Foundry VS Code Extension + +The [Foundry VS Code extension](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) has a built-in sample gallery. You can open this sample directly from the extension without cloning the repository — it scaffolds the project into a new workspace, generates `agent.yaml`, `.env`, and `.vscode/tasks.json` + `launch.json` automatically, and configures a one-click **F5** debug experience. + +Follow the [VS Code quickstart](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=vscode) for a full step-by-step walkthrough. + +#### Using [`azd`](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd) (recommended for CLI workflows) + +No cloning required. 
Create a new folder, point `azd` at the manifest on GitHub, and it sets up the sample and generates Bicep infrastructure, `agent.yaml`, and env config automatically: + +```bash +# Create a new folder for the agent and navigate into it +mkdir hello-world-agent && cd hello-world-agent + +# Initialize from the manifest — azd reads it, downloads the sample, +# and generates Bicep infrastructure, agent.yaml, and env config +azd ai agent init -m https://github.com/microsoft-foundry/foundry-samples/blob/main/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.manifest.yaml + +# Provision Azure resources (Foundry project, model deployment, App Insights) +azd provision + +# Run the agent locally (handles env vars, Docker build, and startup) +azd ai agent run +``` + +> [!NOTE] +> If you've already cloned this repository, pass a local path to the manifest instead: +> `azd ai agent init -m /samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.manifest.yaml` + +> [!NOTE] +> If you already have a Foundry project and model deployment, add `-p -d ` to `azd ai agent init` to target existing resources. You can also skip provisioning entirely and configure env vars manually — see [Without `azd`](#without-azd). + +The agent starts on `http://localhost:8088/`. To invoke it: + +```bash +azd ai agent invoke --local "What is Microsoft Foundry?" +``` + +Or use curl directly: + +```bash +curl -sS -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "What is Microsoft Foundry?", "stream": false}' | jq . 
+``` + +#### Without `azd` + +If running without `azd`, set environment variables manually (see [Environment Variables](#environment-variables)), then: + +```bash +python main.py +``` + +### Deploying the Agent to Microsoft Foundry + +Once you've tested locally, deploy to Microsoft Foundry: + +```bash +# Provision Azure resources (skip if already done during local setup) +azd provision + +# Build, push, and deploy the agent to Foundry +azd deploy +``` + +After deploying, invoke the agent running in Foundry: + +```bash +azd ai agent invoke "What is Microsoft Foundry?" +``` + +To stream logs from the running agent: + +```bash +azd ai agent monitor +``` + +For the full deployment guide, see [Azure AI Foundry hosted agents](https://aka.ms/azdaiagent/docs). + +## Troubleshooting + +### Images built on Apple Silicon or other ARM64 machines do not work on our service + +We **recommend deploying with `azd deploy`**, which uses ACR remote build and always produces images with the correct architecture. + +If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. + +**Fix for local builds** + +Use this command to build the image locally: + +```shell +docker build --platform=linux/amd64 -t image . +``` + +This forces the image to be built for the required `amd64` architecture. 
diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.manifest.yaml new file mode 100644 index 000000000..17e0dc713 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.manifest.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: hello-world-python-responses +displayName: "Hello World (Python, Responses)" +description: > + Minimal Hello World agent using the Responses protocol with a bring-your-own + approach. Calls a Foundry model via the Responses API and returns the + response. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Python +template: + name: hello-world-python-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.yaml b/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.yaml new file mode 100644 index 000000000..15bc22852 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/agent.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml +kind: hosted +name: hello-world-python-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/main.py b/samples/python/hosted-agents/bring-your-own/responses/hello-world/main.py new file mode 100644 index 000000000..bcb95b2d5 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/main.py @@ -0,0 +1,144 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Hello World — Bring Your Own Responses agent. + +Minimal hosted agent that forwards user input to a Foundry model via the +Responses API and returns the reply through the Responses protocol. + +This sample demonstrates the simplest possible BYO integration: the protocol +SDK (``azure-ai-agentserver-responses``) handles the HTTP contract and SSE +lifecycle, and you supply the model call using the Foundry SDK. + +Conversation history is automatically managed by the platform via +``previous_response_id``. The handler calls ``context.get_history()`` to +retrieve prior turns and includes them in the model call so the agent +maintains context across messages. 
+ +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected in hosted containers) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (declared in agent.manifest.yaml) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" + + # Start the agent + python main.py + + # Invoke the agent + curl -sS -X POST http://localhost:8088/responses \\ + -H "Content-Type: application/json" \\ + -d '{"input": "What is Microsoft Foundry?", "stream": false}' | jq . +""" + +import asyncio +import logging +import os + +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.agentserver.responses import ( + CreateResponse, + ResponseContext, + ResponsesAgentServerHost, + ResponsesServerOptions, + TextResponse, +) +from azure.ai.agentserver.responses.models import ( + MessageContentInputTextContent, + MessageContentOutputTextContent, +) + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# Initialize Foundry project client — reads FOUNDRY_PROJECT_ENDPOINT. +# FOUNDRY_PROJECT_ENDPOINT is auto-injected in hosted Foundry containers. +# Locally, set it manually or use 'azd ai agent run' which sets it automatically. +_endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not _endpoint: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run' " + "which sets it automatically." 
+ ) + +_model = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not _model: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient(endpoint=_endpoint, credential=_credential) + +# Use the Responses client — not chat.completions +_responses_client = _project_client.get_openai_client().responses + +_SYSTEM_PROMPT = "You are a helpful AI assistant. Be concise and informative." + +app = ResponsesAgentServerHost( + options=ResponsesServerOptions(default_fetch_history_count=20), +) + + +def _build_input(current_input: str, history: list) -> list[dict]: + """Build Responses API input from conversation history and current message. + + The platform stores conversation history as typed items. We convert them + into simple {"role": ..., "content": ...} dicts that the Responses API accepts. + """ + input_items = [] + for item in history: + if hasattr(item, "content") and item.content: + for content in item.content: + if isinstance(content, MessageContentOutputTextContent) and content.text: + input_items.append( + {"role": "assistant", "content": content.text}) + elif isinstance(content, MessageContentInputTextContent) and content.text: + input_items.append( + {"role": "user", "content": content.text}) + input_items.append({"role": "user", "content": current_input}) + return input_items + + +@app.response_handler +async def handler( + request: CreateResponse, + context: ResponseContext, + _cancellation_signal: asyncio.Event, +): + """Forward user input to the model with conversation history.""" + user_input = await context.get_input_text() or "Hello!" 
+ history = await context.get_history() + + logger.info("Processing request %s", context.response_id) + + input_items = _build_input(user_input, history) + + # Run the synchronous OpenAI SDK call in a thread pool to avoid blocking + # the async event loop. + response = await asyncio.get_running_loop().run_in_executor( + None, + lambda: _responses_client.create( + model=_model, + instructions=_SYSTEM_PROMPT, + input=input_items, + store=False, # The platform manages conversation history — no need to store at the model level + ), + ) + + return TextResponse(context, request, text=response.output_text) + + +app.run() diff --git a/samples/python/hosted-agents/bring-your-own/responses/hello-world/requirements.txt b/samples/python/hosted-agents/bring-your-own/responses/hello-world/requirements.txt new file mode 100644 index 000000000..b405c55b8 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/hello-world/requirements.txt @@ -0,0 +1,3 @@ +azure-ai-agentserver-responses==1.0.0b4 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.dockerignore b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.env.example b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ 
b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/Dockerfile b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/README.md b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/README.md new file mode 100644 index 000000000..2f4b24290 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/README.md @@ -0,0 +1,80 @@ +# LangGraph Multi-turn Chat Agent (Responses Protocol) + +A multi-turn conversational agent built with [LangGraph](https://langchain-ai.github.io/langgraph/) +and Azure OpenAI, hosted via the **responses** protocol. 
+ +## What it demonstrates + +- **LangGraph agent graph** with conditional tool-calling routing +- **Two built-in tools**: `get_current_time` and `calculator` +- **Server-side conversation state** via `previous_response_id` — no application-side session storage +- **Streaming** output over the responses protocol +- **Azure OpenAI** with `DefaultAzureCredential` authentication + +## Architecture + +``` +┌───────┐ ┌─────────┐ ┌───────┐ +│ START │───▶│ chatbot │───▶│ END │ +└───────┘ └────┬─────┘ └───────┘ + │ tool_calls? + ▼ + ┌─────────┐ + │ tools │ + └────┬─────┘ + │ + └──▶ chatbot (loop) +``` + +## Key difference from invocations protocol + +This sample uses the **responses** protocol where conversation history is +managed server-side. The platform stores conversation state and resolves it +via `previous_response_id` — no need for an in-memory session store. + +## Prerequisites + +- Python 3.12+ +- Azure OpenAI resource with a deployed model (e.g., `gpt-4.1-mini`) +- Azure CLI login (`az login`) or other `DefaultAzureCredential` source + +## Environment variables + +| Variable | Required | Default | Description | +|---|---|---|---| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | — | Foundry project endpoint URL | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | — | Model deployment name declared in `agent.manifest.yaml` | + +## Running locally + +```bash +cp .env.example .env # then edit values +pip install -r requirements.txt +python main.py +``` + +## Testing with curl + +```bash +# Turn 1 — ask for the time (triggers tool call) +curl -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "chat", "input": "What time is it right now?", "stream": true}' + +# Turn 2 — chain via previous_response_id +curl -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "chat", "input": "What is 42 * 17?", "previous_response_id": "", "stream": true}' + +# Turn 3 — context recall +curl -N -X POST 
http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"model": "chat", "input": "Add 100 to that result", "previous_response_id": "", "stream": true}' +``` + +## Deploying to Azure AI Agent Hosting + +```bash +azd ai agent init -m agent.manifest.yaml +azd up +``` diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.manifest.yaml new file mode 100644 index 000000000..d81068a64 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.manifest.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: langgraph-chat-responses +displayName: "LangGraph Chat (Responses)" +description: > + Multi-turn chat agent built with LangGraph using the Responses protocol. + Demonstrates a tool-calling agent graph (chatbot ↔ tools) with server-side + conversation state via previous_response_id. Includes get_current_time and + calculator tools. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - LangGraph + - Tool Calling + - Python +template: + name: langgraph-chat-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.yaml b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.yaml new file mode 100644 index 000000000..27ca1a137 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/agent.yaml @@ -0,0 +1,11 @@ +kind: hosted +name: langgraph-chat-responses +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/main.py b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/main.py new file mode 100644 index 000000000..25a5c32eb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/main.py @@ -0,0 +1,229 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Multi-turn chat agent using LangGraph with Azure OpenAI (responses protocol). + +Demonstrates how to integrate a LangGraph agent (with tool-calling) into +the Azure AI Agent Hosting responses protocol. The graph has two nodes: + + 1. **chatbot** — calls Azure OpenAI (with tools bound) + 2. **tools** — executes any tool calls the model makes + +Conversation state: This sample does NOT use any in-memory session state. +Conversation context is automatically managed by the platform via +``previous_response_id`` and ``context.get_history()``. + +Tracing: All LangGraph node, LLM, and tool spans are auto-traced via +``langchain-azure-ai`` and exported to Application Insights. 
+
+Required environment variables:
+    FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected by the platform)
+    AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name declared in agent.manifest.yaml (required; no default)
+
+Usage::
+
+    export FOUNDRY_PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>"
+    python main.py
+
+    # Turn 1
+    curl -N -X POST http://localhost:8088/responses \\
+        -H "Content-Type: application/json" \\
+        -d '{"model": "chat", "input": "What time is it right now?", "stream": true}'
+
+    # Turn 2 — chain via previous_response_id
+    curl -N -X POST http://localhost:8088/responses \\
+        -H "Content-Type: application/json" \\
+        -d '{"model": "chat", "input": "What is 42 * 17?", "previous_response_id": "", "stream": true}'
+"""
+
+import asyncio
+import logging
+import os
+from datetime import datetime, timezone
+from typing import Annotated
+
+import httpx
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from langchain_openai import ChatOpenAI
+from langgraph.graph import StateGraph, START, END
+from langgraph.graph.message import add_messages
+from langgraph.prebuilt import ToolNode
+from langchain_core.tools import tool
+from langchain_core.messages import HumanMessage, AIMessage
+from typing_extensions import TypedDict
+
+from azure.ai.agentserver.responses import (
+    CreateResponse,
+    ResponseContext,
+    ResponsesAgentServerHost,
+    ResponsesServerOptions,
+    TextResponse,
+)
+from azure.ai.agentserver.responses.models import (
+    MessageContentInputTextContent,
+    MessageContentOutputTextContent,
+)
+
+
+# ── Configuration ────────────────────────────────────────────────────
+logger = logging.getLogger(__name__)
+
+if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"):
+    logger.warning(
+        "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to "
+        "Application Insights. Set it to enable local telemetry. 
" + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not FOUNDRY_PROJECT_ENDPOINT: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run'." + ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_token_provider = get_bearer_token_provider( + _credential, "https://ai.azure.com/.default" +) + + +# httpx Auth hook that injects a fresh Azure AD token on every request. +class _AzureTokenAuth(httpx.Auth): + def __init__(self, provider): + self._provider = provider + + def auth_flow(self, request): + request.headers["Authorization"] = f"Bearer {self._provider()}" + yield request + + +_http_client = httpx.Client(auth=_AzureTokenAuth(_token_provider)) + + +# ── Tools ──────────────────────────────────────────────────────────── +@tool +def get_current_time() -> str: + """Return the current UTC date and time.""" + return datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC") + + +@tool +def calculator(expression: str) -> str: + """Evaluate a simple math expression and return the result.""" + try: + result = eval(expression, {"__builtins__": {}}) # noqa: S307 + return str(result) + except Exception as e: + return f"Error: {e}" + + +TOOLS = [get_current_time, calculator] + + +# ── LangGraph definition ──────────────────────────────────────────── +class State(TypedDict): + messages: Annotated[list, add_messages] + + +def _build_graph() -> StateGraph: + """Build and compile the LangGraph agent graph.""" + llm = ChatOpenAI( + 
base_url=f"{FOUNDRY_PROJECT_ENDPOINT}/openai/v1", + api_key="placeholder", # overridden by _AzureTokenAuth + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + use_responses_api=True, + streaming=True, + http_client=_http_client, + ) + llm_with_tools = llm.bind_tools(TOOLS) + + def chatbot(state: State): + return {"messages": [llm_with_tools.invoke(state["messages"])]} + + def route_tools(state: State): + last = state["messages"][-1] + if hasattr(last, "tool_calls") and last.tool_calls: + return "tools" + return END + + graph = StateGraph(State) + graph.add_node("chatbot", chatbot) + graph.add_node("tools", ToolNode(tools=TOOLS)) + graph.add_edge(START, "chatbot") + graph.add_conditional_edges("chatbot", route_tools, { + "tools": "tools", END: END}) + graph.add_edge("tools", "chatbot") + return graph.compile() + + +GRAPH = _build_graph() + + +# ── Helpers ────────────────────────────────────────────────────────── +def _history_to_langchain_messages(history: list) -> list: + """Convert responses-protocol history items to LangChain messages.""" + messages = [] + for item in history: + if hasattr(item, "content") and item.content: + for content in item.content: + if isinstance(content, MessageContentOutputTextContent) and content.text: + messages.append(AIMessage(content=content.text)) + elif isinstance(content, MessageContentInputTextContent) and content.text: + messages.append(HumanMessage(content=content.text)) + return messages + + +# ── Agent server wiring ───────────────────────────────────────────── +app = ResponsesAgentServerHost( + options=ResponsesServerOptions(default_fetch_history_count=20)) + + +@app.response_handler +async def handle_create( + request: CreateResponse, + context: ResponseContext, + cancellation_signal: asyncio.Event, +): + """Run the LangGraph agent and stream the response.""" + + async def run_graph(): + """Fetch history, run the graph, and yield the result.""" + try: + try: + history = await context.get_history() + except Exception: + history = [] 
+ current_input = await context.get_input_text() or "Hello!" + + lc_messages = _history_to_langchain_messages(history) + lc_messages.append(HumanMessage(content=current_input)) + + result = await GRAPH.ainvoke({"messages": lc_messages}) + + # With use_responses_api, content may be a list of content blocks + # rather than a plain string. + raw = result["messages"][-1].content + if isinstance(raw, list): + yield "".join( + block.get("text", "") if isinstance(block, dict) else str(block) + for block in raw + ) + else: + yield raw or "" + except Exception as exc: + logger.exception("run_graph failed") + yield f"[ERROR] {type(exc).__name__}: {exc}" + + return TextResponse(context, request, text=run_graph()) + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/requirements.txt b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/requirements.txt new file mode 100644 index 000000000..e8d8e46a4 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/langgraph-chat/requirements.txt @@ -0,0 +1,7 @@ +azure-ai-agentserver-responses==1.0.0b4 +azure-ai-agentserver-core==2.0.0b2 +azure-identity==1.25.3 +langgraph==1.1.8 +langgraph-prebuilt==1.0.10 +langchain-core==1.3.0 +langchain-openai==1.1.14 diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git 
a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example new file mode 100644 index 000000000..86eb2456e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/.env.example @@ -0,0 +1,10 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile new file mode 100644 index 000000000..b89292edb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/README.md b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/README.md new file mode 100644 index 000000000..69c0a7831 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/README.md @@ -0,0 +1,91 @@ +# Note-Taking Agent — Python (Responses Protocol) + +A note-taking agent built with `azure-ai-agentserver-responses` and Azure OpenAI. Uses function calling to save and retrieve notes, with per-session JSONL persistence accessible via the Session Files API. 
+ +## Features + +- **Save notes** — natural language commands like "save a note - buy groceries" +- **Retrieve notes** — "show me my notes" returns all saved entries with timestamps +- **Per-session isolation** — each session gets its own note file +- **Streaming responses** — real-time SSE streaming via the Responses protocol +- **Session Files API** — notes stored at `$HOME` are accessible via the platform file API + +## Prerequisites + +- Python 3.12+ +- Azure OpenAI resource with a deployed model (e.g., `gpt-4.1-mini`) +- Azure credentials configured (e.g., `az login`) + +## Environment Variables + +| Variable | Description | Example | +|---|---|---| +| `FOUNDRY_PROJECT_ENDPOINT` | Foundry project endpoint URL | `https://your-project.services.ai.azure.com/api/projects/your-project` | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Model deployment name declared in `agent.manifest.yaml` | `gpt-4.1-mini` | + +## Run Locally + +```bash +# Copy and edit environment file +cp .env.example .env + +# Install dependencies +pip install -r requirements.txt + +# Start the agent +python main.py +``` + +## Test with curl + +### Save a note + +```bash +curl -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{ + "input": "save a note - book reservation for dinner", + "stream": true, + "agent_session_id": "my-session" + }' +``` + +### Save another note + +```bash +curl -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{ + "input": "save a note - buy groceries for the weekend", + "stream": true, + "agent_session_id": "my-session" + }' +``` + +### Get all notes + +```bash +curl -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{ + "input": "show me all my notes", + "stream": true, + "agent_session_id": "my-session" + }' +``` + +## Deploy + +See the [Azure AI Agent Hosting documentation](../../README.md) for deployment instructions. 
+ +## File Structure + +| File | Description | +|---|---| +| `main.py` | Agent entry point with Responses handler and OpenAI function calling | +| `note_store.py` | Thread-safe per-session JSONL note persistence | +| `requirements.txt` | Python dependencies | +| `Dockerfile` | Container image definition | +| `agent.yaml` | Agent hosting configuration | +| `agent.manifest.yaml` | Agent metadata and template | +| `.dockerignore` | Docker build exclusions | diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml new file mode 100644 index 000000000..69de29cd8 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.manifest.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: notetaking-agent-responses +displayName: "Notetaking Agent (Responses)" +description: > + Note-taking agent using the Responses protocol. Demonstrates function calling + (save_note/get_notes tools) with per-session JSONL persistence and streaming + responses. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Function Calling + - Python +template: + name: notetaking-agent-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" +resources: + - kind: model + id: gpt-4.1-mini + name: AZURE_AI_MODEL_DEPLOYMENT_NAME diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml new file mode 100644 index 000000000..66eb2598e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/agent.yaml @@ -0,0 +1,11 @@ +kind: hosted +name: notetaking-agent-responses-python +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/main.py b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/main.py new file mode 100644 index 000000000..3e4598fc7 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/main.py @@ -0,0 +1,243 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Note-taking agent using azure-ai-agentserver-responses with Azure OpenAI. + +Uses the Azure OpenAI Responses API with function calling to understand user +intent (save/get notes) and streams responses via the Responses protocol. +Notes are persisted per session in JSONL files accessible via the Session +Files API. 
+
+Required environment variables:
+    FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected by the platform)
+    AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (e.g., gpt-4o)
+
+Usage::
+
+    # Set environment variables
+    export FOUNDRY_PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>"
+    export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o"
+
+    # Start the agent
+    python main.py
+
+    # Save a note
+    curl -N -X POST http://localhost:8088/responses \\
+        -H "Content-Type: application/json" \\
+        -d '{"input": "save a note - book reservation for dinner", "stream": true, "agent_session_id": "my-session"}'
+
+    # Get all notes
+    curl -N -X POST http://localhost:8088/responses \\
+        -H "Content-Type: application/json" \\
+        -d '{"input": "get all my notes", "stream": true, "agent_session_id": "my-session"}'
+"""
+
+import asyncio
+import json
+import logging
+import os
+
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+from azure.ai.agentserver.responses import (
+    CreateResponse,
+    ResponseContext,
+    ResponseEventStream,
+    ResponsesAgentServerHost,
+)
+
+import note_store
+
+logger = logging.getLogger(__name__)
+
+if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"):
+    logger.warning(
+        "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to "
+        "Application Insights. Set it to enable local telemetry. "
+        "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)"
+    )
+
+# ── Configuration ─────────────────────────────────────────────────────────────
+
+FOUNDRY_PROJECT_ENDPOINT = os.environ.get("FOUNDRY_PROJECT_ENDPOINT")
+if not FOUNDRY_PROJECT_ENDPOINT:
+    raise EnvironmentError(
+        "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. "
+        "Set it to your Foundry project endpoint, or use 'azd ai agent run'."
+ ) + +AZURE_AI_MODEL_DEPLOYMENT_NAME = os.environ.get( + "AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not AZURE_AI_MODEL_DEPLOYMENT_NAME: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient( + endpoint=FOUNDRY_PROJECT_ENDPOINT, credential=_credential) + +# Use the Responses API — not chat.completions (Chat Completions API is legacy). +_openai_client = _project_client.get_openai_client() + +# Tool definitions for Azure OpenAI Responses API +TOOLS = [ + { + "type": "function", + "name": "save_note", + "description": "Save a note with the current timestamp. Use this when the user asks to save, add, or create a note.", + "parameters": { + "type": "object", + "properties": { + "note": { + "type": "string", + "description": "The note text to save", + } + }, + "required": ["note"], + }, + }, + { + "type": "function", + "name": "get_notes", + "description": "Retrieve all saved notes. Use this when the user asks to get, list, show, or view their notes.", + "parameters": {"type": "object", "properties": {}, "required": []}, + }, +] + +SYSTEM_PROMPT = ( + "You are a helpful note-taking assistant. You can save notes and retrieve them. " + "When the user asks to save a note, extract the note content and call save_note. " + "When the user asks to see their notes, call get_notes. " + "Always respond in a friendly, concise manner." 
+) + + +def _execute_tool_call(function_name: str, arguments: str, session_id: str) -> str: + """Execute a tool call and return the result as JSON.""" + try: + args = json.loads(arguments) + except json.JSONDecodeError as e: + return json.dumps({"error": f"Invalid tool arguments: {e}"}) + + if function_name == "save_note": + note_text = args.get("note") + if not note_text: + return json.dumps({"error": "Missing required 'note' argument"}) + entry = note_store.save_note(session_id, note_text) + return json.dumps({"status": "saved", "note": entry.note, "timestamp": entry.timestamp}) + elif function_name == "get_notes": + notes = note_store.get_notes(session_id) + return json.dumps({ + "count": len(notes), + "notes": [{"note": n.note, "timestamp": n.timestamp} for n in notes], + }) + return json.dumps({"error": f"Unknown function: {function_name}"}) + + +app = ResponsesAgentServerHost() + + +@app.response_handler +async def handle_create( + request: CreateResponse, + context: ResponseContext, + cancellation_signal: asyncio.Event, +): + """Handle note-taking requests with Azure OpenAI Responses API.""" + stream = ResponseEventStream( + response_id=context.response_id, + request=request, + ) + + yield stream.emit_created() + yield stream.emit_in_progress() + + user_input = await context.get_input_text() or "" + session_id = request.get("agent_session_id") or "default" + + # Emit output item structure before streaming content + message_item = stream.add_output_item_message() + yield message_item.emit_added() + + text_content = message_item.add_text_content() + yield text_content.emit_added() + + full_text = "" + + try: + loop = asyncio.get_event_loop() + + # First call — determine if tool calls are needed + response = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=SYSTEM_PROMPT, + input=user_input, + tools=TOOLS, + ), + ) + + # Check if there are function_call output items + 
function_calls = [ + item for item in response.output + if item.type == "function_call" + ] + + if function_calls: + # Execute tool calls and build follow-up input + follow_up_input = [] + # Include the function_call items from the response + for fc in function_calls: + follow_up_input.append(fc) + result = _execute_tool_call(fc.name, fc.arguments, session_id) + follow_up_input.append({ + "type": "function_call_output", + "call_id": fc.call_id, + "output": result, + }) + + # Second call — stream the final response with tool results + openai_stream = await loop.run_in_executor( + None, + lambda: _openai_client.responses.create( + model=AZURE_AI_MODEL_DEPLOYMENT_NAME, + instructions=SYSTEM_PROMPT, + input=follow_up_input, + stream=True, + ), + ) + + for event in openai_stream: + if cancellation_signal.is_set(): + yield stream.emit_incomplete("cancelled") + return + if event.type == "response.output_text.delta": + full_text += event.delta + yield text_content.emit_delta(event.delta) + else: + # No tool calls — extract text from the first response directly + for item in response.output: + if item.type == "message": + for part in item.content: + if part.type == "output_text": + full_text += part.text + if full_text: + yield text_content.emit_delta(full_text) + + except Exception as e: + if not full_text: + full_text = f"Error calling Azure OpenAI: {e}" + yield text_content.emit_delta(full_text) + + yield text_content.emit_text_done() + yield text_content.emit_done() + yield message_item.emit_done() + + yield stream.emit_completed() + + +if __name__ == "__main__": + app.run() diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/note_store.py b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/note_store.py new file mode 100644 index 000000000..f59db8d4a --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/note_store.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft. 
All rights reserved. + +"""Thread-safe per-session note storage using JSONL files.""" + +import json +import os +import threading +from dataclasses import dataclass +from datetime import datetime, timezone + + +@dataclass +class NoteEntry: + """A single note with its creation timestamp.""" + + note: str + timestamp: str + + +_lock = threading.Lock() + + +def _get_file_path(session_id: str) -> str: + """Return the JSONL file path for a given session. + + Files are stored under $HOME so they are accessible via the Session Files API. + """ + safe_id = "".join(c if c.isalnum() or c in "-_" else "_" for c in session_id) + base_dir = os.environ.get("HOME", os.getcwd()) + return os.path.join(base_dir, f"notes_{safe_id}.jsonl") + + +def save_note(session_id: str, note_text: str) -> NoteEntry: + """Append a note to the session's JSONL file.""" + entry = NoteEntry(note=note_text, timestamp=datetime.now(timezone.utc).isoformat()) + line = json.dumps({"note": entry.note, "timestamp": entry.timestamp}) + with _lock: + with open(_get_file_path(session_id), "a") as f: + f.write(line + "\n") + return entry + + +def get_notes(session_id: str) -> list[NoteEntry]: + """Read all notes from the session's JSONL file.""" + path = _get_file_path(session_id) + with _lock: + if not os.path.exists(path): + return [] + with open(path) as f: + entries = [] + for line in f: + line = line.strip() + if line: + try: + data = json.loads(line) + entries.append(NoteEntry(note=data["note"], timestamp=data["timestamp"])) + except (json.JSONDecodeError, KeyError): + continue + return entries diff --git a/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/requirements.txt b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/requirements.txt new file mode 100644 index 000000000..e55ca548c --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/notetaking-agent/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-responses==1.0.0b4 
+azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/.dockerignore b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.dockerignore new file mode 100644 index 000000000..8f846cb51 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.dockerignore @@ -0,0 +1,27 @@ +**/__pycache__/ +**/*.py[cod] +**/*.egg-info/ +.eggs/ + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE settings +.vscode/ +.idea/ + +# Version control +.git/ +.gitignore + +# Docker files +Dockerfile +.dockerignore + +# Docs +README.md + +# Local environment (never bake credentials into the image) +.env diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/.env.example b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.env.example new file mode 100644 index 000000000..a2afcc99d --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.env.example @@ -0,0 +1,13 @@ +# Foundry project endpoint — auto-injected in hosted containers. +# Only set manually if running without `azd ai agent run`. +# FOUNDRY_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + +# Model deployment name — must match a deployment in your Foundry project. +AZURE_AI_MODEL_DEPLOYMENT_NAME= + +# Toolbox MCP endpoint — full URL including toolbox name and api-version. +TOOLBOX_ENDPOINT= + +# Application Insights — auto-injected in hosted containers. +# Set for local telemetry (optional but recommended). +# APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... 
diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/.gitignore b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.gitignore new file mode 100644 index 000000000..8e8438024 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/.gitignore @@ -0,0 +1 @@ +.azure diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/Dockerfile b/samples/python/hosted-agents/bring-your-own/responses/toolbox/Dockerfile new file mode 100644 index 000000000..cd962d026 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/Dockerfile @@ -0,0 +1,7 @@ +FROM mcr.microsoft.com/azurelinux/base/python:3.12 +WORKDIR /app +COPY . user_agent/ +WORKDIR /app/user_agent +RUN if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +EXPOSE 8088 +CMD ["python3", "main.py"] diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/README.md b/samples/python/hosted-agents/bring-your-own/responses/toolbox/README.md new file mode 100644 index 000000000..a09916a2a --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/README.md @@ -0,0 +1,116 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). 
+ +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + + +# What this sample demonstrates + +A **Bring Your Own** hosted agent using the **Responses protocol** with **Azure AI Foundry Toolbox MCP** integration in Python. It shows how to connect to a Foundry toolbox at startup, discover available tools via MCP, and let the model call them during conversation through an agentic tool-calling loop. + +This sample combines: +- The [`azure-ai-agentserver-responses`](https://pypi.org/project/azure-ai-agentserver-responses/) SDK for the Responses protocol +- The [Foundry SDK (`azure-ai-projects`)](https://pypi.org/project/azure-ai-projects/) for model access via the Responses API +- Direct MCP (JSON-RPC over HTTP) for toolbox tool discovery and invocation + +Conversation history is automatically managed by the platform via `previous_response_id`. The handler calls `context.get_history()` to retrieve prior turns. + +## How It Works + +### Toolbox Integration + +At startup, the agent connects to the toolbox MCP endpoint, runs `initialize` + `tools/list`, and converts the discovered tools into function definitions for the Responses API. When the model requests a tool call, the agent executes it via MCP `tools/call` and feeds the result back to the model. 
+ +### Model Integration + +The agent uses the Foundry SDK Responses API with tool definitions. The agentic loop handles multi-step tool calling — the model can call tools multiple times before producing a final text answer. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer Responses SDK](https://pypi.org/project/azure-ai-agentserver-responses/), which provisions a REST API endpoint compatible with the Azure AI Responses protocol. + +### Agent Deployment + +The hosted agent can be developed and deployed to Microsoft Foundry using the [Azure Developer CLI](https://learn.microsoft.com/en-us/azure/foundry/agents/quickstarts/quickstart-hosted-agent?view=foundry&pivots=azd). + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure Developer CLI (`azd`)** (recommended) + - [Install azd](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) and the AI agent extension: `azd ext install azure.ai.agents` + - Authenticated: `azd auth login` + +2. **Azure CLI** + - Installed and authenticated: `az login` + +3. **Python 3.10 or later** + - Verify your version: `python --version` + +4. **A Foundry Toolbox** + - Create a toolbox in your Foundry project (e.g. with web search, Azure AI Search, or custom MCP tools) + +### Environment Variables + +See [`.env.example`](.env.example) for the full list of environment variables this sample uses. + +| Variable | Required | Description | +|----------|----------|-------------| +| `FOUNDRY_PROJECT_ENDPOINT` | Yes | Foundry project endpoint. Auto-injected in hosted containers; set automatically by `azd ai agent run` locally. | +| `AZURE_AI_MODEL_DEPLOYMENT_NAME` | Yes | Model deployment name — must match your Foundry project deployment. Declared in `agent.manifest.yaml`. | +| `TOOLBOX_ENDPOINT` | Yes | Full toolbox MCP endpoint URL including toolbox name and `?api-version=v1`. Declared in `agent.manifest.yaml`. 
| +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | Recommended | Enables telemetry. Auto-injected in hosted containers; set manually for local dev. | + +`TOOLBOX_ENDPOINT` must be the complete MCP URL for your toolbox. Two forms are supported: +``` +# Latest version: +https://{account}.services.ai.azure.com/api/projects/{project}/toolboxes/{toolbox}/mcp?api-version=v1 + +# Pinned to a specific version: +https://{account}.services.ai.azure.com/api/projects/{project}/toolboxes/{toolbox}/versions/{version}/mcp?api-version=v1 +``` +Set it as an environment variable in `.env` for local dev, or via `azd env set TOOLBOX_ENDPOINT "{toolbox-mcp-url}"` for deployed agents. + +**Local development (without `azd`):** + +```bash +cp .env.example .env +# Edit .env and fill in your values, then: +export $(grep -v '^#' .env | xargs) +``` + +### Installing Dependencies + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +### Running the Sample + +```bash +python main.py +``` + +The agent starts on `http://localhost:8088`. + +### Testing + +```bash +# Non-streaming +curl -sS -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "Search the web for Azure AI Foundry news", "stream": false}' | jq . 
+ +# Streaming +curl -sS -N -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{"input": "What is Azure AI Foundry?", "stream": true}' +``` diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.manifest.yaml b/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.manifest.yaml new file mode 100644 index 000000000..174c7fc4c --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.manifest.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/AgentManifest.yaml +name: toolbox-python-responses +displayName: "Toolbox (Python, Responses)" +description: > + Bring-your-own agent using the Responses protocol with Foundry Toolbox MCP + integration. Connects to a toolbox at startup, discovers tools, and lets the + model call them during conversation. +metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Toolbox + - Python +template: + name: toolbox-python-responses + kind: hosted + protocols: + - protocol: responses + version: 1.0.0 + environment_variables: + # FOUNDRY_PROJECT_ENDPOINT and APPLICATIONINSIGHTS_CONNECTION_STRING + # are injected by the platform (hosted) and translated by azd (local) + # — do NOT declare them here. + # + # Model deployment name — resolved from the resources section below. 
+ - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: "{{AZURE_AI_MODEL_DEPLOYMENT_NAME}}" + - name: TOOLBOX_ENDPOINT + value: "{{TOOLBOX_ENDPOINT}}" +resources: + - kind: model + id: gpt-4.1 + name: AZURE_AI_MODEL_DEPLOYMENT_NAME + - kind: toolbox + name: web-search-tools + tools: + - type: web_search diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.yaml b/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.yaml new file mode 100644 index 000000000..0b3c6f917 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/agent.yaml @@ -0,0 +1,26 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: toolbox-python-responses +description: | + Bring-your-own agent using the Responses protocol with Foundry Toolbox MCP integration. Connects to a toolbox at startup, discovers tools, and lets the model call them during conversation. 
+metadata: + tags: + - AI Agent Hosting + - Responses Protocol + - Bring Your Own + - Toolbox + - Python +protocols: + - protocol: responses + version: 1.0.0 +resources: + cpu: "0.25" + memory: 0.5Gi +environment_variables: + - name: AZURE_AI_MODEL_DEPLOYMENT_NAME + value: gpt-4.1 + - name: TOOLBOX_ENDPOINT + value: '{{TOOLBOX_ENDPOINT}}' + - name: TOOLBOX_WEB_SEARCH_TOOLS_MCP_ENDPOINT + value: ${TOOLBOX_WEB_SEARCH_TOOLS_MCP_ENDPOINT} diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/azure.yaml b/samples/python/hosted-agents/bring-your-own/responses/toolbox/azure.yaml new file mode 100644 index 000000000..d3bb79afb --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/azure.yaml @@ -0,0 +1,37 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json + +requiredVersions: + extensions: + azure.ai.agents: '>=0.1.0-preview' +name: ai-foundry-starter-basic +services: + toolbox-python-responses: + project: . 
+ host: azure.ai.agent + language: docker + docker: + remoteBuild: true + config: + container: + resources: + cpu: "0.25" + memory: 0.5Gi + scale: + maxReplicas: 1 + deployments: + - model: + format: OpenAI + name: gpt-4.1 + version: "2025-04-14" + name: gpt-4.1 + sku: + capacity: 10 + name: GlobalStandard + startupCommand: python main.py + toolboxes: + - name: web-search-tools + tools: + - type: web_search +infra: + provider: bicep + path: ./infra diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/abbreviations.json b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/abbreviations.json new file mode 100644 index 000000000..00cef3fc9 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/abbreviations.json @@ -0,0 +1,137 @@ +{ + "aiFoundryAccounts": "aif", + "analysisServicesServers": "as", + "apiManagementService": "apim-", + "appConfigurationStores": "appcs-", + "appManagedEnvironments": "cae-", + "appContainerApps": "ca-", + "authorizationPolicyDefinitions": "policy-", + "automationAutomationAccounts": "aa-", + "blueprintBlueprints": "bp-", + "blueprintBlueprintsArtifacts": "bpa-", + "cacheRedis": "redis-", + "cdnProfiles": "cdnp-", + "cdnProfilesEndpoints": "cdne-", + "cognitiveServicesAccounts": "cog-", + "cognitiveServicesFormRecognizer": "cog-fr-", + "cognitiveServicesTextAnalytics": "cog-ta-", + "computeAvailabilitySets": "avail-", + "computeCloudServices": "cld-", + "computeDiskEncryptionSets": "des", + "computeDisks": "disk", + "computeDisksOs": "osdisk", + "computeGalleries": "gal", + "computeSnapshots": "snap-", + "computeVirtualMachines": "vm", + "computeVirtualMachineScaleSets": "vmss-", + "containerInstanceContainerGroups": "ci", + "containerRegistryRegistries": "cr", + "containerServiceManagedClusters": "aks-", + "databricksWorkspaces": "dbw-", + "dataFactoryFactories": "adf-", + "dataLakeAnalyticsAccounts": "dla", + "dataLakeStoreAccounts": "dls", + 
"dataMigrationServices": "dms-", + "dBforMySQLServers": "mysql-", + "dBforPostgreSQLServers": "psql-", + "devicesIotHubs": "iot-", + "devicesProvisioningServices": "provs-", + "devicesProvisioningServicesCertificates": "pcert-", + "documentDBDatabaseAccounts": "cosmos-", + "documentDBMongoDatabaseAccounts": "cosmon-", + "eventGridDomains": "evgd-", + "eventGridDomainsTopics": "evgt-", + "eventGridEventSubscriptions": "evgs-", + "eventHubNamespaces": "evhns-", + "eventHubNamespacesEventHubs": "evh-", + "hdInsightClustersHadoop": "hadoop-", + "hdInsightClustersHbase": "hbase-", + "hdInsightClustersKafka": "kafka-", + "hdInsightClustersMl": "mls-", + "hdInsightClustersSpark": "spark-", + "hdInsightClustersStorm": "storm-", + "hybridComputeMachines": "arcs-", + "insightsActionGroups": "ag-", + "insightsComponents": "appi-", + "keyVaultVaults": "kv-", + "kubernetesConnectedClusters": "arck", + "kustoClusters": "dec", + "kustoClustersDatabases": "dedb", + "logicIntegrationAccounts": "ia-", + "logicWorkflows": "logic-", + "machineLearningServicesWorkspaces": "mlw-", + "managedIdentityUserAssignedIdentities": "id-", + "managementManagementGroups": "mg-", + "migrateAssessmentProjects": "migr-", + "networkApplicationGateways": "agw-", + "networkApplicationSecurityGroups": "asg-", + "networkAzureFirewalls": "afw-", + "networkBastionHosts": "bas-", + "networkConnections": "con-", + "networkDnsZones": "dnsz-", + "networkExpressRouteCircuits": "erc-", + "networkFirewallPolicies": "afwp-", + "networkFirewallPoliciesWebApplication": "waf", + "networkFirewallPoliciesRuleGroups": "wafrg", + "networkFrontDoors": "fd-", + "networkFrontdoorWebApplicationFirewallPolicies": "fdfp-", + "networkLoadBalancersExternal": "lbe-", + "networkLoadBalancersInternal": "lbi-", + "networkLoadBalancersInboundNatRules": "rule-", + "networkLocalNetworkGateways": "lgw-", + "networkNatGateways": "ng-", + "networkNetworkInterfaces": "nic-", + "networkNetworkSecurityGroups": "nsg-", + 
"networkNetworkSecurityGroupsSecurityRules": "nsgsr-", + "networkNetworkWatchers": "nw-", + "networkPrivateDnsZones": "pdnsz-", + "networkPrivateLinkServices": "pl-", + "networkPublicIPAddresses": "pip-", + "networkPublicIPPrefixes": "ippre-", + "networkRouteFilters": "rf-", + "networkRouteTables": "rt-", + "networkRouteTablesRoutes": "udr-", + "networkTrafficManagerProfiles": "traf-", + "networkVirtualNetworkGateways": "vgw-", + "networkVirtualNetworks": "vnet-", + "networkVirtualNetworksSubnets": "snet-", + "networkVirtualNetworksVirtualNetworkPeerings": "peer-", + "networkVirtualWans": "vwan-", + "networkVpnGateways": "vpng-", + "networkVpnGatewaysVpnConnections": "vcn-", + "networkVpnGatewaysVpnSites": "vst-", + "notificationHubsNamespaces": "ntfns-", + "notificationHubsNamespacesNotificationHubs": "ntf-", + "operationalInsightsWorkspaces": "log-", + "portalDashboards": "dash-", + "powerBIDedicatedCapacities": "pbi-", + "purviewAccounts": "pview-", + "recoveryServicesVaults": "rsv-", + "resourcesResourceGroups": "rg-", + "searchSearchServices": "srch-", + "serviceBusNamespaces": "sb-", + "serviceBusNamespacesQueues": "sbq-", + "serviceBusNamespacesTopics": "sbt-", + "serviceEndPointPolicies": "se-", + "serviceFabricClusters": "sf-", + "signalRServiceSignalR": "sigr", + "sqlManagedInstances": "sqlmi-", + "sqlServers": "sql-", + "sqlServersDataWarehouse": "sqldw-", + "sqlServersDatabases": "sqldb-", + "sqlServersDatabasesStretch": "sqlstrdb-", + "storageStorageAccounts": "st", + "storageStorageAccountsVm": "stvm", + "storSimpleManagers": "ssimp", + "streamAnalyticsCluster": "asa-", + "synapseWorkspaces": "syn", + "synapseWorkspacesAnalyticsWorkspaces": "synw", + "synapseWorkspacesSqlPoolsDedicated": "syndp", + "synapseWorkspacesSqlPoolsSpark": "synsp", + "timeSeriesInsightsEnvironments": "tsi-", + "webServerFarms": "plan-", + "webSitesAppService": "app-", + "webSitesAppServiceEnvironment": "ase-", + "webSitesFunctions": "func-", + "webStaticSites": "stapp-" +} 
diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/acr-role-assignment.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/acr-role-assignment.bicep new file mode 100644 index 000000000..3e0c2b218 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/acr-role-assignment.bicep @@ -0,0 +1,27 @@ +targetScope = 'resourceGroup' + +@description('Name of the existing container registry') +param acrName string + +@description('Principal ID to grant AcrPull role') +param principalId string + +@description('Full resource ID of the ACR (for generating unique GUID)') +param acrResourceId string + +// Reference the existing ACR in this resource group +resource acr 'Microsoft.ContainerRegistry/registries@2023-07-01' existing = { + name: acrName +} + +// Grant AcrPull role to the AI project's managed identity +resource acrPullRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: acr + name: guid(acrResourceId, principalId, '7f951dda-4ed3-4680-a7ca-43fe172d538d') + properties: { + principalId: principalId + principalType: 'ServicePrincipal' + // AcrPull role + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } +} diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/ai-project.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/ai-project.bicep new file mode 100644 index 000000000..130a03781 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/ai-project.bicep @@ -0,0 +1,430 @@ +targetScope = 'resourceGroup' + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Main location for the resources') +param location string + +var resourceToken = uniqueString(subscription().id, resourceGroup().id, location) + 
+@description('Name of the project') +param aiFoundryProjectName string + +param deployments deploymentsType + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('Optional. Name of an existing AI Services account in the current resource group. If not provided, a new one will be created.') +param existingAiAccountName string = '' + +@description('List of connections to provision') +param connections array = [] + +@secure() +@description('Map of connection name to credentials object. Kept as @secure to prevent secrets from appearing in deployment logs. Example: { "my-conn": { "key": "secret" } }') +param connectionCredentials object = {} + +@description('Also provision dependent resources and connect to the project') +param additionalDependentResources dependentResourcesType + +@description('Enable monitoring via appinsights and log analytics') +param enableMonitoring bool = true + +@description('Enable hosted agent deployment') +param enableHostedAgents bool = false + +@description('Enable the capability host for agent conversations. When false and hosted agents are enabled, the capability host is not created (v2 hosted agents handle storage automatically).') +param enableCapabilityHost bool = true + +@description('Optional. Existing container registry resource ID. If provided, a connection will be created to this ACR instead of creating a new one.') +param existingContainerRegistryResourceId string = '' + +@description('Optional. Existing container registry login server (e.g., myregistry.azurecr.io). Required if existingContainerRegistryResourceId is provided.') +param existingContainerRegistryEndpoint string = '' + +@description('Optional. Name of an existing ACR connection on the Foundry project. If provided, no new ACR or connection will be created.') +param existingAcrConnectionName string = '' + +@description('Optional. 
Existing Application Insights connection string. If provided, a connection will be created but no new App Insights resource.') +param existingApplicationInsightsConnectionString string = '' + +@description('Optional. Existing Application Insights resource ID. Used for connection metadata when providing an existing App Insights.') +param existingApplicationInsightsResourceId string = '' + +@description('Optional. Name of an existing Application Insights connection on the Foundry project. If provided, no new App Insights or connection will be created.') +param existingAppInsightsConnectionName string = '' + +// Load abbreviations +var abbrs = loadJsonContent('../../abbreviations.json') + +// Determine which resources to create based on connections +var hasStorageConnection = length(filter(additionalDependentResources, conn => conn.resource == 'storage')) > 0 +var hasAcrConnection = length(filter(additionalDependentResources, conn => conn.resource == 'registry')) > 0 +var hasExistingAcr = !empty(existingContainerRegistryResourceId) +var hasExistingAcrConnection = !empty(existingAcrConnectionName) +var hasExistingAppInsightsConnection = !empty(existingAppInsightsConnectionName) +var hasExistingAppInsightsConnectionString = !empty(existingApplicationInsightsConnectionString) +// Only create new App Insights resources if monitoring enabled and no existing connection/connection string +var shouldCreateAppInsights = enableMonitoring && !hasExistingAppInsightsConnection && !hasExistingAppInsightsConnectionString +var hasSearchConnection = length(filter(additionalDependentResources, conn => conn.resource == 'azure_ai_search')) > 0 +var hasBingConnection = length(filter(additionalDependentResources, conn => conn.resource == 'bing_grounding')) > 0 +var hasBingCustomConnection = length(filter(additionalDependentResources, conn => conn.resource == 'bing_custom_grounding')) > 0 + +// Extract connection names from ai.yaml for each resource type +var storageConnectionName = 
hasStorageConnection ? filter(additionalDependentResources, conn => conn.resource == 'storage')[0].connectionName : '' +var acrConnectionName = hasAcrConnection ? filter(additionalDependentResources, conn => conn.resource == 'registry')[0].connectionName : '' +var searchConnectionName = hasSearchConnection ? filter(additionalDependentResources, conn => conn.resource == 'azure_ai_search')[0].connectionName : '' +var bingConnectionName = hasBingConnection ? filter(additionalDependentResources, conn => conn.resource == 'bing_grounding')[0].connectionName : '' +var bingCustomConnectionName = hasBingCustomConnection ? filter(additionalDependentResources, conn => conn.resource == 'bing_custom_grounding')[0].connectionName : '' + +// Enable monitoring via Log Analytics and Application Insights +module logAnalytics '../monitor/loganalytics.bicep' = if (shouldCreateAppInsights) { + name: 'logAnalytics' + params: { + location: location + tags: tags + name: 'logs-${resourceToken}' + } +} + +module applicationInsights '../monitor/applicationinsights.bicep' = if (shouldCreateAppInsights) { + name: 'applicationInsights' + params: { + location: location + tags: tags + name: 'appi-${resourceToken}' + logAnalyticsWorkspaceId: logAnalytics.outputs.id + } +} + +// Always create a new AI Account for now (simplified approach) +// TODO: Add support for existing accounts in a future version +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-06-01' = { + name: !empty(existingAiAccountName) ? existingAiAccountName : 'ai-account-${resourceToken}' + location: location + tags: tags + sku: { + name: 'S0' + } + kind: 'AIServices' + identity: { + type: 'SystemAssigned' + } + properties: { + allowProjectManagement: true + customSubDomainName: !empty(existingAiAccountName) ? 
existingAiAccountName : 'ai-account-${resourceToken}' + networkAcls: { + defaultAction: 'Allow' + virtualNetworkRules: [] + ipRules: [] + } + publicNetworkAccess: 'Enabled' + disableLocalAuth: true + } + + @batchSize(1) + resource seqDeployments 'deployments' = [ + for dep in (deployments??[]): { + name: dep.name + properties: { + model: dep.model + } + sku: dep.sku + } + ] + + resource project 'projects' = { + name: aiFoundryProjectName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + description: '${aiFoundryProjectName} Project' + displayName: '${aiFoundryProjectName}Project' + } + dependsOn: [ + seqDeployments + ] + } + + resource aiFoundryAccountCapabilityHost 'capabilityHosts@2025-10-01-preview' = if (enableHostedAgents && enableCapabilityHost) { + name: 'agents' + properties: { + capabilityHostKind: 'Agents' + // IMPORTANT: this is required to enable hosted agents deployment + // if no BYO Net is provided + enablePublicHostingEnvironment: true + } + } +} + + +// Create connection towards appinsights: +// - when we create a new App Insights resource, OR +// - when the user provided an existing App Insights connection string + resource ID but no existing connection name +// Both cases are merged into a single resource to avoid duplicate ARM resource definitions (which fail deployment). +var shouldCreateExistingAppInsightsConnection = enableMonitoring && hasExistingAppInsightsConnectionString && !hasExistingAppInsightsConnection && !empty(existingApplicationInsightsResourceId) +var shouldCreateAppInsightsConnection = shouldCreateAppInsights || shouldCreateExistingAppInsightsConnection + +resource appInsightConnection 'Microsoft.CognitiveServices/accounts/projects/connections@2025-04-01-preview' = if (shouldCreateAppInsightsConnection) { + parent: aiAccount::project + name: 'appi-connection' + properties: { + category: 'AppInsights' + target: shouldCreateAppInsights ? 
applicationInsights.outputs.id : existingApplicationInsightsResourceId + authType: 'ApiKey' + isSharedToAll: true + credentials: { + key: shouldCreateAppInsights ? applicationInsights.outputs.connectionString : existingApplicationInsightsConnectionString + } + metadata: { + ApiType: 'Azure' + ResourceId: shouldCreateAppInsights ? applicationInsights.outputs.id : existingApplicationInsightsResourceId + } + } +} + +// Create additional connections from ai.yaml configuration +module aiConnections './connection.bicep' = [for (connection, index) in connections: { + name: 'connection-${connection.name}' + params: { + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + connectionConfig: connection + credentials: connectionCredentials[?connection.name] ?? {} + } +}] + +resource localUserAiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: resourceGroup() + name: guid(subscription().id, resourceGroup().id, principalId, '64702f94-c441-49e6-a78b-ef80e0188fee') + properties: { + principalId: principalId + principalType: principalType + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', '64702f94-c441-49e6-a78b-ef80e0188fee') + } +} + +resource localUserCognitiveServicesUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: resourceGroup() + name: guid(subscription().id, resourceGroup().id, principalId, 'a97b65f3-24c7-4388-baec-2e87135dc908') + properties: { + principalId: principalId + principalType: principalType + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') + } +} + +resource projectCognitiveServicesUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: aiAccount + name: guid(subscription().id, resourceGroup().id, aiAccount::project.name, '53ca6127-db72-4b80-b1b0-d745d6d5456d') + properties: { + principalId: aiAccount::project.identity.principalId + 
principalType: 'ServicePrincipal' + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d') + } +} + + +// All connections are now created directly within their respective resource modules +// using the centralized ./connection.bicep module + +// Storage module - deploy if storage connection is defined in ai.yaml +module storage '../storage/storage.bicep' = if (hasStorageConnection) { + name: 'storage' + params: { + location: location + tags: tags + resourceName: 'st${resourceToken}' + connectionName: storageConnectionName + principalId: principalId + principalType: principalType + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Azure Container Registry module - deploy if ACR connection is defined in ai.yaml +module acr '../host/acr.bicep' = if (hasAcrConnection) { + name: 'acr' + params: { + location: location + tags: tags + resourceName: '${abbrs.containerRegistryRegistries}${resourceToken}' + connectionName: acrConnectionName + principalId: principalId + principalType: principalType + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Connection for existing ACR - create if user provided an existing ACR resource ID but no existing connection +module existingAcrConnection './connection.bicep' = if (hasExistingAcr && !hasExistingAcrConnection) { + name: 'existing-acr-connection' + params: { + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + connectionConfig: { + name: 'acr-connection' + category: 'ContainerRegistry' + target: existingContainerRegistryEndpoint + authType: 'ManagedIdentity' + isSharedToAll: true + metadata: { + ResourceId: existingContainerRegistryResourceId + } + } + credentials: { + clientId: aiAccount::project.identity.principalId + resourceId: existingContainerRegistryResourceId + } + } +} + +// Extract resource group name from the existing ACR resource ID +// Resource ID 
format: /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ContainerRegistry/registries/{name} +var existingAcrResourceGroup = hasExistingAcr ? split(existingContainerRegistryResourceId, '/')[4] : '' +var existingAcrName = hasExistingAcr ? last(split(existingContainerRegistryResourceId, '/')) : '' + +// Grant AcrPull role to the AI project's managed identity on the existing ACR +// This allows the hosted agents to pull images from the user-provided registry +// Note: User must have permission to assign roles on the existing ACR (Owner or User Access Administrator) +// Using a module allows scoping to a different resource group if the ACR isn't in the same RG +// Skip if connection already exists (role assignment should already be in place) +module existingAcrRoleAssignment './acr-role-assignment.bicep' = if (hasExistingAcr && !hasExistingAcrConnection) { + name: 'existing-acr-role-assignment' + scope: resourceGroup(existingAcrResourceGroup) + params: { + acrName: existingAcrName + acrResourceId: existingContainerRegistryResourceId + principalId: aiAccount::project.identity.principalId + } +} + +// Bing Search grounding module - deploy if Bing connection is defined in ai.yaml or parameter is enabled +module bingGrounding '../search/bing_grounding.bicep' = if (hasBingConnection) { + name: 'bing-grounding' + params: { + tags: tags + resourceName: 'bing-${resourceToken}' + connectionName: bingConnectionName + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Bing Custom Search grounding module - deploy if custom Bing connection is defined in ai.yaml or parameter is enabled +module bingCustomGrounding '../search/bing_custom_grounding.bicep' = if (hasBingCustomConnection) { + name: 'bing-custom-grounding' + params: { + tags: tags + resourceName: 'bingcustom-${resourceToken}' + connectionName: bingCustomConnectionName + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + } +} + +// Azure AI 
Search module - deploy if search connection is defined in ai.yaml +module azureAiSearch '../search/azure_ai_search.bicep' = if (hasSearchConnection) { + name: 'azure-ai-search' + params: { + tags: tags + resourceName: 'search-${resourceToken}' + connectionName: searchConnectionName + storageAccountResourceId: hasStorageConnection ? storage!.outputs.storageAccountId : '' + containerName: 'knowledge' + aiServicesAccountName: aiAccount.name + aiProjectName: aiAccount::project.name + principalId: principalId + principalType: principalType + location: location + } +} + +// Outputs +output AZURE_AI_PROJECT_ENDPOINT string = aiAccount::project.properties.endpoints['AI Foundry API'] +output AZURE_OPENAI_ENDPOINT string = aiAccount.properties.endpoints['OpenAI Language Model Instance API'] +output aiServicesEndpoint string = aiAccount.properties.endpoint +output accountId string = aiAccount.id +output projectId string = aiAccount::project.id +output aiServicesAccountName string = aiAccount.name +output aiServicesProjectName string = aiAccount::project.name +output aiServicesPrincipalId string = aiAccount.identity.principalId +output projectName string = aiAccount::project.name +output APPLICATIONINSIGHTS_CONNECTION_STRING string = shouldCreateAppInsights ? applicationInsights.outputs.connectionString : (hasExistingAppInsightsConnectionString ? existingApplicationInsightsConnectionString : '') +output APPLICATIONINSIGHTS_RESOURCE_ID string = shouldCreateAppInsights ? applicationInsights.outputs.id : (hasExistingAppInsightsConnectionString ? existingApplicationInsightsResourceId : '') + +// Connection outputs from the connections array +output connectionIds array = [for (connection, index) in (connections ?? []): { + name: aiConnections[index].outputs.connectionName + id: aiConnections[index].outputs.connectionId +}] + +// Grouped dependent resources outputs +output dependentResources object = { + registry: { + name: hasAcrConnection ? 
acr!.outputs.containerRegistryName : '' + loginServer: hasAcrConnection ? acr!.outputs.containerRegistryLoginServer : ((hasExistingAcr || hasExistingAcrConnection) ? existingContainerRegistryEndpoint : '') + connectionName: hasAcrConnection ? acr!.outputs.containerRegistryConnectionName : (hasExistingAcrConnection ? existingAcrConnectionName : (hasExistingAcr ? 'acr-connection' : '')) + } + bing_grounding: { + name: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingName : '' + connectionName: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingConnectionName : '' + connectionId: (hasBingConnection) ? bingGrounding!.outputs.bingGroundingConnectionId : '' + } + bing_custom_grounding: { + name: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingName : '' + connectionName: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingConnectionName : '' + connectionId: (hasBingCustomConnection) ? bingCustomGrounding!.outputs.bingCustomGroundingConnectionId : '' + } + search: { + serviceName: hasSearchConnection ? azureAiSearch!.outputs.searchServiceName : '' + connectionName: hasSearchConnection ? azureAiSearch!.outputs.searchConnectionName : '' + } + storage: { + accountName: hasStorageConnection ? storage!.outputs.storageAccountName : '' + connectionName: hasStorageConnection ? storage!.outputs.storageConnectionName : '' + } +} + +type deploymentsType = { + @description('Specify the name of cognitive service account deployment.') + name: string + + @description('Required. Properties of Cognitive Services account deployment model.') + model: { + @description('Required. The name of Cognitive Services account deployment model.') + name: string + + @description('Required. The format of Cognitive Services account deployment model.') + format: string + + @description('Required. 
The version of Cognitive Services account deployment model.') + version: string + } + + @description('The resource model definition representing SKU.') + sku: { + @description('Required. The name of the resource model definition representing SKU.') + name: string + + @description('The capacity of the resource model definition representing SKU.') + capacity: int + } +}[]? + +type dependentResourcesType = { + @description('The type of dependent resource to create') + resource: 'storage' | 'registry' | 'azure_ai_search' | 'bing_grounding' | 'bing_custom_grounding' + + @description('The connection name for this resource') + connectionName: string +}[] diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/connection.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/connection.bicep new file mode 100644 index 000000000..a08726645 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/ai/connection.bicep @@ -0,0 +1,112 @@ +targetScope = 'resourceGroup' + +@description('AI Services account name') +param aiServicesAccountName string + +@description('AI project name') +param aiProjectName string + +// Connection configuration type definition +type ConnectionConfig = { + @description('Name of the connection') + name: string + + @description('Category of the connection (e.g., ContainerRegistry, AzureStorageAccount, CognitiveSearch, AzureOpenAI)') + category: string + + @description('Target endpoint or URL for the connection') + target: string + + @description('Authentication type') + authType: 'AAD' | 'AccessKey' | 'AccountKey' | 'AgenticIdentity' | 'ApiKey' | 'CustomKeys' | 'ManagedIdentity' | 'None' | 'OAuth2' | 'PAT' | 'SAS' | 'ServicePrincipal' | 'UsernamePassword' | 'UserEntraToken' | 'ProjectManagedIdentity' + + @description('Whether the connection is shared to all users (optional, defaults to true)') + isSharedToAll: bool? 
+ + @description('Additional metadata for the connection (optional)') + metadata: object? + + @description('Error message if the connection fails (optional)') + error: string? + + @description('Expiry time for the connection (optional)') + expiryTime: string? + + @description('Private endpoint requirement: Required, NotRequired, or NotApplicable (optional)') + peRequirement: ('NotApplicable' | 'NotRequired' | 'Required')? + + @description('Private endpoint status: Active, Inactive, or NotApplicable (optional)') + peStatus: ('Active' | 'Inactive' | 'NotApplicable')? + + @description('List of users to share the connection with (optional, alternative to isSharedToAll)') + sharedUserList: string[]? + + @description('Whether to use workspace managed identity (optional)') + useWorkspaceManagedIdentity: bool? + + @description('OAuth2 authorization endpoint URL (optional, OAuth2 authType only)') + authorizationUrl: string? + + @description('OAuth2 token endpoint URL (optional, OAuth2 authType only)') + tokenUrl: string? + + @description('OAuth2 refresh token endpoint URL (optional, OAuth2 authType only)') + refreshUrl: string? + + @description('OAuth2 scopes to request (optional, OAuth2 authType only)') + scopes: string[]? + + @description('Token audience for UserEntraToken / AgenticIdentity auth types (optional)') + audience: string? + + @description('Managed connector name for OAuth2 managed connectors (optional)') + connectorName: string? +} + +@description('Connection configuration') +param connectionConfig ConnectionConfig + +@secure() +@description('Credentials for the connection. Kept as a separate @secure parameter to prevent secrets from appearing in deployment logs. Shape depends on authType — e.g. { key: "..." } for ApiKey, { clientId: "...", clientSecret: "..." 
} for OAuth2/ServicePrincipal.') +param credentials object = {} + + +// Get reference to the AI Services account and project +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = { + name: aiServicesAccountName + + resource project 'projects' existing = { + name: aiProjectName + } +} + +// Create the connection +resource connection 'Microsoft.CognitiveServices/accounts/projects/connections@2025-04-01-preview' = { + parent: aiAccount::project + name: connectionConfig.name + properties: { + category: connectionConfig.category + target: connectionConfig.target + authType: connectionConfig.authType + isSharedToAll: connectionConfig.?isSharedToAll ?? true + credentials: !empty(credentials) ? credentials : null + metadata: connectionConfig.?metadata + // Only include if they appear in the connectionConfig + ...connectionConfig.?error != null ? { error: connectionConfig.?error } : {} + ...connectionConfig.?expiryTime != null ? { expiryTime: connectionConfig.?expiryTime } : {} + ...connectionConfig.?peRequirement != null ? { peRequirement: connectionConfig.?peRequirement } : {} + ...connectionConfig.?peStatus != null ? { peStatus: connectionConfig.?peStatus } : {} + ...connectionConfig.?sharedUserList != null ? { sharedUserList: connectionConfig.?sharedUserList } : {} + ...connectionConfig.?useWorkspaceManagedIdentity != null ? { useWorkspaceManagedIdentity: connectionConfig.?useWorkspaceManagedIdentity } : {} + ...connectionConfig.?authorizationUrl != null ? { authorizationUrl: connectionConfig.?authorizationUrl } : {} + ...connectionConfig.?tokenUrl != null ? { tokenUrl: connectionConfig.?tokenUrl } : {} + ...connectionConfig.?refreshUrl != null ? { refreshUrl: connectionConfig.?refreshUrl } : {} + ...connectionConfig.?scopes != null ? { scopes: connectionConfig.?scopes } : {} + ...connectionConfig.?audience != null ? { audience: connectionConfig.?audience } : {} + ...connectionConfig.?connectorName != null ? 
{ connectorName: connectionConfig.?connectorName } : {} + } +} + +// Outputs +output connectionName string = connection.name +output connectionId string = connection.id diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/host/acr.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/host/acr.bicep new file mode 100644 index 000000000..360bf2298 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/host/acr.bicep @@ -0,0 +1,87 @@ +targetScope = 'resourceGroup' + +@description('The location used for all deployed resources') +param location string = resourceGroup().location + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Resource name for the container registry') +param resourceName string + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Name for the AI Foundry ACR connection') +param connectionName string = 'acr-connection' + +// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Create the Container Registry +module containerRegistry 'br/public:avm/res/container-registry/registry:0.1.1' = { + name: 'registry' + params: { + name: resourceName + location: location + tags: tags + publicNetworkAccess: 'Enabled' + roleAssignments:[ + { + principalId: principalId + principalType: 
principalType + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } + // TODO SEPARATELY + { + // the foundry project itself can pull from the ACR + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + roleDefinitionIdOrName: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + } + ] + } +} + +// Create the ACR connection using the centralized connection module +module acrConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'acr-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 'ContainerRegistry' + target: containerRegistry.outputs.loginServer + authType: 'ManagedIdentity' + isSharedToAll: true + metadata: { + ResourceId: containerRegistry.outputs.resourceId + } + } + credentials: { + clientId: aiAccount::aiProject.identity.principalId + resourceId: containerRegistry.outputs.resourceId + } + } +} + +output containerRegistryName string = containerRegistry.outputs.name +output containerRegistryLoginServer string = containerRegistry.outputs.loginServer +output containerRegistryResourceId string = containerRegistry.outputs.resourceId +output containerRegistryConnectionName string = acrConnection.outputs.connectionName diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights-dashboard.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights-dashboard.bicep new file mode 100644 index 000000000..f3e0952b4 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights-dashboard.bicep @@ -0,0 +1,1236 @@ +metadata description = 'Creates a dashboard for an Application 
Insights instance.' +param name string +param applicationInsightsName string +param location string = resourceGroup().location +param tags object = {} + +// 2020-09-01-preview because that is the latest valid version +resource applicationInsightsDashboard 'Microsoft.Portal/dashboards@2020-09-01-preview' = { + name: name + location: location + tags: tags + properties: { + lenses: [ + { + order: 0 + parts: [ + { + position: { + x: 0 + y: 0 + colSpan: 2 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'id' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AspNetOverviewPinnedPart' + asset: { + idInputName: 'id' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'overview' + } + } + { + position: { + x: 2 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/ProactiveDetectionAsyncPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'ProactiveDetection' + } + } + { + position: { + x: 3 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'ResourceId' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + ] + #disable-next-line BCP036 + type: 
'Extension/AppInsightsExtension/PartType/QuickPulseButtonSmallPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 4 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-04T01:20:33.345Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AvailabilityNavButtonPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 5 + y: 0 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-08T18:47:35.237Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'ConfigurationId' + value: '78ce933e-e864-4b05-a27b-71fd55a6afad' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/AppMapButtonPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 0 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Usage' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 3 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + 
ResourceGroup: resourceGroup().name + } + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + endTime: null + createdTime: '2018-05-04T01:22:35.782Z' + isInitialTime: true + grain: 1 + useDashboardTimeRange: false + } + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/UsageUsersOverviewPart' + asset: { + idInputName: 'ComponentId' + type: 'ApplicationInsights' + } + } + } + { + position: { + x: 4 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Reliability' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 7 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ResourceId' + value: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'DataModel' + value: { + version: '1.0.0' + timeContext: { + durationMs: 86400000 + createdTime: '2018-05-04T23:42:40.072Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + isOptional: true + } + { + name: 'ConfigurationId' + value: '8a02f7bf-ac0f-40e1-afe9-f0e72cfee77f' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/CuratedBladeFailuresPinnedPart' + isAdapter: true + asset: { + idInputName: 'ResourceId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'failures' + } + } + { + position: { + x: 8 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Responsiveness\r\n' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 11 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ResourceId' + value: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + { + name: 'DataModel' + value: { + version: '1.0.0' + timeContext: { + durationMs: 86400000 + createdTime: '2018-05-04T23:43:37.804Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + isOptional: true + } + { + name: 'ConfigurationId' + value: '2a8ede4f-2bee-4b9c-aed9-2db0e8a01865' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/CuratedBladePerformancePinnedPart' + isAdapter: true + asset: { + idInputName: 'ResourceId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'performance' + } + } + { + position: { + x: 12 + y: 1 + colSpan: 3 + rowSpan: 1 + } + metadata: { + inputs: [] + type: 'Extension/HubsExtension/PartType/MarkdownPart' + settings: { + content: { + settings: { + content: '# Browser' + title: '' + subtitle: '' + } + } + } + } + } + { + position: { + x: 15 + y: 1 + colSpan: 1 + rowSpan: 1 + } + metadata: { + inputs: [ + { + name: 'ComponentId' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'MetricsExplorerJsonDefinitionId' + value: 'BrowserPerformanceTimelineMetrics' + } + { + name: 'TimeContext' + value: { + durationMs: 86400000 + createdTime: '2018-05-08T12:16:27.534Z' + isInitialTime: false + grain: 1 + useDashboardTimeRange: false + } + } + { + name: 'CurrentFilter' + value: { + eventTypes: [ + 4 + 1 + 3 + 5 + 2 + 6 + 13 + ] + typeFacets: {} + isPermissive: false + } + } + { + name: 'id' + value: { + Name: applicationInsights.name + SubscriptionId: subscription().subscriptionId + ResourceGroup: resourceGroup().name + } + } + { + name: 'Version' + value: '1.0' + } + ] + #disable-next-line BCP036 + type: 'Extension/AppInsightsExtension/PartType/MetricsExplorerBladePinnedPart' + asset: { + idInputName: 
'ComponentId' + type: 'ApplicationInsights' + } + defaultMenuItemId: 'browser' + } + } + { + position: { + x: 0 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'sessions/count' + aggregationType: 5 + namespace: 'microsoft.insights/components/kusto' + metricVisualization: { + displayName: 'Sessions' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'users/count' + aggregationType: 5 + namespace: 'microsoft.insights/components/kusto' + metricVisualization: { + displayName: 'Users' + color: '#7E58FF' + } + } + ] + title: 'Unique sessions and users' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'segmentationUsers' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 4 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'requests/failed' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Failed requests' + color: '#EC008C' + } + } + ] + title: 'Failed requests' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'failures' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'requests/duration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Server response time' + color: '#00BCF2' + } + } + ] + title: 'Server response time' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 
'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'performance' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 12 + y: 2 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/networkDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Page load network connect time' + color: '#7E58FF' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/processingDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Client processing time' + color: '#44F1C8' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'browserTimings/sendDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Send request time' + color: '#EB9371' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 
'browserTimings/receiveDuration' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Receiving response time' + color: '#0672F1' + } + } + ] + title: 'Average page load time breakdown' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 0 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'availabilityResults/availabilityPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Availability' + color: '#47BDF5' + } + } + ] + title: 'Average availability' + visualization: { + chartType: 3 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + openBladeOnClick: { + openBlade: true + destinationBlade: { + extensionName: 'HubsExtension' + bladeName: 'ResourceMenuBlade' + parameters: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + menuid: 'availability' + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + 
position: { + x: 4 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'exceptions/server' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Server exceptions' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'dependencies/failed' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Dependency failures' + color: '#7E58FF' + } + } + ] + title: 'Server exceptions and Dependency failures' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processorCpuPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Processor time' + color: '#47BDF5' + } + } + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processCpuPercentage' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Process CPU' + color: '#7E58FF' + } + } + ] + title: 'Average processor and process CPU utilization' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 12 + y: 5 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'exceptions/browser' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Browser exceptions' + color: '#47BDF5' + } + } + ] + title: 'Browser exceptions' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 0 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'availabilityResults/count' + aggregationType: 7 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Availability test results count' + color: '#47BDF5' + } + } + ] + title: 'Availability test results count' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 4 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: '/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/processIOBytesPerSecond' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Process IO rate' + color: '#47BDF5' + } + } + ] + title: 'Average process I/O rate' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + { + position: { + x: 8 + y: 8 + colSpan: 4 + rowSpan: 3 + } + metadata: { + inputs: [ + { + name: 'options' + value: { + chart: { + metrics: [ + { + resourceMetadata: { + id: 
'/subscriptions/${subscription().subscriptionId}/resourceGroups/${resourceGroup().name}/providers/Microsoft.Insights/components/${applicationInsights.name}' + } + name: 'performanceCounters/memoryAvailableBytes' + aggregationType: 4 + namespace: 'microsoft.insights/components' + metricVisualization: { + displayName: 'Available memory' + color: '#47BDF5' + } + } + ] + title: 'Average available memory' + visualization: { + chartType: 2 + legendVisualization: { + isVisible: true + position: 2 + hideSubtitle: false + } + axisVisualization: { + x: { + isVisible: true + axisType: 2 + } + y: { + isVisible: true + axisType: 1 + } + } + } + } + } + } + { + name: 'sharedTimeRange' + isOptional: true + } + ] + #disable-next-line BCP036 + type: 'Extension/HubsExtension/PartType/MonitorChartPart' + settings: {} + } + } + ] + } + ] + } +} + +resource applicationInsights 'Microsoft.Insights/components@2020-02-02' existing = { + name: applicationInsightsName +} diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights.bicep new file mode 100644 index 000000000..f8c1e8ad9 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/applicationinsights.bicep @@ -0,0 +1,31 @@ +metadata description = 'Creates an Application Insights instance based on an existing Log Analytics workspace.' 
+param name string +param dashboardName string = '' +param location string = resourceGroup().location +param tags object = {} +param logAnalyticsWorkspaceId string + +resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { + name: name + location: location + tags: tags + kind: 'web' + properties: { + Application_Type: 'web' + WorkspaceResourceId: logAnalyticsWorkspaceId + } +} + +module applicationInsightsDashboard 'applicationinsights-dashboard.bicep' = if (!empty(dashboardName)) { + name: 'application-insights-dashboard' + params: { + name: dashboardName + location: location + applicationInsightsName: applicationInsights.name + } +} + +output connectionString string = applicationInsights.properties.ConnectionString +output id string = applicationInsights.id +output instrumentationKey string = applicationInsights.properties.InstrumentationKey +output name string = applicationInsights.name diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/loganalytics.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/loganalytics.bicep new file mode 100644 index 000000000..bf87f546d --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/monitor/loganalytics.bicep @@ -0,0 +1,22 @@ +metadata description = 'Creates a Log Analytics workspace.' 
+param name string +param location string = resourceGroup().location +param tags object = {} + +resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2021-12-01-preview' = { + name: name + location: location + tags: tags + properties: any({ + retentionInDays: 30 + features: { + searchVersion: 1 + } + sku: { + name: 'PerGB2018' + } + }) +} + +output id string = logAnalytics.id +output name string = logAnalytics.name diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/azure_ai_search.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/azure_ai_search.bicep new file mode 100644 index 000000000..ba6e9bdf4 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/azure_ai_search.bicep @@ -0,0 +1,211 @@ +targetScope = 'resourceGroup' + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Azure Search resource name') +param resourceName string + +@description('Azure Search SKU name') +param azureSearchSkuName string = 'basic' + +@description('Azure storage account resource ID') +param storageAccountResourceId string + +@description('container name') +param containerName string = 'knowledgebase' + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('Name for the AI Foundry search connection') +param connectionName string = 'azure-ai-search-connection' + +@description('Location for all resources') +param location string = resourceGroup().location + +// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 
'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Azure Search Service +resource searchService 'Microsoft.Search/searchServices@2024-06-01-preview' = { + name: resourceName + location: location + tags: tags + sku: { + name: azureSearchSkuName + } + identity: { + type: 'SystemAssigned' + } + properties: { + replicaCount: 1 + partitionCount: 1 + hostingMode: 'default' + authOptions: { + aadOrApiKey: { + aadAuthFailureMode: 'http401WithBearerChallenge' + } + } + disableLocalAuth: false + encryptionWithCmk: { + enforcement: 'Unspecified' + } + publicNetworkAccess: 'enabled' + } +} + +// Reference to existing Storage Account +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' existing = { + name: last(split(storageAccountResourceId, '/')) +} + +// Reference to existing Blob Service +resource blobService 'Microsoft.Storage/storageAccounts/blobServices@2023-05-01' existing = { + parent: storageAccount + name: 'default' +} + +// Storage Container (create if it doesn't exist) +resource storageContainer 'Microsoft.Storage/storageAccounts/blobServices/containers@2023-05-01' = { + parent: blobService + name: containerName + properties: { + publicAccess: 'None' + } +} + +// RBAC Assignments + +// Search needs to read from Storage +resource searchToStorageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(storageAccount.id, searchService.id, 'Storage Blob Data Reader', uniqueString(deployment().name)) + scope: storageAccount + properties: { + // GOOD + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') // Storage Blob Data Reader + principalId: searchService.identity.principalId + principalType: 'ServicePrincipal' + } +} + +// Search needs OpenAI access (AI 
Services account) +resource searchToAIServicesRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName)) { + name: guid(aiServicesAccountName, searchService.id, 'Cognitive Services OpenAI User', uniqueString(deployment().name)) + properties: { + // GOOD + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '5e0bd9bd-7b93-4f28-af87-19fc36ad61bd') // Cognitive Services OpenAI User + principalId: searchService.identity.principalId + principalType: 'ServicePrincipal' + } +} + +// AI Project needs Search access - Service Contributor +resource aiServicesToSearchServiceRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: guid(searchService.id, aiServicesAccountName, aiProjectName, 'Search Service Contributor', uniqueString(deployment().name)) + scope: searchService + properties: { + // GOOD + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7ca78c08-252a-4471-8644-bb5ff32d4ba0') // Search Service Contributor + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + } +} + +// AI Project needs Search access - Index Data Contributor +resource aiServicesToSearchDataRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: guid(searchService.id, aiServicesAccountName, aiProjectName, 'Search Index Data Contributor', uniqueString(deployment().name)) + scope: searchService + properties: { + // GOOD + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '8ebe5a00-799e-43f5-93ac-243d3dce84a7') // Search Index Data Contributor + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + } +} + +// User permissions - Search Index Data Contributor +resource userToSearchRoleAssignment 
'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(searchService.id, principalId, 'Search Index Data Contributor', uniqueString(deployment().name)) + scope: searchService + properties: { + // GOOD + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '8ebe5a00-799e-43f5-93ac-243d3dce84a7') // Search Index Data Contributor + principalId: principalId + principalType: principalType + } +} + +// // User permissions - Storage Blob Data Contributor +// resource userToStorageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(storageAccount.id, principalId, 'Storage Blob Data Contributor', uniqueString(deployment().name)) +// scope: storageAccount +// properties: { +// roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe') // Storage Blob Data Contributor +// principalId: principalId +// principalType: principalType +// } +// } + +// // Project needs Search access - Index Data Contributor +// resource projectToSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(searchService.id, aiProjectName, 'Search Index Data Contributor', uniqueString(deployment().name)) +// scope: searchService +// properties: { +// roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '8ebe5a00-799e-43f5-93ac-243d3dce84a7') // Search Index Data Contributor +// principalId: aiAccountPrincipalId // Using AI account principal ID as project identity +// principalType: 'ServicePrincipal' +// } +// } + +// Create the AI Search connection using the centralized connection module +module aiSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'ai-search-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 
'CognitiveSearch' + target: 'https://${searchService.name}.search.windows.net' + authType: 'AAD' + isSharedToAll: true + metadata: { + ApiVersion: '2024-07-01' + ResourceId: searchService.id + ApiType: 'Azure' + type: 'azure_ai_search' + } + } + } + dependsOn: [ + aiServicesToSearchDataRoleAssignment + ] +} + +// Outputs +output searchServiceName string = searchService.name +output searchServiceId string = searchService.id +output searchServicePrincipalId string = searchService.identity.principalId +output storageAccountName string = storageAccount.name +output storageAccountId string = storageAccount.id +output containerName string = storageContainer.name +output storageAccountPrincipalId string = storageAccount.identity.principalId +output searchConnectionName string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionName : '' +output searchConnectionId string = (!empty(aiServicesAccountName) && !empty(aiProjectName)) ? aiSearchConnection!.outputs.connectionId : '' + diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_custom_grounding.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_custom_grounding.bicep new file mode 100644 index 000000000..ac2d66cba --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_custom_grounding.bicep @@ -0,0 +1,84 @@ +targetScope = 'resourceGroup' + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Bing custom grounding resource name') +param resourceName string + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Name for the AI Foundry Bing Custom Search connection') +param connectionName string = 'bing-custom-grounding-connection' + 
+// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Bing Search resource for grounding capability +resource bingCustomSearch 'Microsoft.Bing/accounts@2020-06-10' = { + name: resourceName + location: 'global' + tags: tags + sku: { + name: 'G1' + } + properties: { + statisticsEnabled: false + } + kind: 'Bing.CustomGrounding' +} + +// Role assignment to allow AI project to use Bing Search +resource bingCustomSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + scope: bingCustomSearch + name: guid(subscription().id, resourceGroup().id, 'bing-search-role', aiServicesAccountName, aiProjectName) + properties: { + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') // Cognitive Services User + } +} + +// Create the Bing Custom Search connection using the centralized connection module +module aiSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'bing-custom-search-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 'GroundingWithCustomSearch' + target: bingCustomSearch.properties.endpoint + authType: 'ApiKey' + isSharedToAll: true + metadata: { + Location: 'global' + ResourceId: bingCustomSearch.id + ApiType: 'Azure' + type: 'bing_custom_search' + } + } + credentials: { + key: bingCustomSearch.listKeys().key1 + } + } + dependsOn: [ + bingCustomSearchRoleAssignment + 
] +} + +// Outputs +output bingCustomGroundingName string = bingCustomSearch.name +output bingCustomGroundingConnectionName string = aiSearchConnection.outputs.connectionName +output bingCustomGroundingResourceId string = bingCustomSearch.id +output bingCustomGroundingConnectionId string = aiSearchConnection.outputs.connectionId diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_grounding.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_grounding.bicep new file mode 100644 index 000000000..a2fbc6bd2 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/search/bing_grounding.bicep @@ -0,0 +1,83 @@ +targetScope = 'resourceGroup' + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Bing grounding resource name') +param resourceName string + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Name for the AI Foundry Bing Search connection') +param connectionName string = 'bing-grounding-connection' + +// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Bing Search resource for grounding capability +resource bingSearch 'Microsoft.Bing/accounts@2020-06-10' = { + name: resourceName + location: 'global' + tags: tags + sku: { + name: 'G1' + } + properties: { + statisticsEnabled: false + } + kind: 'Bing.Grounding' +} + +// Role assignment to allow AI project to use Bing Search +resource bingSearchRoleAssignment 
'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + scope: bingSearch + name: guid(subscription().id, resourceGroup().id, 'bing-search-role', aiServicesAccountName, aiProjectName) + properties: { + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', 'a97b65f3-24c7-4388-baec-2e87135dc908') // Cognitive Services User + } +} + +// Create the Bing Search connection using the centralized connection module +module bingSearchConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'bing-search-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 'GroundingWithBingSearch' + target: bingSearch.properties.endpoint + authType: 'ApiKey' + isSharedToAll: true + metadata: { + Location: 'global' + ResourceId: bingSearch.id + ApiType: 'Azure' + type: 'bing_grounding' + } + } + credentials: { + key: bingSearch.listKeys().key1 + } + } + dependsOn: [ + bingSearchRoleAssignment + ] +} + +output bingGroundingName string = bingSearch.name +output bingGroundingConnectionName string = bingSearchConnection.outputs.connectionName +output bingGroundingResourceId string = bingSearch.id +output bingGroundingConnectionId string = bingSearchConnection.outputs.connectionId diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/storage/storage.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/storage/storage.bicep new file mode 100644 index 000000000..6bad1d157 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/core/storage/storage.bicep @@ -0,0 +1,113 @@ +targetScope = 'resourceGroup' + +@description('The location used for all deployed resources') +param 
location string = resourceGroup().location + +@description('Tags that will be applied to all resources') +param tags object = {} + +@description('Storage account resource name') +param resourceName string + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('AI Services account name for the project parent') +param aiServicesAccountName string = '' + +@description('AI project name for creating the connection') +param aiProjectName string = '' + +@description('Name for the AI Foundry storage connection') +param connectionName string = 'storage-connection' + +// Storage Account for the AI Services account +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' = { + name: resourceName + location: location + tags: tags + sku: { + name: 'Standard_LRS' + } + kind: 'StorageV2' + identity: { + type: 'SystemAssigned' + } + properties: { + supportsHttpsTrafficOnly: true + allowBlobPublicAccess: false + minimumTlsVersion: 'TLS1_2' + accessTier: 'Hot' + encryption: { + services: { + blob: { + enabled: true + } + file: { + enabled: true + } + } + keySource: 'Microsoft.Storage' + } + } +} + +// Get reference to the AI Services account and project to access their managed identities +resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: aiServicesAccountName + + resource aiProject 'projects' existing = { + name: aiProjectName + } +} + +// Role assignment for AI Services to access the storage account +resource storageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: guid(storageAccount.id, aiAccount.id, 'ai-storage-contributor') + scope: storageAccount + properties: { + roleDefinitionId: 
subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe') // Storage Blob Data Contributor + principalId: aiAccount::aiProject.identity.principalId + principalType: 'ServicePrincipal' + } +} + +// User permissions - Storage Blob Data Contributor +resource userStorageRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(storageAccount.id, principalId, 'Storage Blob Data Contributor') + scope: storageAccount + properties: { + roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe') // Storage Blob Data Contributor + principalId: principalId + principalType: principalType + } +} + +// Create the storage connection using the centralized connection module +module storageConnection '../ai/connection.bicep' = if (!empty(aiServicesAccountName) && !empty(aiProjectName)) { + name: 'storage-connection-creation' + params: { + aiServicesAccountName: aiServicesAccountName + aiProjectName: aiProjectName + connectionConfig: { + name: connectionName + category: 'AzureStorageAccount' + target: storageAccount.properties.primaryEndpoints.blob + authType: 'AAD' + isSharedToAll: true + metadata: { + ApiType: 'Azure' + ResourceId: storageAccount.id + location: storageAccount.location + } + } + } +} + +output storageAccountName string = storageAccount.name +output storageAccountId string = storageAccount.id +output storageAccountPrincipalId string = storageAccount.identity.principalId +output storageConnectionName string = storageConnection.outputs.connectionName diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.bicep b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.bicep new file mode 100644 index 000000000..943d43291 --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.bicep @@ -0,0 +1,201 @@ +targetScope = 'subscription' +// targetScope = 
'resourceGroup' + +@minLength(1) +@maxLength(64) +@description('Name of the environment that can be used as part of naming resource convention') +param environmentName string + +@minLength(1) +@maxLength(90) +@description('Name of the resource group to use or create') +param resourceGroupName string = 'rg-${environmentName}' + +// Restricted locations to match list from +// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#region-availability +@minLength(1) +@description('Primary location for all resources') +@allowed([ + 'australiaeast' + 'brazilsouth' + 'canadacentral' + 'canadaeast' + 'eastus' + 'eastus2' + 'francecentral' + 'germanywestcentral' + 'italynorth' + 'japaneast' + 'koreacentral' + 'northcentralus' + 'norwayeast' + 'polandcentral' + 'southafricanorth' + 'southcentralus' + 'southeastasia' + 'southindia' + 'spaincentral' + 'swedencentral' + 'switzerlandnorth' + 'uaenorth' + 'uksouth' + 'westus' + 'westus2' + 'westus3' +]) +param location string + +param aiDeploymentsLocation string + +@description('Id of the user or app to assign application roles') +param principalId string + +@description('Principal type of user or app') +param principalType string + +@description('Optional. Name of an existing AI Services account within the resource group. If not provided, a new one will be created.') +param aiFoundryResourceName string = '' + +@description('Optional. Name of the AI Foundry project. If not provided, a default name will be used.') +param aiFoundryProjectName string = 'ai-project-${environmentName}' + +@description('List of model deployments') +param aiProjectDeploymentsJson string = '[]' + +@description('List of connections') +param aiProjectConnectionsJson string = '[]' + +@secure() +@description('JSON map of connection name to credentials object. 
Example: {"my-conn":{"key":"secret"}}') +param aiProjectConnectionCredentialsJson string = '{}' + +@description('List of resources to create and connect to the AI project') +param aiProjectDependentResourcesJson string = '[]' + +var aiProjectDeployments = json(aiProjectDeploymentsJson) +var aiProjectConnections = json(aiProjectConnectionsJson) +var aiProjectConnectionCreds = json(aiProjectConnectionCredentialsJson) +var aiProjectDependentResources = json(aiProjectDependentResourcesJson) + +@description('Enable hosted agent deployment') +param enableHostedAgents bool + +@description('Enable the capability host for supporting BYO storage of agent conversations. When false and hosted agents are enabled, the capability host is not created.') +param enableCapabilityHost bool + +@description('Enable monitoring for the AI project') +param enableMonitoring bool + +@description('Optional. Existing container registry resource ID. If provided, no new ACR will be created and a connection to this ACR will be established.') +param existingContainerRegistryResourceId string = '' + +@description('Optional. Existing container registry endpoint (login server). Required if existingContainerRegistryResourceId is provided.') +param existingContainerRegistryEndpoint string = '' + +@description('Optional. Name of an existing ACR connection on the Foundry project. If provided, no new ACR or connection will be created.') +param existingAcrConnectionName string = '' + +@description('Optional. Existing Application Insights connection string. If provided, a connection will be created but no new App Insights resource.') +param existingApplicationInsightsConnectionString string = '' + +@description('Optional. Existing Application Insights resource ID. Used for connection metadata when providing an existing App Insights.') +param existingApplicationInsightsResourceId string = '' + +@description('Optional. Name of an existing Application Insights connection on the Foundry project. 
If provided, no new App Insights or connection will be created.') +param existingAppInsightsConnectionName string = '' + +// Tags that should be applied to all resources. +// +// Note that 'azd-service-name' tags should be applied separately to service host resources. +// Example usage: +// tags: union(tags, { 'azd-service-name': }) +var tags = { + 'azd-env-name': environmentName +} + +// Check if resource group exists and create it if it doesn't +resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: resourceGroupName + location: location + tags: tags +} + +// Build dependent resources array conditionally +// Check if ACR already exists in the user-provided array to avoid duplicates +// Also skip if user provided an existing container registry endpoint or connection name +var hasAcr = contains(map(aiProjectDependentResources, r => r.resource), 'registry') +var shouldCreateAcr = enableHostedAgents && !hasAcr && empty(existingContainerRegistryResourceId) && empty(existingAcrConnectionName) +var dependentResources = shouldCreateAcr ? 
union(aiProjectDependentResources, [ + { + resource: 'registry' + connectionName: 'acr-connection' + } +]) : aiProjectDependentResources + +// AI Project module +module aiProject 'core/ai/ai-project.bicep' = { + scope: rg + name: 'ai-project' + params: { + tags: tags + location: aiDeploymentsLocation + aiFoundryProjectName: aiFoundryProjectName + principalId: principalId + principalType: principalType + existingAiAccountName: aiFoundryResourceName + deployments: aiProjectDeployments + connections: aiProjectConnections + connectionCredentials: aiProjectConnectionCreds + additionalDependentResources: dependentResources + enableMonitoring: enableMonitoring + enableHostedAgents: enableHostedAgents + enableCapabilityHost: enableCapabilityHost + existingContainerRegistryResourceId: existingContainerRegistryResourceId + existingContainerRegistryEndpoint: existingContainerRegistryEndpoint + existingAcrConnectionName: existingAcrConnectionName + existingApplicationInsightsConnectionString: existingApplicationInsightsConnectionString + existingApplicationInsightsResourceId: existingApplicationInsightsResourceId + existingAppInsightsConnectionName: existingAppInsightsConnectionName + } +} + +// Resources +output AZURE_RESOURCE_GROUP string = resourceGroupName +output AZURE_AI_ACCOUNT_ID string = aiProject.outputs.accountId +output AZURE_AI_PROJECT_ID string = aiProject.outputs.projectId +output AZURE_AI_FOUNDRY_PROJECT_ID string = aiProject.outputs.projectId +output AZURE_AI_ACCOUNT_NAME string = aiProject.outputs.aiServicesAccountName +output AZURE_AI_PROJECT_NAME string = aiProject.outputs.projectName + +// Endpoints +output AZURE_AI_PROJECT_ENDPOINT string = aiProject.outputs.AZURE_AI_PROJECT_ENDPOINT +output AZURE_OPENAI_ENDPOINT string = aiProject.outputs.AZURE_OPENAI_ENDPOINT +output APPLICATIONINSIGHTS_CONNECTION_STRING string = aiProject.outputs.APPLICATIONINSIGHTS_CONNECTION_STRING +output APPLICATIONINSIGHTS_RESOURCE_ID string = 
aiProject.outputs.APPLICATIONINSIGHTS_RESOURCE_ID + +// Dependent Resources and Connections + +// ACR +output AZURE_AI_PROJECT_ACR_CONNECTION_NAME string = aiProject.outputs.dependentResources.registry.connectionName +output AZURE_CONTAINER_REGISTRY_ENDPOINT string = aiProject.outputs.dependentResources.registry.loginServer + +// Bing Search +output BING_GROUNDING_CONNECTION_NAME string = aiProject.outputs.dependentResources.bing_grounding.connectionName +output BING_GROUNDING_RESOURCE_NAME string = aiProject.outputs.dependentResources.bing_grounding.name +output BING_GROUNDING_CONNECTION_ID string = aiProject.outputs.dependentResources.bing_grounding.connectionId + +// Bing Custom Search +output BING_CUSTOM_GROUNDING_CONNECTION_NAME string = aiProject.outputs.dependentResources.bing_custom_grounding.connectionName +output BING_CUSTOM_GROUNDING_NAME string = aiProject.outputs.dependentResources.bing_custom_grounding.name +output BING_CUSTOM_GROUNDING_CONNECTION_ID string = aiProject.outputs.dependentResources.bing_custom_grounding.connectionId + +// Azure AI Search +output AZURE_AI_SEARCH_CONNECTION_NAME string = aiProject.outputs.dependentResources.search.connectionName +output AZURE_AI_SEARCH_SERVICE_NAME string = aiProject.outputs.dependentResources.search.serviceName + +// Azure Storage +output AZURE_STORAGE_CONNECTION_NAME string = aiProject.outputs.dependentResources.storage.connectionName +output AZURE_STORAGE_ACCOUNT_NAME string = aiProject.outputs.dependentResources.storage.accountName + +// Connections +output AI_PROJECT_CONNECTION_IDS_JSON string = string(aiProject.outputs.connectionIds) diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.parameters.json b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.parameters.json new file mode 100644 index 000000000..681875d5e --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/infra/main.parameters.json @@ -0,0 +1,69 @@ +{ + 
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "resourceGroupName": { + "value": "${AZURE_RESOURCE_GROUP}" + }, + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "aiFoundryResourceName": { + "value": "${AZURE_AI_ACCOUNT_NAME}" + }, + "aiFoundryProjectName": { + "value": "${AZURE_AI_PROJECT_NAME}" + }, + "aiDeploymentsLocation": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + }, + "principalType": { + "value": "${AZURE_PRINCIPAL_TYPE}" + }, + "aiProjectDeploymentsJson": { + "value": "${AI_PROJECT_DEPLOYMENTS=[]}" + }, + "aiProjectConnectionsJson": { + "value": "${AI_PROJECT_CONNECTIONS=[]}" + }, + "aiProjectConnectionCredentialsJson": { + "value": "${AI_PROJECT_CONNECTION_CREDENTIALS}" + }, + "aiProjectDependentResourcesJson": { + "value": "${AI_PROJECT_DEPENDENT_RESOURCES=[]}" + }, + "enableMonitoring": { + "value": "${ENABLE_MONITORING=true}" + }, + "enableHostedAgents": { + "value": "${ENABLE_HOSTED_AGENTS=false}" + }, + "enableCapabilityHost": { + "value": "${ENABLE_CAPABILITY_HOST=true}" + }, + "existingContainerRegistryResourceId": { + "value": "${AZURE_CONTAINER_REGISTRY_RESOURCE_ID=}" + }, + "existingContainerRegistryEndpoint": { + "value": "${AZURE_CONTAINER_REGISTRY_ENDPOINT=}" + }, + "existingAcrConnectionName": { + "value": "${AZURE_AI_PROJECT_ACR_CONNECTION_NAME=}" + }, + "existingApplicationInsightsConnectionString": { + "value": "${APPLICATIONINSIGHTS_CONNECTION_STRING=}" + }, + "existingApplicationInsightsResourceId": { + "value": "${APPLICATIONINSIGHTS_RESOURCE_ID=}" + }, + "existingAppInsightsConnectionName": { + "value": "${APPLICATIONINSIGHTS_CONNECTION_NAME=}" + } + } +} diff --git a/samples/python/hosted-agents/bring-your-own/responses/toolbox/main.py b/samples/python/hosted-agents/bring-your-own/responses/toolbox/main.py new file mode 
100644 index 000000000..7b21f761a --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/main.py @@ -0,0 +1,406 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Toolbox — Bring Your Own Responses agent with Foundry Toolbox MCP. + +Hosted agent that connects to an Azure AI Foundry toolbox via MCP, +discovers tools at startup, and lets the model call them during +conversation. Uses the Responses protocol for request/response handling. + +The agent: +1. Connects to the toolbox MCP endpoint and discovers available tools +2. On each request, sends the conversation + tool definitions to the model +3. If the model requests a tool call, executes it via MCP and loops +4. Returns the final text response through the Responses protocol SSE stream + +Conversation history is automatically managed by the platform via +``previous_response_id``. The handler calls ``context.get_history()`` to +retrieve prior turns and includes them in the model call so the agent +maintains context across messages. + +Required environment variables: + FOUNDRY_PROJECT_ENDPOINT: Foundry project endpoint (auto-injected in hosted containers) + AZURE_AI_MODEL_DEPLOYMENT_NAME: Model deployment name (declared in agent.manifest.yaml) + TOOLBOX_ENDPOINT: Full toolbox MCP endpoint URL (declared in agent.manifest.yaml) + +Usage:: + + # Set environment variables + export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" + export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4.1" + export TOOLBOX_ENDPOINT="https://.services.ai.azure.com/api/projects//toolboxes//mcp?api-version=v1" + + # Start the agent + python main.py + + # Invoke the agent + curl -sS -X POST http://localhost:8088/responses \\ + -H "Content-Type: application/json" \\ + -d '{"input": "Search the web for Azure AI Foundry news", "stream": false}' | jq . 
+""" + +from azure.ai.agentserver.responses.models import ( + MessageContentInputTextContent, + MessageContentOutputTextContent, +) +from azure.ai.agentserver.responses import ( + CreateResponse, + ResponseContext, + ResponseEventStream, + ResponsesAgentServerHost, + ResponsesServerOptions, + get_input_expanded, +) +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.ai.projects import AIProjectClient +import asyncio +import json +import logging +import os + +import httpx +from dotenv import load_dotenv + +load_dotenv(override=False) + + +logger = logging.getLogger(__name__) + +if not os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING"): + logger.warning( + "APPLICATIONINSIGHTS_CONNECTION_STRING not set — traces will not be sent to " + "Application Insights. Set it to enable local telemetry. " + "(This variable is auto-injected in hosted Foundry containers — do not declare it in agent.manifest.yaml.)" + ) + +# ── Configuration ───────────────────────────────────────────────────────────── + +_endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT") +if not _endpoint: + raise EnvironmentError( + "FOUNDRY_PROJECT_ENDPOINT environment variable is not set. " + "Set it to your Foundry project endpoint, or use 'azd ai agent run' " + "which sets it automatically." + ) + +_model = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +if not _model: + raise EnvironmentError( + "AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set. " + "Set it to your model deployment name as declared in agent.manifest.yaml." + ) + +# Platform injects TOOLBOX_{NAME}_MCP_ENDPOINT for declared toolbox resources. +# Fall back to TOOLBOX_ENDPOINT for local dev (.env). +TOOLBOX_ENDPOINT = ( + os.environ.get("TOOLBOX_WEB_SEARCH_TOOLS_MCP_ENDPOINT") + or os.environ.get("TOOLBOX_ENDPOINT", "") +) +if not TOOLBOX_ENDPOINT: + raise EnvironmentError( + "TOOLBOX_ENDPOINT environment variable is not set. 
" + "Set it to your toolbox MCP endpoint URL, or declare the toolbox " + "in agent.manifest.yaml resources." + ) +# Ensure api-version query param is present. +if "api-version=" not in TOOLBOX_ENDPOINT: + sep = "&" if "?" in TOOLBOX_ENDPOINT else "?" + TOOLBOX_ENDPOINT += f"{sep}api-version=v1" + +# Feature-flag header value (e.g. "Toolboxes=V1Preview"). +_TOOLBOX_FEATURES = os.getenv("FOUNDRY_AGENT_TOOLBOX_FEATURES", "Toolboxes=V1Preview") + +_credential = DefaultAzureCredential() +_project_client = AIProjectClient(endpoint=_endpoint, credential=_credential) +_responses_client = _project_client.get_openai_client().responses +_token_provider = get_bearer_token_provider( + _credential, "https://ai.azure.com/.default") + +_SYSTEM_PROMPT = ( + "You are a helpful AI assistant with access to tools via Azure AI Foundry toolbox. " + "Use the available tools when appropriate to answer user questions. " + "Be concise and informative." +) + +# ── Toolbox MCP client ──────────────────────────────────────────────────────── + + +class _McpToolboxClient: + """Lightweight MCP client for toolbox tool discovery and invocation.""" + + def __init__(self, endpoint: str, token_provider): + self.endpoint = endpoint + self._get_token = token_provider + self._session_id: str | None = None + self._req_id = 0 + + def _headers(self) -> dict: + h = { + "Content-Type": "application/json", + "Authorization": f"Bearer {self._get_token()}", + } + if _TOOLBOX_FEATURES: + h["Foundry-Features"] = _TOOLBOX_FEATURES + if self._session_id: + h["mcp-session-id"] = self._session_id + return h + + def _next_id(self) -> int: + self._req_id += 1 + return self._req_id + + def initialize(self) -> str: + """Send MCP initialize + initialized notification.""" + with httpx.Client(timeout=60) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={ + "jsonrpc": "2.0", + "id": self._next_id(), + "method": "initialize", + "params": { + "protocolVersion": "2024-11-05", + "capabilities": 
{}, + "clientInfo": {"name": "byo-responses-toolbox", "version": "1.0.0"}, + }, + }, + ) + resp.raise_for_status() + self._session_id = resp.headers.get("mcp-session-id") + data = resp.json() + + # Send initialized notification + client.post( + self.endpoint, + headers=self._headers(), + json={"jsonrpc": "2.0", "method": "notifications/initialized"}, + ) + return data.get("result", {}).get("serverInfo", {}).get("name", "unknown") + + def list_tools(self) -> list[dict]: + """Call tools/list and return tool definitions.""" + with httpx.Client(timeout=60) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={"jsonrpc": "2.0", "id": self._next_id( + ), "method": "tools/list", "params": {}}, + ) + resp.raise_for_status() + return resp.json().get("result", {}).get("tools", []) + + def call_tool(self, name: str, arguments: dict) -> str: + """Call a tool and return the text result.""" + with httpx.Client(timeout=120) as client: + resp = client.post( + self.endpoint, + headers=self._headers(), + json={ + "jsonrpc": "2.0", + "id": self._next_id(), + "method": "tools/call", + "params": {"name": name, "arguments": arguments}, + }, + ) + resp.raise_for_status() + result = resp.json().get("result", {}) + content = result.get("content", []) + texts = [] + for c in content: + if isinstance(c, dict): + if c.get("type") == "text" and c.get("text"): + texts.append(c["text"]) + elif c.get("type") == "resource": + resource = c.get("resource", {}) + if resource.get("text"): + texts.append(resource["text"]) + return "\n".join(texts) if texts else json.dumps(result) + + +# ── Lazy tool discovery ─────────────────────────────────────────────────────── +# Defer MCP connection to first request so the container can start and pass +# health checks before the toolbox endpoint is reachable. 
+ +_mcp_client: _McpToolboxClient | None = None +_tool_definitions: list[dict] = [] +_tools_initialized = False + + +def _ensure_tools(): + global _mcp_client, _tool_definitions, _tools_initialized + if _tools_initialized: + return + logger.info("Connecting to toolbox: %s", TOOLBOX_ENDPOINT) + _mcp_client = _McpToolboxClient(TOOLBOX_ENDPOINT, _token_provider) + server_name = _mcp_client.initialize() + mcp_tools = _mcp_client.list_tools() + logger.info("Toolbox '%s' connected: %d tool(s) discovered", + server_name, len(mcp_tools)) + for t in mcp_tools: + _tool_definitions.append({ + "type": "function", + "name": t["name"], + "description": t.get("description", ""), + "parameters": t.get("inputSchema", {"type": "object", "properties": {}}), + }) + _tools_initialized = True + +# ── Agentic loop ────────────────────────────────────────────────────────────── + + +_MAX_TOOL_ROUNDS = 10 + + +def _call_model(input_items: list[dict]) -> object: + """Call the model with tool definitions and return the response.""" + _ensure_tools() + return _responses_client.create( + model=_model, + instructions=_SYSTEM_PROMPT, + input=input_items, + tools=_tool_definitions if _tool_definitions else None, + store=False, + ) + + +def _run_agent_loop(input_items: list[dict]) -> str: + """Execute the agentic tool-calling loop synchronously. + + Calls the model, checks for tool calls, executes them, feeds results + back, and repeats until the model produces a text response or we hit + the max rounds limit. 
+ """ + for _ in range(_MAX_TOOL_ROUNDS): + response = _call_model(input_items) + + # Check if the model wants to call tools + tool_calls = [ + item for item in response.output + if getattr(item, "type", None) == "function_call" + ] + + if not tool_calls: + return response.output_text or "(No response)" + + # Execute each tool call and build result items + for tc in tool_calls: + try: + arguments = json.loads(tc.arguments) if isinstance( + tc.arguments, str) else tc.arguments + result_text = _mcp_client.call_tool(tc.name, arguments) + logger.info("Tool '%s' returned %d chars", + tc.name, len(result_text)) + except Exception as e: + logger.error("Tool '%s' failed: %s", tc.name, e) + result_text = f"Error calling tool: {e}" + + input_items.append({ + "type": "function_call", + "id": tc.id, + "call_id": tc.call_id, + "name": tc.name, + "arguments": tc.arguments if isinstance(tc.arguments, str) else json.dumps(tc.arguments), + }) + input_items.append({ + "type": "function_call_output", + "call_id": tc.call_id, + "output": result_text, + }) + + return "(Reached maximum tool call rounds)" + + +# ── Responses protocol handler ──────────────────────────────────────────────── + +app = ResponsesAgentServerHost( + options=ResponsesServerOptions(default_fetch_history_count=20), +) + + +def _get_input_text(request: CreateResponse) -> str | None: + """Extract plain text from a CreateResponse input.""" + inp = request.input + if isinstance(inp, str): + return inp + items = get_input_expanded(request) + for item in items: + content = getattr(item, "content", None) + if content is None: + continue + if isinstance(content, str): + return content + if isinstance(content, list): + for part in content: + text = getattr(part, "text", None) + if text: + return text + return None + + +def _build_input(current_input: str, history: list) -> list[dict]: + """Build Responses API input from conversation history and current message.""" + input_items = [] + for item in history: + if 
hasattr(item, "content") and item.content: + for content in item.content: + if isinstance(content, MessageContentOutputTextContent) and content.text: + input_items.append( + {"role": "assistant", "content": content.text}) + elif isinstance(content, MessageContentInputTextContent) and content.text: + input_items.append( + {"role": "user", "content": content.text}) + input_items.append({"role": "user", "content": current_input}) + return input_items + + +@app.response_handler +async def handler( + request: CreateResponse, + context: ResponseContext, + cancellation_signal: asyncio.Event, +): + """Forward user input to the model with toolbox tools and conversation history.""" + stream = ResponseEventStream( + response_id=context.response_id, + model=getattr(request, "model", None), + ) + + yield stream.emit_created() + yield stream.emit_in_progress() + + user_input = _get_input_text(request) or "" + if not user_input: + message_item = stream.add_output_item_message() + yield message_item.emit_added() + for event in message_item.text_content("No input provided."): + yield event + yield message_item.emit_done() + yield stream.emit_completed() + return + + history = await context.get_history() + input_items = _build_input(user_input, history) + + logger.info("Processing request %s", context.response_id) + + try: + loop = asyncio.get_running_loop() + assistant_reply = await loop.run_in_executor(None, _run_agent_loop, input_items) + except Exception as e: + logger.error("Failed to process request: %s", e, exc_info=True) + assistant_reply = f"I encountered an error processing your request: {e}" + + message_item = stream.add_output_item_message() + yield message_item.emit_added() + + text_content = message_item.add_text_content() + yield text_content.emit_added() + yield text_content.emit_delta(assistant_reply) + yield text_content.emit_text_done() + yield text_content.emit_done() + yield message_item.emit_done() + + yield stream.emit_completed() + + +app.run() diff --git 
a/samples/python/hosted-agents/bring-your-own/responses/toolbox/requirements.txt b/samples/python/hosted-agents/bring-your-own/responses/toolbox/requirements.txt new file mode 100644 index 000000000..f54a45ecc --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/responses/toolbox/requirements.txt @@ -0,0 +1,6 @@ +azure-ai-agentserver-responses==1.0.0b4 +azure-ai-agentserver-core==2.0.0b2 +azure-ai-projects==2.0.1 +azure-identity==1.25.3 +httpx +python-dotenv==1.1.1 diff --git a/samples/python/hosted-agents/bring-your-own/sample_toolboxes_crud.py b/samples/python/hosted-agents/bring-your-own/sample_toolboxes_crud.py new file mode 100644 index 000000000..c76baf9ef --- /dev/null +++ b/samples/python/hosted-agents/bring-your-own/sample_toolboxes_crud.py @@ -0,0 +1,627 @@ +""" +Comprehensive SDK samples for Azure AI Foundry toolbox CRUD operations. + +Tested with azure-ai-projects 2.1.0a20260408001. + +API: client.beta.toolboxes + - create_version(toolbox_name, tools=[], description=..., metadata=..., policies=...) + - get(toolbox_name) -> ToolboxObject (id, name, default_version) + - get_version(toolbox_name, ver) -> ToolboxVersionObject + - list() -> ItemPaged[ToolboxObject] + - list_versions(toolbox_name) -> ItemPaged[ToolboxVersionObject] + - update(toolbox_name, default_version=ver) -> promote a version to default + - delete_version(toolbox_name, ver) + - delete(toolbox_name) + +All tool types demonstrated: + - MCPTool (no-auth, key-auth, OAuth, Entra token passthrough, filtered) + - OpenApiTool (anonymous, project-connection auth) + - A2APreviewTool (agent-to-agent) + - FileSearchTool + - AzureAISearchTool + - WebSearchTool / BingCustomSearchConfiguration + - BingGroundingTool + - CodeInterpreterTool + - Multi-tool combinations + +Prerequisites: + pip install azure-identity python-dotenv httpx + pip install azure-ai-projects --pre + Set environment variables in .env (see bottom of file). 
+""" + +import json +import os +import sys +import traceback +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import ( + MCPTool, + FileSearchTool, + OpenApiTool, + A2APreviewTool, + AzureAISearchTool, + AzureAISearchToolResource, + AISearchIndexResource, + CodeInterpreterTool, + OpenApiAnonymousAuthDetails, + OpenApiProjectConnectionAuthDetails, + OpenApiProjectConnectionSecurityScheme, + WebSearchTool, + BingCustomSearchConfiguration, +) + +load_dotenv() + +ENDPOINT = os.environ["FOUNDRY_PROJECT_ENDPOINT"] + +credential = DefaultAzureCredential() +client = AIProjectClient(endpoint=ENDPOINT, credential=credential) + + +# ═══════════════════════════════════════════════════════════════════════════ +# Helper: MCP tools/list + tools/call via REST (validates toolbox is live) +# ═══════════════════════════════════════════════════════════════════════════ +def _toolbox_mcp_endpoint(toolbox_name: str) -> str: + """Build the MCP gateway URL for a toolbox.""" + return f"{ENDPOINT}/toolboxes/{toolbox_name}/mcp?api-version=v1" + + +_MCP_SCOPE = "https://ai.azure.com/.default" +_MCP_FEATURE_HEADER = "Toolboxes=V1Preview" + + +def _mcp_headers() -> dict: + token = credential.get_token(_MCP_SCOPE).token + return { + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + "Foundry-Features": _MCP_FEATURE_HEADER, + } + + +def _mcp_tools_list(toolbox_name: str) -> list: + """Call tools/list on the toolbox MCP endpoint.""" + import httpx + + url = _toolbox_mcp_endpoint(toolbox_name) + payload = {"jsonrpc": "2.0", "id": 1, "method": "tools/list", "params": {}} + resp = httpx.post(url, json=payload, headers=_mcp_headers(), timeout=60) + resp.raise_for_status() + data = resp.json() + tools = data.get("result", {}).get("tools", []) + print(f" tools/list → {len(tools)} tool(s)") + for t in tools[:5]: + print(f" - {t.get('name', '?')}") + return tools + + +def 
_mcp_tools_call(toolbox_name: str, tool_name: str, arguments: dict) -> dict: + """Call tools/call on the toolbox MCP endpoint.""" + import httpx + + url = _toolbox_mcp_endpoint(toolbox_name) + payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": {"name": tool_name, "arguments": arguments}, + } + resp = httpx.post(url, json=payload, headers=_mcp_headers(), timeout=60) + resp.raise_for_status() + data = resp.json() + result = data.get("result", {}) + content = result.get("content", []) + print(f" tools/call({tool_name}) → {len(content)} content block(s)") + if content: + first = content[0] + text = first.get("text", "") + print(f" preview: {text[:200]}...") + return result + + +# ═══════════════════════════════════════════════════════════════════════════ +# Lifecycle helpers: create → list versions → new version → promote → delete +# ═══════════════════════════════════════════════════════════════════════════ +def _full_lifecycle(toolbox_name: str, tools: list, *, validate_call=None): + """Run the full CRUD lifecycle for a toolbox. + + 1. create_version (v1) + 2. get + 3. tools/list (MCP validation) + 4. optional tools/call + 5. create_version (v2 — same tools, new description) + 6. list_versions + 7. update → promote v2 to default + 8. get_version v2 + 9. delete_version v1 + 10. delete toolbox + """ + print(f"\n{'='*60}") + print(f"LIFECYCLE: {toolbox_name}") + print(f"{'='*60}") + + # 1. create v1 + v1 = client.beta.toolboxes.create_version( + toolbox_name=toolbox_name, + tools=tools, + description=f"{toolbox_name} v1", + ) + print(f" 1. create_version → version={v1.version}, name={v1.name}") + + # 2. get toolbox + tb = client.beta.toolboxes.get(toolbox_name=toolbox_name) + print(f" 2. get → name={tb.name}, default_version={tb.default_version}") + + # 3. tools/list + listed_tools = _mcp_tools_list(toolbox_name) + + # 4. 
optional tools/call + if validate_call: + tool_name, args = validate_call + # find match + matching = [t for t in listed_tools if t.get("name") == tool_name] + if matching: + _mcp_tools_call(toolbox_name, tool_name, args) + else: + print(f" ⚠ tool '{tool_name}' not found in tools/list — skipping call") + + # 5. create v2 + v2 = client.beta.toolboxes.create_version( + toolbox_name=toolbox_name, + tools=tools, + description=f"{toolbox_name} v2 (promoted)", + ) + print(f" 5. create_version → version={v2.version}") + + # 6. list versions + versions = list(client.beta.toolboxes.list_versions(toolbox_name=toolbox_name)) + print(f" 6. list_versions → {len(versions)} version(s): {[v.version for v in versions]}") + + # 7. promote v2 + updated = client.beta.toolboxes.update(toolbox_name=toolbox_name, default_version=v2.version) + print(f" 7. update (promote) → default_version={updated.default_version}") + + # 8. get version v2 + v2_detail = client.beta.toolboxes.get_version(toolbox_name=toolbox_name, version=v2.version) + print(f" 8. get_version → version={v2_detail.version}, desc={v2_detail.description}") + + # 9. delete v1 + client.beta.toolboxes.delete_version(toolbox_name=toolbox_name, version=v1.version) + print(f" 9. delete_version v1 → OK") + + # 10. delete toolbox + client.beta.toolboxes.delete(toolbox_name=toolbox_name) + print(f" 10. delete → OK") + + return True + + +# ═══════════════════════════════════════════════════════════════════════════ +# Individual tool samples +# ═══════════════════════════════════════════════════════════════════════════ + +# --------------------------------------------------------------------------- +# 1. MCP — No Auth (public server, e.g. 
gitmcp.io) +# --------------------------------------------------------------------------- +def sample_mcp_no_auth(): + return _full_lifecycle( + "mcp-noauth-sample", + [ + MCPTool( + server_label="gitmcp", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 2. MCP — Key Auth +# --------------------------------------------------------------------------- +def sample_mcp_key_auth(): + return _full_lifecycle( + "mcp-keyauth-sample", + [ + MCPTool( + server_label="github", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 3. MCP — OAuth +# --------------------------------------------------------------------------- +def sample_mcp_oauth(): + return _full_lifecycle( + "mcp-oauth-sample", + [ + MCPTool( + server_label="github-oauth", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_OAUTH_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 4. MCP — Entra Token Passthrough (e.g. Outlook Mail via agent365) +# --------------------------------------------------------------------------- +def sample_mcp_entra_passthrough(): + return _full_lifecycle( + "mcp-entra-passthrough-sample", + [ + MCPTool( + server_label="outlook-mail", + server_url="https://agent365.svc.cloud.microsoft/agents/servers/mcp_MailTools", + project_connection_id=os.environ["MCP_ENTRA_PASSTHROUGH_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 5. 
MCP — Filtered tools +# --------------------------------------------------------------------------- +def sample_mcp_filtered(): + return _full_lifecycle( + "mcp-filtered-sample", + [ + MCPTool( + server_label="github-filtered", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + allowed_tools=["search_repositories", "get_file_contents"], + headers={"Accept": "application/json"}, + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 6. OpenAPI — No Auth (anonymous) +# --------------------------------------------------------------------------- +def sample_openapi_no_auth(): + spec = { + "openapi": "3.0.0", + "info": {"title": "JSON Placeholder", "version": "1.0"}, + "servers": [{"url": "https://jsonplaceholder.typicode.com"}], + "paths": { + "/posts/{id}": { + "get": { + "operationId": "getPost", + "summary": "Get a post by ID", + "parameters": [ + { + "name": "id", + "in": "path", + "required": True, + "schema": {"type": "integer"}, + } + ], + "responses": {"200": {"description": "A post object"}}, + } + } + }, + } + return _full_lifecycle( + "openapi-noauth-sample", + [ + OpenApiTool( + openapi={ + "name": "jsonplaceholder", + "spec": spec, + "auth": OpenApiAnonymousAuthDetails(), + } + ) + ], + validate_call=("getPost", {"id": 1}), + ) + + +# --------------------------------------------------------------------------- +# 7. 
OpenAPI — With Project Connection Auth +# --------------------------------------------------------------------------- +def sample_openapi_with_connection(): + spec = { + "openapi": "3.0.1", + "info": {"title": "TripAdvisor API", "version": "1.0"}, + "servers": [{"url": "https://api.content.tripadvisor.com/api/v1"}], + "paths": { + "/location/search": { + "get": { + "operationId": "searchLocations", + "summary": "Search for locations", + "parameters": [ + { + "name": "searchQuery", + "in": "query", + "required": True, + "schema": {"type": "string"}, + }, + { + "name": "language", + "in": "query", + "schema": {"type": "string", "default": "en"}, + }, + ], + "responses": {"200": {"description": "Search results"}}, + "security": [{"apiKeyAuth": []}], + } + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "name": "key", + "in": "query", + } + } + }, + } + return _full_lifecycle( + "openapi-tripadvisor-sample", + [ + OpenApiTool( + openapi={ + "name": "tripadvisor", + "spec": spec, + "auth": OpenApiProjectConnectionAuthDetails( + security_scheme=OpenApiProjectConnectionSecurityScheme( + project_connection_id=os.environ["TRIPADVISOR_CONNECTION_ID"], + ), + ), + } + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 8. A2A — Agent-to-Agent +# --------------------------------------------------------------------------- +def sample_a2a(): + return _full_lifecycle( + "a2a-sample", + [ + A2APreviewTool( + project_connection_id=os.environ.get("A2A_CONNECTION_ID", ""), + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 9. 
File Search +# --------------------------------------------------------------------------- +def sample_file_search(): + return _full_lifecycle( + "filesearch-sample", + [ + FileSearchTool( + name="filesearch_docs", + vector_store_ids=[os.environ["FILE_SEARCH_VECTOR_STORE_ID"]], + description="Search uploaded files for grounded passages.", + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 10. Azure AI Search +# --------------------------------------------------------------------------- +def sample_azure_ai_search(): + return _full_lifecycle( + "aisearch-sample", + [ + AzureAISearchTool( + azure_ai_search=AzureAISearchToolResource( + indexes=[ + AISearchIndexResource( + index_name=os.environ["AI_SEARCH_INDEX_NAME"], + project_connection_id=os.environ["AI_SEARCH_CONNECTION_ID"], + ) + ] + ) + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 11. Code Interpreter +# --------------------------------------------------------------------------- +def sample_code_interpreter(): + return _full_lifecycle( + "codeinterp-sample", + [CodeInterpreterTool()], + ) + + +# --------------------------------------------------------------------------- +# 12. Web Search +# --------------------------------------------------------------------------- +def sample_websearch_tool(): + return _full_lifecycle( + "websearch-sample", + [WebSearchTool()], + validate_call=("web_search", {"query": "Azure AI Foundry documentation"}), + ) + + +# --------------------------------------------------------------------------- +# 13. 
Web Search — Bing Custom Search +# --------------------------------------------------------------------------- +def sample_websearch_custom(): + return _full_lifecycle( + "websearch-customsearch-sample", + [ + WebSearchTool( + custom_search_configuration=BingCustomSearchConfiguration( + project_connection_id=os.environ["BING_SEARCH_CONNECTION_ID"], + instance_name=os.environ["BING_SEARCH_INSTANCE_NAME"], + ) + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 14. Multi-Tool (MCP + MCP) +# --------------------------------------------------------------------------- +def sample_multi_tool(): + return _full_lifecycle( + "multi-tool-sample", + [ + MCPTool( + server_label="gitmcp", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + MCPTool( + server_label="github", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 15. Multi-Tool (file search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_filesearch_mcp(): + return _full_lifecycle( + "multi-filesearch-mcp-sample", + [ + FileSearchTool( + name="filesearch_project_docs", + vector_store_ids=[os.environ["FILE_SEARCH_VECTOR_STORE_ID"]], + description="Find relevant passages from uploaded project files.", + ), + MCPTool( + server_label="gitmcp-files", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 16. 
Multi-Tool (web search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_websearch_mcp(): + return _full_lifecycle( + "multi-websearch-mcp-sample", + [ + WebSearchTool(), + MCPTool( + server_label="gitmcp-web", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 17. Multi-Tool (AI Search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_aisearch_mcp(): + return _full_lifecycle( + "multi-aisearch-mcp-sample", + [ + AzureAISearchTool( + azure_ai_search=AzureAISearchToolResource( + indexes=[ + AISearchIndexResource( + index_name=os.environ["AI_SEARCH_INDEX_NAME"], + project_connection_id=os.environ["AI_SEARCH_CONNECTION_ID"], + ) + ] + ), + ), + MCPTool( + server_label="gitmcp-aisearch", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 18. 
List all toolboxes +# --------------------------------------------------------------------------- +def sample_list_all(): + import httpx as _httpx + token = credential.get_token(_MCP_SCOPE).token + resp = _httpx.get( + f"{ENDPOINT}/toolboxes", + params={"api-version": "v1", "limit": 100}, + headers={"Authorization": f"Bearer {token}", "Foundry-Features": _MCP_FEATURE_HEADER}, + timeout=30, + ) + resp.raise_for_status() + toolboxes = resp.json().get("data", []) + print(f"\n{len(toolboxes)} toolbox(es):") + for tb in toolboxes: + print(f" {tb['name']} default_version={tb.get('default_version')}") + return toolboxes + + +# ═══════════════════════════════════════════════════════════════════════════ +# Runner +# ═══════════════════════════════════════════════════════════════════════════ +SAMPLES = { + "mcp-noauth": sample_mcp_no_auth, + "mcp-keyauth": sample_mcp_key_auth, + "mcp-oauth": sample_mcp_oauth, + "mcp-entra-passthrough": sample_mcp_entra_passthrough, + "mcp-filtered": sample_mcp_filtered, + "openapi-noauth": sample_openapi_no_auth, + "openapi-conn": sample_openapi_with_connection, + "a2a": sample_a2a, + "filesearch": sample_file_search, + "aisearch": sample_azure_ai_search, + "codeinterp": sample_code_interpreter, + "websearch": sample_websearch_tool, + "websearch-custom": sample_websearch_custom, + "multi": sample_multi_tool, + "multi-filesearch-mcp": sample_multi_filesearch_mcp, + "multi-websearch-mcp": sample_multi_websearch_mcp, + "multi-aisearch-mcp": sample_multi_aisearch_mcp, + "list": sample_list_all, +} + +if __name__ == "__main__": + if len(sys.argv) >= 2 and sys.argv[1] == "all": + # Run all samples, collect pass/fail report + results = {} + for name, fn in SAMPLES.items(): + if name == "list": + continue + try: + fn() + results[name] = "PASS" + except Exception as exc: + results[name] = f"FAIL: {exc}" + traceback.print_exc() + print("\n" + "=" * 60) + print("CRUD TEST REPORT") + print("=" * 60) + for name, status in results.items(): + mark = "✓" if 
status == "PASS" else "✗" + print(f" {mark} {name}: {status}") + passed = sum(1 for v in results.values() if v == "PASS") + print(f"\n {passed}/{len(results)} passed") + elif len(sys.argv) >= 2 and sys.argv[1] in SAMPLES: + SAMPLES[sys.argv[1]]() + else: + print(f"Usage: python {sys.argv[0]} ") + print(f"Samples: {', '.join(SAMPLES.keys())}") + sys.exit(1) diff --git a/samples/python/hosted-agents/code-interpreter-custom/.env.sample b/samples/python/hosted-agents/code-interpreter-custom/.env.sample deleted file mode 100644 index f44eac748..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/.env.sample +++ /dev/null @@ -1,3 +0,0 @@ -AZURE_AI_PROJECT_ENDPOINT= -AZURE_AI_CONNECTION_ID= -AZURE_AI_MODEL_DEPLOYMENT_NAME= \ No newline at end of file diff --git a/samples/python/hosted-agents/code-interpreter-custom/.gitignore b/samples/python/hosted-agents/code-interpreter-custom/.gitignore deleted file mode 100644 index 67b5e38af..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.venv/ -.env -__pycache__/ diff --git a/samples/python/hosted-agents/code-interpreter-custom/README.md b/samples/python/hosted-agents/code-interpreter-custom/README.md deleted file mode 100644 index bc92450be..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Custom Code Interpreter with Session Pool MCP server - -This provides example Bicep code for setting up a Container Apps dynamic session pool -with a custom code interpreter image, as well as Python client code demonstrating -how to use it with a Foundry Hosted Agent. - -You will need the following installed to run the sample code: - -- The `az` CLI -- Python3 -- A Python3 package manager like `uv` or `pip` + `venv` - - If you are using `pip`, make sure `ensurepip` is installed. On Debian/Ubuntu - systems, this would mean running `apt install python3.12-venv`. 
- -## Running code sample - -### Enable MCP server for dynamic sessions - -This is required to enable the preview feature. - -```console -az feature register --namespace Microsoft.App --name SessionPoolsSupportMCP -az provider register -n Microsoft.App -``` - -### Create a dynamic session pool with a code interpreter image - -Using the `az` CLI, deploy with the provided Bicep template file: - -```console -az deployment group create \ - --name custom-code-interpreter \ - --subscription \ - --resource-group \ - --template-file ./infra.bicep -``` - -> [!NOTE] This can take a while! Allocating the dynamic session pool -> can take up to 1 hour, depending on the number of standby instances -> requested. - -### Use the custom code interpreter in an agent - -Copy the [`.env.sample`](./.env.sample) file to `.env` and fill in the values with -the output of the above deployment, which you can find in the Web Portal under the -resource group. - -Finally, install Python dependencies and run the script: - -```console -# Using uv - -uv sync -uv run ./main.py - -# Using pip - -python3 -m venv .venv -./.venv/bin/pip3 install -r requirements.txt -./.venv/bin/python3 ./main.py -``` - -## Limitations - -File input/output and use of file stores are not directly supported in APIs, so you must use URLs (such as data URLs for small files and Azure Blob Service SAS URLs for large ones) to get data in and out. 
diff --git a/samples/python/hosted-agents/code-interpreter-custom/infra.bicep b/samples/python/hosted-agents/code-interpreter-custom/infra.bicep deleted file mode 100644 index 7f131b089..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/infra.bicep +++ /dev/null @@ -1,231 +0,0 @@ -@description('Suffix for resource names to ensure uniqueness') -@minLength(3) -param suffix string = uniqueString(resourceGroup().id) - -@description('Container Apps environment name') -@minLength(3) -param environmentName string = 'aca-env-${suffix}' - -@description('Session pool name') -@minLength(3) -param sessionPoolName string = 'sp-${suffix}' - -@description('The amount of CPU to provide to each container instance, in vCPU counts') -@minValue(1) -@maxValue(16) -param cpu int = 1 - -@description('The amount of RAM to provide to each container instance, in GiB') -@minValue(1) -@maxValue(16) -param memory int = 2 - -@description('Location of all ACA resources.') -@allowed([ - 'eastus' - 'swedencentral' - 'northeurope' -]) -param location string = 'swedencentral' - -@description('Use managed identity for deployment script principal') -param useManagedIdentity bool = true - -@description('An image that implements the code interpreter HTTP API') -param image string = 'mcr.microsoft.com/k8se/services/codeinterpreter:0.9.18-python3.12' - -@description('Model deployment name') -param modelDeploymentName string = 'my-gpt-4o-mini' - -@description('Model to deploy') -param modelName string = 'gpt-4o-mini' - -resource environment 'Microsoft.App/managedEnvironments@2025-10-02-preview' = { - name: environmentName - location: location - properties: { - workloadProfiles: [ - { - name: 'Consumption' - workloadProfileType: 'Consumption' - } - ] - } -} - -resource sessionPool 'Microsoft.App/sessionPools@2025-10-02-preview' = { - name: sessionPoolName - location: location - properties: { - environmentId: environment.id - poolManagementType: 'Dynamic' - containerType: 'CustomContainer' 
- scaleConfiguration: { - maxConcurrentSessions: 10 - readySessionInstances: 5 - } - dynamicPoolConfiguration: { - lifecycleConfiguration: { - cooldownPeriodInSeconds: 600 - lifecycleType: 'Timed' - } - } - customContainerTemplate: { - containers: [ - { - name: 'jupyterpython' - image: image - env: [ - { - name: 'SYS_RUNTIME_SANDBOX' - value: 'AzureContainerApps-DynamicSessions' - } - { - name: 'AZURE_CODE_EXEC_ENV' - value: 'AzureContainerApps-DynamicSessions-Py3.12' - } - { - name: 'AZURECONTAINERAPPS_SESSIONS_SANDBOX_VERSION' - value: '7758' - } - { - name: 'JUPYTER_TOKEN' - value: 'AzureContainerApps-DynamicSessions' - } - ] - resources: { - cpu: cpu - memory: '${memory}Gi' - } - probes: [ - { - type: 'Liveness' - httpGet: { - path: '/health' - port: 6000 - } - failureThreshold: 4 - } - { - type: 'Startup' - httpGet: { - path: '/health' - port: 6000 - } - failureThreshold: 30 - periodSeconds: 2 - } - ] - } - ] - ingress: { - targetPort: 6000 - } - } - mcpServerSettings: { - isMcpServerEnabled: true - } - sessionNetworkConfiguration: { - status: 'egressEnabled' - } - } -} - -resource scriptPrincipal 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' = if (useManagedIdentity){ - name: 'deployScriptIdentity-${suffix}' - location: location -} - -resource roleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (useManagedIdentity) { - name: guid(scriptPrincipal!.id, 'apps-sessionpool-contributor') - scope: resourceGroup() - properties: { - roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'f7669afb-68b2-44b4-9c5f-6d2a47fddda0') // Container Apps SessionPools Contributor - principalId: scriptPrincipal!.properties.principalId - principalType: 'ServicePrincipal' - } -} - -resource deployScript 'Microsoft.Resources/deploymentScripts@2020-10-01' = { - name: 'getmcpkey-${suffix}' - location: location - kind: 'AzureCLI' - identity: useManagedIdentity ? 
{ - type: 'UserAssigned' - userAssignedIdentities: { - '${scriptPrincipal!.id}': {} - } - } : null - properties: { - azCliVersion: '2.77.0' - scriptContent: ''' - az rest --method post --url "$SESSION_POOL_ID/fetchMCPServerCredentials?api-version=2025-02-02-preview" | jq -c '{"key": .apiKey}' > $AZ_SCRIPTS_OUTPUT_PATH - ''' - timeout: 'PT30M' - retentionInterval: 'P1D' - cleanupPreference: 'OnSuccess' - environmentVariables: [ - { - name: 'SESSION_POOL_ID' - value: sessionPool.id - } - ] - } -} - -resource aiAccount 'Microsoft.CognitiveServices/accounts@2025-10-01-preview' = { - name: 'aia-${suffix}' - location: location - kind: 'AIServices' - sku: { - name: 'S0' - } - properties: { - customSubDomainName: 'myaiaccount-${suffix}' - allowProjectManagement: true - } - - resource project 'projects' = { - name: 'aip-${suffix}s' - properties: { - description: 'This is my AI project.' - } - - resource mcpConn 'connections' = { - name: 'aic-${suffix}' - properties: { - authType: 'CustomKeys' - category: 'RemoteTool' - credentials: { - keys: { - 'x-ms-apikey': deployScript.properties.outputs.key - } - } - target: sessionPool.properties.mcpServerSettings.mcpServerEndpoint - } - } - } - - resource model 'deployments' = { - name: modelDeploymentName - sku: { - name: 'GlobalStandard' - capacity: 1 - } - properties: { - model: { - format: 'OpenAI' - name: modelName - } - } - } -} - -@description('Outputs the ID of the project connection for the Code Interpreter MCP Tool') -output AZURE_AI_CONNECTION_ID string = aiAccount::project::mcpConn.id - -@description('Model deployment name') -output AZURE_AI_MODEL_DEPLOYMENT_NAME string = aiAccount::model.name - -@description('AI Project Endpoint') -output AZURE_AI_PROJECT_ENDPOINT string = aiAccount::project.properties.endpoints['AI Foundry API'] diff --git a/samples/python/hosted-agents/code-interpreter-custom/main.py b/samples/python/hosted-agents/code-interpreter-custom/main.py deleted file mode 100644 index 02559cd5e..000000000 --- 
a/samples/python/hosted-agents/code-interpreter-custom/main.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import dotenv -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, MCPTool -from azure.identity import DefaultAzureCredential - -dotenv.load_dotenv() - -project_client = AIProjectClient( - endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) -openai_client = project_client.get_openai_client() - -tools = [ - MCPTool( - # This is just a placeholder. Connection details are in - # the project connection referenced by `project_connection_id`. - server_url="https://localhost", - server_label="python_tool", - require_approval="never", - allowed_tools=[ - "launchShell", - "runPythonCodeInRemoteEnvironment", - ], - project_connection_id=os.environ["AZURE_AI_CONNECTION_ID"], - ), -] - -EXAMPLE_DATA_FILE_URL = "https://raw.githubusercontent.com/Azure-Samples/azureai-samples/refs/heads/main/scenarios/Agents/data/nifty_500_quarterly_results.csv" - -with project_client: - agent = project_client.agents.create_version( - agent_name="MyAgent", - definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - instructions="""\ -You are a helpful agent that can use a Python code interpreter to assist users. Use the `python_tool` MCP -server to perform any calculations or numerical analyses. ALWAYS call the `launchShell` tool first before -calling the `runPythonCodeInRemoteEnvironment` tool. If you need to provide any non-text data to the user, -always print a data URI with the contents. NEVER provide a path to a file in the remote environment to the user. 
-""", - temperature=0, - tools=tools, - ), - ) - print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") - - # Use the agent to analyze a CSV file and produce a histogram - response = openai_client.responses.create( - input=f"Please analyze the CSV file at {EXAMPLE_DATA_FILE_URL}. Could you please create bar chart in the TRANSPORTATION sector for the operating profit and provide a file to me?", - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, - ) - print(f"[Response {response.id}]: {response.output_text}") - - # Clean up resources by deleting the agent version - # This prevents accumulation of unused agent versions in your project - project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - print("Agent deleted") diff --git a/samples/python/hosted-agents/code-interpreter-custom/pyproject.toml b/samples/python/hosted-agents/code-interpreter-custom/pyproject.toml deleted file mode 100644 index ddac6b24c..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/pyproject.toml +++ /dev/null @@ -1,13 +0,0 @@ -[project] -name = "code-interpreter-custom" -version = "0.1.0" -description = "Basic example for using custom code interpreter session pools." 
-readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "aiohttp>=3.13.2", - "azure-ai-projects==2.0.0b2", - "azure-identity>=1.25.1", - "dotenv>=0.9.9", - "openai>=2.8.1", -] diff --git a/samples/python/hosted-agents/code-interpreter-custom/requirements.txt b/samples/python/hosted-agents/code-interpreter-custom/requirements.txt deleted file mode 100644 index 91da7397a..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/requirements.txt +++ /dev/null @@ -1,40 +0,0 @@ -aiohappyeyeballs==2.6.1 -aiohttp==3.13.2 -aiosignal==1.4.0 -annotated-types==0.7.0 -anyio==4.12.0 -attrs==25.4.0 -azure-ai-projects==2.0.0b2 -azure-core==1.36.0 -azure-identity==1.25.1 -azure-storage-blob==12.27.1 -certifi==2025.11.12 -cffi==2.0.0 -charset-normalizer==3.4.4 -cryptography==46.0.3 -distro==1.9.0 -dotenv==0.9.9 -frozenlist==1.8.0 -h11==0.16.0 -httpcore==1.0.9 -httpx==0.28.1 -idna==3.11 -isodate==0.7.2 -jiter==0.12.0 -msal==1.34.0 -msal-extensions==1.3.1 -multidict==6.7.0 -openai==2.8.1 -propcache==0.4.1 -pycparser==2.23 -pydantic==2.12.5 -pydantic_core==2.41.5 -PyJWT==2.10.1 -python-dotenv==1.2.1 -requests==2.32.5 -sniffio==1.3.1 -tqdm==4.67.1 -typing-inspection==0.4.2 -typing_extensions==4.15.0 -urllib3==2.5.0 -yarl==1.22.0 diff --git a/samples/python/hosted-agents/code-interpreter-custom/uv.lock b/samples/python/hosted-agents/code-interpreter-custom/uv.lock deleted file mode 100644 index f7f71c417..000000000 --- a/samples/python/hosted-agents/code-interpreter-custom/uv.lock +++ /dev/null @@ -1,1148 +0,0 @@ -version = 1 -revision = 3 -requires-python = ">=3.12" - -[[package]] -name = "aiohappyeyeballs" -version = "2.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = 
"2025-03-12T01:42:48.764Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, -] - -[[package]] -name = "aiohttp" -version = "3.13.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohappyeyeballs" }, - { name = "aiosignal" }, - { name = "attrs" }, - { name = "frozenlist" }, - { name = "multidict" }, - { name = "propcache" }, - { name = "yarl" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, - { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, - { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, - { url = 
"https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, - { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, - { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, - { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, - { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, - { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, - { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, - { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, - { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = 
"2025-10-28T20:56:56.428Z" }, - { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, - { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, - { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, - { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, - { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, - { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, - { url = 
"https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, - { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, - { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" }, - { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, - { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, - { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, - { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, - { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, - { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, - { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" }, - { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, - { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" 
}, - { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, - { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time = "2025-10-28T20:57:38.205Z" }, - { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" }, - { url = "https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" }, - { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" }, - { url = 
"https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" }, - { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" }, - { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" }, - { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" }, - { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time 
= "2025-10-28T20:57:57.59Z" }, - { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" }, - { url = "https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" }, - { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" }, - { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = "2025-10-28T20:58:06.189Z" }, - { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" }, - { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" }, - { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" }, - { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" }, - { url = "https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" }, - { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = 
"2025-10-28T20:58:24.672Z" }, - { url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" }, - { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" }, - { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" }, - { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" }, - { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time 
= "2025-10-28T20:58:41.507Z" }, - { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" }, - { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" }, - { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" }, - { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = "2025-10-28T20:58:50.642Z" }, - { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" }, -] - -[[package]] -name = "aiosignal" -version = "1.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "frozenlist" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = 
"sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, -] - -[[package]] -name = "annotated-types" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, -] - -[[package]] -name = "anyio" -version = "4.11.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "sniffio" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, -] - 
-[[package]] -name = "attrs" -version = "25.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, -] - -[[package]] -name = "azure-ai-projects" -version = "2.0.0b2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "azure-storage-blob" }, - { name = "isodate" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f2/96/ec17f99f5ced3d82876e89b4f950d8c7466c84d79016c5905b1c03b6c484/azure_ai_projects-2.0.0b2.tar.gz", hash = "sha256:4444cc49c799359b9c25d7f59c126862053cb591b63e69ffc640774b4ceb2b73", size = 369393, upload-time = "2025-11-15T06:17:46.312Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/41/d9a2b3eb33b4ffd9acfaa115cfd456e32d0c754227d6d78ec5d039ff75c2/azure_ai_projects-2.0.0b2-py3-none-any.whl", hash = "sha256:642496fdf9846c91f3557d39899d3893f0ce8f910334320686fc8f617492351d", size = 234023, upload-time = "2025-11-15T06:17:48.141Z" }, -] - -[[package]] -name = "azure-core" -version = "1.36.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "requests" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0a/c4/d4ff3bc3ddf155156460bff340bbe9533f99fac54ddea165f35a8619f162/azure_core-1.36.0.tar.gz", hash = "sha256:22e5605e6d0bf1d229726af56d9e92bc37b6e726b141a18be0b4d424131741b7", size = 
351139, upload-time = "2025-10-15T00:33:49.083Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/3c/b90d5afc2e47c4a45f4bba00f9c3193b0417fad5ad3bb07869f9d12832aa/azure_core-1.36.0-py3-none-any.whl", hash = "sha256:fee9923a3a753e94a259563429f3644aaf05c486d45b1215d098115102d91d3b", size = 213302, upload-time = "2025-10-15T00:33:51.058Z" }, -] - -[[package]] -name = "azure-identity" -version = "1.25.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "cryptography" }, - { name = "msal" }, - { name = "msal-extensions" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/8d/1a6c41c28a37eab26dc85ab6c86992c700cd3f4a597d9ed174b0e9c69489/azure_identity-1.25.1.tar.gz", hash = "sha256:87ca8328883de6036443e1c37b40e8dc8fb74898240f61071e09d2e369361456", size = 279826, upload-time = "2025-10-06T20:30:02.194Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/83/7b/5652771e24fff12da9dde4c20ecf4682e606b104f26419d139758cc935a6/azure_identity-1.25.1-py3-none-any.whl", hash = "sha256:e9edd720af03dff020223cd269fa3a61e8f345ea75443858273bcb44844ab651", size = 191317, upload-time = "2025-10-06T20:30:04.251Z" }, -] - -[[package]] -name = "azure-storage-blob" -version = "12.27.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "cryptography" }, - { name = "isodate" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/36/7c/2fd872e11a88163f208b9c92de273bf64bb22d0eef9048cc6284d128a77a/azure_storage_blob-12.27.1.tar.gz", hash = "sha256:a1596cc4daf5dac9be115fcb5db67245eae894cf40e4248243754261f7b674a6", size = 597579, upload-time = "2025-10-29T12:27:16.185Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/9e/1c90a122ea6180e8c72eb7294adc92531b0e08eb3d2324c2ba70d37f4802/azure_storage_blob-12.27.1-py3-none-any.whl", hash = 
"sha256:65d1e25a4628b7b6acd20ff7902d8da5b4fde8e46e19c8f6d213a3abc3ece272", size = 428954, upload-time = "2025-10-29T12:27:18.072Z" }, -] - -[[package]] -name = "certifi" -version = "2025.11.12" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, -] - -[[package]] -name = "cffi" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, - { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, - { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, - { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, - { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, - { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, - { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, - { url = 
"https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, - { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, - { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, - { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, - { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, - { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, - { url = 
"https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, - { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, - { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, - { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, - { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, - { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, - { url = 
"https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, - { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, - { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, - { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, - { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, - { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, - { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, - { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, - { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, - { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, - { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, - { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, - { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, 
upload-time = "2025-09-08T23:23:44.61Z" }, - { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, - { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, - { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, - { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, - { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, - { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, - { url = 
"https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, - { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, - { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, - { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, - { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, - { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, - { url = 
"https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, -] - -[[package]] -name = "charset-normalizer" -version = "3.4.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, - { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, - { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, - { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, - { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, - { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, - { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, - { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = 
"sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, - { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, - { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, - { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, - { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, - { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, - { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 
107131, upload-time = "2025-10-14T04:41:10.467Z" }, - { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, - { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, - { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, - { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, - { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, - { url = 
"https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, - { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, - { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, - { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, - { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, - { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, 
upload-time = "2025-10-14T04:41:23.754Z" }, - { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, - { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, - { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, - { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, - { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, - { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, - { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, - { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, - { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, - { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, - { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, - { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, - { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, - { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, - { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, 
- { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, - { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, - { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, - { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, - { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, - { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, -] - -[[package]] -name = "code-interpreter-custom" -version = "0.1.0" -source = { virtual = "." 
} -dependencies = [ - { name = "aiohttp" }, - { name = "azure-ai-projects" }, - { name = "azure-identity" }, - { name = "dotenv" }, - { name = "openai" }, -] - -[package.metadata] -requires-dist = [ - { name = "aiohttp", specifier = ">=3.13.2" }, - { name = "azure-ai-projects", specifier = "==2.0.0b2" }, - { name = "azure-identity", specifier = ">=1.25.1" }, - { name = "dotenv", specifier = ">=0.9.9" }, - { name = "openai", specifier = ">=2.8.1" }, -] - -[[package]] -name = "colorama" -version = "0.4.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, -] - -[[package]] -name = "cryptography" -version = "46.0.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, - { url = 
"https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, - { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, - { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, - { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, - { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, - { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, - { url = 
"https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, - { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, - { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, - { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, - { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, - { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, - { url = 
"https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, - { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, - { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, - { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, - { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, - { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, - { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, - { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, - { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, - { url = 
"https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, - { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, - { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, - { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, - { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, - { url = 
"https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, - { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, - { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, - { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, - { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, - { url = 
"https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, - { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, - { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, - { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, - { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, - { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, - { url = 
"https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, - { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, -] - -[[package]] -name = "distro" -version = "1.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, -] - -[[package]] -name = "dotenv" -version = "0.9.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "python-dotenv" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" }, -] - -[[package]] -name = "frozenlist" -version = "1.8.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/69/29/948b9aa87e75820a38650af445d2ef2b6b8a6fab1a23b6bb9e4ef0be2d59/frozenlist-1.8.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78f7b9e5d6f2fdb88cdde9440dc147259b62b9d3b019924def9f6478be254ac1", size = 87782, upload-time = "2025-10-06T05:36:06.649Z" }, - { url = "https://files.pythonhosted.org/packages/64/80/4f6e318ee2a7c0750ed724fa33a4bdf1eacdc5a39a7a24e818a773cd91af/frozenlist-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:229bf37d2e4acdaf808fd3f06e854a4a7a3661e871b10dc1f8f1896a3b05f18b", size = 50594, upload-time = "2025-10-06T05:36:07.69Z" }, - { url = "https://files.pythonhosted.org/packages/2b/94/5c8a2b50a496b11dd519f4a24cb5496cf125681dd99e94c604ccdea9419a/frozenlist-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f833670942247a14eafbb675458b4e61c82e002a148f49e68257b79296e865c4", size = 50448, upload-time = "2025-10-06T05:36:08.78Z" }, - { url = "https://files.pythonhosted.org/packages/6a/bd/d91c5e39f490a49df14320f4e8c80161cfcce09f1e2cde1edd16a551abb3/frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:494a5952b1c597ba44e0e78113a7266e656b9794eec897b19ead706bd7074383", size = 242411, upload-time = "2025-10-06T05:36:09.801Z" }, - { url = "https://files.pythonhosted.org/packages/8f/83/f61505a05109ef3293dfb1ff594d13d64a2324ac3482be2cedc2be818256/frozenlist-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96f423a119f4777a4a056b66ce11527366a8bb92f54e541ade21f2374433f6d4", size = 243014, upload-time = "2025-10-06T05:36:11.394Z" }, - { url = 
"https://files.pythonhosted.org/packages/d8/cb/cb6c7b0f7d4023ddda30cf56b8b17494eb3a79e3fda666bf735f63118b35/frozenlist-1.8.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3462dd9475af2025c31cc61be6652dfa25cbfb56cbbf52f4ccfe029f38decaf8", size = 234909, upload-time = "2025-10-06T05:36:12.598Z" }, - { url = "https://files.pythonhosted.org/packages/31/c5/cd7a1f3b8b34af009fb17d4123c5a778b44ae2804e3ad6b86204255f9ec5/frozenlist-1.8.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4c800524c9cd9bac5166cd6f55285957fcfc907db323e193f2afcd4d9abd69b", size = 250049, upload-time = "2025-10-06T05:36:14.065Z" }, - { url = "https://files.pythonhosted.org/packages/c0/01/2f95d3b416c584a1e7f0e1d6d31998c4a795f7544069ee2e0962a4b60740/frozenlist-1.8.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d6a5df73acd3399d893dafc71663ad22534b5aa4f94e8a2fabfe856c3c1b6a52", size = 256485, upload-time = "2025-10-06T05:36:15.39Z" }, - { url = "https://files.pythonhosted.org/packages/ce/03/024bf7720b3abaebcff6d0793d73c154237b85bdf67b7ed55e5e9596dc9a/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:405e8fe955c2280ce66428b3ca55e12b3c4e9c336fb2103a4937e891c69a4a29", size = 237619, upload-time = "2025-10-06T05:36:16.558Z" }, - { url = "https://files.pythonhosted.org/packages/69/fa/f8abdfe7d76b731f5d8bd217827cf6764d4f1d9763407e42717b4bed50a0/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:908bd3f6439f2fef9e85031b59fd4f1297af54415fb60e4254a95f75b3cab3f3", size = 250320, upload-time = "2025-10-06T05:36:17.821Z" }, - { url = "https://files.pythonhosted.org/packages/f5/3c/b051329f718b463b22613e269ad72138cc256c540f78a6de89452803a47d/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:294e487f9ec720bd8ffcebc99d575f7eff3568a08a253d1ee1a0378754b74143", size = 246820, upload-time = 
"2025-10-06T05:36:19.046Z" }, - { url = "https://files.pythonhosted.org/packages/0f/ae/58282e8f98e444b3f4dd42448ff36fa38bef29e40d40f330b22e7108f565/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:74c51543498289c0c43656701be6b077f4b265868fa7f8a8859c197006efb608", size = 250518, upload-time = "2025-10-06T05:36:20.763Z" }, - { url = "https://files.pythonhosted.org/packages/8f/96/007e5944694d66123183845a106547a15944fbbb7154788cbf7272789536/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:776f352e8329135506a1d6bf16ac3f87bc25b28e765949282dcc627af36123aa", size = 239096, upload-time = "2025-10-06T05:36:22.129Z" }, - { url = "https://files.pythonhosted.org/packages/66/bb/852b9d6db2fa40be96f29c0d1205c306288f0684df8fd26ca1951d461a56/frozenlist-1.8.0-cp312-cp312-win32.whl", hash = "sha256:433403ae80709741ce34038da08511d4a77062aa924baf411ef73d1146e74faf", size = 39985, upload-time = "2025-10-06T05:36:23.661Z" }, - { url = "https://files.pythonhosted.org/packages/b8/af/38e51a553dd66eb064cdf193841f16f077585d4d28394c2fa6235cb41765/frozenlist-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:34187385b08f866104f0c0617404c8eb08165ab1272e884abc89c112e9c00746", size = 44591, upload-time = "2025-10-06T05:36:24.958Z" }, - { url = "https://files.pythonhosted.org/packages/a7/06/1dc65480ab147339fecc70797e9c2f69d9cea9cf38934ce08df070fdb9cb/frozenlist-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:fe3c58d2f5db5fbd18c2987cba06d51b0529f52bc3a6cdc33d3f4eab725104bd", size = 40102, upload-time = "2025-10-06T05:36:26.333Z" }, - { url = "https://files.pythonhosted.org/packages/2d/40/0832c31a37d60f60ed79e9dfb5a92e1e2af4f40a16a29abcc7992af9edff/frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a", size = 85717, upload-time = "2025-10-06T05:36:27.341Z" }, - { url = 
"https://files.pythonhosted.org/packages/30/ba/b0b3de23f40bc55a7057bd38434e25c34fa48e17f20ee273bbde5e0650f3/frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7", size = 49651, upload-time = "2025-10-06T05:36:28.855Z" }, - { url = "https://files.pythonhosted.org/packages/0c/ab/6e5080ee374f875296c4243c381bbdef97a9ac39c6e3ce1d5f7d42cb78d6/frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40", size = 49417, upload-time = "2025-10-06T05:36:29.877Z" }, - { url = "https://files.pythonhosted.org/packages/d5/4e/e4691508f9477ce67da2015d8c00acd751e6287739123113a9fca6f1604e/frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027", size = 234391, upload-time = "2025-10-06T05:36:31.301Z" }, - { url = "https://files.pythonhosted.org/packages/40/76/c202df58e3acdf12969a7895fd6f3bc016c642e6726aa63bd3025e0fc71c/frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822", size = 233048, upload-time = "2025-10-06T05:36:32.531Z" }, - { url = "https://files.pythonhosted.org/packages/f9/c0/8746afb90f17b73ca5979c7a3958116e105ff796e718575175319b5bb4ce/frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121", size = 226549, upload-time = "2025-10-06T05:36:33.706Z" }, - { url = "https://files.pythonhosted.org/packages/7e/eb/4c7eefc718ff72f9b6c4893291abaae5fbc0c82226a32dcd8ef4f7a5dbef/frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5", size = 
239833, upload-time = "2025-10-06T05:36:34.947Z" }, - { url = "https://files.pythonhosted.org/packages/c2/4e/e5c02187cf704224f8b21bee886f3d713ca379535f16893233b9d672ea71/frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e", size = 245363, upload-time = "2025-10-06T05:36:36.534Z" }, - { url = "https://files.pythonhosted.org/packages/1f/96/cb85ec608464472e82ad37a17f844889c36100eed57bea094518bf270692/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11", size = 229314, upload-time = "2025-10-06T05:36:38.582Z" }, - { url = "https://files.pythonhosted.org/packages/5d/6f/4ae69c550e4cee66b57887daeebe006fe985917c01d0fff9caab9883f6d0/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1", size = 243365, upload-time = "2025-10-06T05:36:40.152Z" }, - { url = "https://files.pythonhosted.org/packages/7a/58/afd56de246cf11780a40a2c28dc7cbabbf06337cc8ddb1c780a2d97e88d8/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1", size = 237763, upload-time = "2025-10-06T05:36:41.355Z" }, - { url = "https://files.pythonhosted.org/packages/cb/36/cdfaf6ed42e2644740d4a10452d8e97fa1c062e2a8006e4b09f1b5fd7d63/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8", size = 240110, upload-time = "2025-10-06T05:36:42.716Z" }, - { url = "https://files.pythonhosted.org/packages/03/a8/9ea226fbefad669f11b52e864c55f0bd57d3c8d7eb07e9f2e9a0b39502e1/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed", size = 233717, upload-time = "2025-10-06T05:36:44.251Z" }, - { url = 
"https://files.pythonhosted.org/packages/1e/0b/1b5531611e83ba7d13ccc9988967ea1b51186af64c42b7a7af465dcc9568/frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496", size = 39628, upload-time = "2025-10-06T05:36:45.423Z" }, - { url = "https://files.pythonhosted.org/packages/d8/cf/174c91dbc9cc49bc7b7aab74d8b734e974d1faa8f191c74af9b7e80848e6/frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231", size = 43882, upload-time = "2025-10-06T05:36:46.796Z" }, - { url = "https://files.pythonhosted.org/packages/c1/17/502cd212cbfa96eb1388614fe39a3fc9ab87dbbe042b66f97acb57474834/frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62", size = 39676, upload-time = "2025-10-06T05:36:47.8Z" }, - { url = "https://files.pythonhosted.org/packages/d2/5c/3bbfaa920dfab09e76946a5d2833a7cbdf7b9b4a91c714666ac4855b88b4/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94", size = 89235, upload-time = "2025-10-06T05:36:48.78Z" }, - { url = "https://files.pythonhosted.org/packages/d2/d6/f03961ef72166cec1687e84e8925838442b615bd0b8854b54923ce5b7b8a/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c", size = 50742, upload-time = "2025-10-06T05:36:49.837Z" }, - { url = "https://files.pythonhosted.org/packages/1e/bb/a6d12b7ba4c3337667d0e421f7181c82dda448ce4e7ad7ecd249a16fa806/frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52", size = 51725, upload-time = "2025-10-06T05:36:50.851Z" }, - { url = 
"https://files.pythonhosted.org/packages/bc/71/d1fed0ffe2c2ccd70b43714c6cab0f4188f09f8a67a7914a6b46ee30f274/frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51", size = 284533, upload-time = "2025-10-06T05:36:51.898Z" }, - { url = "https://files.pythonhosted.org/packages/c9/1f/fb1685a7b009d89f9bf78a42d94461bc06581f6e718c39344754a5d9bada/frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65", size = 292506, upload-time = "2025-10-06T05:36:53.101Z" }, - { url = "https://files.pythonhosted.org/packages/e6/3b/b991fe1612703f7e0d05c0cf734c1b77aaf7c7d321df4572e8d36e7048c8/frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82", size = 274161, upload-time = "2025-10-06T05:36:54.309Z" }, - { url = "https://files.pythonhosted.org/packages/ca/ec/c5c618767bcdf66e88945ec0157d7f6c4a1322f1473392319b7a2501ded7/frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714", size = 294676, upload-time = "2025-10-06T05:36:55.566Z" }, - { url = "https://files.pythonhosted.org/packages/7c/ce/3934758637d8f8a88d11f0585d6495ef54b2044ed6ec84492a91fa3b27aa/frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d", size = 300638, upload-time = "2025-10-06T05:36:56.758Z" }, - { url = "https://files.pythonhosted.org/packages/fc/4f/a7e4d0d467298f42de4b41cbc7ddaf19d3cfeabaf9ff97c20c6c7ee409f9/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506", size = 283067, upload-time = "2025-10-06T05:36:57.965Z" }, - { url = "https://files.pythonhosted.org/packages/dc/48/c7b163063d55a83772b268e6d1affb960771b0e203b632cfe09522d67ea5/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51", size = 292101, upload-time = "2025-10-06T05:36:59.237Z" }, - { url = "https://files.pythonhosted.org/packages/9f/d0/2366d3c4ecdc2fd391e0afa6e11500bfba0ea772764d631bbf82f0136c9d/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e", size = 289901, upload-time = "2025-10-06T05:37:00.811Z" }, - { url = "https://files.pythonhosted.org/packages/b8/94/daff920e82c1b70e3618a2ac39fbc01ae3e2ff6124e80739ce5d71c9b920/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0", size = 289395, upload-time = "2025-10-06T05:37:02.115Z" }, - { url = "https://files.pythonhosted.org/packages/e3/20/bba307ab4235a09fdcd3cc5508dbabd17c4634a1af4b96e0f69bfe551ebd/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41", size = 283659, upload-time = "2025-10-06T05:37:03.711Z" }, - { url = "https://files.pythonhosted.org/packages/fd/00/04ca1c3a7a124b6de4f8a9a17cc2fcad138b4608e7a3fc5877804b8715d7/frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b", size = 43492, upload-time = "2025-10-06T05:37:04.915Z" }, - { url = "https://files.pythonhosted.org/packages/59/5e/c69f733a86a94ab10f68e496dc6b7e8bc078ebb415281d5698313e3af3a1/frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888", size = 48034, upload-time = "2025-10-06T05:37:06.343Z" 
}, - { url = "https://files.pythonhosted.org/packages/16/6c/be9d79775d8abe79b05fa6d23da99ad6e7763a1d080fbae7290b286093fd/frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042", size = 41749, upload-time = "2025-10-06T05:37:07.431Z" }, - { url = "https://files.pythonhosted.org/packages/f1/c8/85da824b7e7b9b6e7f7705b2ecaf9591ba6f79c1177f324c2735e41d36a2/frozenlist-1.8.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cee686f1f4cadeb2136007ddedd0aaf928ab95216e7691c63e50a8ec066336d0", size = 86127, upload-time = "2025-10-06T05:37:08.438Z" }, - { url = "https://files.pythonhosted.org/packages/8e/e8/a1185e236ec66c20afd72399522f142c3724c785789255202d27ae992818/frozenlist-1.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:119fb2a1bd47307e899c2fac7f28e85b9a543864df47aa7ec9d3c1b4545f096f", size = 49698, upload-time = "2025-10-06T05:37:09.48Z" }, - { url = "https://files.pythonhosted.org/packages/a1/93/72b1736d68f03fda5fdf0f2180fb6caaae3894f1b854d006ac61ecc727ee/frozenlist-1.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4970ece02dbc8c3a92fcc5228e36a3e933a01a999f7094ff7c23fbd2beeaa67c", size = 49749, upload-time = "2025-10-06T05:37:10.569Z" }, - { url = "https://files.pythonhosted.org/packages/a7/b2/fabede9fafd976b991e9f1b9c8c873ed86f202889b864756f240ce6dd855/frozenlist-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:cba69cb73723c3f329622e34bdbf5ce1f80c21c290ff04256cff1cd3c2036ed2", size = 231298, upload-time = "2025-10-06T05:37:11.993Z" }, - { url = "https://files.pythonhosted.org/packages/3a/3b/d9b1e0b0eed36e70477ffb8360c49c85c8ca8ef9700a4e6711f39a6e8b45/frozenlist-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:778a11b15673f6f1df23d9586f83c4846c471a8af693a22e066508b77d201ec8", size = 232015, upload-time = "2025-10-06T05:37:13.194Z" }, - { url = 
"https://files.pythonhosted.org/packages/dc/94/be719d2766c1138148564a3960fc2c06eb688da592bdc25adcf856101be7/frozenlist-1.8.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0325024fe97f94c41c08872db482cf8ac4800d80e79222c6b0b7b162d5b13686", size = 225038, upload-time = "2025-10-06T05:37:14.577Z" }, - { url = "https://files.pythonhosted.org/packages/e4/09/6712b6c5465f083f52f50cf74167b92d4ea2f50e46a9eea0523d658454ae/frozenlist-1.8.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:97260ff46b207a82a7567b581ab4190bd4dfa09f4db8a8b49d1a958f6aa4940e", size = 240130, upload-time = "2025-10-06T05:37:15.781Z" }, - { url = "https://files.pythonhosted.org/packages/f8/d4/cd065cdcf21550b54f3ce6a22e143ac9e4836ca42a0de1022da8498eac89/frozenlist-1.8.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:54b2077180eb7f83dd52c40b2750d0a9f175e06a42e3213ce047219de902717a", size = 242845, upload-time = "2025-10-06T05:37:17.037Z" }, - { url = "https://files.pythonhosted.org/packages/62/c3/f57a5c8c70cd1ead3d5d5f776f89d33110b1addae0ab010ad774d9a44fb9/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2f05983daecab868a31e1da44462873306d3cbfd76d1f0b5b69c473d21dbb128", size = 229131, upload-time = "2025-10-06T05:37:18.221Z" }, - { url = "https://files.pythonhosted.org/packages/6c/52/232476fe9cb64f0742f3fde2b7d26c1dac18b6d62071c74d4ded55e0ef94/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:33f48f51a446114bc5d251fb2954ab0164d5be02ad3382abcbfe07e2531d650f", size = 240542, upload-time = "2025-10-06T05:37:19.771Z" }, - { url = "https://files.pythonhosted.org/packages/5f/85/07bf3f5d0fb5414aee5f47d33c6f5c77bfe49aac680bfece33d4fdf6a246/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:154e55ec0655291b5dd1b8731c637ecdb50975a2ae70c606d100750a540082f7", size = 237308, upload-time = 
"2025-10-06T05:37:20.969Z" }, - { url = "https://files.pythonhosted.org/packages/11/99/ae3a33d5befd41ac0ca2cc7fd3aa707c9c324de2e89db0e0f45db9a64c26/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4314debad13beb564b708b4a496020e5306c7333fa9a3ab90374169a20ffab30", size = 238210, upload-time = "2025-10-06T05:37:22.252Z" }, - { url = "https://files.pythonhosted.org/packages/b2/60/b1d2da22f4970e7a155f0adde9b1435712ece01b3cd45ba63702aea33938/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:073f8bf8becba60aa931eb3bc420b217bb7d5b8f4750e6f8b3be7f3da85d38b7", size = 231972, upload-time = "2025-10-06T05:37:23.5Z" }, - { url = "https://files.pythonhosted.org/packages/3f/ab/945b2f32de889993b9c9133216c068b7fcf257d8595a0ac420ac8677cab0/frozenlist-1.8.0-cp314-cp314-win32.whl", hash = "sha256:bac9c42ba2ac65ddc115d930c78d24ab8d4f465fd3fc473cdedfccadb9429806", size = 40536, upload-time = "2025-10-06T05:37:25.581Z" }, - { url = "https://files.pythonhosted.org/packages/59/ad/9caa9b9c836d9ad6f067157a531ac48b7d36499f5036d4141ce78c230b1b/frozenlist-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:3e0761f4d1a44f1d1a47996511752cf3dcec5bbdd9cc2b4fe595caf97754b7a0", size = 44330, upload-time = "2025-10-06T05:37:26.928Z" }, - { url = "https://files.pythonhosted.org/packages/82/13/e6950121764f2676f43534c555249f57030150260aee9dcf7d64efda11dd/frozenlist-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:d1eaff1d00c7751b7c6662e9c5ba6eb2c17a2306ba5e2a37f24ddf3cc953402b", size = 40627, upload-time = "2025-10-06T05:37:28.075Z" }, - { url = "https://files.pythonhosted.org/packages/c0/c7/43200656ecc4e02d3f8bc248df68256cd9572b3f0017f0a0c4e93440ae23/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d3bb933317c52d7ea5004a1c442eef86f426886fba134ef8cf4226ea6ee1821d", size = 89238, upload-time = "2025-10-06T05:37:29.373Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/29/55c5f0689b9c0fb765055629f472c0de484dcaf0acee2f7707266ae3583c/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8009897cdef112072f93a0efdce29cd819e717fd2f649ee3016efd3cd885a7ed", size = 50738, upload-time = "2025-10-06T05:37:30.792Z" }, - { url = "https://files.pythonhosted.org/packages/ba/7d/b7282a445956506fa11da8c2db7d276adcbf2b17d8bb8407a47685263f90/frozenlist-1.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2c5dcbbc55383e5883246d11fd179782a9d07a986c40f49abe89ddf865913930", size = 51739, upload-time = "2025-10-06T05:37:32.127Z" }, - { url = "https://files.pythonhosted.org/packages/62/1c/3d8622e60d0b767a5510d1d3cf21065b9db874696a51ea6d7a43180a259c/frozenlist-1.8.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:39ecbc32f1390387d2aa4f5a995e465e9e2f79ba3adcac92d68e3e0afae6657c", size = 284186, upload-time = "2025-10-06T05:37:33.21Z" }, - { url = "https://files.pythonhosted.org/packages/2d/14/aa36d5f85a89679a85a1d44cd7a6657e0b1c75f61e7cad987b203d2daca8/frozenlist-1.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92db2bf818d5cc8d9c1f1fc56b897662e24ea5adb36ad1f1d82875bd64e03c24", size = 292196, upload-time = "2025-10-06T05:37:36.107Z" }, - { url = "https://files.pythonhosted.org/packages/05/23/6bde59eb55abd407d34f77d39a5126fb7b4f109a3f611d3929f14b700c66/frozenlist-1.8.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dc43a022e555de94c3b68a4ef0b11c4f747d12c024a520c7101709a2144fb37", size = 273830, upload-time = "2025-10-06T05:37:37.663Z" }, - { url = "https://files.pythonhosted.org/packages/d2/3f/22cff331bfad7a8afa616289000ba793347fcd7bc275f3b28ecea2a27909/frozenlist-1.8.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb89a7f2de3602cfed448095bab3f178399646ab7c61454315089787df07733a", 
size = 294289, upload-time = "2025-10-06T05:37:39.261Z" }, - { url = "https://files.pythonhosted.org/packages/a4/89/5b057c799de4838b6c69aa82b79705f2027615e01be996d2486a69ca99c4/frozenlist-1.8.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:33139dc858c580ea50e7e60a1b0ea003efa1fd42e6ec7fdbad78fff65fad2fd2", size = 300318, upload-time = "2025-10-06T05:37:43.213Z" }, - { url = "https://files.pythonhosted.org/packages/30/de/2c22ab3eb2a8af6d69dc799e48455813bab3690c760de58e1bf43b36da3e/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:168c0969a329b416119507ba30b9ea13688fafffac1b7822802537569a1cb0ef", size = 282814, upload-time = "2025-10-06T05:37:45.337Z" }, - { url = "https://files.pythonhosted.org/packages/59/f7/970141a6a8dbd7f556d94977858cfb36fa9b66e0892c6dd780d2219d8cd8/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:28bd570e8e189d7f7b001966435f9dac6718324b5be2990ac496cf1ea9ddb7fe", size = 291762, upload-time = "2025-10-06T05:37:46.657Z" }, - { url = "https://files.pythonhosted.org/packages/c1/15/ca1adae83a719f82df9116d66f5bb28bb95557b3951903d39135620ef157/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b2a095d45c5d46e5e79ba1e5b9cb787f541a8dee0433836cea4b96a2c439dcd8", size = 289470, upload-time = "2025-10-06T05:37:47.946Z" }, - { url = "https://files.pythonhosted.org/packages/ac/83/dca6dc53bf657d371fbc88ddeb21b79891e747189c5de990b9dfff2ccba1/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:eab8145831a0d56ec9c4139b6c3e594c7a83c2c8be25d5bcf2d86136a532287a", size = 289042, upload-time = "2025-10-06T05:37:49.499Z" }, - { url = "https://files.pythonhosted.org/packages/96/52/abddd34ca99be142f354398700536c5bd315880ed0a213812bc491cff5e4/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:974b28cf63cc99dfb2188d8d222bc6843656188164848c4f679e63dae4b0708e", size = 283148, upload-time = "2025-10-06T05:37:50.745Z" }, - 
{ url = "https://files.pythonhosted.org/packages/af/d3/76bd4ed4317e7119c2b7f57c3f6934aba26d277acc6309f873341640e21f/frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df", size = 44676, upload-time = "2025-10-06T05:37:52.222Z" }, - { url = "https://files.pythonhosted.org/packages/89/76/c615883b7b521ead2944bb3480398cbb07e12b7b4e4d073d3752eb721558/frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd", size = 49451, upload-time = "2025-10-06T05:37:53.425Z" }, - { url = "https://files.pythonhosted.org/packages/e0/a3/5982da14e113d07b325230f95060e2169f5311b1017ea8af2a29b374c289/frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79", size = 42507, upload-time = "2025-10-06T05:37:54.513Z" }, - { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, -] - -[[package]] -name = "h11" -version = "0.16.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, -] - -[[package]] -name = "httpcore" -version = "1.0.9" -source = { registry = "https://pypi.org/simple" } 
-dependencies = [ - { name = "certifi" }, - { name = "h11" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, -] - -[[package]] -name = "httpx" -version = "0.28.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "certifi" }, - { name = "httpcore" }, - { name = "idna" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, -] - -[[package]] -name = "idna" -version = "3.11" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, -] - -[[package]] -name = "isodate" -version = "0.7.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/4d/e940025e2ce31a8ce1202635910747e5a87cc3a6a6bb2d00973375014749/isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6", size = 29705, upload-time = "2024-10-08T23:04:11.5Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320, upload-time = "2024-10-08T23:04:09.501Z" }, -] - -[[package]] -name = "jiter" -version = "0.12.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" }, - { url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" }, - { 
url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" }, - { url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" }, - { url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" }, - { url = "https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" }, - { url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" }, - { url = "https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" }, - { url = 
"https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" }, - { url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" }, - { url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" }, - { url = "https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" }, - { url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" }, - { url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" }, - { url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" }, - { url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" }, - { url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" }, - { url = "https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" }, - { url = "https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" }, - { url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" }, - { url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" }, - { url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" }, - { url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" }, - { url = "https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" }, - { url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" }, - { url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" }, - { url = 
"https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" }, - { url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" }, - { url = "https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" }, - { url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" }, - { url = "https://files.pythonhosted.org/packages/a8/99/45c9f0dbe4a1416b2b9a8a6d1236459540f43d7fb8883cff769a8db0612d/jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525", size = 312478, upload-time = "2025-11-09T20:48:10.898Z" }, - { url = "https://files.pythonhosted.org/packages/4c/a7/54ae75613ba9e0f55fcb0bc5d1f807823b5167cc944e9333ff322e9f07dd/jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49", size = 318706, upload-time = "2025-11-09T20:48:12.266Z" }, - { url = 
"https://files.pythonhosted.org/packages/59/31/2aa241ad2c10774baf6c37f8b8e1f39c07db358f1329f4eb40eba179c2a2/jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1", size = 351894, upload-time = "2025-11-09T20:48:13.673Z" }, - { url = "https://files.pythonhosted.org/packages/54/4f/0f2759522719133a9042781b18cc94e335b6d290f5e2d3e6899d6af933e3/jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e", size = 365714, upload-time = "2025-11-09T20:48:15.083Z" }, - { url = "https://files.pythonhosted.org/packages/dc/6f/806b895f476582c62a2f52c453151edd8a0fde5411b0497baaa41018e878/jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e", size = 478989, upload-time = "2025-11-09T20:48:16.706Z" }, - { url = "https://files.pythonhosted.org/packages/86/6c/012d894dc6e1033acd8db2b8346add33e413ec1c7c002598915278a37f79/jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff", size = 378615, upload-time = "2025-11-09T20:48:18.614Z" }, - { url = "https://files.pythonhosted.org/packages/87/30/d718d599f6700163e28e2c71c0bbaf6dace692e7df2592fd793ac9276717/jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a", size = 364745, upload-time = "2025-11-09T20:48:20.117Z" }, - { url = "https://files.pythonhosted.org/packages/8f/85/315b45ce4b6ddc7d7fceca24068543b02bdc8782942f4ee49d652e2cc89f/jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a", size = 386502, upload-time = "2025-11-09T20:48:21.543Z" }, - { url = 
"https://files.pythonhosted.org/packages/74/0b/ce0434fb40c5b24b368fe81b17074d2840748b4952256bab451b72290a49/jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67", size = 519845, upload-time = "2025-11-09T20:48:22.964Z" }, - { url = "https://files.pythonhosted.org/packages/e8/a3/7a7a4488ba052767846b9c916d208b3ed114e3eb670ee984e4c565b9cf0d/jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b", size = 510701, upload-time = "2025-11-09T20:48:24.483Z" }, - { url = "https://files.pythonhosted.org/packages/c3/16/052ffbf9d0467b70af24e30f91e0579e13ded0c17bb4a8eb2aed3cb60131/jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42", size = 205029, upload-time = "2025-11-09T20:48:25.749Z" }, - { url = "https://files.pythonhosted.org/packages/e4/18/3cf1f3f0ccc789f76b9a754bdb7a6977e5d1d671ee97a9e14f7eb728d80e/jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf", size = 204960, upload-time = "2025-11-09T20:48:27.415Z" }, - { url = "https://files.pythonhosted.org/packages/02/68/736821e52ecfdeeb0f024b8ab01b5a229f6b9293bbdb444c27efade50b0f/jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451", size = 185529, upload-time = "2025-11-09T20:48:29.125Z" }, - { url = "https://files.pythonhosted.org/packages/30/61/12ed8ee7a643cce29ac97c2281f9ce3956eb76b037e88d290f4ed0d41480/jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7", size = 318974, upload-time = "2025-11-09T20:48:30.87Z" }, - { url = 
"https://files.pythonhosted.org/packages/2d/c6/f3041ede6d0ed5e0e79ff0de4c8f14f401bbf196f2ef3971cdbe5fd08d1d/jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684", size = 345932, upload-time = "2025-11-09T20:48:32.658Z" }, - { url = "https://files.pythonhosted.org/packages/d5/5d/4d94835889edd01ad0e2dbfc05f7bdfaed46292e7b504a6ac7839aa00edb/jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c", size = 367243, upload-time = "2025-11-09T20:48:34.093Z" }, - { url = "https://files.pythonhosted.org/packages/fd/76/0051b0ac2816253a99d27baf3dda198663aff882fa6ea7deeb94046da24e/jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d", size = 479315, upload-time = "2025-11-09T20:48:35.507Z" }, - { url = "https://files.pythonhosted.org/packages/70/ae/83f793acd68e5cb24e483f44f482a1a15601848b9b6f199dacb970098f77/jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993", size = 380714, upload-time = "2025-11-09T20:48:40.014Z" }, - { url = "https://files.pythonhosted.org/packages/b1/5e/4808a88338ad2c228b1126b93fcd8ba145e919e886fe910d578230dabe3b/jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f", size = 365168, upload-time = "2025-11-09T20:48:41.462Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d4/04619a9e8095b42aef436b5aeb4c0282b4ff1b27d1db1508df9f5dc82750/jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783", size = 387893, upload-time = "2025-11-09T20:48:42.921Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/ea/d3c7e62e4546fdc39197fa4a4315a563a89b95b6d54c0d25373842a59cbe/jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b", size = 520828, upload-time = "2025-11-09T20:48:44.278Z" }, - { url = "https://files.pythonhosted.org/packages/cc/0b/c6d3562a03fd767e31cb119d9041ea7958c3c80cb3d753eafb19b3b18349/jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6", size = 511009, upload-time = "2025-11-09T20:48:45.726Z" }, - { url = "https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110, upload-time = "2025-11-09T20:48:47.033Z" }, - { url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223, upload-time = "2025-11-09T20:48:49.076Z" }, - { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" }, - { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" }, - { url = 
"https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" }, - { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" }, - { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" }, -] - -[[package]] -name = "msal" -version = "1.34.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "pyjwt", extra = ["crypto"] }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/cf/0e/c857c46d653e104019a84f22d4494f2119b4fe9f896c92b4b864b3b045cc/msal-1.34.0.tar.gz", hash = "sha256:76ba83b716ea5a6d75b0279c0ac353a0e05b820ca1f6682c0eb7f45190c43c2f", size = 153961, upload-time = "2025-09-22T23:05:48.989Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/dc/18d48843499e278538890dc709e9ee3dea8375f8be8e82682851df1b48b5/msal-1.34.0-py3-none-any.whl", hash = "sha256:f669b1644e4950115da7a176441b0e13ec2975c29528d8b9e81316023676d6e1", size = 116987, upload-time = "2025-09-22T23:05:47.294Z" }, -] - -[[package]] -name = "msal-extensions" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "msal" }, -] 
-sdist = { url = "https://files.pythonhosted.org/packages/01/99/5d239b6156eddf761a636bded1118414d161bd6b7b37a9335549ed159396/msal_extensions-1.3.1.tar.gz", hash = "sha256:c5b0fd10f65ef62b5f1d62f4251d51cbcaf003fcedae8c91b040a488614be1a4", size = 23315, upload-time = "2025-03-14T23:51:03.902Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" }, -] - -[[package]] -name = "multidict" -version = "6.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, - { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, - { url = "https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, - { url = 
"https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, - { url = "https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, - { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, - { url = "https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, - { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, - { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, - { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, - { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, - { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, - { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = 
"2025-10-06T14:49:46.021Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, - { url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, - { url = "https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, - { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, - { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, - { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, - { url = 
"https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, - { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, - { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, - { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, - { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, - { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, - { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, - { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, - { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, - { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = 
"2025-10-06T14:50:15.639Z" }, - { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, - { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, - { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, - { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, - { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, - { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, - { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, - { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, - { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, - { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash 
= "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, - { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, - { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, - { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, - { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, - { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, - { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = 
"2025-10-06T14:50:45.648Z" }, - { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, - { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, - { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, - { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, - { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, - { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, - { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, - { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, - { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, - { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, - { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, - { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, - { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, - { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, - { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, - { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = 
"2025-10-06T14:51:16.072Z" }, - { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, - { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, - { url = "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, - { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, - { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, - { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, - { url = 
"https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, - { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, - { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, - { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, - { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, - { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, - { url = "https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, - { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, - { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, - { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = 
"2025-10-06T14:51:50.355Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, - { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, - { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, -] - -[[package]] -name = "openai" -version = "2.8.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "httpx" }, - { name = "jiter" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", 
hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, -] - -[[package]] -name = "propcache" -version = "0.4.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/0f/f17b1b2b221d5ca28b4b876e8bb046ac40466513960646bda8e1853cdfa2/propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2", size = 80061, upload-time = "2025-10-08T19:46:46.075Z" }, - { url = "https://files.pythonhosted.org/packages/76/47/8ccf75935f51448ba9a16a71b783eb7ef6b9ee60f5d14c7f8a8a79fbeed7/propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403", size = 46037, upload-time = "2025-10-08T19:46:47.23Z" }, - { url = "https://files.pythonhosted.org/packages/0a/b6/5c9a0e42df4d00bfb4a3cbbe5cf9f54260300c88a0e9af1f47ca5ce17ac0/propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207", size = 47324, upload-time = "2025-10-08T19:46:48.384Z" }, - { url = "https://files.pythonhosted.org/packages/9e/d3/6c7ee328b39a81ee877c962469f1e795f9db87f925251efeb0545e0020d0/propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72", size = 225505, upload-time = "2025-10-08T19:46:50.055Z" }, - { url = 
"https://files.pythonhosted.org/packages/01/5d/1c53f4563490b1d06a684742cc6076ef944bc6457df6051b7d1a877c057b/propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367", size = 230242, upload-time = "2025-10-08T19:46:51.815Z" }, - { url = "https://files.pythonhosted.org/packages/20/e1/ce4620633b0e2422207c3cb774a0ee61cac13abc6217763a7b9e2e3f4a12/propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4", size = 238474, upload-time = "2025-10-08T19:46:53.208Z" }, - { url = "https://files.pythonhosted.org/packages/46/4b/3aae6835b8e5f44ea6a68348ad90f78134047b503765087be2f9912140ea/propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf", size = 221575, upload-time = "2025-10-08T19:46:54.511Z" }, - { url = "https://files.pythonhosted.org/packages/6e/a5/8a5e8678bcc9d3a1a15b9a29165640d64762d424a16af543f00629c87338/propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3", size = 216736, upload-time = "2025-10-08T19:46:56.212Z" }, - { url = "https://files.pythonhosted.org/packages/f1/63/b7b215eddeac83ca1c6b934f89d09a625aa9ee4ba158338854c87210cc36/propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778", size = 213019, upload-time = "2025-10-08T19:46:57.595Z" }, - { url = "https://files.pythonhosted.org/packages/57/74/f580099a58c8af587cac7ba19ee7cb418506342fbbe2d4a4401661cca886/propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6", size = 220376, upload-time = 
"2025-10-08T19:46:59.067Z" }, - { url = "https://files.pythonhosted.org/packages/c4/ee/542f1313aff7eaf19c2bb758c5d0560d2683dac001a1c96d0774af799843/propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9", size = 226988, upload-time = "2025-10-08T19:47:00.544Z" }, - { url = "https://files.pythonhosted.org/packages/8f/18/9c6b015dd9c6930f6ce2229e1f02fb35298b847f2087ea2b436a5bfa7287/propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75", size = 215615, upload-time = "2025-10-08T19:47:01.968Z" }, - { url = "https://files.pythonhosted.org/packages/80/9e/e7b85720b98c45a45e1fca6a177024934dc9bc5f4d5dd04207f216fc33ed/propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8", size = 38066, upload-time = "2025-10-08T19:47:03.503Z" }, - { url = "https://files.pythonhosted.org/packages/54/09/d19cff2a5aaac632ec8fc03737b223597b1e347416934c1b3a7df079784c/propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db", size = 41655, upload-time = "2025-10-08T19:47:04.973Z" }, - { url = "https://files.pythonhosted.org/packages/68/ab/6b5c191bb5de08036a8c697b265d4ca76148efb10fa162f14af14fb5f076/propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1", size = 37789, upload-time = "2025-10-08T19:47:06.077Z" }, - { url = "https://files.pythonhosted.org/packages/bf/df/6d9c1b6ac12b003837dde8a10231a7344512186e87b36e855bef32241942/propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf", size = 77750, upload-time = "2025-10-08T19:47:07.648Z" }, - { url = 
"https://files.pythonhosted.org/packages/8b/e8/677a0025e8a2acf07d3418a2e7ba529c9c33caf09d3c1f25513023c1db56/propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311", size = 44780, upload-time = "2025-10-08T19:47:08.851Z" }, - { url = "https://files.pythonhosted.org/packages/89/a4/92380f7ca60f99ebae761936bc48a72a639e8a47b29050615eef757cb2a7/propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74", size = 46308, upload-time = "2025-10-08T19:47:09.982Z" }, - { url = "https://files.pythonhosted.org/packages/2d/48/c5ac64dee5262044348d1d78a5f85dd1a57464a60d30daee946699963eb3/propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe", size = 208182, upload-time = "2025-10-08T19:47:11.319Z" }, - { url = "https://files.pythonhosted.org/packages/c6/0c/cd762dd011a9287389a6a3eb43aa30207bde253610cca06824aeabfe9653/propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af", size = 211215, upload-time = "2025-10-08T19:47:13.146Z" }, - { url = "https://files.pythonhosted.org/packages/30/3e/49861e90233ba36890ae0ca4c660e95df565b2cd15d4a68556ab5865974e/propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c", size = 218112, upload-time = "2025-10-08T19:47:14.913Z" }, - { url = "https://files.pythonhosted.org/packages/f1/8b/544bc867e24e1bd48f3118cecd3b05c694e160a168478fa28770f22fd094/propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f", size = 
204442, upload-time = "2025-10-08T19:47:16.277Z" }, - { url = "https://files.pythonhosted.org/packages/50/a6/4282772fd016a76d3e5c0df58380a5ea64900afd836cec2c2f662d1b9bb3/propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1", size = 199398, upload-time = "2025-10-08T19:47:17.962Z" }, - { url = "https://files.pythonhosted.org/packages/3e/ec/d8a7cd406ee1ddb705db2139f8a10a8a427100347bd698e7014351c7af09/propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24", size = 196920, upload-time = "2025-10-08T19:47:19.355Z" }, - { url = "https://files.pythonhosted.org/packages/f6/6c/f38ab64af3764f431e359f8baf9e0a21013e24329e8b85d2da32e8ed07ca/propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa", size = 203748, upload-time = "2025-10-08T19:47:21.338Z" }, - { url = "https://files.pythonhosted.org/packages/d6/e3/fa846bd70f6534d647886621388f0a265254d30e3ce47e5c8e6e27dbf153/propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61", size = 205877, upload-time = "2025-10-08T19:47:23.059Z" }, - { url = "https://files.pythonhosted.org/packages/e2/39/8163fc6f3133fea7b5f2827e8eba2029a0277ab2c5beee6c1db7b10fc23d/propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66", size = 199437, upload-time = "2025-10-08T19:47:24.445Z" }, - { url = "https://files.pythonhosted.org/packages/93/89/caa9089970ca49c7c01662bd0eeedfe85494e863e8043565aeb6472ce8fe/propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81", size = 37586, upload-time = "2025-10-08T19:47:25.736Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/ab/f76ec3c3627c883215b5c8080debb4394ef5a7a29be811f786415fc1e6fd/propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e", size = 40790, upload-time = "2025-10-08T19:47:26.847Z" }, - { url = "https://files.pythonhosted.org/packages/59/1b/e71ae98235f8e2ba5004d8cb19765a74877abf189bc53fc0c80d799e56c3/propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1", size = 37158, upload-time = "2025-10-08T19:47:27.961Z" }, - { url = "https://files.pythonhosted.org/packages/83/ce/a31bbdfc24ee0dcbba458c8175ed26089cf109a55bbe7b7640ed2470cfe9/propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b", size = 81451, upload-time = "2025-10-08T19:47:29.445Z" }, - { url = "https://files.pythonhosted.org/packages/25/9c/442a45a470a68456e710d96cacd3573ef26a1d0a60067e6a7d5e655621ed/propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566", size = 46374, upload-time = "2025-10-08T19:47:30.579Z" }, - { url = "https://files.pythonhosted.org/packages/f4/bf/b1d5e21dbc3b2e889ea4327044fb16312a736d97640fb8b6aa3f9c7b3b65/propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835", size = 48396, upload-time = "2025-10-08T19:47:31.79Z" }, - { url = "https://files.pythonhosted.org/packages/f4/04/5b4c54a103d480e978d3c8a76073502b18db0c4bc17ab91b3cb5092ad949/propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e", size = 275950, upload-time = "2025-10-08T19:47:33.481Z" }, - { url = 
"https://files.pythonhosted.org/packages/b4/c1/86f846827fb969c4b78b0af79bba1d1ea2156492e1b83dea8b8a6ae27395/propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859", size = 273856, upload-time = "2025-10-08T19:47:34.906Z" }, - { url = "https://files.pythonhosted.org/packages/36/1d/fc272a63c8d3bbad6878c336c7a7dea15e8f2d23a544bda43205dfa83ada/propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b", size = 280420, upload-time = "2025-10-08T19:47:36.338Z" }, - { url = "https://files.pythonhosted.org/packages/07/0c/01f2219d39f7e53d52e5173bcb09c976609ba30209912a0680adfb8c593a/propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0", size = 263254, upload-time = "2025-10-08T19:47:37.692Z" }, - { url = "https://files.pythonhosted.org/packages/2d/18/cd28081658ce597898f0c4d174d4d0f3c5b6d4dc27ffafeef835c95eb359/propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af", size = 261205, upload-time = "2025-10-08T19:47:39.659Z" }, - { url = "https://files.pythonhosted.org/packages/7a/71/1f9e22eb8b8316701c2a19fa1f388c8a3185082607da8e406a803c9b954e/propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393", size = 247873, upload-time = "2025-10-08T19:47:41.084Z" }, - { url = "https://files.pythonhosted.org/packages/4a/65/3d4b61f36af2b4eddba9def857959f1016a51066b4f1ce348e0cf7881f58/propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874", size = 262739, upload-time = 
"2025-10-08T19:47:42.51Z" }, - { url = "https://files.pythonhosted.org/packages/2a/42/26746ab087faa77c1c68079b228810436ccd9a5ce9ac85e2b7307195fd06/propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7", size = 263514, upload-time = "2025-10-08T19:47:43.927Z" }, - { url = "https://files.pythonhosted.org/packages/94/13/630690fe201f5502d2403dd3cfd451ed8858fe3c738ee88d095ad2ff407b/propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1", size = 257781, upload-time = "2025-10-08T19:47:45.448Z" }, - { url = "https://files.pythonhosted.org/packages/92/f7/1d4ec5841505f423469efbfc381d64b7b467438cd5a4bbcbb063f3b73d27/propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717", size = 41396, upload-time = "2025-10-08T19:47:47.202Z" }, - { url = "https://files.pythonhosted.org/packages/48/f0/615c30622316496d2cbbc29f5985f7777d3ada70f23370608c1d3e081c1f/propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37", size = 44897, upload-time = "2025-10-08T19:47:48.336Z" }, - { url = "https://files.pythonhosted.org/packages/fd/ca/6002e46eccbe0e33dcd4069ef32f7f1c9e243736e07adca37ae8c4830ec3/propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a", size = 39789, upload-time = "2025-10-08T19:47:49.876Z" }, - { url = "https://files.pythonhosted.org/packages/8e/5c/bca52d654a896f831b8256683457ceddd490ec18d9ec50e97dfd8fc726a8/propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12", size = 78152, upload-time = "2025-10-08T19:47:51.051Z" }, - { url = 
"https://files.pythonhosted.org/packages/65/9b/03b04e7d82a5f54fb16113d839f5ea1ede58a61e90edf515f6577c66fa8f/propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c", size = 44869, upload-time = "2025-10-08T19:47:52.594Z" }, - { url = "https://files.pythonhosted.org/packages/b2/fa/89a8ef0468d5833a23fff277b143d0573897cf75bd56670a6d28126c7d68/propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded", size = 46596, upload-time = "2025-10-08T19:47:54.073Z" }, - { url = "https://files.pythonhosted.org/packages/86/bd/47816020d337f4a746edc42fe8d53669965138f39ee117414c7d7a340cfe/propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641", size = 206981, upload-time = "2025-10-08T19:47:55.715Z" }, - { url = "https://files.pythonhosted.org/packages/df/f6/c5fa1357cc9748510ee55f37173eb31bfde6d94e98ccd9e6f033f2fc06e1/propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4", size = 211490, upload-time = "2025-10-08T19:47:57.499Z" }, - { url = "https://files.pythonhosted.org/packages/80/1e/e5889652a7c4a3846683401a48f0f2e5083ce0ec1a8a5221d8058fbd1adf/propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44", size = 215371, upload-time = "2025-10-08T19:47:59.317Z" }, - { url = "https://files.pythonhosted.org/packages/b2/f2/889ad4b2408f72fe1a4f6a19491177b30ea7bf1a0fd5f17050ca08cfc882/propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d", size = 
201424, upload-time = "2025-10-08T19:48:00.67Z" }, - { url = "https://files.pythonhosted.org/packages/27/73/033d63069b57b0812c8bd19f311faebeceb6ba31b8f32b73432d12a0b826/propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b", size = 197566, upload-time = "2025-10-08T19:48:02.604Z" }, - { url = "https://files.pythonhosted.org/packages/dc/89/ce24f3dc182630b4e07aa6d15f0ff4b14ed4b9955fae95a0b54c58d66c05/propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e", size = 193130, upload-time = "2025-10-08T19:48:04.499Z" }, - { url = "https://files.pythonhosted.org/packages/a9/24/ef0d5fd1a811fb5c609278d0209c9f10c35f20581fcc16f818da959fc5b4/propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f", size = 202625, upload-time = "2025-10-08T19:48:06.213Z" }, - { url = "https://files.pythonhosted.org/packages/f5/02/98ec20ff5546f68d673df2f7a69e8c0d076b5abd05ca882dc7ee3a83653d/propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49", size = 204209, upload-time = "2025-10-08T19:48:08.432Z" }, - { url = "https://files.pythonhosted.org/packages/a0/87/492694f76759b15f0467a2a93ab68d32859672b646aa8a04ce4864e7932d/propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144", size = 197797, upload-time = "2025-10-08T19:48:09.968Z" }, - { url = "https://files.pythonhosted.org/packages/ee/36/66367de3575db1d2d3f3d177432bd14ee577a39d3f5d1b3d5df8afe3b6e2/propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f", size = 38140, upload-time = "2025-10-08T19:48:11.232Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/2a/a758b47de253636e1b8aef181c0b4f4f204bf0dd964914fb2af90a95b49b/propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153", size = 41257, upload-time = "2025-10-08T19:48:12.707Z" }, - { url = "https://files.pythonhosted.org/packages/34/5e/63bd5896c3fec12edcbd6f12508d4890d23c265df28c74b175e1ef9f4f3b/propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992", size = 38097, upload-time = "2025-10-08T19:48:13.923Z" }, - { url = "https://files.pythonhosted.org/packages/99/85/9ff785d787ccf9bbb3f3106f79884a130951436f58392000231b4c737c80/propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f", size = 81455, upload-time = "2025-10-08T19:48:15.16Z" }, - { url = "https://files.pythonhosted.org/packages/90/85/2431c10c8e7ddb1445c1f7c4b54d886e8ad20e3c6307e7218f05922cad67/propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393", size = 46372, upload-time = "2025-10-08T19:48:16.424Z" }, - { url = "https://files.pythonhosted.org/packages/01/20/b0972d902472da9bcb683fa595099911f4d2e86e5683bcc45de60dd05dc3/propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0", size = 48411, upload-time = "2025-10-08T19:48:17.577Z" }, - { url = "https://files.pythonhosted.org/packages/e2/e3/7dc89f4f21e8f99bad3d5ddb3a3389afcf9da4ac69e3deb2dcdc96e74169/propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a", size = 275712, upload-time = "2025-10-08T19:48:18.901Z" }, - { url = 
"https://files.pythonhosted.org/packages/20/67/89800c8352489b21a8047c773067644e3897f02ecbbd610f4d46b7f08612/propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be", size = 273557, upload-time = "2025-10-08T19:48:20.762Z" }, - { url = "https://files.pythonhosted.org/packages/e2/a1/b52b055c766a54ce6d9c16d9aca0cad8059acd9637cdf8aa0222f4a026ef/propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc", size = 280015, upload-time = "2025-10-08T19:48:22.592Z" }, - { url = "https://files.pythonhosted.org/packages/48/c8/33cee30bd890672c63743049f3c9e4be087e6780906bfc3ec58528be59c1/propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a", size = 262880, upload-time = "2025-10-08T19:48:23.947Z" }, - { url = "https://files.pythonhosted.org/packages/0c/b1/8f08a143b204b418285c88b83d00edbd61afbc2c6415ffafc8905da7038b/propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89", size = 260938, upload-time = "2025-10-08T19:48:25.656Z" }, - { url = "https://files.pythonhosted.org/packages/cf/12/96e4664c82ca2f31e1c8dff86afb867348979eb78d3cb8546a680287a1e9/propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726", size = 247641, upload-time = "2025-10-08T19:48:27.207Z" }, - { url = "https://files.pythonhosted.org/packages/18/ed/e7a9cfca28133386ba52278136d42209d3125db08d0a6395f0cba0c0285c/propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367", size = 262510, upload-time = 
"2025-10-08T19:48:28.65Z" }, - { url = "https://files.pythonhosted.org/packages/f5/76/16d8bf65e8845dd62b4e2b57444ab81f07f40caa5652b8969b87ddcf2ef6/propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36", size = 263161, upload-time = "2025-10-08T19:48:30.133Z" }, - { url = "https://files.pythonhosted.org/packages/e7/70/c99e9edb5d91d5ad8a49fa3c1e8285ba64f1476782fed10ab251ff413ba1/propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455", size = 257393, upload-time = "2025-10-08T19:48:31.567Z" }, - { url = "https://files.pythonhosted.org/packages/08/02/87b25304249a35c0915d236575bc3574a323f60b47939a2262b77632a3ee/propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85", size = 42546, upload-time = "2025-10-08T19:48:32.872Z" }, - { url = "https://files.pythonhosted.org/packages/cb/ef/3c6ecf8b317aa982f309835e8f96987466123c6e596646d4e6a1dfcd080f/propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1", size = 46259, upload-time = "2025-10-08T19:48:34.226Z" }, - { url = "https://files.pythonhosted.org/packages/c4/2d/346e946d4951f37eca1e4f55be0f0174c52cd70720f84029b02f296f4a38/propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9", size = 40428, upload-time = "2025-10-08T19:48:35.441Z" }, - { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, -] - -[[package]] -name = "pycparser" -version = "2.23" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, -] - -[[package]] -name = "pydantic" -version = "2.12.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "annotated-types" }, - { name = "pydantic-core" }, - { name = "typing-extensions" }, - { name = "typing-inspection" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/96/ad/a17bc283d7d81837c061c49e3eaa27a45991759a1b7eae1031921c6bd924/pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac", size = 821038, upload-time = "2025-11-05T10:50:08.59Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/82/2f/e68750da9b04856e2a7ec56fc6f034a5a79775e9b9a81882252789873798/pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e", size = 463400, upload-time = "2025-11-05T10:50:06.732Z" }, -] - -[[package]] -name = "pydantic-core" -version = "2.41.5" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, - { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, - { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, - { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, - { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, - { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, - { 
url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, - { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, - { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, - { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, - { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, - { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, - { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, - { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, - { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, - { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, - { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, - { url = 
"https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, - { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, - { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, - { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, - { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, - { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, - { url = 
"https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, - { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, - { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, - { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, - { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, - { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, - { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, - { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, - { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, - { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, - { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, 
upload-time = "2025-11-04T13:41:12.379Z" }, - { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, - { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, - { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, - { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, - { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, - { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, - { url = 
"https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, - { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, - { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, - { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, - { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, - { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, - 
{ url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, - { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, - { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, - { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, - { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, - { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, - { url = 
"https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, - { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, - { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, - { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, - { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, - { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = 
"2025-11-04T13:42:59.471Z" }, -] - -[[package]] -name = "pyjwt" -version = "2.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, -] - -[package.optional-dependencies] -crypto = [ - { name = "cryptography" }, -] - -[[package]] -name = "python-dotenv" -version = "1.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, -] - -[[package]] -name = "requests" -version = "2.32.5" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "certifi" }, - { name = "charset-normalizer" }, - { name = "idna" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, 
upload-time = "2025-08-18T20:46:02.573Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, -] - -[[package]] -name = "tqdm" -version = "4.67.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, -] - -[[package]] -name = "typing-extensions" -version = "4.15.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, -] - -[[package]] -name = "typing-inspection" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, -] - -[[package]] -name = "urllib3" -version = "2.5.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = 
"sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, -] - -[[package]] -name = "yarl" -version = "1.22.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "multidict" }, - { name = "propcache" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/75/ff/46736024fee3429b80a165a732e38e5d5a238721e634ab41b040d49f8738/yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f", size = 142000, upload-time = "2025-10-06T14:09:44.631Z" }, - { url = "https://files.pythonhosted.org/packages/5a/9a/b312ed670df903145598914770eb12de1bac44599549b3360acc96878df8/yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2", size = 94338, upload-time = "2025-10-06T14:09:46.372Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f5/0601483296f09c3c65e303d60c070a5c19fcdbc72daa061e96170785bc7d/yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74", size = 94909, upload-time = "2025-10-06T14:09:48.648Z" }, - { url = "https://files.pythonhosted.org/packages/60/41/9a1fe0b73dbcefce72e46cf149b0e0a67612d60bfc90fb59c2b2efdfbd86/yarl-1.22.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1651bf8e0398574646744c1885a41198eba53dc8a9312b954073f845c90a8df", size = 372940, upload-time = "2025-10-06T14:09:50.089Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/7a/795cb6dfee561961c30b800f0ed616b923a2ec6258b5def2a00bf8231334/yarl-1.22.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b8a0588521a26bf92a57a1705b77b8b59044cdceccac7151bd8d229e66b8dedb", size = 345825, upload-time = "2025-10-06T14:09:52.142Z" }, - { url = "https://files.pythonhosted.org/packages/d7/93/a58f4d596d2be2ae7bab1a5846c4d270b894958845753b2c606d666744d3/yarl-1.22.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42188e6a615c1a75bcaa6e150c3fe8f3e8680471a6b10150c5f7e83f47cc34d2", size = 386705, upload-time = "2025-10-06T14:09:54.128Z" }, - { url = "https://files.pythonhosted.org/packages/61/92/682279d0e099d0e14d7fd2e176bd04f48de1484f56546a3e1313cd6c8e7c/yarl-1.22.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f6d2cb59377d99718913ad9a151030d6f83ef420a2b8f521d94609ecc106ee82", size = 396518, upload-time = "2025-10-06T14:09:55.762Z" }, - { url = "https://files.pythonhosted.org/packages/db/0f/0d52c98b8a885aeda831224b78f3be7ec2e1aa4a62091f9f9188c3c65b56/yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50678a3b71c751d58d7908edc96d332af328839eea883bb554a43f539101277a", size = 377267, upload-time = "2025-10-06T14:09:57.958Z" }, - { url = "https://files.pythonhosted.org/packages/22/42/d2685e35908cbeaa6532c1fc73e89e7f2efb5d8a7df3959ea8e37177c5a3/yarl-1.22.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e8fbaa7cec507aa24ea27a01456e8dd4b6fab829059b69844bd348f2d467124", size = 365797, upload-time = "2025-10-06T14:09:59.527Z" }, - { url = "https://files.pythonhosted.org/packages/a2/83/cf8c7bcc6355631762f7d8bdab920ad09b82efa6b722999dfb05afa6cfac/yarl-1.22.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:433885ab5431bc3d3d4f2f9bd15bfa1614c522b0f1405d62c4f926ccd69d04fa", size = 365535, upload-time = 
"2025-10-06T14:10:01.139Z" }, - { url = "https://files.pythonhosted.org/packages/25/e1/5302ff9b28f0c59cac913b91fe3f16c59a033887e57ce9ca5d41a3a94737/yarl-1.22.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b790b39c7e9a4192dc2e201a282109ed2985a1ddbd5ac08dc56d0e121400a8f7", size = 382324, upload-time = "2025-10-06T14:10:02.756Z" }, - { url = "https://files.pythonhosted.org/packages/bf/cd/4617eb60f032f19ae3a688dc990d8f0d89ee0ea378b61cac81ede3e52fae/yarl-1.22.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31f0b53913220599446872d757257be5898019c85e7971599065bc55065dc99d", size = 383803, upload-time = "2025-10-06T14:10:04.552Z" }, - { url = "https://files.pythonhosted.org/packages/59/65/afc6e62bb506a319ea67b694551dab4a7e6fb7bf604e9bd9f3e11d575fec/yarl-1.22.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a49370e8f711daec68d09b821a34e1167792ee2d24d405cbc2387be4f158b520", size = 374220, upload-time = "2025-10-06T14:10:06.489Z" }, - { url = "https://files.pythonhosted.org/packages/e7/3d/68bf18d50dc674b942daec86a9ba922d3113d8399b0e52b9897530442da2/yarl-1.22.0-cp312-cp312-win32.whl", hash = "sha256:70dfd4f241c04bd9239d53b17f11e6ab672b9f1420364af63e8531198e3f5fe8", size = 81589, upload-time = "2025-10-06T14:10:09.254Z" }, - { url = "https://files.pythonhosted.org/packages/c8/9a/6ad1a9b37c2f72874f93e691b2e7ecb6137fb2b899983125db4204e47575/yarl-1.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:8884d8b332a5e9b88e23f60bb166890009429391864c685e17bd73a9eda9105c", size = 87213, upload-time = "2025-10-06T14:10:11.369Z" }, - { url = "https://files.pythonhosted.org/packages/44/c5/c21b562d1680a77634d748e30c653c3ca918beb35555cff24986fff54598/yarl-1.22.0-cp312-cp312-win_arm64.whl", hash = "sha256:ea70f61a47f3cc93bdf8b2f368ed359ef02a01ca6393916bc8ff877427181e74", size = 81330, upload-time = "2025-10-06T14:10:13.112Z" }, - { url = 
"https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, - { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, - { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, - { url = "https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, - { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, - { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, - { url = 
"https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, - { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, - { url = "https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, - { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, - { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, - { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, - { url = 
"https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, - { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 81555, upload-time = "2025-10-06T14:10:39.649Z" }, - { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, - { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, - { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, - { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, - { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, - { url = "https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, - { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, - { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, - { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, - { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, - { url = 
"https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, - { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, - { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, - { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, - { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, - { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", 
hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, - { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, - { url = "https://files.pythonhosted.org/packages/46/b3/e20ef504049f1a1c54a814b4b9bed96d1ac0e0610c3b4da178f87209db05/yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4", size = 140520, upload-time = "2025-10-06T14:11:15.465Z" }, - { url = "https://files.pythonhosted.org/packages/e4/04/3532d990fdbab02e5ede063676b5c4260e7f3abea2151099c2aa745acc4c/yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683", size = 93504, upload-time = "2025-10-06T14:11:17.106Z" }, - { url = "https://files.pythonhosted.org/packages/11/63/ff458113c5c2dac9a9719ac68ee7c947cb621432bcf28c9972b1c0e83938/yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b", size = 94282, upload-time = "2025-10-06T14:11:19.064Z" }, - { url = "https://files.pythonhosted.org/packages/a7/bc/315a56aca762d44a6aaaf7ad253f04d996cb6b27bad34410f82d76ea8038/yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e", size = 372080, upload-time = "2025-10-06T14:11:20.996Z" }, - { url = "https://files.pythonhosted.org/packages/3f/3f/08e9b826ec2e099ea6e7c69a61272f4f6da62cb5b1b63590bb80ca2e4a40/yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590", size = 338696, upload-time = "2025-10-06T14:11:22.847Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9f/90360108e3b32bd76789088e99538febfea24a102380ae73827f62073543/yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2", size = 387121, upload-time = "2025-10-06T14:11:24.889Z" }, - { url = "https://files.pythonhosted.org/packages/98/92/ab8d4657bd5b46a38094cfaea498f18bb70ce6b63508fd7e909bd1f93066/yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da", size = 394080, upload-time = "2025-10-06T14:11:27.307Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e7/d8c5a7752fef68205296201f8ec2bf718f5c805a7a7e9880576c67600658/yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784", size = 372661, upload-time = "2025-10-06T14:11:29.387Z" }, - { url = "https://files.pythonhosted.org/packages/b6/2e/f4d26183c8db0bb82d491b072f3127fb8c381a6206a3a56332714b79b751/yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b", size = 364645, upload-time = "2025-10-06T14:11:31.423Z" }, - { url = "https://files.pythonhosted.org/packages/80/7c/428e5812e6b87cd00ee8e898328a62c95825bf37c7fa87f0b6bb2ad31304/yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694", size = 355361, upload-time = "2025-10-06T14:11:33.055Z" }, - { url = "https://files.pythonhosted.org/packages/ec/2a/249405fd26776f8b13c067378ef4d7dd49c9098d1b6457cdd152a99e96a9/yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d", size = 381451, upload-time = "2025-10-06T14:11:35.136Z" }, - { url = "https://files.pythonhosted.org/packages/67/a8/fb6b1adbe98cf1e2dd9fad71003d3a63a1bc22459c6e15f5714eb9323b93/yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd", size = 383814, upload-time = "2025-10-06T14:11:37.094Z" }, - { url = "https://files.pythonhosted.org/packages/d9/f9/3aa2c0e480fb73e872ae2814c43bc1e734740bb0d54e8cb2a95925f98131/yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da", size = 370799, upload-time = "2025-10-06T14:11:38.83Z" }, - { url = "https://files.pythonhosted.org/packages/50/3c/af9dba3b8b5eeb302f36f16f92791f3ea62e3f47763406abf6d5a4a3333b/yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2", size = 82990, upload-time = "2025-10-06T14:11:40.624Z" }, - { url = "https://files.pythonhosted.org/packages/ac/30/ac3a0c5bdc1d6efd1b41fa24d4897a4329b3b1e98de9449679dd327af4f0/yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79", size = 88292, upload-time = "2025-10-06T14:11:42.578Z" }, - { url = "https://files.pythonhosted.org/packages/df/0a/227ab4ff5b998a1b7410abc7b46c9b7a26b0ca9e86c34ba4b8d8bc7c63d5/yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33", size = 82888, upload-time = "2025-10-06T14:11:44.863Z" }, - { url = "https://files.pythonhosted.org/packages/06/5e/a15eb13db90abd87dfbefb9760c0f3f257ac42a5cac7e75dbc23bed97a9f/yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1", size = 146223, upload-time = "2025-10-06T14:11:46.796Z" }, - { url = 
"https://files.pythonhosted.org/packages/18/82/9665c61910d4d84f41a5bf6837597c89e665fa88aa4941080704645932a9/yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca", size = 95981, upload-time = "2025-10-06T14:11:48.845Z" }, - { url = "https://files.pythonhosted.org/packages/5d/9a/2f65743589809af4d0a6d3aa749343c4b5f4c380cc24a8e94a3c6625a808/yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53", size = 97303, upload-time = "2025-10-06T14:11:50.897Z" }, - { url = "https://files.pythonhosted.org/packages/b0/ab/5b13d3e157505c43c3b43b5a776cbf7b24a02bc4cccc40314771197e3508/yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c", size = 361820, upload-time = "2025-10-06T14:11:52.549Z" }, - { url = "https://files.pythonhosted.org/packages/fb/76/242a5ef4677615cf95330cfc1b4610e78184400699bdda0acb897ef5e49a/yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf", size = 323203, upload-time = "2025-10-06T14:11:54.225Z" }, - { url = "https://files.pythonhosted.org/packages/8c/96/475509110d3f0153b43d06164cf4195c64d16999e0c7e2d8a099adcd6907/yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face", size = 363173, upload-time = "2025-10-06T14:11:56.069Z" }, - { url = "https://files.pythonhosted.org/packages/c9/66/59db471aecfbd559a1fd48aedd954435558cd98c7d0da8b03cc6c140a32c/yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b", size = 373562, upload-time = 
"2025-10-06T14:11:58.783Z" }, - { url = "https://files.pythonhosted.org/packages/03/1f/c5d94abc91557384719da10ff166b916107c1b45e4d0423a88457071dd88/yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486", size = 339828, upload-time = "2025-10-06T14:12:00.686Z" }, - { url = "https://files.pythonhosted.org/packages/5f/97/aa6a143d3afba17b6465733681c70cf175af89f76ec8d9286e08437a7454/yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138", size = 347551, upload-time = "2025-10-06T14:12:02.628Z" }, - { url = "https://files.pythonhosted.org/packages/43/3c/45a2b6d80195959239a7b2a8810506d4eea5487dce61c2a3393e7fc3c52e/yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a", size = 334512, upload-time = "2025-10-06T14:12:04.871Z" }, - { url = "https://files.pythonhosted.org/packages/86/a0/c2ab48d74599c7c84cb104ebd799c5813de252bea0f360ffc29d270c2caa/yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529", size = 352400, upload-time = "2025-10-06T14:12:06.624Z" }, - { url = "https://files.pythonhosted.org/packages/32/75/f8919b2eafc929567d3d8411f72bdb1a2109c01caaab4ebfa5f8ffadc15b/yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093", size = 357140, upload-time = "2025-10-06T14:12:08.362Z" }, - { url = "https://files.pythonhosted.org/packages/cf/72/6a85bba382f22cf78add705d8c3731748397d986e197e53ecc7835e76de7/yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c", size = 341473, upload-time = "2025-10-06T14:12:10.994Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" }, - { url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" }, - { url = "https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" }, - { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, -] diff --git a/samples/python/hosted-agents/custom/system-utility-agent/.env.sample b/samples/python/hosted-agents/custom/system-utility-agent/.env.sample deleted file mode 100644 index 6fdd68099..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/.env.sample +++ /dev/null @@ -1,4 +0,0 @@ -AZURE_AI_MODEL_DEPLOYMENT_NAME= -AZURE_AI_PROJECT_ENDPOINT= -AGENT_MAX_TURNS= -AGENT_CHAT_HISTORY_LENGTH= \ No newline at end of file diff --git a/samples/python/hosted-agents/custom/system-utility-agent/README.md b/samples/python/hosted-agents/custom/system-utility-agent/README.md deleted file mode 100644 index d6d70a379..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/README.md +++ /dev/null @@ -1,144 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this 
GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build and host a **System Utility Agent** that can inspect its runtime environment using tool calls (processes, ports, resources, DNS, and environment variables), hosted using the -[Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. - -## How It Works - -### System Utility Agent - -This agent is designed for diagnostics and “what’s running here?” questions. 
It is **container-aware** and will report whether the agent is likely seeing the container namespace vs the host. - -The agent exposes a small set of tools (implemented locally in Python) and uses an OpenAI-style tool-calling loop to answer questions. - -### Tracing (custom spans) - -This sample also demonstrates how to add **custom spans** to hosted agent traces using OpenTelemetry. The agent creates spans around the overall request and each tool-calling iteration, and annotates them with useful attributes (conversation ID, model name, token usage, tool name, tool arguments, and tool result). This is useful when you want richer observability than the default hosted-agent traces. - -Tools included: - -1. **capability_report** - Report what the agent can likely observe (host vs container scope) -2. **system_info** - OS / Python / CPU metadata -3. **resource_snapshot** - CPU / memory / disk snapshot -4. **list_processes** - List running processes (visibility depends on container scope) -5. **process_details** - Get details for a specific process -6. **check_port** - Check whether a TCP port is listening / reachable -7. **dns_lookup** - Resolve a hostname -8. **list_environment_variables** - List environment variables (supports redaction) - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://aka.ms/azdaiagent/docs) extension. -The extension builds a container image for the agent, deploys it to Azure Container Instances (ACI), and creates a hosted agent version and deployment on Foundry Agent Service. 
- -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. Python 3.10+ installed -2. Either: - - A Microsoft Foundry project endpoint configured (recommended), or - - An Azure OpenAI endpoint + API key configured -3. If using Foundry project auth, Azure CLI installed and authenticated (`az login`) so `DefaultAzureCredential` can acquire a token - -### Environment Variables - -This sample supports loading environment variables from a `.env` file (via `python-dotenv`) or your shell environment. - -#### Option A (Recommended): Microsoft Foundry project endpoint - -- `AZURE_AI_PROJECT_ENDPOINT` - Your Foundry project endpoint (required for this option) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - Model deployment name (optional, defaults to `gpt-4o-mini`) - -```powershell -# Replace with your Foundry project endpoint -$env:AZURE_AI_PROJECT_ENDPOINT="https://your-project.ai.azure.com" - -# Optional, defaults to gpt-4o-mini -$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -#### Option B: Azure OpenAI key-based configuration - -- `AZURE_ENDPOINT` - Your Azure OpenAI endpoint URL (required for this option) -- `OPENAI_API_KEY` - Your Azure OpenAI API key (required for this option) -- `OPENAI_API_VERSION` - Optional, defaults to `2025-03-01-preview` -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - Optional, defaults to `gpt-4o-mini` - -```powershell -# Replace with your Azure OpenAI endpoint -$env:AZURE_ENDPOINT="https://your-openai-resource.openai.azure.com/" - -$env:OPENAI_API_KEY="" - -# Optional -$env:OPENAI_API_VERSION="2025-03-01-preview" -$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent locally, execute: - -```powershell -python main.py -``` - -This will start the hosted agent locally and expose an OpenAI Responses-compatible endpoint 
(typically on `http://localhost:8088/`). - -### Interacting with the Agent - -```powershell -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses -d '{"input": "What environment are you running in? Summarize what you can observe","stream":false}' -``` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://aka.ms/azdaiagent/docs - -## Troubleshooting - -### After deployed, the agent appears stateless (chat history is not preserved) - -- Make sure you are using a model deployment hosted in foundry. -- If you are not chat with your agent from UI, be sure to pass an existing foundry conversation ID in the `conversation` field of your create responses request. - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. - diff --git a/samples/python/hosted-agents/custom/system-utility-agent/agent.yaml b/samples/python/hosted-agents/custom/system-utility-agent/agent.yaml deleted file mode 100644 index 62be13ab0..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/agent.yaml +++ /dev/null @@ -1,42 +0,0 @@ -name: SystemUtilityAgent -description: |- - System Utility Agent (cross-OS, container-aware) — NO local files required. 
- - Tools included (per your request): - - capability_report - - system_info - - resource_snapshot - - list_processes - - process_details - - check_port - - dns_lookup - - list_environment_variables -metadata: - example: - - role: user - content: |- - What is the current value of AZURE_AI_MODEL_DEPLOYMENT_NAME? - tags: - - example - - learning - authors: - - mengla -template: - name: SystemUtilityAgent - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_AI_PROJECT_ENDPOINT - value: ${AZURE_AI_PROJECT_ENDPOINT} - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: "{{chat}}" - - name: AGENT_MAX_TURNS - value: 10 - - name: AGENT_CHAT_HISTORY_LENGTH - value: 20 -resources: - - kind: model - id: gpt-5 - name: chat \ No newline at end of file diff --git a/samples/python/hosted-agents/custom/system-utility-agent/local_tools.py b/samples/python/hosted-agents/custom/system-utility-agent/local_tools.py deleted file mode 100644 index c9dd8d2c8..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/local_tools.py +++ /dev/null @@ -1,554 +0,0 @@ -""" -System Utility Agent (cross-OS, container-aware) — NO local files required. - -Tools included (per your request): -- capability_report -- system_info -- resource_snapshot -- list_processes -- process_details -- check_port -- dns_lookup - -Notes: -- This is designed to work with any model/server that supports an OpenAI-style tool calling contract. -""" - -import os -import platform -import re -import socket -import sys -import time -from typing import Any, Dict, List, Optional - -import psutil - -# ----------------------------- -# Tool implementations -# ----------------------------- - -def _is_running_in_container() -> bool: - """ - Best-effort detection for Linux containers. On Windows/macOS Docker Desktop, - you're still in a Linux VM container, so this often works as well. 
- """ - # Common heuristics: /.dockerenv, cgroup hints - if os.path.exists("/.dockerenv"): - return True - cgroup_path = "/proc/1/cgroup" - if os.path.exists(cgroup_path): - try: - with open(cgroup_path, "r", encoding="utf-8", errors="ignore") as f: - txt = f.read() - if "docker" in txt or "containerd" in txt or "kubepods" in txt: - return True - except Exception: - pass - return False - - -def _read_first_existing(paths: List[str]) -> Optional[str]: - for p in paths: - if os.path.exists(p): - try: - with open(p, "r", encoding="utf-8", errors="ignore") as f: - return f.read().strip() - except Exception: - continue - return None - - -def _cgroup_limits() -> Dict[str, Any]: - """ - Best-effort cgroup limits (mostly Linux). Returns supported=false on non-Linux. - """ - if platform.system().lower() != "linux": - return {"supported": False, "reason": "cgroup limits only available on Linux", "data": None} - - # Handle cgroup v2 (common) and some v1. - # v2 memory limit: /sys/fs/cgroup/memory.max - # v2 cpu max: /sys/fs/cgroup/cpu.max - mem_max = _read_first_existing([ - "/sys/fs/cgroup/memory.max", # cgroup v2 - "/sys/fs/cgroup/memory/memory.limit_in_bytes" # cgroup v1 - ]) - cpu_max = _read_first_existing([ - "/sys/fs/cgroup/cpu.max", # cgroup v2 - "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" # cgroup v1 - ]) - cpu_period = _read_first_existing([ - "/sys/fs/cgroup/cpu/cpu.cfs_period_us" # cgroup v1 only - ]) - - data: Dict[str, Any] = {} - - # Memory - if mem_max is not None: - if mem_max.isdigit(): - lim = int(mem_max) - # Some systems show huge numbers when "max"/unlimited; v2 uses "max" literal. - data["memory_limit_bytes"] = lim - data["memory_limit_human"] = f"{lim / (1024**3):.2f} GiB" - else: - data["memory_limit_bytes"] = None - data["memory_limit_human"] = mem_max # e.g. 
"max" - - # CPU - if cpu_max is not None: - # cgroup v2: "quota period" like "200000 100000" or "max 100000" - if " " in cpu_max: - quota, period = cpu_max.split()[:2] - data["cpu_quota_raw"] = quota - data["cpu_period_us"] = int(period) if period.isdigit() else None - data["cpu_quota_us"] = int(quota) if quota.isdigit() else None - if quota.isdigit() and period.isdigit() and int(period) > 0: - data["cpu_limit_cores"] = int(quota) / int(period) - else: - # cgroup v1 quota only - data["cpu_quota_us"] = int(cpu_max) if cpu_max.isdigit() else None - data["cpu_period_us"] = int(cpu_period) if (cpu_period and cpu_period.isdigit()) else None - if data.get("cpu_quota_us") is not None and data.get("cpu_period_us"): - data["cpu_limit_cores"] = data["cpu_quota_us"] / data["cpu_period_us"] - - return {"supported": True, "reason": None, "data": data or None} - - -def capability_report() -> Dict[str, Any]: - """ - Report what the agent can likely observe in this runtime environment. - """ - os_name = platform.system() - in_container = _is_running_in_container() - - # process listing typically works; may be limited by PID namespace (containers) - proc_supported = True - - # net connections sometimes restricted by permissions - try: - _ = psutil.net_connections(kind="inet") - net_supported = True - net_reason = None - except Exception as e: - net_supported = False - net_reason = f"net_connections not accessible: {type(e).__name__}: {e}" - - cgroups = _cgroup_limits() - - # Determine "scope" we can confidently claim - scope = "container" if in_container else "host" - - return { - "supported": True, - "scope": scope, - "data": { - "os": os_name, - "platform": platform.platform(), - "python": sys.version.split()[0], - "in_container": in_container, - "process_visibility": { - "supported": proc_supported, - "scope": scope, - "notes": "In containers, you usually only see container processes (PID namespace)." 
- }, - "network_visibility": { - "supported": net_supported, - "scope": scope, - "notes": "In containers, ports reflect the container network namespace unless using host networking.", - "reason": net_reason - }, - "cgroup_limits": cgroups, - "optional_binaries": { - "nvidia_smi": bool(shutil_which("nvidia-smi")), - "ip": bool(shutil_which("ip")), - "ss": bool(shutil_which("ss")), - "netstat": bool(shutil_which("netstat")), - }, - }, - } - - -def shutil_which(cmd: str) -> Optional[str]: - # tiny local equivalent to shutil.which, without importing more - paths = os.environ.get("PATH", "").split(os.pathsep) - exts = [""] # Unix - if platform.system().lower() == "windows": - pathext = os.environ.get("PATHEXT", ".EXE;.BAT;.CMD").split(";") - exts = pathext - - for p in paths: - p = p.strip('"') - if not p: - continue - for ext in exts: - full = os.path.join(p, cmd + ext) - if os.path.isfile(full) and os.access(full, os.X_OK): - return full - return None - - -def system_info() -> Dict[str, Any]: - boot = None - try: - boot = psutil.boot_time() - except Exception: - pass - - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": { - "os": platform.system(), - "release": platform.release(), - "version": platform.version(), - "machine": platform.machine(), - "processor": platform.processor(), - "python": sys.version, - "executable": sys.executable, - "uptime_seconds": (time.time() - boot) if boot else None, - "cpu_logical": psutil.cpu_count(logical=True), - "cpu_physical": psutil.cpu_count(logical=False), - }, - } - - -def resource_snapshot(sample_cpu_seconds: float = 0.8) -> Dict[str, Any]: - # CPU percent: sample over a short interval for more meaningful value - try: - cpu = psutil.cpu_percent(interval=sample_cpu_seconds) - except Exception: - cpu = None - - try: - mem = psutil.virtual_memory() - mem_data = { - "total": mem.total, - "available": mem.available, - "used": mem.used, - "percent": mem.percent, - } - except 
Exception: - mem_data = None - - # Disk: use current working dir's mount - try: - disk = psutil.disk_usage(os.getcwd()) - disk_data = { - "path": os.getcwd(), - "total": disk.total, - "used": disk.used, - "free": disk.free, - "percent": disk.percent, - } - except Exception: - disk_data = None - - # Load average is not available on Windows - load_avg = None - try: - if hasattr(os, "getloadavg"): - load_avg = os.getloadavg() - except Exception: - pass - - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": { - "cpu_percent": cpu, - "load_avg": load_avg, - "memory": mem_data, - "disk": disk_data, - }, - } - - -def list_processes(limit: int = 30, name_regex: Optional[str] = None) -> Dict[str, Any]: - """ - Lists processes visible in the current PID namespace. - """ - regex = re.compile(name_regex, re.IGNORECASE) if name_regex else None - rows: List[Dict[str, Any]] = [] - - for p in psutil.process_iter(attrs=["pid", "name", "username", "cpu_percent", "memory_percent", "cmdline", "status"]): - try: - info = p.info - name = info.get("name") or "" - if regex and not regex.search(name): - continue - cmdline = info.get("cmdline") or [] - rows.append({ - "pid": info.get("pid"), - "name": name, - "username": info.get("username"), - "status": info.get("status"), - "cpu_percent": info.get("cpu_percent"), - "memory_percent": info.get("memory_percent"), - "cmdline": " ".join(cmdline) if isinstance(cmdline, list) else str(cmdline), - }) - if len(rows) >= limit: - break - except (psutil.NoSuchProcess, psutil.AccessDenied): - continue - except Exception: - continue - - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": {"processes": rows, "limit": limit, "filter": {"name_regex": name_regex}}, - } - - -def process_details(pid: int) -> Dict[str, Any]: - try: - p = psutil.Process(pid) - with p.oneshot(): - data = { - "pid": p.pid, - "name": p.name(), - "status": p.status(), - 
"username": safe_call(p.username), - "create_time": safe_call(p.create_time), - "ppid": safe_call(p.ppid), - "cmdline": " ".join(safe_call(p.cmdline) or []), - "cpu_percent": safe_call(p.cpu_percent), - "memory_info": safe_call(lambda: p.memory_info()._asdict()), - "memory_percent": safe_call(p.memory_percent), - "num_threads": safe_call(p.num_threads), - "children": [{"pid": c.pid, "name": safe_call(c.name)} for c in safe_call(lambda: p.children(recursive=False)) or []], - "connections_count": safe_call(lambda: len(p.connections(kind="inet"))) if hasattr(p, "connections") else None, - } - return {"supported": True, "scope": "container" if _is_running_in_container() else "host", "data": data} - except psutil.NoSuchProcess: - return {"supported": False, "scope": "container" if _is_running_in_container() else "host", "reason": "No such process", "data": None} - except psutil.AccessDenied as e: - return {"supported": False, "scope": "container" if _is_running_in_container() else "host", "reason": f"Access denied: {e}", "data": None} - - -def safe_call(fn): - try: - return fn() - except Exception: - return None - - -def check_port(port: int, protocol: str = "tcp") -> Dict[str, Any]: - """ - Returns listeners on a port visible to this runtime (container or host). 
- """ - proto = protocol.lower() - kind = "inet" # includes tcp+udp - try: - conns = psutil.net_connections(kind=kind) - except Exception as e: - return { - "supported": False, - "scope": "container" if _is_running_in_container() else "host", - "reason": f"Cannot read net connections: {type(e).__name__}: {e}", - "data": None, - } - - listeners = [] - for c in conns: - try: - if c.laddr is None: - continue - lport = c.laddr.port if hasattr(c.laddr, "port") else None - if lport != port: - continue - - # Filter protocol if requested - if proto == "tcp" and c.type != socket.SOCK_STREAM: - continue - if proto == "udp" and c.type != socket.SOCK_DGRAM: - continue - - listeners.append({ - "pid": c.pid, - "status": getattr(c, "status", None), - "local_address": f"{c.laddr.ip}:{c.laddr.port}" if hasattr(c.laddr, "ip") else str(c.laddr), - "remote_address": ( - f"{c.raddr.ip}:{c.raddr.port}" if getattr(c, "raddr", None) and hasattr(c.raddr, "ip") else (str(c.raddr) if getattr(c, "raddr", None) else None) - ), - "family": str(c.family), - "type": str(c.type), - }) - except Exception: - continue - - # Attach process names when possible - for item in listeners: - pid = item.get("pid") - if pid: - try: - item["process_name"] = psutil.Process(pid).name() - except Exception: - item["process_name"] = None - - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": {"port": port, "protocol": proto, "listeners": listeners, "count": len(listeners)}, - } - - -def dns_lookup(name: str, record_type: str = "A") -> Dict[str, Any]: - """ - Portable DNS check using getaddrinfo. - record_type is advisory; getaddrinfo returns what the system resolver provides. 
- """ - try: - infos = socket.getaddrinfo(name, None) - ips = sorted({i[4][0] for i in infos if i and i[4]}) - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": {"name": name, "record_type": record_type, "ips": ips}, - } - except Exception as e: - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": {"name": name, "record_type": record_type, "ips": []}, - "error": {"type": type(e).__name__, "message": str(e)}, - } - -def list_environment_variables(redact: bool = True) -> Dict[str, Any]: - """ - List environment variables visible to this process. - By default, redact values that look sensitive. - """ - sensitive_patterns = [ - "KEY", "TOKEN", "SECRET", "PASSWORD", "PWD", - "API_KEY", "AUTH", "CREDENTIAL", "PRIVATE" - ] - - def is_sensitive(name: str) -> bool: - upper = name.upper() - return any(p in upper for p in sensitive_patterns) - - env = {} - for k, v in os.environ.items(): - if redact and is_sensitive(k): - env[k] = "***REDACTED***" - else: - env[k] = v - - return { - "supported": True, - "scope": "container" if _is_running_in_container() else "host", - "data": { - "count": len(env), - "redacted": redact, - "variables": env, - }, - } - - -# ----------------------------- -# Tool schemas for OpenAI-style tool calling -# ----------------------------- - -TOOLS = [ - { - "type": "function", - "name": "capability_report", - "description": "Report what the agent can observe (container/host scope, visibility limits, optional binaries, cgroup limits).", - "parameters": {"type": "object", "properties": {}, "required": []}, - }, - { - "type": "function", - "name": "system_info", - "description": "Return OS, kernel, CPU counts, Python runtime and uptime.", - "parameters": {"type": "object", "properties": {}, "required": []}, - }, - { - "type": "function", - "name": "resource_snapshot", - "description": "Return CPU/memory/disk usage (best-effort).", - "parameters": 
{ - "type": "object", - "properties": { - "sample_cpu_seconds": {"type": "number", "description": "Sampling interval for CPU percent.", "default": 0.8}, - }, - "required": [], - }, - }, - { - "type": "function", - "name": "list_processes", - "description": "List processes visible to this runtime. Optional name regex filter.", - "parameters": { - "type": "object", - "properties": { - "limit": {"type": "integer", "minimum": 1, "maximum": 200, "default": 30}, - "name_regex": {"type": ["string", "null"], "description": "Regex to filter by process name.", "default": None}, - }, - "required": [], - }, - }, - { - "type": "function", - "name": "process_details", - "description": "Return detailed info for a PID (name, cmdline, cpu/mem, children, etc.).", - "parameters": { - "type": "object", - "properties": {"pid": {"type": "integer", "minimum": 1}}, - "required": ["pid"], - }, - }, - { - "type": "function", - "name": "check_port", - "description": "Check listeners for a given port in the current network namespace.", - "parameters": { - "type": "object", - "properties": { - "port": {"type": "integer", "minimum": 1, "maximum": 65535}, - "protocol": {"type": "string", "enum": ["tcp", "udp"], "default": "tcp"}, - }, - "required": ["port"], - }, - }, - { - "type": "function", - "name": "dns_lookup", - "description": "Resolve a hostname using the system resolver.", - "parameters": { - "type": "object", - "properties": { - "name": {"type": "string"}, - "record_type": {"type": "string", "default": "A"}, - }, - "required": ["name"], - }, - }, - { - "type": "function", - "name": "list_environment_variables", - "description": "List environment variables visible to the current process. 
Sensitive values are redacted by default.", - "parameters": { - "type": "object", - "properties": { - "redact": { - "type": "boolean", - "description": "Whether to redact sensitive variables (recommended).", - "default": True - } - }, - "required": [] - }, - }, -] - -TOOL_IMPL = { - "capability_report": lambda **kwargs: capability_report(), - "system_info": lambda **kwargs: system_info(), - "resource_snapshot": lambda **kwargs: resource_snapshot(**kwargs), - "list_processes": lambda **kwargs: list_processes(**kwargs), - "process_details": lambda **kwargs: process_details(**kwargs), - "check_port": lambda **kwargs: check_port(**kwargs), - "dns_lookup": lambda **kwargs: dns_lookup(**kwargs), - "list_environment_variables": lambda **kwargs: list_environment_variables(**kwargs), -} \ No newline at end of file diff --git a/samples/python/hosted-agents/custom/system-utility-agent/main.py b/samples/python/hosted-agents/custom/system-utility-agent/main.py deleted file mode 100644 index f060041f7..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/main.py +++ /dev/null @@ -1,381 +0,0 @@ -""" -System Utility Agent (cross-OS, container-aware) — NO local files required. 
- -Tools included (per your request): -- capability_report -- system_info -- resource_snapshot -- list_processes -- process_details -- check_port -- dns_lookup -- list_environment_variables -""" - -import datetime -import os -import json - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from dataclasses import dataclass, field -from typing import Any, Dict, List, AsyncGenerator, Union -from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent -from azure.ai.agentserver.core.models import ( - Response as OpenAIResponse, - ResponseStreamEvent, -) -from azure.ai.agentserver.core.models.projects import ( - ItemContentOutputText, - ResponseCompletedEvent, - ResponseCreatedEvent, - ResponseOutputItemAddedEvent, - ResponsesAssistantMessageItemResource, - ResponseTextDeltaEvent, - ResponseTextDoneEvent, -) -from azure.ai.agentserver.core.logger import get_logger -from dotenv import load_dotenv -from openai import AzureOpenAI -from local_tools import TOOLS, TOOL_IMPL - -from opentelemetry import trace -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from opentelemetry.trace import Status, StatusCode - -logger = get_logger() - -# ----------------------------- -# Agent loop (OpenAI-style tool calling) -# ----------------------------- - -SYSTEM_PROMPT = """You are a System Utility Agent. -You can inspect the runtime environment using tools (processes, ports, resources, DNS). -Important: -- Always call capability_report early when the user asks questions that might depend on host vs container visibility. -- Never claim you can see host-wide processes/ports unless capability_report indicates it. -- Prefer using tools over guessing. -- Keep outputs clear and actionable. 
-""" - - -@dataclass -class AgentConfig: - model: str = field(default_factory=lambda: os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-5")) - project_endpoint: str = field(default_factory=lambda: os.getenv("AZURE_AI_PROJECT_ENDPOINT", "")) - max_turns: int = field(default_factory=lambda: int(os.getenv("AGENT_MAX_TURNS", "10"))) - chat_history_length: int = field(default_factory=lambda: int(os.getenv("AGENT_CHAT_HISTORY_LENGTH", "20"))) - openai_api_version: str = field(default_factory=lambda: os.getenv("OPENAI_API_VERSION", "2025-11-15-preview")) - openai_api_key: str = field(default_factory=lambda: os.getenv("AZURE_OPENAI_API_KEY", "")) - azure_endpoint: str = field(default_factory=lambda: os.getenv("AZURE_ENDPOINT", "")) - - -class SystemUtilityAgent(FoundryCBAgent): - def __init__(self, **kwargs: Any): - super().__init__(**kwargs) - - self.cfg = AgentConfig() - if not self.cfg.project_endpoint: - self.client = AzureOpenAI( - api_version=self.cfg.openai_api_version, - azure_endpoint=self.cfg.azure_endpoint, - api_key=self.cfg.openai_api_key, - ) - logger.info("Using AzureOpenAI client with key-based auth.") - else: - self.project_client = AIProjectClient( - endpoint=self.cfg.project_endpoint, - credential=DefaultAzureCredential(), - ) - self.client = self.project_client.get_openai_client() - - self.hit_limit_warning = f"I hit the {self.cfg.max_turns} max turn limit for this turn. Try rephrasing." 
- - def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): - # optional: for local debugging, export spans to console - trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) - - def _stream_final_text(self, final_text: str, context: AgentRunContext): - """Yield streaming events for the provided final text.""" - - async def _async_stream(): - assembled = "" - sequence_number = 0 - - def next_sequence_number() -> int: - nonlocal sequence_number - current = sequence_number - sequence_number += 1 - return current - - yield ResponseCreatedEvent( - sequence_number=next_sequence_number(), - response=OpenAIResponse( - output=[], - conversation=context.get_conversation_object(), - agent=context.get_agent_id_object(), - id=context.response_id) - ) - item_id = context.id_generator.generate_message_id() - yield ResponseOutputItemAddedEvent( - sequence_number=next_sequence_number(), - output_index=0, - item=ResponsesAssistantMessageItemResource( - id=item_id, - status="in_progress", - content=[ItemContentOutputText(text="", annotations=[])], - ), - ) - - words = final_text.split(" ") - for idx, token in enumerate(words): - piece = token if idx == len(words) - 1 else token + " " - assembled += piece - yield ResponseTextDeltaEvent( - sequence_number=next_sequence_number(), - output_index=0, - content_index=0, - delta=piece, - ) - - yield ResponseTextDoneEvent( - sequence_number=next_sequence_number(), - output_index=0, - content_index=0, - text=assembled, - ) - yield ResponseCompletedEvent( - sequence_number=next_sequence_number(), - response=OpenAIResponse( - agent=context.get_agent_id_object(), - conversation=context.get_conversation_object(), - metadata={}, - temperature=0.0, - top_p=0.0, - user="user", - id=context.response_id, - created_at=int(datetime.datetime.now(datetime.timezone.utc).timestamp()), - output=[ - ResponsesAssistantMessageItemResource( - id=item_id, - status="completed", - 
content=[ItemContentOutputText(text=assembled, annotations=[])], - ) - ], - ) - ) - - return _async_stream() - - def _final_text_to_response(self, final_text: str, context: AgentRunContext) -> OpenAIResponse: - """Convert final text to a non-streaming OpenAIResponse.""" - return OpenAIResponse({ - "object": "response", - "agent": context.get_agent_id_object(), - "conversation": context.get_conversation_object(), - "metadata": {}, - "type": "message", - "role": "assistant", - "user": "", - "id": context.response_id, - "created_at": int(datetime.datetime.now(datetime.timezone.utc).timestamp()), - "output": [ - ResponsesAssistantMessageItemResource( - id=context.id_generator.generate_message_id(), - status="completed", - content=[ItemContentOutputText(text=final_text, annotations=[])], - ) - ], - "status": "completed", - }) - - async def agent_run( # pylint: disable=too-many-statements - self, context: AgentRunContext - ) -> Union[ - OpenAIResponse, - AsyncGenerator[ResponseStreamEvent, Any], - ]: - span = trace.get_current_span() - is_stream = context.request.get("stream", False) - request_input = context.request.get("input") - logger.info(f"Received user input: {request_input}") - if isinstance(request_input, str): - request_input = [{"type": "message", "role": "user", "content": request_input}] - - input_messages: List[Dict[str, Any]] = [ - {"type": "message", "role": "system", "content": SYSTEM_PROMPT} - ] - input_messages += request_input - span.set_attribute("gen_ai.conversation.id", context.conversation_id) - - current_conv = None - try: - current_conv = self.client.conversations.retrieve(conversation_id=context.conversation_id) - except Exception as e: - logger.warning(f"Failed to retrieve conversation {context.conversation_id}: {e}. 
Agent will work without prior history.") - total_input_tokens = 0 - total_output_tokens = 0 - # Tool-calling loop: keep asking the model until it returns a final answer - for n in range(self.cfg.max_turns): # prevent runaway loops - with self.tracer.start_as_current_span("SystemUtilityAgent.agent_run_iteration") as iter_span: - iter_span.set_attribute("gen_ai.request.model", self.cfg.model) - request_payload = { - "model": self.cfg.model, - "input": input_messages[-self.cfg.chat_history_length :], - "tools": TOOLS, - } - if current_conv: - request_payload["conversation"] = current_conv.id - - resp = self.client.responses.create(**request_payload) - - if current_conv: - # reset this to avoid duplicate input items in conversation - input_messages = [] - else: - # in local mode, keep accumulating input messages for current agent run - input_messages += resp.output - - iter_span.set_attribute("current_iteration", n) - iter_span.set_attribute("gen_ai.input.messages", json.dumps(input_messages, default=str)[:2048]) - usage = getattr(resp, "usage", None) or (resp.get("usage") if isinstance(resp, dict) else None) - if usage: - def uget(k): - return getattr(usage, k, None) if not isinstance(usage, dict) else usage.get(k) - input_tokens = uget("input_tokens") or uget("prompt_tokens") or 0 - output_tokens = uget("output_tokens") or uget("completion_tokens") or 0 - total_input_tokens += input_tokens - total_output_tokens += output_tokens - iter_span.set_attribute("gen_ai.usage.input_tokens", input_tokens) - iter_span.set_attribute("gen_ai.usage.output_tokens", output_tokens) - # Find tool calls; if none, print assistant text and break - called_any = False - assistant_text_chunks: List[str] = [] - for item in resp.output: - item_type = item.type - if item_type == "message": - # Try to extract assistant text - txt = extract_text(item) - if txt: - assistant_text_chunks.append(txt) - continue - # Tool call items often look like: {"type":"function_call", "name":..., 
"arguments":...} - if item_type == "function_call": - with self.tracer.start_as_current_span("SystemUtilityAgent.tool_call_execution") as tool_span: - called_any = True - name, args, call_id = extract_tool_call(item) - tool_span.set_attribute("gen_ai.tool.name", name) - tool_span.set_attribute("gen_ai.tool.type", "function") - tool_span.set_attribute("gen_ai.tool.call.id", call_id or "") - tool_span.set_attribute( - "gen_ai.tool.call.arguments", - json.dumps(args or {}, default=str)[:1024], - ) - if name not in TOOL_IMPL: - tool_result = {"supported": False, "reason": f"Unknown tool: {name}", "data": None} - tool_span.set_status(Status(StatusCode.ERROR, "Unknown tool")) - else: - try: - tool_result = TOOL_IMPL[name](**(args or {})) - tool_span.set_status(Status(StatusCode.OK)) - except Exception as e: - tool_span.record_exception(e) - tool_span.set_status(Status(StatusCode.ERROR, str(e))) - tool_result = {"supported": False, "reason": f"Tool error: {type(e).__name__}: {e}", "data": None} - # Append tool result back to the conversation - input_messages.append({ - "type": "function_call_output", - "call_id": call_id or name, - "output": json.dumps(tool_result), - }) - tool_span.set_attribute("gen_ai.tool.call.result", json.dumps(tool_result, default=str)) - if not called_any: - # No tool calls; return final assistant text - final_text = "\n".join(assistant_text_chunks).strip() - span.set_attribute("gen_ai.usage.input_tokens", total_input_tokens) - span.set_attribute("gen_ai.usage.output_tokens", total_output_tokens) - if is_stream: - return self._stream_final_text(final_text, context) - else: - return self._final_text_to_response(final_text, context) - - span.set_attribute("gen_ai.usage.input_tokens", total_input_tokens) - span.set_attribute("gen_ai.usage.output_tokens", total_output_tokens) - logger.warning(self.hit_limit_warning) - if is_stream: - return self._stream_final_text(self.hit_limit_warning, context) - else: - return 
self._final_text_to_response(self.hit_limit_warning, context) - -def extract_text(item: Any) -> str: - # Best-effort extraction across server variants - if isinstance(item, dict): - if item.get("type") == "output_text": - return item.get("text", "") or "" - if item.get("type") == "message": - content = item.get("content", []) - out = [] - for c in content: - if isinstance(c, dict) and c.get("type") == "output_text": - out.append(c.get("text", "") or "") - return "\n".join(out).strip() - return "" - - # SDK objects - t = getattr(item, "type", None) - if t == "output_text": - return getattr(item, "text", "") or "" - if t == "message": - content = getattr(item, "content", None) or [] - out = [] - for c in content: - if getattr(c, "type", None) == "output_text": - out.append(getattr(c, "text", "") or "") - return "\n".join(out).strip() - return "" - - -def extract_tool_call(item: Any): - """ - Return (name, args_dict, call_id) from tool call objects/dicts. - """ - if isinstance(item, dict): - name = item.get("name") or (item.get("function", {}) or {}).get("name") - arguments = item.get("arguments") or (item.get("function", {}) or {}).get("arguments") - call_id = item.get("call_id") or item.get("id") - - args = {} - if isinstance(arguments, dict): - args = arguments - elif isinstance(arguments, str) and arguments.strip(): - # local servers sometimes send JSON string - try: - import json - args = json.loads(arguments) - except Exception: - args = {} - return name, args, call_id - - # SDK object - name = getattr(item, "name", None) or getattr(getattr(item, "function", None), "name", None) - arguments = getattr(item, "arguments", None) or getattr(getattr(item, "function", None), "arguments", None) - call_id = getattr(item, "call_id", None) or getattr(item, "id", None) - - args = {} - if isinstance(arguments, dict): - args = arguments - elif isinstance(arguments, str) and arguments.strip(): - try: - import json - args = json.loads(arguments) - except Exception: - args = {} 
- return name, args, call_id - - -if __name__ == "__main__": - # used for local development and testing, for hosted agent deployed to Foundry, please put env vars into agent.yaml - load_dotenv() - - system_agent = SystemUtilityAgent() - system_agent.run() diff --git a/samples/python/hosted-agents/custom/system-utility-agent/requirements.txt b/samples/python/hosted-agents/custom/system-utility-agent/requirements.txt deleted file mode 100644 index f0b792a40..000000000 --- a/samples/python/hosted-agents/custom/system-utility-agent/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -azure-identity==1.25.1 -azure-ai-projects==2.0.0b2 -azure-ai-agentserver-core==1.0.0b12 -openai==2.14.0 -python-dotenv==1.0.0 -psutil==5.9.4 diff --git a/samples/python/hosted-agents/langgraph/calculator-agent/Dockerfile b/samples/python/hosted-agents/langgraph/calculator-agent/Dockerfile deleted file mode 100644 index 0cc939d9b..000000000 --- a/samples/python/hosted-agents/langgraph/calculator-agent/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY . user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/langgraph/calculator-agent/README.md b/samples/python/hosted-agents/langgraph/calculator-agent/README.md deleted file mode 100644 index 110e5eacc..000000000 --- a/samples/python/hosted-agents/langgraph/calculator-agent/README.md +++ /dev/null @@ -1,17 +0,0 @@ -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. 
- -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/python/hosted-agents/langgraph/calculator-agent/agent.yaml b/samples/python/hosted-agents/langgraph/calculator-agent/agent.yaml deleted file mode 100644 index f63e6a997..000000000 --- a/samples/python/hosted-agents/langgraph/calculator-agent/agent.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: CalculatorAgent -description: This LangGraph agent can perform arithmetic calculations such as addition, subtraction, multiplication, and division. -metadata: - example: - - role: user - content: |- - What is the size of France in square miles, divided by 27? - tags: - - example - - learning - authors: - - migu -template: - name: CalculatorAgentLG - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o-mini - name: chat diff --git a/samples/python/hosted-agents/langgraph/calculator-agent/main.py b/samples/python/hosted-agents/langgraph/calculator-agent/main.py deleted file mode 100644 index 140788acc..000000000 --- a/samples/python/hosted-agents/langgraph/calculator-agent/main.py +++ /dev/null @@ -1,154 +0,0 @@ -import os -import logging - -from langchain.chat_models import init_chat_model -from langchain_core.messages import SystemMessage, ToolMessage -from langchain_core.tools import tool -from langgraph.graph import ( - END, - START, - MessagesState, - StateGraph, -) -from typing_extensions 
import Literal -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -from azure.ai.agentserver.langgraph import from_langgraph - -logger = logging.getLogger(__name__) - - -# Define tools -@tool -def multiply(a: int, b: int) -> int: - """Multiply a and b. - - Args: - a: first int - b: second int - """ - return a * b - - -@tool -def add(a: int, b: int) -> int: - """Adds a and b. - - Args: - a: first int - b: second int - """ - return a + b - - -@tool -def divide(a: int, b: int) -> float: - """Divide a and b. - - Args: - a: first int - b: second int - """ - return a / b - - -# Augment the LLM with tools -tools = [add, multiply, divide] -tools_by_name = {tool.name: tool for tool in tools} -_llm_with_tools = None - -def llm(): - try: - deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") - credential = DefaultAzureCredential() - token_provider = get_bearer_token_provider( - credential, "https://cognitiveservices.azure.com/.default" - ) - llm = init_chat_model( - f"azure_openai:{deployment_name}", - azure_ad_token_provider=token_provider, - ) - return llm - except Exception: - logger.exception("Failed to initialize client of large language model") - raise - -def llm_with_tools(): - global _llm_with_tools - if _llm_with_tools is None: - _llm_with_tools = llm().bind_tools(tools) - return _llm_with_tools - -# Nodes -def llm_call(state: MessagesState): - """LLM decides whether to call a tool or not""" - return { - "messages": [ - llm_with_tools().invoke( - [ - SystemMessage( - content="You are a helpful assistant tasked with performing arithmetic on a set of inputs." 
- ) - ] - + state["messages"] - ) - ] - } - - -def tool_node(state: dict): - """Performs the tool call""" - - result = [] - for tool_call in state["messages"][-1].tool_calls: - tool = tools_by_name[tool_call["name"]] - observation = tool.invoke(tool_call["args"]) - result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"])) - return {"messages": result} - - -# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call -def should_continue(state: MessagesState) -> Literal["environment", END]: - """Decide if we should continue the loop or stop based upon whether the LLM made a tool call""" - - messages = state["messages"] - last_message = messages[-1] - # If the LLM makes a tool call, then perform an action - if last_message.tool_calls: - return "Action" - # Otherwise, we stop (reply to the user) - return END - - -# Build workflow -def build_agent() -> "StateGraph": - agent_builder = StateGraph(MessagesState) - - # Add nodes - agent_builder.add_node("llm_call", llm_call) - agent_builder.add_node("environment", tool_node) - - # Add edges to connect nodes - agent_builder.add_edge(START, "llm_call") - agent_builder.add_conditional_edges( - "llm_call", - should_continue, - { - "Action": "environment", - END: END, - }, - ) - agent_builder.add_edge("environment", "llm_call") - - # Compile the agent - return agent_builder.compile() - -# Build workflow and run agent -if __name__ == "__main__": - try: - agent = build_agent() - adapter = from_langgraph(agent) - adapter.run() - except Exception: - logger.exception("Calculator Agent encountered an error while running") - raise diff --git a/samples/python/hosted-agents/langgraph/calculator-agent/requirements.txt b/samples/python/hosted-agents/langgraph/calculator-agent/requirements.txt deleted file mode 100644 index c96237247..000000000 --- a/samples/python/hosted-agents/langgraph/calculator-agent/requirements.txt +++ /dev/null @@ -1 +0,0 @@ 
-azure-ai-agentserver-langgraph==1.0.0b12 diff --git a/samples/python/hosted-agents/langgraph/human-in-the-loop/Dockerfile b/samples/python/hosted-agents/langgraph/human-in-the-loop/Dockerfile deleted file mode 100644 index 0cc939d9b..000000000 --- a/samples/python/hosted-agents/langgraph/human-in-the-loop/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY . user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/langgraph/human-in-the-loop/README.md b/samples/python/hosted-agents/langgraph/human-in-the-loop/README.md deleted file mode 100644 index 5b3d105f9..000000000 --- a/samples/python/hosted-agents/langgraph/human-in-the-loop/README.md +++ /dev/null @@ -1,269 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [LangGraph](https://docs.langchain.com/oss/python/langgraph/workflows-agents). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. 
By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a LangGraph agent with **human-in-the-loop capabilities** that can interrupt execution to ask for human input when needed, host it using the -[Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-langgraph/), -and deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. - -## How It Works - -### Human-in-the-Loop Integration - -In [main.py](main.py), the agent is created using LangGraph's `StateGraph` and includes a custom `AskHuman` tool that uses the `interrupt()` function to pause execution and wait for human feedback. The key components are: - -- **LangGraph Agent**: An AI agent that can intelligently decide when to ask humans for input during task execution -- **Human Interrupt Mechanism**: Uses LangGraph's `interrupt()` function to pause execution and wait for human feedback -- **Conditional Routing**: The agent determines whether to execute tools, ask for human input, or complete the task - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-langgraph/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. 
- -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. -The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Validate the deployed Agent -```python -# Before running the sample: -# pip install --pre azure-ai-projects>=2.0.0b4 - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -import json - -foundry_account = "" -foundry_project = "" -agent_name = "" - -project_endpoint = f"https://{foundry_account}.services.ai.azure.com/api/projects/{foundry_project}" - -project_client = AIProjectClient( - endpoint=project_endpoint, - credential=DefaultAzureCredential(), -) - -# Get an existing agent -agent = project_client.agents.get(agent_name=agent_name) -print(f"Retrieved agent: {agent.name}") - -openai_client = project_client.get_openai_client() -conversation = openai_client.conversations.create() - -response = openai_client.responses.create( - input=[{"role": "user", "content": "Ask the user where they are, then look up the weather there."}], - conversation=conversation.id, - extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, -) - -call_id = "" -for item in response.output: - if item.type == "function_call" and item.name == "__hosted_agent_adapter_hitl__": - print(f"Agent ask: {item.arguments}") - call_id = item.call_id - -if not call_id: - print(f"No human input is required, output: {response.output_text}") -else: - human_response = {"resume": "San Francisco"} - response = openai_client.responses.create( - input=[ - { - "type": "function_call_output", - "call_id": call_id, - "output": json.dumps(human_response) - }], - conversation=conversation.id, - extra_body={"agent_reference": 
{"name": agent.name, "type": "agent_reference"}}, - ) - print(f"Human response: {human_response['resume']}") - print(f"Agent response: {response.output_text}") -``` - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure OpenAI Service** - - Endpoint configured - - Chat model deployed (e.g., `gpt-4o-mini` or `gpt-4`) - - Note your endpoint URL and deployment name - -2. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -3. **Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (required) - -This sample loads environment variables from a local `.env` file if present. - -```powershell -# Replace with your actual values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent locally - -#### Initial Request (Triggering Human Input) - -Send a request that will cause the agent to ask for human input: - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "Ask the user where they are, then look up the weather there." 
- stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "Ask the user where they are, then look up the weather there.", "stream": false}' -``` - -**Response Structure:** - -The agent will respond with an interrupt request: - -```json -{ - "conversation": { - "id": "conv_abc123..." - }, - "output": [ - { - "type": "function_call", - "name": "__hosted_agent_adapter_interrupt__", - "call_id": "call_xyz789...", - "arguments": "{\"question\": \"Where are you located?\"}" - } - ] -} -``` - -#### Providing Human Feedback - -Resume the conversation by providing the human's response: - -**PowerShell (Windows):** -```powershell -$body = @{ - input = @( - @{ - type = "function_call_output" - call_id = "call_xyz789..." - output = '{"resume": "San Francisco"}' - } - ) - stream = $false - conversation = @{ - id = "conv_abc123..." - } -} | ConvertTo-Json -Depth 4 - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{ - "input": [ - { - "type": "function_call_output", - "call_id": "call_xyz789...", - "output": "{\"resume\": \"San Francisco\"}" - } - ], - "stream": false, - "conversation": { - "id": "conv_abc123..." - } - }' -``` - -**Final Response:** - -The agent will continue execution and provide the final result: - -```json -{ - "conversation": { - "id": "conv_abc123..." - }, - "output": [ - { - "type": "message", - "role": "assistant", - "content": "I looked up the weather in San Francisco. Result: It's sunny in San Francisco." 
- } - ] -} -``` - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. - -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. diff --git a/samples/python/hosted-agents/langgraph/human-in-the-loop/agent.yaml b/samples/python/hosted-agents/langgraph/human-in-the-loop/agent.yaml deleted file mode 100644 index 3c25e50c2..000000000 --- a/samples/python/hosted-agents/langgraph/human-in-the-loop/agent.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: HumanInTheLoopAgent -description: This LangGraph agent demonstrates human-in-the-loop capabilities. -metadata: - example: - - role: user - content: |- - Ask the user where they are, then look up the weather there. 
- tags: - - example - - learning - authors: - - junanchen -template: - name: HumanInTheLoopAgentLG - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: "{{chat}}" -resources: - - kind: model - id: gpt-4o - name: chat diff --git a/samples/python/hosted-agents/langgraph/human-in-the-loop/main.py b/samples/python/hosted-agents/langgraph/human-in-the-loop/main.py deleted file mode 100644 index ddbea4cc7..000000000 --- a/samples/python/hosted-agents/langgraph/human-in-the-loop/main.py +++ /dev/null @@ -1,191 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -""" -Human-in-the-Loop Agent Example - -This sample demonstrates how to create a LangGraph agent that can interrupt -execution to ask for human input when needed. The agent uses Azure OpenAI -and includes a custom tool for asking human questions. 
-""" - -import os - -from pydantic import BaseModel - -from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from langchain.chat_models import init_chat_model -from langchain_core.messages import ToolMessage -from langchain_core.tools import tool -from langgraph.checkpoint.memory import InMemorySaver -from langgraph.graph import END, START, MessagesState, StateGraph -from langgraph.prebuilt import ToolNode -from langgraph.types import interrupt - -from azure.ai.agentserver.langgraph import from_langgraph - - -# ============================================================================= -# Model Initialization -# ============================================================================= - -def initialize_llm(): - """Initialize the language model with Azure OpenAI credentials.""" - deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") - return init_chat_model( - f"azure_openai:{deployment_name}", - azure_ad_token_provider=get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" - ) - ) - - -llm = initialize_llm() - -# ============================================================================= -# Tools and Models -# ============================================================================= - -@tool -def search(query: str) -> str: - """ - Call to search the web for information. - - Args: - query: The search query string - - Returns: - Search results as a string - """ - # This is a placeholder for the actual implementation - return f"I looked up: {query}. Result: It's sunny in San Francisco." 
- - -class AskHuman(BaseModel): - """Schema for asking the human a question.""" - question: str - - -# Initialize tools and bind to model -tools = [search] -tool_node = ToolNode(tools) -model = llm.bind_tools(tools + [AskHuman]) - - -# ============================================================================= -# Graph Nodes -# ============================================================================= - -def call_model(state: MessagesState) -> dict: - """ - Call the language model with the current conversation state. - - Args: - state: The current messages state - - Returns: - Dictionary with the model's response message - """ - messages = state["messages"] - response = model.invoke(messages) - return {"messages": [response]} - - -def ask_human(state: MessagesState) -> dict: - """ - Interrupt execution to ask the human for input. - - Args: - state: The current messages state - - Returns: - Dictionary with the human's response as a tool message - """ - last_message = state["messages"][-1] - tool_call_id = last_message.tool_calls[0]["id"] - ask = AskHuman.model_validate(last_message.tool_calls[0]["args"]) - - # Interrupt and wait for human input - location = interrupt(ask.question) - - tool_message = ToolMessage(tool_call_id=tool_call_id, content=location) - return {"messages": [tool_message]} - - -# ============================================================================= -# Graph Logic -# ============================================================================= - -def should_continue(state: MessagesState) -> str: - """ - Determine the next step in the graph based on the last message. 
- - Args: - state: The current messages state - - Returns: - The name of the next node to execute, or END to finish - """ - messages = state["messages"] - last_message = messages[-1] - - # If there's no function call, we're done - if not last_message.tool_calls: - return END - - # If asking for human input, route to ask_human node - if last_message.tool_calls[0]["name"] == "AskHuman": - return "ask_human" - - # Otherwise, execute the tool call - return "action" - - -# ============================================================================= -# Graph Construction -# ============================================================================= - -def build_graph() -> StateGraph: - """ - Build and compile the LangGraph workflow. - - Returns: - Compiled StateGraph with checkpointing enabled - """ - workflow = StateGraph(MessagesState) - - # Add nodes - workflow.add_node("agent", call_model) - workflow.add_node("action", tool_node) - workflow.add_node("ask_human", ask_human) - - # Set entry point - workflow.add_edge(START, "agent") - - # Add conditional routing from agent - workflow.add_conditional_edges( - "agent", - should_continue, - path_map=["ask_human", "action", END], - ) - - # Add edges back to agent - workflow.add_edge("action", "agent") - workflow.add_edge("ask_human", "agent") - - # Compile with memory checkpointer - memory = InMemorySaver() - return workflow.compile(checkpointer=memory) - - -app = build_graph() - - -# ============================================================================= -# Main Entry Point -# ============================================================================= - -if __name__ == "__main__": - adapter = from_langgraph(app) - adapter.run() - diff --git a/samples/python/hosted-agents/langgraph/human-in-the-loop/requirements.txt b/samples/python/hosted-agents/langgraph/human-in-the-loop/requirements.txt deleted file mode 100644 index c96237247..000000000 --- 
a/samples/python/hosted-agents/langgraph/human-in-the-loop/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-langgraph==1.0.0b12 diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/Dockerfile b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/Dockerfile deleted file mode 100644 index 0cc939d9b..000000000 --- a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.12-slim - -WORKDIR /app - -COPY . user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/README.md b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/README.md deleted file mode 100644 index cc3898d9b..000000000 --- a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/README.md +++ /dev/null @@ -1,146 +0,0 @@ -**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [LangGraph](https://docs.langchain.com/oss/python/langgraph/workflows-agents). - -Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. 
By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. - -Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. - -Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. - -# What this sample demonstrates - -This sample demonstrates how to build a LangGraph react agent that can use **Foundry tools** -(for example, code interpreter and MCP tools), host it using the -[Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-langgraph/), -and deploy it to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. - -## How It Works - -### Foundry tools integration - -In [main.py](main.py), the agent is created using `langchain.agents.create_agent` and is configured with -`use_foundry_tools`. The middleware enables tool usage via Foundry-supported tool types: - -- `code_interpreter` (foundry configured tools) -- `mcp` (connected mcp tool, configured with a Foundry project connection id) - -### Agent Hosting - -The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-langgraph/), -which provisions a REST API endpoint compatible with the OpenAI Responses protocol. This allows interaction with the agent using OpenAI Responses compatible clients. - -### Agent Deployment - -The hosted agent can be seamlessly deployed to Microsoft Foundry using the Azure Developer CLI [ai agent](https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli#create-a-hosted-agent) extension. 
-The extension builds a container image into Azure Container Registry (ACR), and creates a hosted agent version and deployment on Microsoft Foundry. - -## Running the Agent Locally - -### Prerequisites - -Before running this sample, ensure you have: - -1. **Azure OpenAI Service** - - Endpoint configured - - Chat model deployed (e.g., `gpt-4o-mini` or `gpt-4`) - - Note your endpoint URL and deployment name - -2. **Azure AI Foundry Project** - - Project created in [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/what-is-foundry?view=foundry#microsoft-foundry-portals) - - Add 'Microsoft Learn' MCP from foundry tool catalog. - ![microsoft_learn](microsoft_learn.png) - -3. **Azure CLI** - - Installed and authenticated - - Run `az login` and verify with `az account show` - -4. **Python 3.10 or higher** - - Verify your version: `python --version` - - If you have Python 3.9 or older, install a newer version: - - Windows: `winget install Python.Python.3.12` - - macOS: `brew install python@3.12` - - Linux: Use your package manager - -### Environment Variables - -Set the following environment variables: - -- `AZURE_OPENAI_ENDPOINT` - Your Azure OpenAI endpoint URL (required) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (required) -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint (required) -- `AZURE_AI_PROJECT_TOOL_CONNECTION_ID` - Foundry project connection id used to configure the `mcp` tool (required) - -This sample loads environment variables from a local `.env` file if present. - -**Finding your tool connection id** (portal names may vary): -1. Go to [Azure AI Foundry portal](https://ai.azure.com) -2. Navigate to your project -> Build -> Tools -3. Find your connected MCP tool (e.g., "Microsoft Learn") -4. 
Copy your tool's name and set it as `AZURE_AI_PROJECT_TOOL_CONNECTION_ID` - -```powershell -# Replace with your actual values -$env:AZURE_OPENAI_ENDPOINT="https://your-openai-resource.openai.azure.com/" -$env:AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" -$env:AZURE_AI_PROJECT_ENDPOINT="https://{resource}.services.ai.azure.com/api/projects/{project-name}" -$env:AZURE_AI_PROJECT_TOOL_CONNECTION_ID="" -``` - -### Installing Dependencies - -Install the required Python dependencies using pip: - -```powershell -pip install -r requirements.txt -``` - -### Running the Sample - -To run the agent, execute the following command in your terminal: - -```powershell -python main.py -``` - -This will start the hosted agent locally on `http://localhost:8088/`. - -### Interacting with the Agent - -**PowerShell (Windows):** -```powershell -$body = @{ - input = "use the python tool to calculate what is 4 * 3.82. and then find its square root and then find the square root of that result" - stream = $false -} | ConvertTo-Json - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" -``` - -**Bash/curl (Linux/macOS):** -```bash -curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ - -d '{"input": "use the python tool to calculate what is 4 * 3.82. and then find its square root and then find the square root of that result","stream":false}' -``` - -The agent may use Foundry tools (for example `web_search_preview` and/or `mcp`) as needed to answer. - -### Deploying the Agent to Microsoft Foundry - -To deploy your agent to Microsoft Foundry, follow the comprehensive deployment guide at https://learn.microsoft.com/en-us/azure/ai-foundry/agents/concepts/hosted-agents?view=foundry&tabs=cli - -## Troubleshooting - -### Images built on Apple Silicon or other ARM64 machines do not work on our service - -We **recommend using `azd` cloud build**, which always builds images with the correct architecture. 
- -If you choose to **build locally**, and your machine is **not `linux/amd64`** (for example, an Apple Silicon Mac), the image will **not be compatible with our service**, causing runtime failures. - -**Fix for local builds** - -Use this command to build the image locally: - -```shell -docker build --platform=linux/amd64 -t image . -``` - -This forces the image to be built for the required `amd64` architecture. \ No newline at end of file diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/agent.yaml b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/agent.yaml deleted file mode 100644 index bb9837909..000000000 --- a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/agent.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: FoundryToolsReactAgent -description: This LangGraph agent uses Foundry tools to perform tasks such as interpreting python code. -metadata: - example: - - role: user - content: |- - use the python tool to calculate what is 4 * 3.82. 
- and then find its square root and then find the square root of that result - tags: - - example - - learning - authors: - - junanchen -template: - name: FoundryToolsReactAgentLG - kind: hosted - protocols: - - protocol: responses - version: v1 - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: "{{chat}}" - - name: AZURE_AI_PROJECT_TOOL_CONNECTION_ID - value: "" -resources: - - kind: model - id: gpt-4o - name: chat diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/main.py b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/main.py deleted file mode 100644 index 8493d32bc..000000000 --- a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/main.py +++ /dev/null @@ -1,35 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -import os - -from azure.ai.agentserver.langgraph import from_langgraph -from azure.ai.agentserver.langgraph.tools import use_foundry_tools -from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from langchain.agents import create_agent -from langchain.chat_models import init_chat_model -from langgraph.checkpoint.memory import MemorySaver - -deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") -model = init_chat_model( - f"azure_openai:{deployment_name}", - azure_ad_token_provider=get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" - ) -) - -foundry_tools = [ - { - # test prompt: - # use the python tool to calculate what is 4 * 3.82. 
and then find its square root and then find the square root of that result - "type": "code_interpreter" - } -] -if project_tool_connection_id := os.environ.get("AZURE_AI_PROJECT_TOOL_CONNECTION_ID"): - foundry_tools.append({"type": "mcp", "project_connection_id": project_tool_connection_id}) - -agent = create_agent(model, checkpointer=MemorySaver(), middleware=[use_foundry_tools(foundry_tools)]) - -if __name__ == "__main__": - # host the langgraph agent - from_langgraph(agent).run() diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/microsoft_learn.png b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/microsoft_learn.png deleted file mode 100644 index 0a8ebaafb..000000000 Binary files a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/microsoft_learn.png and /dev/null differ diff --git a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/requirements.txt b/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/requirements.txt deleted file mode 100644 index 251a1a754..000000000 --- a/samples/python/hosted-agents/langgraph/react-agent-with-foundry-tools/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -azure-ai-agentserver-langgraph==1.0.0b12 \ No newline at end of file diff --git a/samples/python/toolbox/README.md b/samples/python/toolbox/README.md new file mode 100644 index 000000000..5f43e246b --- /dev/null +++ b/samples/python/toolbox/README.md @@ -0,0 +1,271 @@ + +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. 
 Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md).
+
+Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples is solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct.
+
+Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates.
+
+Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output.
+
+
+# Python Toolbox Samples
+
+Python samples for running Microsoft Foundry agents connected to a **toolbox in Foundry** via the
+MCP Streamable HTTP protocol. Four framework options are provided — pick the one that
+matches your existing stack.
+
+## Which sample should I use?
+ +| I want to… | Use | +|-------------|-----| +| Get started quickly with full `azd` deployment (infra + deploy) and GitHub toolbox | [`azd/`](./azd/) | +| Write a LangGraph agent with maximum flexibility | [`langgraph/`](./langgraph/) | +| Use the Microsoft Agent Framework SDK without LangChain/LangGraph | [`maf/`](./maf/) | +| Use GitHub Copilot SDK combined with local skills and toolbox tools | [`copilot-sdk/`](./copilot-sdk/) | + +## Sample Comparison + +| Capability | `azd/` | `langgraph/` | `maf/` | `copilot-sdk/` | +|-----------|:---:|:---:|:---:|:---:| +| Multi-turn conversation | ✅ | ✅ | ✅ | ✅ | +| Streaming (SSE) | ✅ | ✅ | ✅ | ✅ | +| OAuth consent handling | ✅ | ✅ | ✅ | ✅ | +| Tool schema sanitization | ✅ | ✅ | ✅ | ✅ | +| Tracing | ✅ | ✅ | ✅ | ✅ | +| SDK | LangGraph | LangGraph | Microsoft Agent Framework | GitHub Copilot SDK | + +All samples: +- Serve the **Responses Protocol** on port `8088` +- Authenticate to the toolbox endpoint using `DefaultAzureCredential` (bearer token, auto-refreshed) +- Read `FOUNDRY_PROJECT_ENDPOINT`, `FOUNDRY_AGENT_TOOLBOX_ENDPOINT`, and `AZURE_AI_MODEL_DEPLOYMENT_NAME` from the environment +- Send the `Foundry-Features: Toolboxes=V1Preview` header on every MCP request (required — requests without it are rejected) +- Support local dev via a `.env` file (copy `.env.example` → `.env` and fill in values) + +## Supported Toolbox Tools + +Canonical tool and auth type definitions are documented in +[SUPPORTED_TOOLBOX_TOOLS.md](./SUPPORTED_TOOLBOX_TOOLS.md). + +For runnable SDK examples of creating every tool type (MCP, OpenAPI, Azure AI Search, +Bing, etc.), see [sample_toolboxes_crud.py](./sample_toolboxes_crud.py). 
+ +## Prerequisites (all samples) + +- Python 3.12+ +- A [Microsoft Foundry](https://ai.azure.com) project +- A toolbox already created in that project — see [`sample_toolboxes_crud.py`](./sample_toolboxes_crud.py) to create one + (**The `azd/` sample creates the toolbox automatically during `azd deploy` — no pre-created toolbox needed**) +- Azure CLI installed and logged in: + + ```bash + az login + ``` + +## Getting Your `FOUNDRY_PROJECT_ENDPOINT` + +1. Go to [ai.azure.com](https://ai.azure.com) and open your project. +2. Navigate to **Settings** → **Project details**. +3. Copy the **Project endpoint** — it looks like: + + ``` + https://.services.ai.azure.com/api/projects/ + ``` + +## What is a Toolbox? + +A **Toolbox** is a named collection of tools (MCP, OpenAPI, Azure AI Search, Web Search, +File Search, Code Interpreter, A2A) hosted in your Microsoft Foundry project. Agents +connect to a toolbox via its MCP endpoint and dynamically discover available tools at startup. + +The toolbox MCP endpoint URL supports two forms: + +``` +# Latest version: +https://.services.ai.azure.com/api/projects//toolboxes//mcp?api-version=v1 + +# Pinned to a specific version: +https://.services.ai.azure.com/api/projects//toolboxes//versions//mcp?api-version=v1 +``` + +> **Note:** The `?api-version=v1` query parameter is **required**. Requests without it return HTTP 400. + +Use [`sample_toolboxes_crud.py`](./sample_toolboxes_crud.py) to create a toolbox before running any of the agent samples. + +## Troubleshooting: Validating a Toolbox Endpoint + +After creating a toolbox, confirm that the MCP endpoint works: + +1. Call `tools/list` — should return the tool list without errors. +2. Call `tools/call` on a specific tool — confirms end-to-end MCP protocol behavior. + +The full MCP endpoint URL has the form: + +``` +https://.services.ai.azure.com/api/projects//toolboxes//versions//mcp?api-version=v1 +``` + +> **Note:** The `?api-version=v1` query parameter is required. 
 Omitting it returns HTTP 400.
+
+## Troubleshooting Multi-Tool Toolbox Creation
+
+When creating a toolbox with multiple tools, Foundry validates tool identity.
+
+### Symptom
+
+You may see this error when combining multiple tools that do not expose a unique identifier field:
+
+`(invalid_payload) Multiple tools without identifiers found. All tools except a single tool must have unique identifiers ('name' or 'server_label').`
+
+### Why This Happens
+
+- Some tool types do not accept `name` or `server_label` in toolbox definitions (for example `file_search`, `web_search`, `azure_ai_search`, `code_interpreter`).
+- Foundry allows only one such unnamed tool in a single toolbox payload.
+
+### Fix Pattern
+
+- Keep at most one unnamed tool per toolbox.
+- If you need multiple tools in one toolbox, add tools that provide identifiers, such as `MCPTool` with a unique `server_label`.
+
+The combinations in `sample_toolboxes_crud.py` use this pattern:
+
+- `multi-filesearch-mcp`: `FileSearchTool` + `MCPTool(server_label=...)`
+- `multi-websearch-mcp`: `WebSearchTool` + `MCPTool(server_label=...)`
+- `multi-aisearch-mcp`: `AzureAISearchTool` + `MCPTool(server_label=...)`
+
+### Quick Validation
+
+After creating a toolbox sample, validate the MCP endpoint with:
+
+1. `tools/list`
+2. `tools/call`
+
+This confirms both toolbox provisioning and MCP protocol behavior end-to-end.
+
+## Source Data Patterns by Tool Type for Citation
+
+Different toolbox tools return citation/source data in different shapes inside the `tools/call` response.
+
+### Azure AI Search
+
+Citation data is in `result.structuredContent.documents[]`:
+
+| Field | Description |
+|-------|-------------|
+| `title` | Display label for the citation |
+| `url` | Clickable source link |
+| `id` | Stable source identifier |
+| `score` | Relevance score |
+| `knowledgeSourceIndex` | Knowledge source grouping/index |
+
+- `result.structuredContent.summary` — explains retrieval outcome (e.g. 
number of retrieved docs) +- `result.structuredContent.additionalProperties.num_docs_retrieved` — useful for diagnostics +- `result.content[]` — tool text output; this is response text, **not** the authoritative citation list + +### File Search + +Chunk metadata is embedded in the `tools/call` response as `〔index† filename† file_id〕` markers inside +`result.content[].resource.text`. Full metadata for each matched chunk is in the `_meta` block +of the same resource item: + +| Field | Location | Description | +|-------|----------|-------------| +| `title` | `resource._meta.title` | Source file name | +| `file_id` | `resource._meta.file_id` | Stable identifier for the source file | +| `document_chunk_id` | `resource._meta.document_chunk_id` | Identifier for the specific chunk | +| `score` | `resource._meta.score` | Relevance score for the chunk | + +Example `tools/call` response: + +```json +{ + "jsonrpc": "2.0", + "id": "fs-call-1", + "result": { + "content": [ + { + "type": "resource", + "resource": { + "uri": "file://assistant-tvfqncbtruyffxkfewenyy/", + "_meta": { + "title": "mcp-test-file.txt", + "file_id": "assistant-TVfQnCBtRuyfFxkfeweNYY", + "document_chunk_id": "f7327b7f-5ed0-43c6-9bee-e8e9552afcb5", + "score": 0.03333333507180214 + }, + "text": "# \u30100\u2020mcp-test-file.txt\u2020assistant-TVfQnCBtRuyfFxkfeweNYY\u3011\nContent Snippet:\nAzure OpenAI Service is a cloud service..." + } + } + ] + } +} +``` + +Use the `_meta` fields to build citation links or deep-link back to the source file. + +### Web Search + +The response is a single resource content item with the synthesized answer. URL citations are in +`result.content[].resource._meta.annotations[]`. 
+ +| Field | Location | Description | +|-------|----------|-------------| +| `text` | `resource.text` | Synthesized answer with inline Markdown source links | +| `type` | `_meta.annotations[].type` | Always `"url_citation"` | +| `url` | `_meta.annotations[].url` | Source URL | +| `title` | `_meta.annotations[].title` | Source page title | +| `start_index` / `end_index` | `_meta.annotations[].start_index` / `end_index` | Character offsets into `resource.text` where the citation appears | +| `query` | `_meta.action.query` | The search query the model issued | + +Example `tools/call` response: + +```json +{ + "jsonrpc": "2.0", + "id": "ws-call-1", + "result": { + "_meta": { + "tool_configuration": { + "type": "web_search", + "name": "web-search-default" + } + }, + "content": [ + { + "type": "resource", + "resource": { + "uri": "about:web-search-answer", + "mimeType": "text/plain", + "text": "Here are the latest updates...\n\n- **GPT-image-1 Release** ([serverless-solutions.com](https://...))." + }, + "annotations": { "audience": ["assistant"] }, + "_meta": { + "annotations": [ + { + "type": "url_citation", + "url": "https://www.serverless-solutions.com/blog/...", + "title": "Microsoft Expands Azure AI Foundry with Powerful New OpenAI Models", + "start_index": 741, + "end_index": 879 + } + ], + "action": { + "type": "search", + "query": "Azure OpenAI service updates 2026", + "queries": ["Azure OpenAI service updates 2026"] + }, + "response_id": "resp_001fcebcc300..." + } + } + ], + "isError": false + } +} +``` + +## Contributing + +This project welcomes contributions and suggestions. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). 
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. diff --git a/samples/python/toolbox/SUPPORTED_TOOLBOX_TOOLS.md b/samples/python/toolbox/SUPPORTED_TOOLBOX_TOOLS.md new file mode 100644 index 000000000..e6d2ea302 --- /dev/null +++ b/samples/python/toolbox/SUPPORTED_TOOLBOX_TOOLS.md @@ -0,0 +1,143 @@ +# Supported Toolbox Tools + +Use this file as the single source of truth for toolbox tool support and authentication across Python toolbox samples. + +## Tool Support Matrix + +| Toolbox Tool Type | Supported Auth | +|-------------------|----------------| +| **MCP Tool** | Key-based, OAuth (identity passthrough), Entra ID (agent identity), Entra ID (managed identity) | +| **File Search Tool** | N/A | +| **OpenAPI Tool** | Anonymous, Key-based, Entra ID (managed identity on Foundry project) | +| **Azure AI Search Tool** | Key-based, Entra ID (agent identity), Entra ID (managed identity) | +| **Web Search Tool** | Anonymous, Key-based (domain-restricted via Bing Custom Search) | +| **Code Interpreter Tool** | N/A | +| **A2A Tool** (preview) | Key-based, OAuth (identity passthrough), Entra ID | + +## Detailed Tool Definitions + +### MCP Tool + +Connects to a remote Model Context Protocol server. 
+ +| Parameter | Required | Description | +|-----------|----------|-------------| +| `server_label` | Yes | Unique label for this MCP server within the toolbox | +| `server_url` | Yes | HTTPS URL of the MCP server | +| `project_connection_id` | Yes | Project connection for auth (key, OAuth, Entra) | +| `allowed_tools` | No | List of tool names to expose (filters the full set) | +| `headers` | No | Extra HTTP headers sent with every MCP request | + +**Auth options:** + +| Mode | User context preserved | How to configure | +|------|------------------------|------------------| +| Key-based | No | Set `project_connection_id` to a Custom Keys connection holding the API key or PAT | +| OAuth identity passthrough | Yes | Set `project_connection_id` to an OAuth-type connection. At runtime the agent returns an `oauth_consent_request` with a consent URL | +| Entra ID - agent identity (preview) | No | Assign required roles to the agent identity on the underlying service | +| Entra ID - project managed identity | No | Assign required roles to the project managed identity | + +### File Search Tool + +Searches indexed files/documents via project vector stores. + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `vector_store_ids` | Yes | One or more vector store IDs to search | + +Auth: N/A. + +### OpenAPI Tool + +Calls HTTP APIs described by an OpenAPI 3.0/3.1 specification. 
+ +| Parameter | Required | Description | +|-----------|----------|-------------| +| `openapi.name` | Yes | Logical name for the tool | +| `openapi.spec` | Yes | Inline OpenAPI spec (dict) or a reference | +| `openapi.auth` | Yes | OpenAPI auth details object | + +**Auth options:** + +| Mode | How to configure | +|------|------------------| +| Anonymous | `OpenApiAnonymousAuthDetails()` | +| Key-based | Use project connection-backed OpenAPI auth details | +| Entra ID - managed identity (Foundry project) | Use managed auth details backed by the project managed identity | + +### Azure AI Search Tool + +Grounds responses in Azure AI Search indexes. + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `project_connection_id` | Yes | Resource ID of the project connection to Azure AI Search | +| `index_name` | Yes | Name of the search index (case-sensitive) | +| `top_k` | No | Number of results to return (default: 5) | +| `query_type` | No | `simple`, `vector`, `semantic`, `vector_simple_hybrid`, or `vector_semantic_hybrid` | +| `filter` | No | OData filter applied to every query | + +**Auth options:** + +| Mode | How to configure | +|------|------------------| +| Key-based | Store the API key in the project connection | +| Entra ID - project managed identity | Assign Search Index Data Contributor and Search Service Contributor roles | +| Entra ID - agent identity | Assign the same roles to the agent identity | + +### Web Search Tool + +Enables web grounding (Bing search). Two modes: anonymous (general Bing) or domain-restricted via Bing Custom Search. 
+ +| Parameter | Required | Description | +|-----------|----------|-------------| +| `custom_search_configuration` | No | Restrict to specific domains via Bing Custom Search | + +**Domain-restricted search** (`custom_search_configuration`) requires a `GroundingWithCustomSearch` connection: + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `project_connection_id` | Yes | Connection name (references a `GroundingWithCustomSearch` connection) | +| `instance_name` | Yes | Name of the Bing Custom Search instance (e.g., `agentdoc`) | + +**Connection requirements** for Bing Custom Search: + +| Field | Value | +|-------|-------| +| `category` | `GroundingWithCustomSearch` | +| `authType` | `ApiKey` | +| `target` | `https://api.bing.microsoft.com/` | +| `credentials.key` | Bing API key | +| `metadata.type` | `bing_custom_search_preview` | +| `metadata.ApiType` | `Azure` | +| `metadata.ResourceId` | ARM resource ID of the `Microsoft.Bing/accounts` resource | + +> **Note:** Web Search tools only return results when called through the Responses API (which injects APIM model headers). Direct MCP `tools/call` also works via the MCP gateway when a valid model deployment exists on the project. + +### Code Interpreter Tool + +Runs Python code in a sandboxed environment for analysis, math, and chart generation. + +No required parameters. Auth: N/A. + +### A2A Tool (preview) + +Delegates tasks to another agent via the Agent-to-Agent protocol. The remote agent must expose an A2A endpoint with an agent card at `/.well-known/agent.json`. Tools are auto-discovered from the agent card's skills. 
+ +| Parameter | Required | Description | +|-----------|----------|-------------| +| `name` | Yes | Logical name for the sub-agent tool | +| `project_connection_id` | Yes | Connection name pointing to the remote agent (`RemoteA2A` category) | +| `base_url` | No | Override the base URL from the connection (defaults to connection target) | +| `agent_card_path` | No | Override the agent card path (defaults to `/.well-known/agent.json`) | + +The connection must use `category: RemoteA2A` and `metadata.type: custom_A2A`. + +The MCP tool name is auto-generated as `{connection_name}.SendMessage` (e.g., `helloworld.SendMessage`). + +Auth options are the same as MCP Tool. + +## Notes + +- All tool types are served through the same Foundry MCP gateway endpoint. +- Use [sample_toolboxes_crud.py](./sample_toolboxes_crud.py) for runnable SDK examples. diff --git a/samples/python/toolbox/sample_toolboxes_crud.py b/samples/python/toolbox/sample_toolboxes_crud.py new file mode 100644 index 000000000..56a122c3e --- /dev/null +++ b/samples/python/toolbox/sample_toolboxes_crud.py @@ -0,0 +1,627 @@ +""" +Comprehensive SDK samples for toolbox in Microsoft Foundry CRUD operations. + +Tested with azure-ai-projects 2.1.0a20260408001. + +API: client.beta.toolboxes + - create_version(toolbox_name, tools=[], description=..., metadata=..., policies=...) 
+ - get(toolbox_name) -> ToolboxObject (id, name, default_version) + - get_version(toolbox_name, ver) -> ToolboxVersionObject + - list() -> ItemPaged[ToolboxObject] + - list_versions(toolbox_name) -> ItemPaged[ToolboxVersionObject] + - update(toolbox_name, default_version=ver) -> promote a version to default + - delete_version(toolbox_name, ver) + - delete(toolbox_name) + +All tool types demonstrated: + - MCPTool (no-auth, key-auth, OAuth, Entra token passthrough, filtered) + - OpenApiTool (anonymous, project-connection auth) + - A2APreviewTool (agent-to-agent) + - FileSearchTool + - AzureAISearchTool + - WebSearchTool / BingCustomSearchConfiguration + - BingGroundingTool + - CodeInterpreterTool + - Multi-tool combinations + +Prerequisites: + pip install azure-identity python-dotenv httpx + pip install azure-ai-projects --pre + Set environment variables in .env (see bottom of file). +""" + +import json +import os +import sys +import traceback +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import ( + MCPTool, + FileSearchTool, + OpenApiTool, + A2APreviewTool, + AzureAISearchTool, + AzureAISearchToolResource, + AISearchIndexResource, + CodeInterpreterTool, + OpenApiAnonymousAuthDetails, + OpenApiProjectConnectionAuthDetails, + OpenApiProjectConnectionSecurityScheme, + WebSearchTool, + BingCustomSearchConfiguration, +) + +load_dotenv() + +ENDPOINT = os.environ["FOUNDRY_PROJECT_ENDPOINT"] + +credential = DefaultAzureCredential() +client = AIProjectClient(endpoint=ENDPOINT, credential=credential) + + +# ═══════════════════════════════════════════════════════════════════════════ +# Helper: MCP tools/list + tools/call via REST (validates toolbox is live) +# ═══════════════════════════════════════════════════════════════════════════ +def _toolbox_mcp_endpoint(toolbox_name: str) -> str: + """Build the MCP gateway URL for a toolbox.""" + return 
f"{ENDPOINT}/toolboxes/{toolbox_name}/mcp?api-version=v1" + + +_MCP_SCOPE = "https://ai.azure.com/.default" +_MCP_FEATURE_HEADER = "Toolboxes=V1Preview" + + +def _mcp_headers() -> dict: + token = credential.get_token(_MCP_SCOPE).token + return { + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + "Foundry-Features": _MCP_FEATURE_HEADER, + } + + +def _mcp_tools_list(toolbox_name: str) -> list: + """Call tools/list on the toolbox MCP endpoint.""" + import httpx + + url = _toolbox_mcp_endpoint(toolbox_name) + payload = {"jsonrpc": "2.0", "id": 1, "method": "tools/list", "params": {}} + resp = httpx.post(url, json=payload, headers=_mcp_headers(), timeout=60) + resp.raise_for_status() + data = resp.json() + tools = data.get("result", {}).get("tools", []) + print(f" tools/list → {len(tools)} tool(s)") + for t in tools[:5]: + print(f" - {t.get('name', '?')}") + return tools + + +def _mcp_tools_call(toolbox_name: str, tool_name: str, arguments: dict) -> dict: + """Call tools/call on the toolbox MCP endpoint.""" + import httpx + + url = _toolbox_mcp_endpoint(toolbox_name) + payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": {"name": tool_name, "arguments": arguments}, + } + resp = httpx.post(url, json=payload, headers=_mcp_headers(), timeout=60) + resp.raise_for_status() + data = resp.json() + result = data.get("result", {}) + content = result.get("content", []) + print(f" tools/call({tool_name}) → {len(content)} content block(s)") + if content: + first = content[0] + text = first.get("text", "") + print(f" preview: {text[:200]}...") + return result + + +# ═══════════════════════════════════════════════════════════════════════════ +# Lifecycle helpers: create → list versions → new version → promote → delete +# ═══════════════════════════════════════════════════════════════════════════ +def _full_lifecycle(toolbox_name: str, tools: list, *, validate_call=None): + """Run the full CRUD lifecycle for a toolbox. + + 1. 
create_version (v1) + 2. get + 3. tools/list (MCP validation) + 4. optional tools/call + 5. create_version (v2 — same tools, new description) + 6. list_versions + 7. update → promote v2 to default + 8. get_version v2 + 9. delete_version v1 + 10. delete toolbox + """ + print(f"\n{'='*60}") + print(f"LIFECYCLE: {toolbox_name}") + print(f"{'='*60}") + + # 1. create v1 + v1 = client.beta.toolboxes.create_version( + toolbox_name=toolbox_name, + tools=tools, + description=f"{toolbox_name} v1", + ) + print(f" 1. create_version → version={v1.version}, name={v1.name}") + + # 2. get toolbox + tb = client.beta.toolboxes.get(toolbox_name=toolbox_name) + print(f" 2. get → name={tb.name}, default_version={tb.default_version}") + + # 3. tools/list + listed_tools = _mcp_tools_list(toolbox_name) + + # 4. optional tools/call + if validate_call: + tool_name, args = validate_call + # find match + matching = [t for t in listed_tools if t.get("name") == tool_name] + if matching: + _mcp_tools_call(toolbox_name, tool_name, args) + else: + print(f" ⚠ tool '{tool_name}' not found in tools/list — skipping call") + + # 5. create v2 + v2 = client.beta.toolboxes.create_version( + toolbox_name=toolbox_name, + tools=tools, + description=f"{toolbox_name} v2 (promoted)", + ) + print(f" 5. create_version → version={v2.version}") + + # 6. list versions + versions = list(client.beta.toolboxes.list_versions(toolbox_name=toolbox_name)) + print(f" 6. list_versions → {len(versions)} version(s): {[v.version for v in versions]}") + + # 7. promote v2 + updated = client.beta.toolboxes.update(toolbox_name=toolbox_name, default_version=v2.version) + print(f" 7. update (promote) → default_version={updated.default_version}") + + # 8. get version v2 + v2_detail = client.beta.toolboxes.get_version(toolbox_name=toolbox_name, version=v2.version) + print(f" 8. get_version → version={v2_detail.version}, desc={v2_detail.description}") + + # 9. 
delete v1 + client.beta.toolboxes.delete_version(toolbox_name=toolbox_name, version=v1.version) + print(f" 9. delete_version v1 → OK") + + # 10. delete toolbox + client.beta.toolboxes.delete(toolbox_name=toolbox_name) + print(f" 10. delete → OK") + + return True + + +# ═══════════════════════════════════════════════════════════════════════════ +# Individual tool samples +# ═══════════════════════════════════════════════════════════════════════════ + +# --------------------------------------------------------------------------- +# 1. MCP — No Auth (public server, e.g. gitmcp.io) +# --------------------------------------------------------------------------- +def sample_mcp_no_auth(): + return _full_lifecycle( + "mcp-noauth-sample", + [ + MCPTool( + server_label="gitmcp", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 2. MCP — Key Auth +# --------------------------------------------------------------------------- +def sample_mcp_key_auth(): + return _full_lifecycle( + "mcp-keyauth-sample", + [ + MCPTool( + server_label="github", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 3. MCP — OAuth +# --------------------------------------------------------------------------- +def sample_mcp_oauth(): + return _full_lifecycle( + "mcp-oauth-sample", + [ + MCPTool( + server_label="github-oauth", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_OAUTH_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 4. MCP — Entra Token Passthrough (e.g. 
Outlook Mail via agent365) +# --------------------------------------------------------------------------- +def sample_mcp_entra_passthrough(): + return _full_lifecycle( + "mcp-entra-passthrough-sample", + [ + MCPTool( + server_label="outlook-mail", + server_url="https://agent365.svc.cloud.microsoft/agents/servers/mcp_MailTools", + project_connection_id=os.environ["MCP_ENTRA_PASSTHROUGH_CONNECTION_ID"], + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 5. MCP — Filtered tools +# --------------------------------------------------------------------------- +def sample_mcp_filtered(): + return _full_lifecycle( + "mcp-filtered-sample", + [ + MCPTool( + server_label="github-filtered", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + allowed_tools=["search_repositories", "get_file_contents"], + headers={"Accept": "application/json"}, + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 6. OpenAPI — No Auth (anonymous) +# --------------------------------------------------------------------------- +def sample_openapi_no_auth(): + spec = { + "openapi": "3.0.0", + "info": {"title": "JSON Placeholder", "version": "1.0"}, + "servers": [{"url": "https://jsonplaceholder.typicode.com"}], + "paths": { + "/posts/{id}": { + "get": { + "operationId": "getPost", + "summary": "Get a post by ID", + "parameters": [ + { + "name": "id", + "in": "path", + "required": True, + "schema": {"type": "integer"}, + } + ], + "responses": {"200": {"description": "A post object"}}, + } + } + }, + } + return _full_lifecycle( + "openapi-noauth-sample", + [ + OpenApiTool( + openapi={ + "name": "jsonplaceholder", + "spec": spec, + "auth": OpenApiAnonymousAuthDetails(), + } + ) + ], + validate_call=("getPost", {"id": 1}), + ) + + +# --------------------------------------------------------------------------- +# 7. 
OpenAPI — With Project Connection Auth +# --------------------------------------------------------------------------- +def sample_openapi_with_connection(): + spec = { + "openapi": "3.0.1", + "info": {"title": "TripAdvisor API", "version": "1.0"}, + "servers": [{"url": "https://api.content.tripadvisor.com/api/v1"}], + "paths": { + "/location/search": { + "get": { + "operationId": "searchLocations", + "summary": "Search for locations", + "parameters": [ + { + "name": "searchQuery", + "in": "query", + "required": True, + "schema": {"type": "string"}, + }, + { + "name": "language", + "in": "query", + "schema": {"type": "string", "default": "en"}, + }, + ], + "responses": {"200": {"description": "Search results"}}, + "security": [{"apiKeyAuth": []}], + } + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "name": "key", + "in": "query", + } + } + }, + } + return _full_lifecycle( + "openapi-tripadvisor-sample", + [ + OpenApiTool( + openapi={ + "name": "tripadvisor", + "spec": spec, + "auth": OpenApiProjectConnectionAuthDetails( + security_scheme=OpenApiProjectConnectionSecurityScheme( + project_connection_id=os.environ["TRIPADVISOR_CONNECTION_ID"], + ), + ), + } + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 8. A2A — Agent-to-Agent +# --------------------------------------------------------------------------- +def sample_a2a(): + return _full_lifecycle( + "a2a-sample", + [ + A2APreviewTool( + project_connection_id=os.environ.get("A2A_CONNECTION_ID", ""), + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 9. 
File Search +# --------------------------------------------------------------------------- +def sample_file_search(): + return _full_lifecycle( + "filesearch-sample", + [ + FileSearchTool( + name="filesearch_docs", + vector_store_ids=[os.environ["FILE_SEARCH_VECTOR_STORE_ID"]], + description="Search uploaded files for grounded passages.", + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 10. Azure AI Search +# --------------------------------------------------------------------------- +def sample_azure_ai_search(): + return _full_lifecycle( + "aisearch-sample", + [ + AzureAISearchTool( + azure_ai_search=AzureAISearchToolResource( + indexes=[ + AISearchIndexResource( + index_name=os.environ["AI_SEARCH_INDEX_NAME"], + project_connection_id=os.environ["AI_SEARCH_CONNECTION_ID"], + ) + ] + ) + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 11. Code Interpreter +# --------------------------------------------------------------------------- +def sample_code_interpreter(): + return _full_lifecycle( + "codeinterp-sample", + [CodeInterpreterTool()], + ) + + +# --------------------------------------------------------------------------- +# 12. Web Search +# --------------------------------------------------------------------------- +def sample_websearch_tool(): + return _full_lifecycle( + "websearch-sample", + [WebSearchTool()], + validate_call=("web_search", {"query": "Microsoft Foundry documentation"}), + ) + + +# --------------------------------------------------------------------------- +# 13. 
Web Search — Bing Custom Search +# --------------------------------------------------------------------------- +def sample_websearch_custom(): + return _full_lifecycle( + "websearch-customsearch-sample", + [ + WebSearchTool( + custom_search_configuration=BingCustomSearchConfiguration( + project_connection_id=os.environ["BING_SEARCH_CONNECTION_ID"], + instance_name=os.environ["BING_SEARCH_INSTANCE_NAME"], + ) + ) + ], + ) + + +# --------------------------------------------------------------------------- +# 14. Multi-Tool (MCP + MCP) +# --------------------------------------------------------------------------- +def sample_multi_tool(): + return _full_lifecycle( + "multi-tool-sample", + [ + MCPTool( + server_label="gitmcp", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + MCPTool( + server_label="github", + server_url="https://api.githubcopilot.com/mcp", + project_connection_id=os.environ["MCP_CONNECTION_ID"], + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 15. Multi-Tool (file search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_filesearch_mcp(): + return _full_lifecycle( + "multi-filesearch-mcp-sample", + [ + FileSearchTool( + name="filesearch_project_docs", + vector_store_ids=[os.environ["FILE_SEARCH_VECTOR_STORE_ID"]], + description="Find relevant passages from uploaded project files.", + ), + MCPTool( + server_label="gitmcp-files", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 16. 
Multi-Tool (web search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_websearch_mcp(): + return _full_lifecycle( + "multi-websearch-mcp-sample", + [ + WebSearchTool(), + MCPTool( + server_label="gitmcp-web", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 17. Multi-Tool (AI Search + MCP) +# --------------------------------------------------------------------------- +def sample_multi_aisearch_mcp(): + return _full_lifecycle( + "multi-aisearch-mcp-sample", + [ + AzureAISearchTool( + azure_ai_search=AzureAISearchToolResource( + indexes=[ + AISearchIndexResource( + index_name=os.environ["AI_SEARCH_INDEX_NAME"], + project_connection_id=os.environ["AI_SEARCH_CONNECTION_ID"], + ) + ] + ), + ), + MCPTool( + server_label="gitmcp-aisearch", + server_url="https://gitmcp.io/Azure-Samples/agent-openai-python-prompty", + ), + ], + ) + + +# --------------------------------------------------------------------------- +# 18. 
List all toolboxes +# --------------------------------------------------------------------------- +def sample_list_all(): + import httpx as _httpx + token = credential.get_token(_MCP_SCOPE).token + resp = _httpx.get( + f"{ENDPOINT}/toolboxes", + params={"api-version": "v1", "limit": 100}, + headers={"Authorization": f"Bearer {token}", "Foundry-Features": _MCP_FEATURE_HEADER}, + timeout=30, + ) + resp.raise_for_status() + toolboxes = resp.json().get("data", []) + print(f"\n{len(toolboxes)} toolbox(es):") + for tb in toolboxes: + print(f" {tb['name']} default_version={tb.get('default_version')}") + return toolboxes + + +# ═══════════════════════════════════════════════════════════════════════════ +# Runner +# ═══════════════════════════════════════════════════════════════════════════ +SAMPLES = { + "mcp-noauth": sample_mcp_no_auth, + "mcp-keyauth": sample_mcp_key_auth, + "mcp-oauth": sample_mcp_oauth, + "mcp-entra-passthrough": sample_mcp_entra_passthrough, + "mcp-filtered": sample_mcp_filtered, + "openapi-noauth": sample_openapi_no_auth, + "openapi-conn": sample_openapi_with_connection, + "a2a": sample_a2a, + "filesearch": sample_file_search, + "aisearch": sample_azure_ai_search, + "codeinterp": sample_code_interpreter, + "websearch": sample_websearch_tool, + "websearch-custom": sample_websearch_custom, + "multi": sample_multi_tool, + "multi-filesearch-mcp": sample_multi_filesearch_mcp, + "multi-websearch-mcp": sample_multi_websearch_mcp, + "multi-aisearch-mcp": sample_multi_aisearch_mcp, + "list": sample_list_all, +} + +if __name__ == "__main__": + if len(sys.argv) >= 2 and sys.argv[1] == "all": + # Run all samples, collect pass/fail report + results = {} + for name, fn in SAMPLES.items(): + if name == "list": + continue + try: + fn() + results[name] = "PASS" + except Exception as exc: + results[name] = f"FAIL: {exc}" + traceback.print_exc() + print("\n" + "=" * 60) + print("CRUD TEST REPORT") + print("=" * 60) + for name, status in results.items(): + mark = "✓" if 
status == "PASS" else "✗" + print(f" {mark} {name}: {status}") + passed = sum(1 for v in results.values() if v == "PASS") + print(f"\n {passed}/{len(results)} passed") + elif len(sys.argv) >= 2 and sys.argv[1] in SAMPLES: + SAMPLES[sys.argv[1]]() + else: + print(f"Usage: python {sys.argv[0]} ") + print(f"Samples: {', '.join(SAMPLES.keys())}") + sys.exit(1)