From 2f8d9166ce40c2f63a239ed7c010f6525afa78ab Mon Sep 17 00:00:00 2001
From: SSharma-10
Date: Thu, 23 Apr 2026 17:09:43 +0530
Subject: [PATCH] Add Inference APIs pydo and dots example

---
 specification/DigitalOcean-public.v2.yaml     |  8 -----
 ...agent_inference_create_chat_completion.yml |  2 ++
 .../examples/curl/inference_async_invoke.yml  | 32 +++++++++++++++++++
 .../curl/inference_async_invoke_audio.yml     | 17 ----------
 .../curl/inference_async_invoke_tts.yml       | 16 ----------
 .../agentInference_create_chat_completion.yml | 17 ++++++++++
 .../examples/dots/inference_async_invoke.yml  | 29 +++++++++++++++++
 .../dots/inference_create_chat_completion.yml | 16 ++++++++++
 .../dots/inference_create_embeddings.yml      | 18 +++++++++++
 .../examples/dots/inference_create_image.yml  | 17 ++++++++++
 .../dots/inference_create_messages.yml        | 17 ++++++++++
 .../dots/inference_create_response.yml        | 16 ++++++++++
 .../examples/dots/inference_list_models.yml   | 13 ++++++++
 .../agentInference_create_chat_completion.yml | 18 +++++++++++
 .../python/inference_async_invoke.yml         | 28 ++++++++++++++++
 .../inference_create_chat_completion.yml      | 15 +++++++++
 .../python/inference_create_embeddings.yml    | 16 ++++++++++
 .../python/inference_create_image.yml         | 16 ++++++++++
 .../python/inference_create_messages.yml      | 16 ++++++++++
 .../python/inference_create_response.yml      | 15 +++++++++
 .../examples/python/inference_list_models.yml | 11 +++++++
 .../inference/inference_async_invoke.yml      |  2 ++
 .../inference_create_chat_completion.yml      |  2 ++
 .../inference/inference_create_embeddings.yml |  2 ++
 .../inference/inference_create_image.yml      |  2 ++
 .../inference/inference_create_messages.yml   |  2 ++
 .../inference/inference_create_response.yml   |  2 ++
 .../inference/inference_list_models.yml       |  2 ++
 28 files changed, 326 insertions(+), 41 deletions(-)
 delete mode 100644 specification/resources/inference/examples/curl/inference_async_invoke_audio.yml
 delete mode 100644 specification/resources/inference/examples/curl/inference_async_invoke_tts.yml
 create mode 100644 specification/resources/inference/examples/dots/agentInference_create_chat_completion.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_async_invoke.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_create_chat_completion.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_create_embeddings.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_create_image.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_create_messages.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_create_response.yml
 create mode 100644 specification/resources/inference/examples/dots/inference_list_models.yml
 create mode 100644 specification/resources/inference/examples/python/agentInference_create_chat_completion.yml
 create mode 100644 specification/resources/inference/examples/python/inference_async_invoke.yml
 create mode 100644 specification/resources/inference/examples/python/inference_create_chat_completion.yml
 create mode 100644 specification/resources/inference/examples/python/inference_create_embeddings.yml
 create mode 100644 specification/resources/inference/examples/python/inference_create_image.yml
 create mode 100644 specification/resources/inference/examples/python/inference_create_messages.yml
 create mode 100644 specification/resources/inference/examples/python/inference_create_response.yml
 create mode 100644 specification/resources/inference/examples/python/inference_list_models.yml

diff --git a/specification/DigitalOcean-public.v2.yaml b/specification/DigitalOcean-public.v2.yaml
index 2e8f1149..b128dbb6 100644
--- a/specification/DigitalOcean-public.v2.yaml
+++ b/specification/DigitalOcean-public.v2.yaml
@@ -2972,14 +2972,6 @@ components:
         curl -X POST -H "Authorization: Bearer $MODEL_ACCESS_KEY" "https://inference.do-ai.run/v1/chat/completions"
         ```
 
-        ```
-        curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $MODEL_ACCESS_KEY" -d '{"model":"claude-opus-4-6","max_tokens":1024,"messages":[{"role":"user","content":"Hello"}]}' "https://inference.do-ai.run/v1/messages"
-        ```
-
-        ```
-        curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $MODEL_ACCESS_KEY" -d '{"model":"qwen3-embedding-0.6b","input":["hello world","goodbye world"],"encoding_format":"float","user":"user-1234"}' "https://inference.do-ai.run/v1/embeddings"
-        ```
-
         **Agent Inference:**
 
         ```
diff --git a/specification/resources/inference/agent_inference_create_chat_completion.yml b/specification/resources/inference/agent_inference_create_chat_completion.yml
index faa325a9..5b4ffc23 100644
--- a/specification/resources/inference/agent_inference_create_chat_completion.yml
+++ b/specification/resources/inference/agent_inference_create_chat_completion.yml
@@ -54,5 +54,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/agentInference_create_chat_completion.yml'
+  - $ref: 'examples/python/agentInference_create_chat_completion.yml'
+  - $ref: 'examples/dots/agentInference_create_chat_completion.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/examples/curl/inference_async_invoke.yml b/specification/resources/inference/examples/curl/inference_async_invoke.yml
index cc6086eb..a51a46c8 100644
--- a/specification/resources/inference/examples/curl/inference_async_invoke.yml
+++ b/specification/resources/inference/examples/curl/inference_async_invoke.yml
@@ -1,5 +1,6 @@
 lang: cURL
 source: |-
+  # Image Generation
   curl -X POST \
     -H "Authorization: Bearer $MODEL_ACCESS_KEY" \
     -H "Content-Type: application/json" \
@@ -10,3 +11,34 @@ source: |-
       }
     }' \
     "https://inference.do-ai.run/v1/async-invoke"
+
+  # Audio Generation
+  curl -X POST \
+    -H "Authorization: Bearer $MODEL_ACCESS_KEY" \
+    -H "Content-Type: application/json" \
+    -d '{
+      "model_id": "fal-ai/stable-audio-25/text-to-audio",
+      "input": {
+        "prompt": "Techno song with futuristic sounds",
+        "seconds_total": 60
+      },
+      "tags": [
+        {"key": "type", "value": "test"}
+      ]
+    }' \
+    "https://inference.do-ai.run/v1/async-invoke"
+
+  # Text-to-Speech
+  curl -X POST \
+    -H "Authorization: Bearer $MODEL_ACCESS_KEY" \
+    -H "Content-Type: application/json" \
+    -d '{
+      "model_id": "fal-ai/elevenlabs/tts/multilingual-v2",
+      "input": {
+        "text": "This text-to-speech example uses DigitalOcean multilingual voice."
+      },
+      "tags": [
+        {"key": "type", "value": "test"}
+      ]
+    }' \
+    "https://inference.do-ai.run/v1/async-invoke"
diff --git a/specification/resources/inference/examples/curl/inference_async_invoke_audio.yml b/specification/resources/inference/examples/curl/inference_async_invoke_audio.yml
deleted file mode 100644
index 52774174..00000000
--- a/specification/resources/inference/examples/curl/inference_async_invoke_audio.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-lang: cURL
-label: Generate Audio
-source: |-
-  curl -X POST \
-    -H "Authorization: Bearer $MODEL_ACCESS_KEY" \
-    -H "Content-Type: application/json" \
-    -d '{
-      "model_id": "fal-ai/stable-audio-25/text-to-audio",
-      "input": {
-        "prompt": "Techno song with futuristic sounds",
-        "seconds_total": 60
-      },
-      "tags": [
-        {"key": "type", "value": "test"}
-      ]
-    }' \
-    "https://inference.do-ai.run/v1/async-invoke"
diff --git a/specification/resources/inference/examples/curl/inference_async_invoke_tts.yml b/specification/resources/inference/examples/curl/inference_async_invoke_tts.yml
deleted file mode 100644
index ffa1760b..00000000
--- a/specification/resources/inference/examples/curl/inference_async_invoke_tts.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-lang: cURL
-label: Text-to-Speech
-source: |-
-  curl -X POST \
-    -H "Authorization: Bearer $MODEL_ACCESS_KEY" \
-    -H "Content-Type: application/json" \
-    -d '{
-      "model_id": "fal-ai/elevenlabs/tts/multilingual-v2",
-      "input": {
-        "text": "This text-to-speech example uses DigitalOcean multilingual voice."
-      },
-      "tags": [
-        {"key": "type", "value": "test"}
-      ]
-    }' \
-    "https://inference.do-ai.run/v1/async-invoke"
diff --git a/specification/resources/inference/examples/dots/agentInference_create_chat_completion.yml b/specification/resources/inference/examples/dots/agentInference_create_chat_completion.yml
new file mode 100644
index 00000000..11e62fd1
--- /dev/null
+++ b/specification/resources/inference/examples/dots/agentInference_create_chat_completion.yml
@@ -0,0 +1,17 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.AGENT_ACCESS_KEY,
+    baseURL: `https://${process.env.AGENT_URL}/api`,
+  });
+
+  const completion = await client.chat.completions.create({
+    model: "llama3.3-70b-instruct",
+    messages: [
+      { role: "user", content: "What is the capital of Portugal?" },
+    ],
+  });
+
+  console.log(completion.choices[0].message.content);
diff --git a/specification/resources/inference/examples/dots/inference_async_invoke.yml b/specification/resources/inference/examples/dots/inference_async_invoke.yml
new file mode 100644
index 00000000..ed9de1e1
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_async_invoke.yml
@@ -0,0 +1,29 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  // Image Generation
+  const imgResp = await client.async_images.generate({
+    model_id: "fal-ai/flux/schnell",
+    prompt: "A futuristic city at sunset",
+  });
+  console.log(imgResp.request_id, imgResp.status);
+
+  // Audio Generation
+  const audioResp = await client.audio.generate({
+    model_id: "fal-ai/stable-audio-25/text-to-audio",
+    prompt: "Techno song with futuristic sounds",
+    seconds_total: 60,
+  });
+  console.log(audioResp.request_id, audioResp.status);
+
+  // Text-to-Speech
+  const ttsResp = await client.audio.speech.create({
+    model_id: "fal-ai/elevenlabs/tts/multilingual-v2",
+    input: "This text-to-speech example uses DigitalOcean multilingual voice.",
+  });
+  console.log(ttsResp.request_id, ttsResp.status);
diff --git a/specification/resources/inference/examples/dots/inference_create_chat_completion.yml b/specification/resources/inference/examples/dots/inference_create_chat_completion.yml
new file mode 100644
index 00000000..3e47a1a4
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_create_chat_completion.yml
@@ -0,0 +1,16 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const completion = await client.chat.completions.create({
+    model: "llama3.3-70b-instruct",
+    messages: [
+      { role: "user", content: "What is the capital of Portugal?" },
+    ],
+  });
+
+  console.log(completion.choices[0].message.content);
diff --git a/specification/resources/inference/examples/dots/inference_create_embeddings.yml b/specification/resources/inference/examples/dots/inference_create_embeddings.yml
new file mode 100644
index 00000000..84e5f40b
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_create_embeddings.yml
@@ -0,0 +1,18 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const resp = await client.embeddings.create({
+    model: "qwen3-embedding-0.6b",
+    input: ["hello world", "goodbye world"],
+    encoding_format: "float",
+    user: "user-1234",
+  });
+
+  for (const item of resp.data) {
+    console.log(item.index, item.embedding.slice(0, 8));
+  }
diff --git a/specification/resources/inference/examples/dots/inference_create_image.yml b/specification/resources/inference/examples/dots/inference_create_image.yml
new file mode 100644
index 00000000..8e02bd27
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_create_image.yml
@@ -0,0 +1,17 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const resp = await client.images.generate({
+    model: "openai-gpt-image-1",
+    prompt: "A cute baby sea otter floating on its back in calm blue water",
+    size: "auto",
+    quality: "auto",
+    n: 1,
+  });
+
+  console.log(resp.data[0].b64_json);
diff --git a/specification/resources/inference/examples/dots/inference_create_messages.yml b/specification/resources/inference/examples/dots/inference_create_messages.yml
new file mode 100644
index 00000000..b8b8d401
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_create_messages.yml
@@ -0,0 +1,17 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const resp = await client.messages.create({
+    model: "claude-opus-4-6",
+    max_tokens: 1024,
+    messages: [
+      { role: "user", content: "What is the capital of Portugal?" },
+    ],
+  });
+
+  console.log(resp.content[0].text);
diff --git a/specification/resources/inference/examples/dots/inference_create_response.yml b/specification/resources/inference/examples/dots/inference_create_response.yml
new file mode 100644
index 00000000..9be9fbce
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_create_response.yml
@@ -0,0 +1,16 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const resp = await client.responses.create({
+    model: "openai-gpt-oss-20b",
+    input: "What is the capital of France?",
+    max_output_tokens: 50,
+    temperature: 0.7,
+  });
+
+  console.log(resp.output_text);
diff --git a/specification/resources/inference/examples/dots/inference_list_models.yml b/specification/resources/inference/examples/dots/inference_list_models.yml
new file mode 100644
index 00000000..df0aec3c
--- /dev/null
+++ b/specification/resources/inference/examples/dots/inference_list_models.yml
@@ -0,0 +1,13 @@
+lang: JavaScript
+source: |-
+  import { InferenceClient } from "@digitalocean/dots";
+
+  const client = new InferenceClient({
+    apiKey: process.env.MODEL_ACCESS_KEY,
+  });
+
+  const resp = await client.models.list();
+
+  for (const model of resp.data) {
+    console.log(model.id);
+  }
diff --git a/specification/resources/inference/examples/python/agentInference_create_chat_completion.yml b/specification/resources/inference/examples/python/agentInference_create_chat_completion.yml
new file mode 100644
index 00000000..e9173e1c
--- /dev/null
+++ b/specification/resources/inference/examples/python/agentInference_create_chat_completion.yml
@@ -0,0 +1,18 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(
+      token=os.environ.get("AGENT_ACCESS_KEY"),
+      agent_endpoint=os.environ.get("AGENT_ENDPOINT"),
+  )
+
+  resp = client.agent.chat.completions.create(
+      model="llama3.3-70b-instruct",
+      messages=[
+          {"role": "user", "content": "What is the capital of Portugal?"},
+      ],
+  )
+
+  print(resp.choices[0].message.content)
diff --git a/specification/resources/inference/examples/python/inference_async_invoke.yml b/specification/resources/inference/examples/python/inference_async_invoke.yml
new file mode 100644
index 00000000..c77e65de
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_async_invoke.yml
@@ -0,0 +1,28 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  # Image Generation
+  resp = client.async_images.generate(
+      model_id="fal-ai/flux/schnell",
+      prompt="A futuristic city at sunset",
+  )
+  print(resp.request_id, resp.status)
+
+  # Audio Generation
+  resp = client.audio.generate(
+      model_id="fal-ai/stable-audio-25/text-to-audio",
+      prompt="Techno song with futuristic sounds",
+      seconds_total=60,
+  )
+  print(resp.request_id, resp.status)
+
+  # Text-to-Speech
+  resp = client.audio.speech.create(
+      input="This text-to-speech example uses DigitalOcean multilingual voice.",
+      model_id="fal-ai/elevenlabs/tts/multilingual-v2",
+  )
+  print(resp.request_id, resp.status)
diff --git a/specification/resources/inference/examples/python/inference_create_chat_completion.yml b/specification/resources/inference/examples/python/inference_create_chat_completion.yml
new file mode 100644
index 00000000..e33eb0e1
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_create_chat_completion.yml
@@ -0,0 +1,15 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.chat.completions.create(
+      model="llama3.3-70b-instruct",
+      messages=[
+          {"role": "user", "content": "What is the capital of Portugal?"},
+      ],
+  )
+
+  print(resp.choices[0].message.content)
diff --git a/specification/resources/inference/examples/python/inference_create_embeddings.yml b/specification/resources/inference/examples/python/inference_create_embeddings.yml
new file mode 100644
index 00000000..2ffa7d74
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_create_embeddings.yml
@@ -0,0 +1,16 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.embeddings.create(
+      model="qwen3-embedding-0.6b",
+      input=["hello world", "goodbye world"],
+      encoding_format="float",
+      user="user-1234",
+  )
+
+  for item in resp.data:
+      print(item.index, item.embedding[:8])
diff --git a/specification/resources/inference/examples/python/inference_create_image.yml b/specification/resources/inference/examples/python/inference_create_image.yml
new file mode 100644
index 00000000..fc9a0062
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_create_image.yml
@@ -0,0 +1,16 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.images.generate(
+      model="openai-gpt-image-1",
+      prompt="A cute baby sea otter floating on its back in calm blue water",
+      size="auto",
+      quality="auto",
+      n=1,
+  )
+
+  print(resp.data[0].b64_json)
diff --git a/specification/resources/inference/examples/python/inference_create_messages.yml b/specification/resources/inference/examples/python/inference_create_messages.yml
new file mode 100644
index 00000000..1c85ab2a
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_create_messages.yml
@@ -0,0 +1,16 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.messages.create(
+      model="claude-opus-4-6",
+      max_tokens=1024,
+      messages=[
+          {"role": "user", "content": "What is the capital of Portugal?"},
+      ],
+  )
+
+  print(resp.content[0].text)
diff --git a/specification/resources/inference/examples/python/inference_create_response.yml b/specification/resources/inference/examples/python/inference_create_response.yml
new file mode 100644
index 00000000..81a4714b
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_create_response.yml
@@ -0,0 +1,15 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.responses.create(
+      model="openai-gpt-oss-20b",
+      input="What is the capital of France?",
+      max_output_tokens=50,
+      temperature=0.7,
+  )
+
+  print(resp.output[0].content[0].text)
diff --git a/specification/resources/inference/examples/python/inference_list_models.yml b/specification/resources/inference/examples/python/inference_list_models.yml
new file mode 100644
index 00000000..a7f91811
--- /dev/null
+++ b/specification/resources/inference/examples/python/inference_list_models.yml
@@ -0,0 +1,11 @@
+lang: Python
+source: |-
+  import os
+  from pydo import Client
+
+  client = Client(token=os.environ.get("MODEL_ACCESS_KEY"))
+
+  resp = client.models.list()
+
+  for model in resp.data:
+      print(model.id)
diff --git a/specification/resources/inference/inference_async_invoke.yml b/specification/resources/inference/inference_async_invoke.yml
index 3e91d797..104e4147 100644
--- a/specification/resources/inference/inference_async_invoke.yml
+++ b/specification/resources/inference/inference_async_invoke.yml
@@ -75,5 +75,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_async_invoke.yml'
+  - $ref: 'examples/python/inference_async_invoke.yml'
+  - $ref: 'examples/dots/inference_async_invoke.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_create_chat_completion.yml b/specification/resources/inference/inference_create_chat_completion.yml
index b19f70c8..fe919927 100644
--- a/specification/resources/inference/inference_create_chat_completion.yml
+++ b/specification/resources/inference/inference_create_chat_completion.yml
@@ -40,5 +40,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_create_chat_completion.yml'
+  - $ref: 'examples/python/inference_create_chat_completion.yml'
+  - $ref: 'examples/dots/inference_create_chat_completion.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_create_embeddings.yml b/specification/resources/inference/inference_create_embeddings.yml
index 4cd39860..0f29542f 100644
--- a/specification/resources/inference/inference_create_embeddings.yml
+++ b/specification/resources/inference/inference_create_embeddings.yml
@@ -40,5 +40,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_create_embeddings.yml'
+  - $ref: 'examples/python/inference_create_embeddings.yml'
+  - $ref: 'examples/dots/inference_create_embeddings.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_create_image.yml b/specification/resources/inference/inference_create_image.yml
index 4ffc5c46..ec598cd2 100644
--- a/specification/resources/inference/inference_create_image.yml
+++ b/specification/resources/inference/inference_create_image.yml
@@ -39,5 +39,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_create_image.yml'
+  - $ref: 'examples/python/inference_create_image.yml'
+  - $ref: 'examples/dots/inference_create_image.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_create_messages.yml b/specification/resources/inference/inference_create_messages.yml
index cd866f7e..3b4983ad 100644
--- a/specification/resources/inference/inference_create_messages.yml
+++ b/specification/resources/inference/inference_create_messages.yml
@@ -58,5 +58,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_create_messages.yml'
+  - $ref: 'examples/python/inference_create_messages.yml'
+  - $ref: 'examples/dots/inference_create_messages.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_create_response.yml b/specification/resources/inference/inference_create_response.yml
index d5d34dca..50481de7 100644
--- a/specification/resources/inference/inference_create_response.yml
+++ b/specification/resources/inference/inference_create_response.yml
@@ -41,5 +41,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_create_response.yml'
+  - $ref: 'examples/python/inference_create_response.yml'
+  - $ref: 'examples/dots/inference_create_response.yml'
 security:
   - inference_bearer_auth: []
diff --git a/specification/resources/inference/inference_list_models.yml b/specification/resources/inference/inference_list_models.yml
index 6c851669..002321f2 100644
--- a/specification/resources/inference/inference_list_models.yml
+++ b/specification/resources/inference/inference_list_models.yml
@@ -30,5 +30,7 @@ responses:
     $ref: '../../shared/responses/unexpected_error.yml'
 x-codeSamples:
   - $ref: 'examples/curl/inference_list_models.yml'
+  - $ref: 'examples/python/inference_list_models.yml'
+  - $ref: 'examples/dots/inference_list_models.yml'
 security:
   - inference_bearer_auth: []