diff --git a/core/llm/autodetect.ts b/core/llm/autodetect.ts index 736caebcc6e..aee55ff754d 100644 --- a/core/llm/autodetect.ts +++ b/core/llm/autodetect.ts @@ -70,6 +70,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [ "xAI", "minimax", "groq", + "perplexity", "gemini", "docker", "nous", diff --git a/core/llm/llms/Perplexity.ts b/core/llm/llms/Perplexity.ts new file mode 100644 index 00000000000..94430f320fb --- /dev/null +++ b/core/llm/llms/Perplexity.ts @@ -0,0 +1,23 @@ +import { LLMOptions } from "../../index.js"; + +import OpenAI from "./OpenAI.js"; + +/** + * Perplexity provider + * + * Integrates with Perplexity's OpenAI-compatible chat completions API. + * Provides access to Sonar models, which include built-in web search — + * useful for coding agents that need to research up-to-date documentation + * or APIs while answering. + * + * More information at: https://docs.perplexity.ai/docs/getting-started + */ +class Perplexity extends OpenAI { + static providerName = "perplexity"; + static defaultOptions: Partial<LLMOptions> = { + apiBase: "https://api.perplexity.ai/", + model: "sonar", + }; +} + +export default Perplexity; diff --git a/core/llm/llms/Perplexity.vitest.ts b/core/llm/llms/Perplexity.vitest.ts new file mode 100644 index 00000000000..f71b4208fc9 --- /dev/null +++ b/core/llm/llms/Perplexity.vitest.ts @@ -0,0 +1,7 @@ +import { createOpenAISubclassTests } from "./test-utils/openai-test-utils.js"; +import Perplexity from "./Perplexity.js"; + +createOpenAISubclassTests(Perplexity, { + providerName: "perplexity", + defaultApiBase: "https://api.perplexity.ai/", +}); diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts index 453b2d90cd8..44793827e15 100644 --- a/core/llm/llms/index.ts +++ b/core/llm/llms/index.ts @@ -52,6 +52,7 @@ import OpenAI from "./OpenAI"; import OpenRouter from "./OpenRouter"; import ClawRouter from "./ClawRouter"; import OVHcloud from "./OVHcloud"; +import Perplexity from "./Perplexity"; import { Relace } from "./Relace"; import 
Replicate from "./Replicate"; import SageMaker from "./SageMaker"; @@ -130,6 +131,7 @@ export const LLMClasses = [ Scaleway, Relace, Inception, + Perplexity, Voyage, LlamaStack, TARS, diff --git a/docs/customize/model-providers/more/perplexity.mdx b/docs/customize/model-providers/more/perplexity.mdx new file mode 100644 index 00000000000..864865c1772 --- /dev/null +++ b/docs/customize/model-providers/more/perplexity.mdx @@ -0,0 +1,55 @@ +--- +title: "How to Configure Perplexity with Continue" +sidebarTitle: "Perplexity" +--- + +<Info> + Get your API key from the [Perplexity API Keys page](https://www.perplexity.ai/account/api/keys) +</Info> + +Perplexity exposes its Sonar models through an OpenAI-compatible chat +completions API. Sonar models include built-in web search, which is useful +for coding agents that need to research up-to-date documentation or APIs +while answering. + +Available models include: + +- `sonar` +- `sonar-pro` +- `sonar-reasoning` +- `sonar-reasoning-pro` + +## Configuration + +<Tabs> + <Tab title="YAML Config"> + ```yaml title="config.yaml" + name: My Config + version: 0.0.1 + schema: v1 + + models: + - name: Sonar Pro + provider: perplexity + model: sonar-pro + apiKey: <YOUR_PERPLEXITY_API_KEY> + ``` + </Tab> + <Tab title="JSON Config"> + ```json title="config.json" + { + "models": [ + { + "title": "Sonar Pro", + "provider": "perplexity", + "model": "sonar-pro", + "apiKey": "<YOUR_PERPLEXITY_API_KEY>" + } + ] + } + ``` + </Tab> +</Tabs> + +See the [Perplexity API documentation](https://docs.perplexity.ai/docs/getting-started) +for more details on available models, parameters, and rate limits. 
diff --git a/docs/customize/model-providers/overview.mdx b/docs/customize/model-providers/overview.mdx index 7ba030dcb4d..0c2529e58e5 100644 --- a/docs/customize/model-providers/overview.mdx +++ b/docs/customize/model-providers/overview.mdx @@ -31,6 +31,7 @@ Beyond the top-level providers, Continue supports many other options: | Provider | Description | | :--------------------------------------------------------------------- | :--------------------------------------------------------- | | [Groq](/customize/model-providers/more/groq) | Ultra-fast inference for various open models | +| [Perplexity](/customize/model-providers/more/perplexity) | Sonar models with built-in web search | | [Together AI](/customize/model-providers/more/together) | Platform for running a variety of open models | | [DeepInfra](/customize/model-providers/more/deepinfra) | Hosting for various open source models | | [OpenRouter](/customize/model-providers/top-level/openrouter) | Gateway to multiple model providers | diff --git a/docs/docs.json b/docs/docs.json index 8556401ffc6..b18e7c7c261 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -180,6 +180,7 @@ "customize/model-providers/more/moonshot", "customize/model-providers/more/nous", "customize/model-providers/more/nvidia", + "customize/model-providers/more/perplexity", "customize/model-providers/more/tensorix", "customize/model-providers/more/together", "customize/model-providers/more/xAI", diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json index 6eb11282100..f5854830d00 100644 --- a/extensions/vscode/config_schema.json +++ b/extensions/vscode/config_schema.json @@ -208,6 +208,7 @@ "mistral-vertexai", "deepinfra", "groq", + "perplexity", "fireworks", "ncompass", "cloudflare", @@ -261,6 +262,7 @@ "### Mistral API on Vertex AI\nTo get started you need to enable the [Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) and set up the [Google Application 
Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc).", "### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)", "### Groq\nGroq provides extremely fast inference of open-source language models. To get started, obtain an API key from [their console](https://console.groq.com/keys).", + "### Perplexity\nPerplexity offers Sonar models with built-in web search, useful for coding agents that need to research up-to-date documentation. To get started, obtain an API key from [their console](https://www.perplexity.ai/account/api/keys).\n> [Reference](https://docs.perplexity.ai/docs/getting-started)", "### Fireworks\nFireworks is a fast inference engine for open-source language models. To get started, obtain an API key from [their console](https://fireworks.ai/api-keys).", "### Ncompass\nnCompass is an extremely fast inference engine for open-source language models. To get started, obtain an API key from [their console](https://app.ncompass.tech/api-settings).", "### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)", @@ -1328,6 +1330,29 @@ } } }, + { + "if": { + "properties": { + "provider": { + "enum": ["perplexity"] + } + }, + "required": ["provider"] + }, + "then": { + "properties": { + "model": { + "enum": [ + "sonar", + "sonar-pro", + "sonar-reasoning", + "sonar-reasoning-pro", + "AUTODETECT" + ] + } + } + } + }, { "if": { "properties": { diff --git a/packages/openai-adapters/src/index.ts b/packages/openai-adapters/src/index.ts index c9eb4da00fa..612b8c8ee6b 100644 --- a/packages/openai-adapters/src/index.ts +++ b/packages/openai-adapters/src/index.ts @@ -143,6 +143,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined { return openAICompatible("http://localhost:8000/v1/", config); case "groq": return openAICompatible("https://api.groq.com/openai/v1/", config); + case "perplexity": + return 
openAICompatible("https://api.perplexity.ai/", config); case "minimax": return new MiniMaxApi(config); case "sambanova": diff --git a/packages/openai-adapters/src/types.ts b/packages/openai-adapters/src/types.ts index 3b324b0ac6b..16a7da39746 100644 --- a/packages/openai-adapters/src/types.ts +++ b/packages/openai-adapters/src/types.ts @@ -37,6 +37,7 @@ export const OpenAIConfigSchema = BasePlusConfig.extend({ z.literal("voyage"), z.literal("deepinfra"), z.literal("groq"), + z.literal("perplexity"), z.literal("nvidia"), z.literal("ovhcloud"), z.literal("fireworks"),