Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions core/llm/autodetect.ts
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [
"xAI",
"minimax",
"groq",
"perplexity",
"gemini",
"docker",
"nous",
Expand Down
23 changes: 23 additions & 0 deletions core/llm/llms/Perplexity.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import { LLMOptions } from "../../index.js";

import OpenAI from "./OpenAI.js";

/**
 * LLM provider for Perplexity (https://docs.perplexity.ai/docs/getting-started).
 *
 * Perplexity exposes an OpenAI-compatible chat completions endpoint, so this
 * provider only needs to point the OpenAI base class at the Perplexity API
 * and select a default Sonar model. Sonar models ship with built-in web
 * search, which is useful for coding agents that need to look up current
 * documentation or APIs while answering.
 */
class Perplexity extends OpenAI {
  // Key used to select this provider in user configuration.
  static providerName = "perplexity";

  // Defaults applied when the user's config omits these fields.
  static defaultOptions: Partial<LLMOptions> = {
    apiBase: "https://api.perplexity.ai/",
    model: "sonar",
  };
}

export default Perplexity;
7 changes: 7 additions & 0 deletions core/llm/llms/Perplexity.vitest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
import { createOpenAISubclassTests } from "./test-utils/openai-test-utils.js";
import Perplexity from "./Perplexity.js";

// Run the shared test suite for OpenAI-compatible provider subclasses against
// Perplexity, checking that it registers under the expected provider name and
// targets the expected default API base URL.
createOpenAISubclassTests(Perplexity, {
  providerName: "perplexity",
  defaultApiBase: "https://api.perplexity.ai/",
});
2 changes: 2 additions & 0 deletions core/llm/llms/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ import OpenAI from "./OpenAI";
import OpenRouter from "./OpenRouter";
import ClawRouter from "./ClawRouter";
import OVHcloud from "./OVHcloud";
import Perplexity from "./Perplexity";
import { Relace } from "./Relace";
import Replicate from "./Replicate";
import SageMaker from "./SageMaker";
Expand Down Expand Up @@ -130,6 +131,7 @@ export const LLMClasses = [
Scaleway,
Relace,
Inception,
Perplexity,
Voyage,
LlamaStack,
TARS,
Expand Down
55 changes: 55 additions & 0 deletions docs/customize/model-providers/more/perplexity.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
---
title: "How to Configure Perplexity with Continue"
sidebarTitle: "Perplexity"
---

<Info>
Get your API key from the [Perplexity API Keys page](https://www.perplexity.ai/account/api/keys)
</Info>

Perplexity exposes its Sonar models through an OpenAI-compatible chat
completions API. Sonar models include built-in web search, which is useful
for coding agents that need to research up-to-date documentation or APIs
while answering.

Available models include:

- `sonar`
- `sonar-pro`
- `sonar-reasoning`
- `sonar-reasoning-pro`

## Configuration

<Tabs>
<Tab title="YAML">
```yaml title="config.yaml"
name: My Config
version: 0.0.1
schema: v1

models:
- name: Sonar Pro
provider: perplexity
model: sonar-pro
apiKey: <YOUR_PERPLEXITY_API_KEY>
```
</Tab>
<Tab title="JSON (Deprecated)">
```json title="config.json"
{
"models": [
{
"title": "Sonar Pro",
"provider": "perplexity",
"model": "sonar-pro",
"apiKey": "<YOUR_PERPLEXITY_API_KEY>"
}
]
}
```
</Tab>
</Tabs>

See the [Perplexity API documentation](https://docs.perplexity.ai/docs/getting-started)
for more details on available models, parameters, and rate limits.
1 change: 1 addition & 0 deletions docs/customize/model-providers/overview.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ Beyond the top-level providers, Continue supports many other options:
| Provider | Description |
| :--------------------------------------------------------------------- | :--------------------------------------------------------- |
| [Groq](/customize/model-providers/more/groq) | Ultra-fast inference for various open models |
| [Perplexity](/customize/model-providers/more/perplexity) | Sonar models with built-in web search |
| [Together AI](/customize/model-providers/more/together) | Platform for running a variety of open models |
| [DeepInfra](/customize/model-providers/more/deepinfra) | Hosting for various open source models |
| [OpenRouter](/customize/model-providers/top-level/openrouter) | Gateway to multiple model providers |
Expand Down
1 change: 1 addition & 0 deletions docs/docs.json
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@
"customize/model-providers/more/moonshot",
"customize/model-providers/more/nous",
"customize/model-providers/more/nvidia",
"customize/model-providers/more/perplexity",
"customize/model-providers/more/tensorix",
"customize/model-providers/more/together",
"customize/model-providers/more/xAI",
Expand Down
25 changes: 25 additions & 0 deletions extensions/vscode/config_schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,7 @@
"mistral-vertexai",
"deepinfra",
"groq",
"perplexity",
"fireworks",
"ncompass",
"cloudflare",
Expand Down Expand Up @@ -261,6 +262,7 @@
"### Mistral API on Vertex AI\nTo get started you need to enable the [Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) and set up the [Google Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc).",
"### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)",
"### Groq\nGroq provides extremely fast inference of open-source language models. To get started, obtain an API key from [their console](https://console.groq.com/keys).",
"### Perplexity\nPerplexity offers Sonar models with built-in web search, useful for coding agents that need to research up-to-date documentation. To get started, obtain an API key from [their console](https://www.perplexity.ai/account/api/keys).\n> [Reference](https://docs.perplexity.ai/docs/getting-started)",
"### Fireworks\nFireworks is a fast inference engine for open-source language models. To get started, obtain an API key from [their console](https://fireworks.ai/api-keys).",
"### Ncompass\nnCompass is an extremely fast inference engine for open-source language models. To get started, obtain an API key from [their console](https://app.ncompass.tech/api-settings).",
"### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)",
Expand Down Expand Up @@ -1328,6 +1330,29 @@
}
}
},
{
"if": {
"properties": {
"provider": {
"enum": ["perplexity"]
}
},
"required": ["provider"]
},
"then": {
"properties": {
"model": {
"enum": [
"sonar",
"sonar-pro",
"sonar-reasoning",
"sonar-reasoning-pro",
"AUTODETECT"
]
}
}
}
},
{
"if": {
"properties": {
Expand Down
2 changes: 2 additions & 0 deletions packages/openai-adapters/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
return openAICompatible("http://localhost:8000/v1/", config);
case "groq":
return openAICompatible("https://api.groq.com/openai/v1/", config);
case "perplexity":
return openAICompatible("https://api.perplexity.ai/", config);
case "minimax":
return new MiniMaxApi(config);
case "sambanova":
Expand Down
1 change: 1 addition & 0 deletions packages/openai-adapters/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ export const OpenAIConfigSchema = BasePlusConfig.extend({
z.literal("voyage"),
z.literal("deepinfra"),
z.literal("groq"),
z.literal("perplexity"),
z.literal("nvidia"),
z.literal("ovhcloud"),
z.literal("fireworks"),
Expand Down
Loading