Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,3 @@ site/
biome-main/
.review/
pglinter_repo/
.review/
24 changes: 23 additions & 1 deletion crates/pgls_lsp/src/capabilities.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
use crate::adapters::{PositionEncoding, WideEncoding, negotiated_encoding};
use crate::handlers::code_actions::command_id;
use pgls_workspace::features::code_actions::CommandActionCategory;
use pgls_workspace::features::semantic_tokens::{TokenModifier, TokenType};
use strum::IntoEnumIterator;
use tower_lsp::lsp_types::{
ClientCapabilities, CompletionOptions, ExecuteCommandOptions, HoverProviderCapability, OneOf,
PositionEncodingKind, SaveOptions, ServerCapabilities, TextDocumentSyncCapability,
PositionEncodingKind, SaveOptions, SemanticTokenModifier, SemanticTokenType,
SemanticTokensFullOptions, SemanticTokensLegend, SemanticTokensOptions,
SemanticTokensServerCapabilities, ServerCapabilities, TextDocumentSyncCapability,
TextDocumentSyncKind, TextDocumentSyncOptions, TextDocumentSyncSaveOptions,
WorkDoneProgressOptions,
};
Expand Down Expand Up @@ -69,6 +72,25 @@ pub(crate) fn server_capabilities(capabilities: &ClientCapabilities) -> ServerCa
)),
rename_provider: None,
hover_provider: Some(HoverProviderCapability::Simple(true)),
semantic_tokens_provider: Some(SemanticTokensServerCapabilities::SemanticTokensOptions(
SemanticTokensOptions {
work_done_progress_options: WorkDoneProgressOptions {
work_done_progress: None,
},
legend: SemanticTokensLegend {
token_types: TokenType::legend()
.into_iter()
.map(SemanticTokenType::new)
.collect(),
token_modifiers: TokenModifier::legend()
.into_iter()
.map(SemanticTokenModifier::new)
.collect(),
},
range: Some(true),
full: Some(SemanticTokensFullOptions::Bool(true)),
},
)),
..Default::default()
}
}
1 change: 1 addition & 0 deletions crates/pgls_lsp/src/handlers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@ pub(crate) mod code_actions;
pub(crate) mod completions;
pub(crate) mod formatting;
pub(crate) mod hover;
pub(crate) mod semantic_tokens;
pub(crate) mod text_document;
189 changes: 189 additions & 0 deletions crates/pgls_lsp/src/handlers/semantic_tokens.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,189 @@
use crate::{adapters, diagnostics::LspError, session::Session};
use pgls_text_size::TextRange;
use pgls_workspace::features::semantic_tokens::SemanticTokensParams;
use tower_lsp::lsp_types::{self, SemanticToken, SemanticTokens, SemanticTokensRangeResult};

/// Handles a full semantic tokens request.
///
/// Produces tokens for the entire document by invoking the shared
/// implementation without a range restriction.
#[tracing::instrument(level = "debug", skip(session), err)]
pub fn semantic_tokens_full(
    session: &Session,
    params: lsp_types::SemanticTokensParams,
) -> Result<Option<lsp_types::SemanticTokensResult>, LspError> {
    let tokens = get_semantic_tokens(session, &params.text_document.uri, None)?;
    Ok(Some(lsp_types::SemanticTokensResult::Tokens(tokens)))
}

/// Handles a range semantic tokens request.
///
/// Restricts token generation to the requested range by forwarding it to
/// the shared implementation.
#[tracing::instrument(level = "debug", skip(session), err)]
pub fn semantic_tokens_range(
    session: &Session,
    params: lsp_types::SemanticTokensRangeParams,
) -> Result<Option<SemanticTokensRangeResult>, LspError> {
    let requested_range = Some(params.range);
    let tokens = get_semantic_tokens(session, &params.text_document.uri, requested_range)?;
    Ok(Some(SemanticTokensRangeResult::Tokens(tokens)))
}

/// Common implementation for semantic tokens requests.
///
/// Resolves the document for `url`, optionally converts the LSP `range`
/// (line/character in the client-negotiated encoding) into a byte-offset
/// [`TextRange`], asks the workspace for semantic tokens, and delta-encodes
/// them into the LSP wire format.
fn get_semantic_tokens(
    session: &Session,
    url: &lsp_types::Url,
    range: Option<lsp_types::Range>,
) -> Result<SemanticTokens, LspError> {
    let path = session.file_path(url)?;
    let doc = session.document(url)?;
    // NOTE(review): this unwrap panics if client capabilities are not yet
    // available (e.g. a request racing `initialize`) — consider surfacing an
    // error instead; verify what `client_capabilities()` returns in that case.
    let encoding = adapters::negotiated_encoding(session.client_capabilities().unwrap());

    // Map the optional LSP range to byte offsets; `transpose` turns
    // Option<Result<..>> into Result<Option<..>> so `?` propagates failures.
    let text_range = range
        .map(|r| -> Result<TextRange, LspError> {
            let start = adapters::from_lsp::offset(&doc.line_index, r.start, encoding)?;
            let end = adapters::from_lsp::offset(&doc.line_index, r.end, encoding)?;
            Ok(TextRange::new(start, end))
        })
        .transpose()?;

    let workspace_result = session
        .workspace
        .get_semantic_tokens(SemanticTokensParams {
            path,
            range: text_range,
        })?;

    // Convert absolute-offset workspace tokens into LSP's relative encoding.
    let lsp_tokens = encode_tokens(&workspace_result.tokens, &doc.line_index, encoding)?;

    Ok(SemanticTokens {
        // No delta/refresh support yet, so no result id is tracked.
        result_id: None,
        data: lsp_tokens,
    })
}

/// Encodes workspace semantic tokens into the LSP delta-encoded format.
///
/// LSP semantic tokens are encoded as a flat array of integers with 5 values per token:
/// - deltaLine: line difference from previous token
/// - deltaStart: character offset from start of line (or from previous token if same line)
/// - length: the length of the token in characters
/// - tokenType: the token type index
/// - tokenModifiers: bit flags for token modifiers
///
/// Multi-line tokens (like block comments) are split into multiple LSP tokens,
/// one per line, since LSP semantic tokens cannot span lines.
///
/// NOTE(review): the delta arithmetic assumes `tokens` arrives sorted by
/// start offset and non-overlapping; out-of-order input would make the `u32`
/// subtractions below underflow (panic in debug, wrap in release) — confirm
/// the workspace guarantees ordering.
fn encode_tokens(
    tokens: &[pgls_workspace::features::semantic_tokens::SemanticToken],
    line_index: &adapters::line_index::LineIndex,
    encoding: adapters::PositionEncoding,
) -> Result<Vec<SemanticToken>, LspError> {
    let mut result = Vec::with_capacity(tokens.len());
    // Position of the previously emitted token; all deltas are relative to it.
    let mut prev_line = 0u32;
    let mut prev_start = 0u32;

    for token in tokens {
        // Convert token range to LSP positions in the negotiated encoding.
        let start_pos = adapters::to_lsp::position(line_index, token.range.start(), encoding)
            .map_err(|e| LspError::from(anyhow::anyhow!("Failed to convert position: {}", e)))?;

        let end_pos = adapters::to_lsp::position(line_index, token.range.end(), encoding)
            .map_err(|e| LspError::from(anyhow::anyhow!("Failed to convert position: {}", e)))?;

        if start_pos.line == end_pos.line {
            // Single-line token - emit one LSP token
            let length = end_pos.character - start_pos.character;
            let delta_line = start_pos.line - prev_line;
            // Same line as the previous token: delta from that token's start;
            // new line: absolute column on that line.
            let delta_start = if delta_line == 0 {
                start_pos.character - prev_start
            } else {
                start_pos.character
            };

            result.push(SemanticToken {
                delta_line,
                delta_start,
                length,
                token_type: token.token_type,
                token_modifiers_bitset: token.token_modifiers,
            });

            prev_line = start_pos.line;
            prev_start = start_pos.character;
        } else {
            // Multi-line token - emit one LSP token per line
            for line in start_pos.line..=end_pos.line {
                let (line_start, line_length) = if line == start_pos.line {
                    // First line: from token start to end of line
                    let line_len = get_line_length(line_index, line, encoding);
                    (start_pos.character, line_len.saturating_sub(start_pos.character))
                } else if line == end_pos.line {
                    // Last line: from start of line to token end
                    (0, end_pos.character)
                } else {
                    // Middle lines: entire line
                    (0, get_line_length(line_index, line, encoding))
                };

                // Skip empty segments (blank middle lines, and the last-line
                // fallback where get_line_length returns 0). Note that
                // prev_line/prev_start stay untouched for skipped segments.
                if line_length == 0 {
                    continue;
                }

                let delta_line = line - prev_line;
                let delta_start = if delta_line == 0 {
                    line_start - prev_start
                } else {
                    line_start
                };

                result.push(SemanticToken {
                    delta_line,
                    delta_start,
                    length: line_length,
                    token_type: token.token_type,
                    token_modifiers_bitset: token.token_modifiers,
                });

                prev_line = line;
                prev_start = line_start;
            }
        }
    }

    Ok(result)
}

/// Gets the length of a line in the appropriate encoding (excluding the newline character).
///
/// NOTE(review): this assumes `line_index.newlines[i]` is the start offset of
/// line `i`, and that each line ends with a single-byte terminator — with
/// `\r\n` endings the trailing `\r` would still be counted. Confirm against
/// `LineIndex`'s construction.
fn get_line_length(
    line_index: &adapters::line_index::LineIndex,
    line: u32,
    encoding: adapters::PositionEncoding,
) -> u32 {
    let line_usize = line as usize;

    // Get the start offset of this line and the next line
    let line_start = line_index.newlines.get(line_usize).copied();
    let next_line_start = line_index.newlines.get(line_usize + 1).copied();

    let (Some(start), Some(end)) = (line_start, next_line_start) else {
        // Last line or invalid line - estimate from offset
        // For the last line, we don't have a next newline offset
        // Return 0 as a safe fallback (the token end position should handle this)
        return 0;
    };

    // Line length in bytes (excluding newline)
    let byte_length = end - start - pgls_text_size::TextSize::from(1u32);

    // Convert to the appropriate encoding
    match encoding {
        adapters::PositionEncoding::Utf8 => byte_length.into(),
        adapters::PositionEncoding::Wide(enc) => {
            // Re-interpret the byte column at end-of-line as a wide (UTF-16/32)
            // column; fall back to the raw byte count if conversion fails.
            let line_col = adapters::LineCol {
                line,
                col: byte_length.into(),
            };
            line_index
                .to_wide(enc, line_col)
                .map(|wlc| wlc.col)
                .unwrap_or(byte_length.into())
        }
    }
}
23 changes: 23 additions & 0 deletions crates/pgls_lsp/src/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -329,6 +329,28 @@ impl LanguageServer for LSPServer {
Err(e) => LspResult::Err(into_lsp_error(e)),
}
}

#[tracing::instrument(level = "trace", skip_all)]
async fn semantic_tokens_full(
&self,
params: SemanticTokensParams,
) -> LspResult<Option<SemanticTokensResult>> {
match handlers::semantic_tokens::semantic_tokens_full(&self.session, params) {
Ok(result) => LspResult::Ok(result),
Err(e) => LspResult::Err(into_lsp_error(e)),
}
}

#[tracing::instrument(level = "trace", skip_all)]
async fn semantic_tokens_range(
&self,
params: SemanticTokensRangeParams,
) -> LspResult<Option<SemanticTokensRangeResult>> {
match handlers::semantic_tokens::semantic_tokens_range(&self.session, params) {
Ok(result) => LspResult::Ok(result),
Err(e) => LspResult::Err(into_lsp_error(e)),
}
}
}

impl Drop for LSPServer {
Expand Down Expand Up @@ -483,6 +505,7 @@ impl ServerFactory {
workspace_method!(builder, register_project_folder);
workspace_method!(builder, unregister_project_folder);
workspace_method!(builder, invalidate_schema_cache);
workspace_method!(builder, get_semantic_tokens);

let (service, socket) = builder.finish();
ServerConnection { socket, service }
Expand Down
1 change: 1 addition & 0 deletions crates/pgls_workspace/src/features/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ pub mod completions;
pub mod diagnostics;
pub mod format;
pub mod on_hover;
pub mod semantic_tokens;
29 changes: 29 additions & 0 deletions crates/pgls_workspace/src/features/semantic_tokens.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
use pgls_fs::PgLSPath;
use pgls_text_size::TextRange;

// Re-export from the workspace implementation
pub use crate::workspace::server::semantic_tokens::{
SemanticToken, TokenModifier, TokenType, NO_MODIFIERS,
};

/// Parameters for a workspace semantic tokens request.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
pub struct SemanticTokensParams {
    /// The file path for which semantic tokens are requested.
    pub path: PgLSPath,
    /// Optional range to limit the tokens. If None, returns tokens for the entire file.
    pub range: Option<TextRange>,
}

/// Result of a workspace semantic tokens request.
#[derive(Debug, serde::Serialize, serde::Deserialize, Default)]
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
pub struct SemanticTokensResult {
    /// The semantic tokens for the requested file/range
    pub tokens: Vec<SemanticToken>,
}

impl SemanticTokensResult {
    /// Creates a result wrapping the given tokens.
    pub fn new(tokens: Vec<SemanticToken>) -> Self {
        Self { tokens }
    }
}
7 changes: 7 additions & 0 deletions crates/pgls_workspace/src/workspace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ use crate::{
},
format::{PullFileFormattingParams, PullFormattingResult},
on_hover::{OnHoverParams, OnHoverResult},
semantic_tokens::{SemanticTokensParams, SemanticTokensResult},
},
};

Expand Down Expand Up @@ -131,6 +132,12 @@ pub trait Workspace: Send + Sync + RefUnwindSafe {

fn on_hover(&self, params: OnHoverParams) -> Result<OnHoverResult, WorkspaceError>;

/// Retrieves semantic tokens for syntax highlighting
fn get_semantic_tokens(
&self,
params: SemanticTokensParams,
) -> Result<SemanticTokensResult, WorkspaceError>;

/// Register a possible workspace project folder. Returns the key of said project. Use this key when you want to switch to different projects.
fn register_project_folder(
&self,
Expand Down
7 changes: 7 additions & 0 deletions crates/pgls_workspace/src/workspace/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,13 @@ where
self.request("pgls/on_hover", params)
}

/// Forwards the semantic tokens request to the remote workspace over the
/// `pgls/get_semantic_tokens` JSON-RPC method.
fn get_semantic_tokens(
    &self,
    params: crate::features::semantic_tokens::SemanticTokensParams,
) -> Result<crate::features::semantic_tokens::SemanticTokensResult, WorkspaceError> {
    self.request("pgls/get_semantic_tokens", params)
}

fn invalidate_schema_cache(&self, all: bool) -> Result<(), WorkspaceError> {
self.request("pgls/invalidate_schema_cache", all)
}
Expand Down
25 changes: 25 additions & 0 deletions crates/pgls_workspace/src/workspace/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ use crate::{
diagnostics::{PullDiagnosticsResult, PullFileDiagnosticsParams},
format::{PullFileFormattingParams, PullFormattingResult, StatementFormatResult},
on_hover::{OnHoverParams, OnHoverResult},
semantic_tokens::{SemanticTokensParams, SemanticTokensResult},
},
settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut},
workspace::{AnalyserDiagnosticsMapper, WithCSTandASTMapper},
Expand All @@ -71,6 +72,7 @@ pub(crate) mod document;
mod migration;
mod pg_query;
mod schema_cache_manager;
pub mod semantic_tokens;
mod sql_function;
mod statement_identifier;
mod tree_sitter;
Expand Down Expand Up @@ -1058,6 +1060,29 @@ impl Workspace for WorkspaceServer {
None => Ok(OnHoverResult::default()),
}
}

/// Computes semantic tokens for the document at `params.path`, optionally
/// limited to statements whose range overlaps `params.range`.
///
/// Returns `WorkspaceError::not_found` if the document is not open.
#[ignored_path(path=&params.path)]
#[tracing::instrument(level = "debug", skip_all, fields(
    path = params.path.as_os_str().to_str(),
), err)]
fn get_semantic_tokens(
    &self,
    params: SemanticTokensParams,
) -> Result<SemanticTokensResult, WorkspaceError> {
    use document::{RangeOverlapFilter, SemanticTokenMapper};

    // Hold the documents read lock only for the duration of token collection.
    let documents = self.documents.read().unwrap();
    let doc = documents
        .get(&params.path)
        .ok_or(WorkspaceError::not_found())?;

    // Map each statement to its tokens — the filter keeps statements that
    // overlap the requested range (all of them when `range` is None) — then
    // flatten into a single token list.
    let tokens: Vec<_> = doc
        .iter_with_filter(SemanticTokenMapper, RangeOverlapFilter::new(params.range))
        .flat_map(|(_, tokens)| tokens)
        .collect();

    Ok(SemanticTokensResult::new(tokens))
}
}

/// Returns `true` if `path` is a directory or
Expand Down
Loading
Loading