diff --git a/.github/workflows/external-tests.yml b/.github/workflows/external-tests.yml
index d1010000..81b35a1a 100644
--- a/.github/workflows/external-tests.yml
+++ b/.github/workflows/external-tests.yml
@@ -9,6 +9,9 @@ on:
 env:
   CARGO_TERM_COLOR: always
   MACOSX_DEPLOYMENT_TARGET: "13"
+  # Force Node 24 for all JS-based actions to avoid the libuv
+  # process_title assertion crash on Windows (known Node 20 bug).
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
 
 jobs:
   lua-tests:
@@ -20,12 +23,12 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          # - os: ubuntu-latest TODO uncomment once bun stop crashing
+          - os: ubuntu-latest
           - os: macos-latest
           - os: windows-latest
             target: x86_64-pc-windows-msvc
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: oven-sh/setup-bun@v2
 
       - name: Install Zig
@@ -34,11 +37,11 @@ jobs:
           version: 0.15.2
 
       - name: Install Rust
-        uses: actions-rust-lang/setup-rust-toolchain@v1
+        uses: actions-rust-lang/setup-rust-toolchain@v1.15.4
         with:
           cache: true
-          cache-on-failure: true
-          cache-key: "v1-lua-e2e"
+          cache-on-failure: false
+          cache-key: "v2-lua-e2e"
           rustflags: ""
           target: ${{ matrix.target || '' }}
diff --git a/.github/workflows/lua.yml b/.github/workflows/lua.yml
index 099a22bf..3835e36f 100644
--- a/.github/workflows/lua.yml
+++ b/.github/workflows/lua.yml
@@ -11,7 +11,7 @@ jobs:
     name: lua-language-server type check
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Install Neovim
         run: |
@@ -38,7 +38,7 @@ jobs:
     name: luacheck lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Install luacheck
         run: |
diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml
index fd580835..97fadd78 100644
--- a/.github/workflows/nix.yml
+++ b/.github/workflows/nix.yml
@@ -13,7 +13,7 @@ jobs:
       id-token: "write"
       contents: "read"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: DeterminateSystems/nix-installer-action@main
       - uses: DeterminateSystems/magic-nix-cache-action@main
       - uses: DeterminateSystems/flake-checker-action@main
diff --git a/.github/workflows/panvimdoc.yaml b/.github/workflows/panvimdoc.yaml
index 8f3bc156..c20a1c18 100644
--- a/.github/workflows/panvimdoc.yaml
+++ b/.github/workflows/panvimdoc.yaml
@@ -10,7 +10,7 @@ jobs:
     permissions:
       contents: write
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           # fetch last 2 commits required for auto force push back
           fetch-depth: 2
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 6ee24c13..5affa562 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -5,6 +5,9 @@ on:
     branches: [main, feat/mcp-ai]
   pull_request:
 
+env:
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
+
 jobs:
   build-nvim:
     name: Build Neovim ${{ matrix.target }}
@@ -62,7 +65,7 @@ jobs:
             ext: dll
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
@@ -193,7 +196,7 @@ jobs:
             ext: dll
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
@@ -306,7 +309,7 @@ jobs:
             artifact_name: target/aarch64-pc-windows-msvc/release/fff-mcp.exe
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
@@ -360,7 +363,7 @@ jobs:
     permissions:
       contents: write
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Download artifacts
         uses: actions/download-artifact@v4
@@ -469,7 +472,7 @@ jobs:
     permissions:
       contents: read
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Setup Node.js
         uses: actions/setup-node@v4
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 18201e2a..caa788b0 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -20,7 +20,7 @@ jobs:
       matrix:
         os: [ubuntu-latest, macos-latest]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       # Zig is required to compile zlob
       - name: Install Zig
@@ -29,7 +29,7 @@ jobs:
           version: 0.15.2
 
       - name: Install Rust
-        uses: actions-rust-lang/setup-rust-toolchain@v1
+        uses: actions-rust-lang/setup-rust-toolchain@v1.15.4
         with:
           cache: true
           cache-on-failure: true
@@ -45,7 +45,7 @@ jobs:
     name: cargo fmt
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Install Rust
         uses: dtolnay/rust-toolchain@master
         with:
@@ -58,7 +58,7 @@ jobs:
     name: cargo clippy
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       # Zig is required to compile zlob
       - name: Install Zig
diff --git a/.github/workflows/spelling.yaml b/.github/workflows/spelling.yaml
index c0aeaef4..b3b8d41c 100644
--- a/.github/workflows/spelling.yaml
+++ b/.github/workflows/spelling.yaml
@@ -17,7 +17,7 @@ jobs:
     name: Spell Check with Typos
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
diff --git a/.github/workflows/stylua.yaml b/.github/workflows/stylua.yaml
index 64d2dbd1..fa7edb11 100644
--- a/.github/workflows/stylua.yaml
+++ b/.github/workflows/stylua.yaml
@@ -25,7 +25,7 @@ jobs:
     name: Check lua files using Stylua
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
diff --git a/Cargo.lock b/Cargo.lock
index 38f4a7f4..ee3e1661 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -81,7 +81,7 @@ version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
 dependencies = [
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -92,7 +92,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
 dependencies = [
  "anstyle",
  "once_cell_polyfill",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 39e343d1..0e60477a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,7 +41,11 @@ parking_lot = "0.12"
 pathdiff = "0.2.1"
 rayon = "1.8.0"
 regex = "1.11"
-smallvec = { version = "1.13", features = ["const_generics", "union"] }
+smallvec = { version = "1.13", features = [
+    "const_generics",
+    "union",
+    "may_dangle",
+] }
 thiserror = "2.0.10"
 tracing = "0.1"
 
diff --git a/crates/fff-c/src/lib.rs b/crates/fff-c/src/lib.rs
index c3490e2e..378ba99a 100644
--- a/crates/fff-c/src/lib.rs
+++ b/crates/fff-c/src/lib.rs
@@ -261,8 +261,7 @@ pub unsafe extern "C" fn fff_search(
 
     let results = FilePicker::fuzzy_search(
         picker.get_files(),
-        query_str,
-        parsed,
+        &parsed,
         FuzzySearchOptions {
             max_threads: opts.max_threads.unwrap_or(0),
             current_file: opts.current_file.as_deref(),
@@ -349,8 +348,7 @@ pub unsafe extern "C" fn fff_live_grep(
         classify_definitions: opts.classify_definitions.unwrap_or(false),
     };
 
-    let result =
-        fff_core::grep::grep_search(picker.get_files(), query_str, parsed.as_ref(), &options);
+    let result = fff_core::grep::grep_search(picker.get_files(), &parsed, &options);
 
     let json_result = ffi_types::GrepResultJson::from_grep_result(&result);
     match serde_json::to_string(&json_result) {
@@ -406,7 +404,7 @@ pub unsafe extern "C" fn fff_multi_grep(
     let is_ai = picker.mode().is_ai();
 
     // Parse constraints from the optional string (e.g. "*.rs /src/")
-    let parsed_constraints = opts.constraints.as_deref().and_then(|c| {
+    let parsed_constraints = opts.constraints.as_deref().map(|c| {
         if is_ai {
             fff_core::QueryParser::new(fff_query_parser::AiGrepConfig).parse(c)
         } else {
diff --git a/crates/fff-core/src/background_watcher.rs b/crates/fff-core/src/background_watcher.rs
index 89bc29cd..bd13358e 100644
--- a/crates/fff-core/src/background_watcher.rs
+++ b/crates/fff-core/src/background_watcher.rs
@@ -8,14 +8,30 @@ use notify::event::{AccessKind, AccessMode};
 use notify::{Config, EventKind, RecursiveMode};
 use notify_debouncer_full::{DebounceEventResult, DebouncedEvent, NoCache, new_debouncer_opt};
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::time::Duration;
 use tracing::{Level, debug, error, info, warn};
 
 type Debouncer = notify_debouncer_full::Debouncer<notify::RecommendedWatcher, NoCache>;
 
+/// Owns the file-system watcher and guarantees that all background threads
+/// are fully joined before `stop()` / `Drop` returns.
+///
+/// Architecture:
+/// - The debouncer (and its internal watcher) live inside an **owner thread**
+///   that we spawn and hold the `JoinHandle` for.
+/// - `stop()` sets a flag, unparks the owner thread, and **joins** it.
+/// - Inside the owner thread, `Debouncer::stop()` is called, which joins the
+///   debouncer's event-processing thread.
+/// - On Windows an additional short sleep is added after `Debouncer::stop()`
+///   because `notify`'s `ReadDirectoryChangesWatcher` discards its thread
+///   `JoinHandle`, so we cannot join it directly. The watcher's `Drop` does
+///   signal the thread via semaphore so it exits almost immediately, but we
+///   need to give the OS a moment to reclaim it.
 pub struct BackgroundWatcher {
-    debouncer: Arc<Mutex<Option<Debouncer>>>,
+    stop_signal: Arc<AtomicBool>,
+    owner_thread: Option<std::thread::JoinHandle<()>>,
 }
 
 const DEBOUNCE_TIMEOUT: Duration = Duration::from_millis(250);
@@ -43,8 +59,33 @@ impl BackgroundWatcher {
             Self::create_debouncer(base_path, git_workdir, shared_picker, shared_frecency, mode)?;
         info!("Background file watcher initialized successfully");
 
+        let stop_signal = Arc::new(AtomicBool::new(false));
+        let stop_clone = Arc::clone(&stop_signal);
+
+        // The owner thread keeps the debouncer alive and ensures proper
+        // cleanup: `Debouncer::stop()` joins its internal thread, then the
+        // watcher `Drop` signals its I/O thread to exit.
+        let owner_thread = std::thread::Builder::new()
+            .name("fff-watcher-owner".into())
+            .spawn(move || {
+                while !stop_clone.load(Ordering::Acquire) {
+                    std::thread::park_timeout(Duration::from_secs(1));
+                }
+                // Debouncer::stop() joins the debouncer's event thread, then
+                // drops the watcher (whose Drop signals the I/O thread).
+                debouncer.stop();
+                // On Windows the notify crate discards the ReadDirectoryChangesW
+                // thread's JoinHandle — we cannot join it. Its Drop signals the
+                // thread via semaphore so it exits almost immediately; give the
+                // OS a moment to fully reclaim it.
+                #[cfg(windows)]
+                std::thread::sleep(Duration::from_millis(250));
+            })
+            .expect("failed to spawn fff-watcher-owner thread");
+
         Ok(Self {
-            debouncer: Arc::new(Mutex::new(Some(debouncer))),
+            stop_signal,
+            owner_thread: Some(owner_thread),
         })
     }
 
@@ -130,25 +171,23 @@ impl BackgroundWatcher {
         Ok(debouncer)
     }
 
-    pub fn stop(&self) {
-        if let Ok(Some(debouncer)) = self.debouncer.lock().map(|mut debouncer| debouncer.take()) {
-            drop(debouncer);
-            info!("Background file watcher stopped successfully");
-        } else {
-            error!("Failed to stop background watcher");
+    pub fn stop(&mut self) {
+        self.stop_signal.store(true, Ordering::Release);
+        if let Some(handle) = self.owner_thread.take() {
+            handle.thread().unpark();
+
+            if let Err(e) = handle.join() {
+                error!("Watcher owner thread panicked: {:?}", e);
+            }
         }
+
+        info!("Background file watcher stopped successfully");
     }
 }
 
 impl Drop for BackgroundWatcher {
     fn drop(&mut self) {
-        if let Ok(mut debouncer_guard) = self.debouncer.lock() {
-            if let Some(debouncer) = debouncer_guard.take() {
-                drop(debouncer);
-            }
-        } else {
-            error!("Failed to acquire debouncer lock to drop");
-        }
+        self.stop();
     }
 }
diff --git a/crates/fff-core/src/file_picker.rs b/crates/fff-core/src/file_picker.rs
index 09ebcbc5..010f8d75 100644
--- a/crates/fff-core/src/file_picker.rs
+++ b/crates/fff-core/src/file_picker.rs
@@ -321,11 +321,10 @@ impl FilePicker {
     ///
     /// # Returns
     /// SearchResult containing matched files, scores, and location information
-    pub fn fuzzy_search<'a>(
+    pub fn fuzzy_search<'a, 'q>(
         files: &'a [FileItem],
-        query: &'a str,
-        parsed: Option<FFFQuery<'a>>,
-        options: FuzzySearchOptions<'a>,
+        query: &'q FFFQuery<'q>,
+        options: FuzzySearchOptions<'q>,
     ) -> SearchResult<'a> {
         let max_threads = if options.max_threads == 0 {
             std::thread::available_parallelism()
@@ -335,8 +334,7 @@ impl FilePicker {
             options.max_threads
         };
         debug!(
-            ?query,
-            parsed_is_some = parsed.is_some(),
+            raw_query = ?query.raw_query,
             pagination = ?options.pagination,
             ?max_threads,
             current_file = ?options.current_file,
@@ -345,25 +343,20 @@ impl FilePicker {
 
         let total_files = files.len();
 
-        // Extract location from parsed query
-        let location = parsed.as_ref().and_then(|p| p.location);
+        let location = query.location;
 
         // Get effective query for max_typos calculation (without location suffix)
-        let effective_query = match &parsed {
-            Some(p) => match &p.fuzzy_query {
-                fff_query_parser::FuzzyQuery::Text(t) => *t,
-                fff_query_parser::FuzzyQuery::Parts(parts) if !parts.is_empty() => parts[0],
-                _ => query.trim(),
-            },
-            None => query.trim(),
+        let effective_query = match &query.fuzzy_query {
+            fff_query_parser::FuzzyQuery::Text(t) => *t,
+            fff_query_parser::FuzzyQuery::Parts(parts) if !parts.is_empty() => parts[0],
+            _ => query.raw_query.trim(),
         };
 
         // small queries with a large number of results can match absolutely everything
         let max_typos = (effective_query.len() as u16 / 4).clamp(2, 6);
 
         let context = ScoringContext {
-            raw_query: query,
-            parsed_query: parsed,
+            query,
             project_path: options.project_path,
             max_typos,
             max_threads,
@@ -620,7 +613,7 @@ impl FilePicker {
     }
 
     pub fn stop_background_monitor(&mut self) {
-        if let Some(watcher) = self.background_watcher.take() {
+        if let Some(mut watcher) = self.background_watcher.take() {
             watcher.stop();
         }
     }
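
The typo budget in the hunk above is a pure function of the effective query length (len / 4, clamped to [2, 6]). A quick standalone check of the formula, mirroring the expression rather than calling crate APIs:

    // Worked check of the max_typos budget: len / 4 clamped to [2, 6].
    fn max_typos(effective_query: &str) -> u16 {
        (effective_query.len() as u16 / 4).clamp(2, 6)
    }

    fn main() {
        assert_eq!(max_typos("ab"), 2);               // 0 -> clamped up to 2
        assert_eq!(max_typos("abcdefgh"), 2);         // 8 / 4 = 2
        assert_eq!(max_typos("sixteen__chars__"), 4); // 16 / 4 = 4
        assert_eq!(max_typos(&"x".repeat(40)), 6);    // 10 -> clamped down to 6
    }
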
diff --git a/crates/fff-core/src/grep.rs b/crates/fff-core/src/grep.rs
index 19156676..8f32c05f 100644
--- a/crates/fff-core/src/grep.rs
+++ b/crates/fff-core/src/grep.rs
@@ -165,6 +165,67 @@ pub fn has_regex_metacharacters(text: &str) -> bool {
     regex::escape(text) != text
 }
 
+/// Check if `text` contains `\n` that is NOT preceded by another `\`.
+///
+/// `\n`  → true  (user wants multiline search)
+/// `\\n` → false (escaped backslash followed by literal `n`, e.g. `\\nvim-data`)
+#[inline]
+fn has_unescaped_newline_escape(text: &str) -> bool {
+    let bytes = text.as_bytes();
+    let mut i = 0;
+    while i < bytes.len().saturating_sub(1) {
+        if bytes[i] == b'\\' {
+            if bytes[i + 1] == b'n' {
+                // Count consecutive backslashes ending at position i
+                let mut backslash_count = 1;
+                while backslash_count <= i && bytes[i - backslash_count] == b'\\' {
+                    backslash_count += 1;
+                }
+                // Odd number of backslashes before 'n' → real \n escape
+                if backslash_count % 2 == 1 {
+                    return true;
+                }
+            }
+            // Skip past the escaped character
+            i += 2;
+        } else {
+            i += 1;
+        }
+    }
+    false
+}
+
+/// Replace only unescaped `\n` sequences with real newlines.
+///
+/// `\n`  → newline character
+/// `\\n` → preserved as-is (literal backslash + `n`)
+fn replace_unescaped_newline_escapes(text: &str) -> String {
+    let bytes = text.as_bytes();
+    let mut result = Vec::with_capacity(bytes.len());
+    let mut i = 0;
+    while i < bytes.len() {
+        if bytes[i] == b'\\' && i + 1 < bytes.len() {
+            if bytes[i + 1] == b'n' {
+                let mut backslash_count = 1;
+                while backslash_count <= i && bytes[i - backslash_count] == b'\\' {
+                    backslash_count += 1;
+                }
+                if backslash_count % 2 == 1 {
+                    result.push(b'\n');
+                    i += 2;
+                    continue;
+                }
+            }
+            result.push(bytes[i]);
+            i += 1;
+        } else {
+            result.push(bytes[i]);
+            i += 1;
+        }
+    }
+    String::from_utf8(result).unwrap_or_else(|_| text.to_string())
+}
+
 /// Controls how the grep pattern is interpreted.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub enum GrepMode {
@@ -1496,8 +1557,7 @@ fn fuzzy_grep_search<'a>(
 /// frecency for the "welcome state" UI.
 pub fn grep_search<'a>(
     files: &'a [FileItem],
-    raw_query: &str,
-    query: Option<&FFFQuery<'_>>,
+    query: &FFFQuery<'_>,
     options: &GrepSearchOptions,
 ) -> GrepResult<'a> {
     let total_files = files.len();
@@ -1507,36 +1567,23 @@ pub fn grep_search<'a>(
     // removed. All non-constraint text tokens are collected and joined with
     // spaces to form the grep pattern:
     //   "name = *.rs someth" -> grep "name = someth" with constraint Extension("rs")
-    let constraints_from_query: &[fff_query_parser::Constraint<'_>];
+    let constraints_from_query = &query.constraints[..];
 
-    let grep_text = match query {
-        Some(p) => {
-            constraints_from_query = &p.constraints[..];
-            p.grep_text()
-        }
-        None => {
-            constraints_from_query = &[];
-            // Single-token query (parser returned None). If the token is a
-            // backslash-escaped constraint (e.g. `\*.rs`, `\/src/`, `\!test`),
-            // strip the leading `\` so the literal text is searched. Other
-            // backslash sequences (e.g. `\bfoo\b` in regex mode) are left alone.
-            let t = raw_query.trim();
-            if t.starts_with('\\') && t.len() > 1 {
-                // Re-parse the unescaped suffix: if it would be a constraint,
-                // the user intended an escape; strip the backslash.
-                let suffix = &t[1..];
-                let parser = QueryParser::new(GrepConfig);
-                if parser
-                    .parse(suffix)
-                    .is_some_and(|q| !q.constraints.is_empty())
-                {
-                    suffix.to_string()
-                } else {
-                    t.to_string()
-                }
+    let grep_text = if !matches!(query.fuzzy_query, fff_query_parser::FuzzyQuery::Empty) {
+        query.grep_text()
+    } else {
+        // Constraint-only or empty query — use raw_query for backslash-escape handling.
+        let t = query.raw_query.trim();
+        if t.starts_with('\\') && t.len() > 1 {
+            let suffix = &t[1..];
+            let parser = QueryParser::new(GrepConfig);
+            if !parser.parse(suffix).constraints.is_empty() {
+                suffix.to_string()
             } else {
                 t.to_string()
             }
+        } else {
+            t.to_string()
         }
     };
@@ -1605,10 +1652,10 @@ pub fn grep_search<'a>(
             .ok(),
     };
 
-    let is_multiline = grep_text.contains("\\n");
+    let is_multiline = has_unescaped_newline_escape(&grep_text);
 
     let effective_pattern = if is_multiline {
-        grep_text.replace("\\n", "\n")
+        replace_unescaped_newline_escapes(&grep_text)
     } else {
         grep_text.to_string()
     };
@@ -1685,7 +1732,7 @@ pub fn grep_search<'a>(
 }
 
 /// Parse a grep query using the GrepConfig parser.
-pub fn parse_grep_query(query: &str) -> Option<FFFQuery<'_>> {
+pub fn parse_grep_query(query: &str) -> FFFQuery<'_> {
     let parser = QueryParser::new(GrepConfig);
     parser.parse(query)
 }
@@ -1711,6 +1758,38 @@ fn strip_file_path_constraints<'a>(
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+
+    #[test]
+    fn test_unescaped_newline_detection() {
+        // Single \n → multiline
+        assert!(has_unescaped_newline_escape("foo\\nbar"));
+        // \\n → escaped backslash followed by literal n, NOT multiline
+        // (this is what the user types when grepping Rust source with `\\nvim`)
+        assert!(!has_unescaped_newline_escape("foo\\\\nvim-data"));
+        // Real-world: source file has literal \\AppData\\Local\\nvim-data
+        // (double backslash in the file, so user types double backslash)
+        assert!(!has_unescaped_newline_escape(
+            r#"format!("{}\\AppData\\Local\\nvim-data","#
+        ));
+        // No \n at all
+        assert!(!has_unescaped_newline_escape("hello world"));
+        // \\\\n → even number of backslashes before n → NOT multiline
+        assert!(!has_unescaped_newline_escape("foo\\\\\\\\nbar"));
+        // \\\n → 3 backslashes: first two pair up, third + n = \n → multiline
+        assert!(has_unescaped_newline_escape("foo\\\\\\nbar"));
+    }
+
+    #[test]
+    fn test_replace_unescaped_newline() {
+        // \n → real newline
+        assert_eq!(replace_unescaped_newline_escapes("foo\\nbar"), "foo\nbar");
+        // \\n → preserved as-is
+        assert_eq!(
+            replace_unescaped_newline_escapes("foo\\\\nvim"),
+            "foo\\\\nvim"
+        );
+    }
 
     #[test]
     fn test_fuzzy_typo_scoring() {
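
The backslash-parity rule the new helpers implement (an odd-length run of backslashes immediately before `n` means a real `\n` escape) can equivalently be phrased as one left-to-right scan that tracks the current backslash run. The sketch below is an illustrative reformulation, not the crate's code, and agrees with the cases in the tests above:

    // Equivalent single-pass formulation of the parity rule.
    fn is_multiline(pattern: &str) -> bool {
        let mut run = 0usize; // length of the current backslash run
        for b in pattern.bytes() {
            match b {
                b'\\' => run += 1,
                // An odd run means the trailing backslash pairs with this 'n'.
                b'n' if run % 2 == 1 => return true,
                _ => run = 0,
            }
        }
        false
    }

    fn main() {
        assert!(is_multiline("foo\\nbar"));      // \n   -> multiline
        assert!(!is_multiline("foo\\\\nvim"));   // \\n  -> literal
        assert!(is_multiline("foo\\\\\\nbar"));  // \\\n -> multiline
    }
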
diff --git a/crates/fff-core/src/path_utils.rs b/crates/fff-core/src/path_utils.rs
index f72e07c2..2ee43c4a 100644
--- a/crates/fff-core/src/path_utils.rs
+++ b/crates/fff-core/src/path_utils.rs
@@ -17,6 +17,22 @@ pub fn canonicalize(path: impl AsRef<Path>) -> std::io::Result<PathBuf> {
     std::fs::canonicalize(path)
 }
 
+#[cfg(windows)]
+pub fn expand_tilde(path: &str) -> PathBuf {
+    PathBuf::from(path)
+}
+
+#[cfg(not(windows))]
+pub fn expand_tilde(path: &str) -> PathBuf {
+    if let Some(stripped) = path.strip_prefix("~/")
+        && let Some(home_dir) = dirs::home_dir()
+    {
+        return home_dir.join(stripped);
+    }
+
+    PathBuf::from(path)
+}
+
 /// Calculate distance penalty based on directory proximity
 /// Returns a negative penalty score based on how far the candidate is from the current file
 pub fn calculate_distance_penalty(current_file: Option<&str>, candidate_path: &str) -> i32 {
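
For clarity, the contract of `expand_tilde` on non-Windows can be sketched with the home directory injected as a parameter (`expand_tilde_with` is hypothetical; the real function consults `dirs::home_dir()`):

    use std::path::PathBuf;

    // Standalone sketch of the tilde-expansion contract (names illustrative).
    fn expand_tilde_with(path: &str, home: &str) -> PathBuf {
        match path.strip_prefix("~/") {
            Some(stripped) => PathBuf::from(home).join(stripped),
            None => PathBuf::from(path), // absolute/relative paths pass through
        }
    }

    fn main() {
        assert_eq!(
            expand_tilde_with("~/projects/fff.nvim", "/home/user"),
            PathBuf::from("/home/user/projects/fff.nvim")
        );
        // A bare "~" or a mid-path tilde is not expanded, matching the `~/` prefix check.
        assert_eq!(
            expand_tilde_with("/etc/hosts", "/home/user"),
            PathBuf::from("/etc/hosts")
        );
    }
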
diff --git a/crates/fff-core/src/score.rs b/crates/fff-core/src/score.rs
index 4d051454..d74677ab 100644
--- a/crates/fff-core/src/score.rs
+++ b/crates/fff-core/src/score.rs
@@ -131,39 +131,34 @@ pub fn match_and_score_files<'a>(
         return (vec![], vec![], 0);
     }
 
-    let parsed = &context.parsed_query;
-    let working_files: FileItems<'a> = match parsed.as_ref().and_then(|p| {
-        if p.constraints.is_empty() {
-            None
-        } else {
-            apply_constraints(files, &p.constraints)
-        }
-    }) {
-        Some(filtered) if !filtered.is_empty() => FileItems::Filtered(filtered),
-        Some(_) => {
-            return (vec![], vec![], 0);
+    let parsed = context.query;
+    let working_files: FileItems<'a> = if parsed.constraints.is_empty() {
+        FileItems::All(files)
+    } else {
+        match apply_constraints(files, &parsed.constraints) {
+            Some(filtered) if !filtered.is_empty() => FileItems::Filtered(filtered),
+            Some(_) => {
+                return (vec![], vec![], 0);
+            }
+            None => FileItems::All(files),
         }
-        None => FileItems::All(files),
     };
 
-    let query_trimmed: &str = context.raw_query.trim();
+    let query_trimmed: &str = parsed.raw_query.trim();
     let single_part_storage: [&str; 1] = [query_trimmed];
 
-    let fuzzy_parts: &[&str] = match parsed {
-        None => {
-            tracing::debug!("STEP 3: Query too short (<2 chars), returning frecency-sorted");
+    let fuzzy_parts: &[&str] = match &parsed.fuzzy_query {
+        FuzzyQuery::Text(t) if t.len() >= 2 => std::slice::from_ref(t),
+        FuzzyQuery::Parts(parts) if !parts.is_empty() => parts.as_slice(),
+        FuzzyQuery::Text(_) | FuzzyQuery::Parts(_) => {
+            return score_filtered_by_frecency(&working_files, context);
+        }
+        FuzzyQuery::Empty => {
             if query_trimmed.len() < 2 {
                 return score_filtered_by_frecency(&working_files, context);
             }
             &single_part_storage
         }
-        Some(p) => match &p.fuzzy_query {
-            FuzzyQuery::Text(t) if t.len() >= 2 => std::slice::from_ref(t),
-            FuzzyQuery::Parts(parts) if !parts.is_empty() => parts.as_slice(),
-            _ => {
-                return score_filtered_by_frecency(&working_files, context);
-            }
-        },
     };
 
     let has_uppercase = fuzzy_parts
@@ -502,6 +497,7 @@ fn sort_and_paginate<'a>(
 mod tests {
     use super::*;
     use crate::types::PaginationArgs;
+    use fff_query_parser::QueryParser;
     use std::path::PathBuf;
 
     fn create_test_file(path: &str, score: i32, modified: u64) -> (FileItem, Score) {
@@ -553,9 +549,11 @@ mod tests {
             .map(|(file, score)| (file, score.clone()))
             .collect();
 
+        let query_str = "test";
+        let parser = QueryParser::default();
+        let query = parser.parse(query_str);
         let context = ScoringContext {
-            raw_query: "test",
-            parsed_query: None,
+            query: &query,
             max_threads: 1,
             max_typos: 2,
             current_file: None,
@@ -602,9 +600,11 @@ mod tests {
             .map(|(file, score)| (file, score.clone()))
             .collect();
 
+        let query_str = "test";
+        let parser = QueryParser::default();
+        let query = parser.parse(query_str);
         let context = ScoringContext {
-            raw_query: "test",
-            parsed_query: None,
+            query: &query,
             max_threads: 1,
             max_typos: 2,
             current_file: None,
@@ -649,9 +649,11 @@ mod tests {
             .map(|(file, score)| (file, score.clone()))
             .collect();
 
+        let query_str = "test";
+        let parser = QueryParser::default();
+        let query = parser.parse(query_str);
         let context = ScoringContext {
-            raw_query: "test",
-            parsed_query: None,
+            query: &query,
             max_threads: 1,
             max_typos: 2,
             current_file: None,
diff --git a/crates/fff-core/src/types.rs b/crates/fff-core/src/types.rs
index 17d4d0f5..dfb0062c 100644
--- a/crates/fff-core/src/types.rs
+++ b/crates/fff-core/src/types.rs
@@ -148,7 +148,7 @@ impl Constrainable for FileItem {
     }
 }
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct Score {
     pub total: i32,
     pub base_score: i32,
@@ -171,15 +171,13 @@ pub struct PaginationArgs {
 
 /// Context for scoring files during search.
 ///
-/// The `parsed_query` field contains the pre-parsed query with constraints,
+/// The `query` field contains the pre-parsed query with constraints,
 /// fuzzy parts, and location information. Parsing is done once at the API
 /// boundary and passed through.
 #[derive(Debug, Clone)]
 pub struct ScoringContext<'a> {
-    /// The original raw query string (for compatibility and debugging)
-    pub raw_query: &'a str,
-    /// Pre-parsed query containing constraints, fuzzy parts, and location
-    pub parsed_query: Option<FFFQuery<'a>>,
+    /// Parsed query containing raw text, constraints, fuzzy parts, and location
+    pub query: &'a FFFQuery<'a>,
     pub project_path: Option<&'a Path>,
     pub current_file: Option<&'a str>,
     pub max_typos: u16,
@@ -190,17 +188,14 @@ pub struct ScoringContext<'a> {
     pub pagination: PaginationArgs,
 }
 
-impl<'a> ScoringContext<'a> {
+impl ScoringContext<'_> {
     /// Get the effective fuzzy query string for matching.
     /// Returns the first fuzzy part, or the raw query if no parsing was done.
-    pub fn effective_query(&self) -> &'a str {
-        match &self.parsed_query {
-            Some(p) => match &p.fuzzy_query {
-                FuzzyQuery::Text(t) => t,
-                FuzzyQuery::Parts(parts) if !parts.is_empty() => parts[0],
-                _ => self.raw_query.trim(),
-            },
-            None => self.raw_query.trim(),
+    pub fn effective_query(&self) -> &str {
+        match &self.query.fuzzy_query {
+            FuzzyQuery::Text(t) => t,
+            FuzzyQuery::Parts(parts) if !parts.is_empty() => parts[0],
+            _ => self.query.raw_query.trim(),
         }
     }
 }
diff --git a/crates/fff-core/tests/grep_integration.rs b/crates/fff-core/tests/grep_integration.rs
index d0422b24..be28be54 100644
--- a/crates/fff-core/tests/grep_integration.rs
+++ b/crates/fff-core/tests/grep_integration.rs
@@ -72,7 +72,8 @@ fn plain_text_finds_exact_literal() {
         "Hello, World!\nGoodbye, World!\n",
     )];
 
-    let result = grep_search(&files, "Hello", None, &plain_opts());
+    let parsed = parse_grep_query("Hello");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
@@ -89,7 +90,8 @@ fn plain_text_smart_case_insensitive() {
     )];
 
     // All lowercase query → smart case → case-insensitive
-    let result = grep_search(&files, "hello", None, &plain_opts());
+    let parsed = parse_grep_query("hello");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -108,7 +110,8 @@ fn plain_text_smart_case_sensitive_with_uppercase() {
     )];
 
     // Query has uppercase → smart case → case-sensitive
-    let result = grep_search(&files, "Hello", None, &plain_opts());
+    let parsed = parse_grep_query("Hello");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -128,13 +131,15 @@ fn plain_text_regex_metacharacters_are_literal() {
     )];
 
     // In plain text mode, these regex metacharacters should be literal
-    let result = grep_search(&files, "fn main()", None, &plain_opts());
+    let parsed = parse_grep_query("fn main()");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
 
     // Parentheses should NOT be treated as regex groups
-    let result2 = grep_search(&files, "(\"test\")", None, &plain_opts());
+    let parsed2 = parse_grep_query("(\"test\")");
+    let result2 = grep_search(&files, &parsed2, &plain_opts());
 
     assert_eq!(result2.matches.len(), 1);
     assert_eq!(result2.matches[0].line_number, 2);
}
@@ -149,7 +154,8 @@ fn plain_text_dot_is_literal() {
     )];
 
     // In plain text mode, dot should be literal, not "any char"
-    let result = grep_search(&files, "1.0", None, &plain_opts());
+    let parsed = parse_grep_query("1.0");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -168,7 +174,8 @@ fn plain_text_asterisk_is_literal() {
         "Use **bold** text\nUse *italic* text\nUse normal text\n",
     )];
 
-    let result = grep_search(&files, "**bold**", None, &plain_opts());
+    let parsed = parse_grep_query("**bold**");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
 }
@@ -182,7 +189,8 @@ fn plain_text_backslash_is_literal() {
         "C:\\Users\\foo\\bar\n/home/user/bin\n",
     )];
 
-    let result = grep_search(&files, "C:\\Users", None, &plain_opts());
+    let parsed = parse_grep_query("C:\\Users");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
 }
@@ -195,7 +203,8 @@ fn plain_text_across_multiple_files() {
         create_file(tmp.path(), "c.txt", "no match here\n"),
     ];
 
-    let result = grep_search(&files, "use std", None, &plain_opts());
+    let parsed = parse_grep_query("use std");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 3);
     // Should match in files a.txt and b.txt
@@ -207,7 +216,8 @@ fn plain_text_highlight_offsets_are_correct() {
     let tmp = TempDir::new().unwrap();
     let files = vec![create_file(tmp.path(), "a.txt", "foo bar foo baz foo\n")];
 
-    let result = grep_search(&files, "foo", None, &plain_opts());
+    let parsed = parse_grep_query("foo");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     let m = &result.matches[0];
@@ -224,7 +234,8 @@ fn plain_text_empty_query_returns_no_content_matches() {
     let tmp = TempDir::new().unwrap();
     let files = vec![create_file(tmp.path(), "a.txt", "some content\n")];
 
-    let result = grep_search(&files, "", None, &plain_opts());
+    let parsed = parse_grep_query("");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     // Empty query in grep returns git-modified welcome state (no content matches)
     // Since our test files have no git status, we expect 0 matches
@@ -245,7 +256,8 @@ fn plain_text_binary_files_are_skipped() {
 
     let files = vec![binary_file, text_file];
 
-    let result = grep_search(&files, "match this text", None, &plain_opts());
+    let parsed = parse_grep_query("match this text");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     // Only the text file should be searched, not the binary one
     assert_eq!(result.files.len(), 1);
@@ -264,7 +276,8 @@ fn plain_text_max_matches_per_file() {
     let mut opts = plain_opts();
     opts.max_matches_per_file = 5;
 
-    let result = grep_search(&files, "match_target", None, &opts);
+    let parsed = parse_grep_query("match_target");
+    let result = grep_search(&files, &parsed, &opts);
 
     assert_eq!(
         result.matches.len(),
@@ -285,7 +298,8 @@ fn plain_text_page_limit() {
     let mut opts = plain_opts();
     opts.page_limit = 10;
 
-    let result = grep_search(&files, "target", None, &opts);
+    let parsed = parse_grep_query("target");
+    let result = grep_search(&files, &parsed, &opts);
 
     // page_limit is a soft minimum: we always finish the current file, so we
     // get at least page_limit matches (no data loss) and at most
@@ -327,7 +341,8 @@ fn plain_text_file_offset_pagination() {
     let max_pages = 20; // safety limit
 
     loop {
-        let result = grep_search(&files, "unique_token", None, &opts);
+        let parsed = parse_grep_query("unique_token");
+        let result = grep_search(&files, &parsed, &opts);
 
         for m in &result.matches {
             let text = m.line_content.trim().to_string();
@@ -377,7 +392,8 @@ fn plain_text_line_numbers_are_correct() {
         "line one\nline two\nline three\nline four\n",
     )];
 
-    let result = grep_search(&files, "line", None, &plain_opts());
+    let parsed = parse_grep_query("line");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 4);
     assert_eq!(result.matches[0].line_number, 1);
@@ -396,7 +412,8 @@ fn plain_text_max_file_size_filter() {
     let mut opts = plain_opts();
     opts.max_file_size = 100; // Only allow files up to 100 bytes
 
-    let result = grep_search(&files, "match_me", None, &opts);
+    let parsed = parse_grep_query("match_me");
+    let result = grep_search(&files, &parsed, &opts);
 
     assert_eq!(result.matches.len(), 0, "large file should be filtered out");
     assert_eq!(result.filtered_file_count, 0);
@@ -413,7 +430,8 @@ fn regex_basic_pattern() {
         "foo123\nbar456\nbaz789\nfoo_bar\n",
     )];
 
-    let result = grep_search(&files, "foo\\d+", None, &regex_opts());
+    let parsed = parse_grep_query("foo\\d+");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
@@ -426,7 +444,8 @@ fn regex_capture_group_matching() {
     let files = vec![create_file(tmp.path(), "a.txt", "foobar\nfoobaz\nfoo123\n")];
 
     // Use a capturing group (not lookahead, which regex crate doesn't support)
-    let result = grep_search(&files, "foo(bar|baz)", None, &regex_opts());
+    let parsed = parse_grep_query("foo(bar|baz)");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 2);
     let contents: Vec<&str> = result
@@ -448,7 +467,8 @@ fn regex_dot_matches_any_char() {
     )];
 
     // In regex mode, . matches any character, so v1.0 matches v1.0, v1x0, v1-0, and v100
-    let result = grep_search(&files, "v1.0", None, &regex_opts());
+    let parsed = parse_grep_query("v1.0");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -466,7 +486,8 @@ fn regex_alternation() {
         "apple\nbanana\ncherry\napricot\n",
     )];
 
-    let result = grep_search(&files, "apple|cherry", None, &regex_opts());
+    let parsed = parse_grep_query("apple|cherry");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 2);
     let lines: Vec<usize> = result.matches.iter().map(|m| m.line_number).collect();
@@ -483,7 +504,8 @@ fn regex_character_class() {
         "cat\ncut\ncot\ncit\ncxt\n",
     )];
 
-    let result = grep_search(&files, "c[aou]t", None, &regex_opts());
+    let parsed = parse_grep_query("c[aou]t");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 3);
     let contents: Vec<&str> = result
@@ -505,7 +527,8 @@ fn regex_quantifiers() {
         "fo\nfoo\nfooo\nfoooo\nbar\n",
     )];
 
-    let result = grep_search(&files, "fo{2,3}", None, &regex_opts());
+    let parsed = parse_grep_query("fo{2,3}");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 3, "should match foo, fooo, foooo");
 }
@@ -519,7 +542,8 @@ fn regex_anchors() {
         "start of line\nmiddle start end\nend of line\n",
     )];
 
-    let result = grep_search(&files, "^start", None, &regex_opts());
+    let parsed = parse_grep_query("^start");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
@@ -535,7 +559,8 @@ fn regex_anchors_multiword() {
     )];
 
     // ^int ff_ should match lines starting with "int ff_"
-    let result = grep_search(&files, "^int ff_", None, &regex_opts());
+    let parsed = parse_grep_query("^int ff_");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -551,7 +576,8 @@ fn regex_highlight_offsets_variable_length() {
     let tmp = TempDir::new().unwrap();
     let files = vec![create_file(tmp.path(), "a.txt", "aab aaab aaaab\n")];
 
-    let result = grep_search(&files, "a+b", None, &regex_opts());
+    let parsed = parse_grep_query("a+b");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(result.matches.len(), 1);
     let m = &result.matches[0];
@@ -573,7 +599,8 @@ fn regex_invalid_pattern_falls_back_to_literal() {
     )];
 
     // Invalid regex: unmatched group — should fall back to literal search
-    let result = grep_search(&files, "name(", None, &regex_opts());
+    let parsed = parse_grep_query("name(");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     // Fallback to literal: finds "name(" in "call name(arg)"
     assert_eq!(
@@ -588,7 +615,8 @@ fn regex_invalid_pattern_falls_back_to_literal() {
     assert!(result.matches[0].line_content.contains("name("));
 
     // A pattern that doesn't exist anywhere — still falls back but finds nothing
-    let result2 = grep_search(&files, "zzz(", None, &regex_opts());
+    let parsed2 = parse_grep_query("zzz(");
+    let result2 = grep_search(&files, &parsed2, &regex_opts());
 
     assert_eq!(result2.matches.len(), 0);
     assert!(result2.regex_fallback_error.is_some());
 }
@@ -603,11 +631,13 @@ fn regex_smart_case() {
     )];
 
     // Lowercase query → case-insensitive
-    let result_lower = grep_search(&files, "foo", None, &regex_opts());
+    let parsed_lower = parse_grep_query("foo");
+    let result_lower = grep_search(&files, &parsed_lower, &regex_opts());
     assert_eq!(result_lower.matches.len(), 3);
 
     // Query with uppercase → case-sensitive
-    let result_upper = grep_search(&files, "Foo", None, &regex_opts());
+    let parsed_upper = parse_grep_query("Foo");
+    let result_upper = grep_search(&files, &parsed_upper, &regex_opts());
     assert_eq!(result_upper.matches.len(), 1);
 }
@@ -628,7 +658,8 @@ fn regex_across_multiple_files() {
         create_file(tmp.path(), "readme.md", "# Title\nSome text\n"),
     ];
 
-    let result = grep_search(&files, "fn \\w+\\(\\)", None, &regex_opts());
+    let parsed = parse_grep_query("fn \\w+\\(\\)");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     // Should match: fn main(), fn helper(), fn test_one(), fn test_two()
     assert_eq!(result.matches.len(), 4);
@@ -646,8 +677,9 @@ fn plain_text_and_regex_agree_on_simple_literal() {
         "hello world\ngoodbye world\nhello again\n",
     )];
 
-    let plain_result = grep_search(&files, "hello", None, &plain_opts());
-    let regex_result = grep_search(&files, "hello", None, &regex_opts());
+    let parsed = parse_grep_query("hello");
+    let plain_result = grep_search(&files, &parsed, &plain_opts());
+    let regex_result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(plain_result.matches.len(), regex_result.matches.len());
     for (p, r) in plain_result.matches.iter().zip(regex_result.matches.iter()) {
@@ -666,8 +698,10 @@ fn plain_text_escapes_what_regex_does_not() {
     )];
 
     // "$100" — in plain text, $ is literal; in regex, $ is anchor
-    let plain_result = grep_search(&files, "$100", None, &plain_opts());
-    let regex_result = grep_search(&files, "\\$100", None, &regex_opts());
+    let parsed_plain = parse_grep_query("$100");
+    let plain_result = grep_search(&files, &parsed_plain, &plain_opts());
+    let parsed_regex = parse_grep_query("\\$100");
+    let regex_result = grep_search(&files, &parsed_regex, &regex_opts());
 
     // Plain text should find "$100" literally
     assert_eq!(plain_result.matches.len(), 1);
@@ -689,7 +723,7 @@ fn grep_with_extension_constraint() {
     ];
 
     let parsed = parse_grep_query("use std *.rs");
-    let result = grep_search(&files, "use std *.rs", parsed.as_ref(), &plain_opts());
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     // Should only search .rs files
     for file in &result.files {
@@ -716,7 +750,8 @@ fn plain_text_bracket_is_literal() {
         "let x = arr[0];\nlet y = arr[1];\nlet z = something;\n",
     )];
 
-    let result = grep_search(&files, "arr[0]", None, &plain_opts());
+    let parsed = parse_grep_query("arr[0]");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -738,7 +773,7 @@ fn grep_backslash_escapes_extension_filter() {
 
     // Without escape: "*.rs" is an extension filter, so only .rs files are searched
     let parsed = parse_grep_query("pattern *.rs");
-    let result_filter = grep_search(&files, "pattern *.rs", parsed.as_ref(), &plain_opts());
+    let result_filter = grep_search(&files, &parsed, &plain_opts());
     assert_eq!(
         result_filter.files.len(),
         1,
@@ -747,7 +782,7 @@ fn grep_backslash_escapes_extension_filter() {
 
     // With escape: "\*.rs" is literal text, both files are searched
     let parsed_escaped = parse_grep_query("\\*.rs");
-    let result_literal = grep_search(&files, "\\*.rs", parsed_escaped.as_ref(), &plain_opts());
+    let result_literal = grep_search(&files, &parsed_escaped, &plain_opts());
     assert_eq!(
         result_literal.matches.len(),
         2,
@@ -764,7 +799,8 @@ fn grep_backslash_escapes_path_segment() {
     ];
 
     // With escape: "\\/src/" is literal text, not a path constraint
-    let result = grep_search(&files, "\\/src/", None, &plain_opts());
+    let parsed = parse_grep_query("\\/src/");
+    let result = grep_search(&files, &parsed, &plain_opts());
     assert_eq!(
         result.matches.len(),
         2,
@@ -782,7 +818,8 @@ fn grep_backslash_escapes_negation() {
     )];
 
     // With escape: "\\!test" is literal text "!test"
-    let result = grep_search(&files, "\\!test", None, &plain_opts());
+    let parsed = parse_grep_query("\\!test");
+    let result = grep_search(&files, &parsed, &plain_opts());
     assert_eq!(result.matches.len(), 1);
     assert!(result.matches[0].line_content.contains("!test"));
 }
@@ -797,7 +834,7 @@ fn grep_with_path_constraint() {
     ];
 
     let parsed = parse_grep_query("target_text /src/");
-    let result = grep_search(&files, "target_text /src/", parsed.as_ref(), &plain_opts());
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert!(result.files[0].relative_path.starts_with("src/"));
@@ -816,7 +853,7 @@ fn grep_with_negated_extension_constraint() {
 
     let query = "target_text !*.rs";
     let parsed = parse_grep_query(query);
-    let result = grep_search(&files, query, parsed.as_ref(), &plain_opts());
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -842,7 +879,7 @@ fn grep_with_negated_path_constraint() {
 
     let query = "target_text !/src/";
     let parsed = parse_grep_query(query);
-    let result = grep_search(&files, query, parsed.as_ref(), &plain_opts());
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -868,7 +905,7 @@ fn grep_with_negated_text_constraint() {
 
     let query = "target_text !test";
     let parsed = parse_grep_query(query);
-    let result = grep_search(&files, query, parsed.as_ref(), &plain_opts());
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     // "tests/helper.rs" contains "test" in path, should be excluded
     assert_eq!(
@@ -898,7 +935,8 @@ fn grep_empty_file_is_skipped() {
     let text_file = create_file(tmp.path(), "text.txt", "findme\n");
     let files = vec![empty_file, text_file];
 
-    let result = grep_search(&files, "findme", None, &plain_opts());
+    let parsed = parse_grep_query("findme");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
 }
@@ -908,7 +946,8 @@ fn grep_single_line_no_trailing_newline() {
     let tmp = TempDir::new().unwrap();
     let files = vec![create_file(tmp.path(), "a.txt", "no newline at end")];
 
-    let result = grep_search(&files, "no newline", None, &plain_opts());
+    let parsed = parse_grep_query("no newline");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 1);
@@ -923,11 +962,13 @@ fn grep_unicode_content() {
         "日本語テスト\nrégulière\nñoño\n",
     )];
 
-    let result = grep_search(&files, "régulière", None, &plain_opts());
+    let parsed = parse_grep_query("régulière");
+    let result = grep_search(&files, &parsed, &plain_opts());
     assert_eq!(result.matches.len(), 1);
     assert_eq!(result.matches[0].line_number, 2);
 
-    let result2 = grep_search(&files, "ñoño", None, &plain_opts());
+    let parsed2 = parse_grep_query("ñoño");
+    let result2 = grep_search(&files, &parsed2, &plain_opts());
     assert_eq!(result2.matches.len(), 1);
     assert_eq!(result2.matches[0].line_number, 3);
 }
@@ -938,7 +979,8 @@ fn grep_long_line_is_truncated() {
     let long_line = "x".repeat(1000) + "NEEDLE" + &"y".repeat(1000);
     let files = vec![create_file(tmp.path(), "long.txt", &long_line)];
 
-    let result = grep_search(&files, "NEEDLE", None, &plain_opts());
+    let parsed = parse_grep_query("NEEDLE");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     // The line_content should be truncated to MAX_LINE_DISPLAY_LEN (512)
@@ -958,7 +1000,8 @@ fn regex_word_boundary() {
         "foo\nfoobar\nbarfoo\nfoo_baz\n",
     )];
 
-    let result = grep_search(&files, "\\bfoo\\b", None, &regex_opts());
+    let parsed = parse_grep_query("\\bfoo\\b");
+    let result = grep_search(&files, &parsed, &regex_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -977,7 +1020,8 @@ fn plain_text_question_mark_is_literal() {
         "what is this?\nhow does it work?\nno question here\nwhat?\n",
     )];
 
-    let result = grep_search(&files, "?", None, &plain_opts());
+    let parsed = parse_grep_query("?");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -995,7 +1039,8 @@ fn plain_text_query_with_question_mark_in_word() {
         "let x = foo?;\nlet y = bar.baz();\nfoo?.unwrap()\n",
     )];
 
-    let result = grep_search(&files, "foo?", None, &plain_opts());
+    let parsed = parse_grep_query("foo?");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -1010,7 +1055,8 @@ fn regex_question_mark_is_quantifier() {
     let files = vec![create_file(tmp.path(), "a.txt", "color\ncolour\ncolouur\n")];
 
means "zero or one of preceding" - let result = grep_search(&files, "colou?r", None, ®ex_opts()); + let parsed = parse_grep_query("colou?r"); + let result = grep_search(&files, &parsed, ®ex_opts()); assert_eq!( result.matches.len(), @@ -1030,7 +1076,8 @@ fn fuzzy_finds_exact_substring() { "hello world\ngoodbye world\nhello again\n", )]; - let result = grep_search(&files, "hello", None, &fuzzy_opts()); + let parsed = parse_grep_query("hello"); + let result = grep_search(&files, &parsed, &fuzzy_opts()); assert_eq!( result.matches.len(), @@ -1051,7 +1098,8 @@ fn fuzzy_finds_scattered_characters() { )]; // "mutex" should fuzzy match "mutex_lock" (contiguous prefix) - let result = grep_search(&files, "mutex", None, &fuzzy_opts()); + let parsed = parse_grep_query("mutex"); + let result = grep_search(&files, &parsed, &fuzzy_opts()); assert!( !result.matches.is_empty(), @@ -1065,7 +1113,8 @@ fn fuzzy_highlight_offsets_correct() { let tmp = TempDir::new().unwrap(); let files = vec![create_file(tmp.path(), "a.txt", "hello world\n")]; - let result = grep_search(&files, "hell", None, &fuzzy_opts()); + let parsed = parse_grep_query("hell"); + let result = grep_search(&files, &parsed, &fuzzy_opts()); assert_eq!(result.matches.len(), 1); let m = &result.matches[0]; @@ -1089,7 +1138,8 @@ fn fuzzy_unicode_char_indices() { // Use "guli" which is a contiguous ASCII substring within "régulière" // (the chars g-u-l-i appear contiguously between the two accented chars) - let result = grep_search(&files, "guli", None, &fuzzy_opts()); + let parsed = parse_grep_query("guli"); + let result = grep_search(&files, &parsed, &fuzzy_opts()); // Should fuzzy match "régulière" (with multi-byte é and è) // This tests that character-to-byte offset conversion works with UTF-8 @@ -1102,7 +1152,8 @@ fn fuzzy_empty_query_returns_empty() { let tmp = TempDir::new().unwrap(); let files = vec![create_file(tmp.path(), "a.txt", "some content\n")]; - let result = grep_search(&files, "", None, &fuzzy_opts()); + let parsed = parse_grep_query(""); + let result = grep_search(&files, &parsed, &fuzzy_opts()); // Empty query returns git-modified files, not fuzzy matches assert_eq!(result.matches.len(), 0); @@ -1118,7 +1169,7 @@ fn fuzzy_with_extension_constraint() { ]; let parsed = parse_grep_query("use std *.rs"); - let result = grep_search(&files, "use std *.rs", parsed.as_ref(), &fuzzy_opts()); + let result = grep_search(&files, &parsed, &fuzzy_opts()); // Should only search .rs files for file in &result.files { @@ -1143,7 +1194,8 @@ fn fuzzy_respects_page_limit() { opts.page_limit = 10; opts.max_matches_per_file = 50; - let result = grep_search(&files, "target", None, &opts); + let parsed = parse_grep_query("target"); + let result = grep_search(&files, &parsed, &opts); // page_limit is a soft minimum: we always finish the current file, so we // get at least page_limit matches (no data loss) and at most @@ -1178,7 +1230,8 @@ fn fuzzy_respects_max_matches_per_file() { let mut opts = fuzzy_opts(); opts.max_matches_per_file = 5; - let result = grep_search(&files, "match", None, &opts); + let parsed = parse_grep_query("match"); + let result = grep_search(&files, &parsed, &opts); assert_eq!( result.matches.len(), @@ -1198,7 +1251,8 @@ fn fuzzy_filters_low_quality_matches() { // Search for "abc" - should match "abc_def_ghi" and "abcdefghij" with high scores, // but NOT "xyz" (no relation) or "mutex_lock" (only weak letter overlap) - let result = grep_search(&files, "abc", None, &fuzzy_opts()); + let parsed = parse_grep_query("abc"); + let result 
+    let result = grep_search(&files, &parsed, &fuzzy_opts());
 
     // Should only get high-quality matches
     assert!(
@@ -1227,7 +1281,8 @@ fn fuzzy_exact_match_always_passes() {
     )];
 
     // Exact matches should always pass regardless of score threshold
-    let result = grep_search(&files, "exact", None, &fuzzy_opts());
+    let parsed = parse_grep_query("exact");
+    let result = grep_search(&files, &parsed, &fuzzy_opts());
 
     assert_eq!(
         result.matches.len(),
@@ -1246,7 +1301,8 @@ fn fuzzy_score_is_captured() {
         "hello world\ngoodbye world\n",
     )];
 
-    let result = grep_search(&files, "hello", None, &fuzzy_opts());
+    let parsed = parse_grep_query("hello");
+    let result = grep_search(&files, &parsed, &fuzzy_opts());
 
     assert_eq!(result.matches.len(), 1);
     let m = &result.matches[0];
@@ -1267,7 +1323,8 @@ fn fuzzy_score_is_none_in_plain_mode() {
     let tmp = TempDir::new().unwrap();
     let files = vec![create_file(tmp.path(), "test.txt", "hello world\n")];
 
-    let result = grep_search(&files, "hello", None, &plain_opts());
+    let parsed = parse_grep_query("hello");
+    let result = grep_search(&files, &parsed, &plain_opts());
 
     assert_eq!(result.matches.len(), 1);
     let m = &result.matches[0];
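
Several of the tests above rely on the smart-case convention: a pattern is case-sensitive exactly when it contains an uppercase character. A standalone sketch of that rule, using a naive substring match rather than the crate's search engine:

    // Illustrative smart-case check (not the crate's implementation).
    fn is_case_sensitive(pattern: &str) -> bool {
        pattern.chars().any(|c| c.is_uppercase())
    }

    fn matches_smart_case(line: &str, pattern: &str) -> bool {
        if is_case_sensitive(pattern) {
            line.contains(pattern)
        } else {
            line.to_lowercase().contains(&pattern.to_lowercase())
        }
    }

    fn main() {
        assert!(matches_smart_case("Say Hello!", "hello"));  // lowercase query: insensitive
        assert!(!matches_smart_case("say hello!", "Hello")); // uppercase query: sensitive
    }
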
diff --git a/crates/fff-mcp/src/server.rs b/crates/fff-mcp/src/server.rs
index 4fe831b5..7985f809 100644
--- a/crates/fff-mcp/src/server.rs
+++ b/crates/fff-mcp/src/server.rs
@@ -253,7 +253,7 @@ impl FffServer {
         let parser = QueryParser::new(AiGrepConfig);
         let parsed = parser.parse(query);
 
-        let result = grep::grep_search(files, query, parsed.as_ref(), &options);
+        let result = grep::grep_search(files, &parsed, &options);
 
         if result.matches.is_empty() && file_offset == 0 {
             // Auto-retry: try broadening multi-word queries by dropping first non-constraint word
@@ -268,10 +268,7 @@ impl FffServer {
                 let rest_query = parts[1..].join(" ");
                 let rest_parsed = parser.parse(&rest_query);
-                let rest_text: Cow<str> = rest_parsed
-                    .as_ref()
-                    .map(|p| Cow::Owned(p.grep_text()))
-                    .unwrap_or(Cow::Borrowed(&rest_query));
+                let rest_text = rest_parsed.grep_text();
                 let retry_mode = if has_regex_metacharacters(&rest_text) {
                     GrepMode::Regex
                 } else {
@@ -279,8 +276,7 @@ impl FffServer {
                 };
                 let (retry_options, _) = make_grep_options(output_mode, retry_mode, 0, context);
-                let retry_result =
-                    grep::grep_search(files, &rest_query, rest_parsed.as_ref(), &retry_options);
+                let retry_result = grep::grep_search(files, &rest_parsed, &retry_options);
 
                 if !retry_result.matches.is_empty() && retry_result.matches.len() <= 10 {
                     let mut cs = self.lock_cursors()?;
@@ -308,8 +304,7 @@ impl FffServer {
             let fuzzy_query = cleanup_fuzzy_query(query);
             let (fuzzy_options, _) = make_grep_options(output_mode, GrepMode::Fuzzy, 0, Some(0));
             let fuzzy_parsed = parser.parse(&fuzzy_query);
-            let fuzzy_result =
-                grep::grep_search(files, &fuzzy_query, fuzzy_parsed.as_ref(), &fuzzy_options);
+            let fuzzy_result = grep::grep_search(files, &fuzzy_parsed, &fuzzy_options);
 
             if !fuzzy_result.matches.is_empty() {
                 let mut lines: Vec<String> = Vec::new();
@@ -347,7 +342,7 @@ impl FffServer {
                     limit: 1,
                 },
             };
-            let file_result = FilePicker::fuzzy_search(files, query, file_query, file_opts);
+            let file_result = FilePicker::fuzzy_search(files, &file_query, file_opts);
 
             if let (Some(top), Some(score)) = (file_result.items.first(), file_result.scores.first())
             {
@@ -439,7 +434,7 @@ impl FffServer {
         let parser = QueryParser::default();
         let fff_query = parser.parse(query);
-        let result = FilePicker::fuzzy_search(files, query, fff_query, make_opts(page_offset));
+        let result = FilePicker::fuzzy_search(files, &fff_query, make_opts(page_offset));
         let total_files = result.total_files;
 
         // Auto-retry with fewer terms if 3+ words return 0 results
@@ -450,8 +445,7 @@ impl FffServer {
         if result.items.is_empty() && words.len() >= 3 && page_offset == 0 {
             if let Some(shorter) = &shorter {
                 let shorter_query = parser.parse(shorter);
-                let retry =
-                    FilePicker::fuzzy_search(files, shorter, shorter_query, make_opts(0));
+                let retry = FilePicker::fuzzy_search(files, &shorter_query, make_opts(0));
 
                 (retry.items, retry.scores, retry.total_matched)
             } else {
@@ -523,10 +517,7 @@ impl FffServer {
         let output_mode = OutputMode::new(params.output_mode.as_deref());
 
         let parsed = QueryParser::new(AiGrepConfig).parse(&params.query);
-        let grep_text: Cow<str> = parsed
-            .as_ref()
-            .map(|p| Cow::Owned(p.grep_text()))
-            .unwrap_or(Cow::Borrowed(&params.query));
+        let grep_text = parsed.grep_text();
 
         let mode = if has_regex_metacharacters(&grep_text) {
             GrepMode::Regex
@@ -592,15 +583,8 @@ impl FffServer {
         let patterns_refs: Vec<&str> = params.patterns.iter().map(|s| s.as_str()).collect();
 
         let parser = fff_query_parser::QueryParser::new(fff_query_parser::AiGrepConfig);
-        let parsed_constraints = if !constraint_query.is_empty() {
-            parser.parse(constraint_query)
-        } else {
-            None
-        };
-        let constraints = parsed_constraints
-            .as_ref()
-            .map(|p| p.constraints.as_slice())
-            .unwrap_or(&[]);
+        let parsed_constraints = parser.parse(constraint_query);
+        let constraints = parsed_constraints.constraints.as_slice();
 
         let files = picker.get_files();
         let result = grep::multi_grep_search(files, &patterns_refs, constraints, &options);
@@ -625,8 +609,7 @@ impl FffServer {
             };
 
             let parsed = parser.parse(&full_query);
-            let fb_result =
-                grep::grep_search(files, &full_query, parsed.as_ref(), &fallback_options);
+            let fb_result = grep::grep_search(files, &parsed, &fallback_options);
 
             if !fb_result.matches.is_empty() {
                 let fb_file_refs: Vec<&FileItem> = fb_result.files.to_vec();
diff --git a/crates/fff-nvim/benches/indexing_and_search.rs b/crates/fff-nvim/benches/indexing_and_search.rs
index 584b3792..2421dd3e 100644
--- a/crates/fff-nvim/benches/indexing_and_search.rs
+++ b/crates/fff-nvim/benches/indexing_and_search.rs
@@ -1,7 +1,7 @@
 use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
 use fff_core::file_picker::{FFFMode, FilePicker};
 use fff_core::types::{FileItem, PaginationArgs};
-use fff_core::{FuzzySearchOptions, SharedFrecency, SharedPicker};
+use fff_core::{FuzzySearchOptions, QueryParser, SharedFrecency, SharedPicker};
 use std::path::PathBuf;
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
@@ -226,17 +226,18 @@ fn bench_search_queries(c: &mut Criterion) {
         ("partial", "src/lib"),
     ];
 
+    let parser = QueryParser::default();
+
     for (name, query) in test_queries {
-        group.bench_with_input(BenchmarkId::new("query", name), &query, |b, &query| {
+        let parsed = parser.parse(query);
+        group.bench_with_input(BenchmarkId::new("query", name), &query, |b, &_query| {
             b.iter(|| {
                 let results = FilePicker::fuzzy_search(
                     black_box(&files),
-                    black_box(query),
-                    None,
+                    black_box(&parsed),
                     FuzzySearchOptions {
                         max_threads: 4,
                         current_file: None,
-                        project_path: None,
 
                         last_same_query_match: None,
                         combo_boost_score_multiplier: 100,
@@ -269,6 +270,8 @@ fn bench_search_thread_scaling(c: &mut Criterion) {
     group.sample_size(100);
 
     let query = "controller";
+    let parser = QueryParser::default();
+    let parsed = parser.parse(query);
     let thread_counts = vec![1, 2, 4, 8];
 
     for threads in thread_counts {
@@ -279,12 +282,10 @@ fn bench_search_thread_scaling(c: &mut Criterion) {
                 b.iter(|| {
                     let results = FilePicker::fuzzy_search(
                         black_box(&files),
-                        black_box(query),
-                        None,
+                        black_box(&parsed),
                         FuzzySearchOptions {
                             max_threads: threads,
                             current_file: None,
-                            project_path: None,
 
                             last_same_query_match: None,
                             combo_boost_score_multiplier: 100,
@@ -318,6 +319,8 @@ fn bench_search_result_limits(c: &mut Criterion) {
     group.sample_size(100);
 
     let query = "mod";
+    let parser = QueryParser::default();
+    let parsed = parser.parse(query);
     let result_limits = vec![10, 50, 100, 500];
 
     for limit in result_limits {
@@ -325,12 +328,10 @@ fn bench_search_result_limits(c: &mut Criterion) {
             b.iter(|| {
                 let results = FilePicker::fuzzy_search(
                     black_box(&files),
-                    black_box(query),
-                    None,
+                    black_box(&parsed),
                     FuzzySearchOptions {
                         max_threads: 4,
                         current_file: None,
-                        project_path: None,
 
                         last_same_query_match: None,
                         combo_boost_score_multiplier: 100,
@@ -371,6 +372,8 @@ fn bench_search_scalability(c: &mut Criterion) {
     group.sample_size(50);
 
     let query = "controller";
+    let parser = QueryParser::default();
+    let parsed = parser.parse(query);
     let file_counts = vec![100, 1000, 5000, 10000, all_files.len().min(50000)];
 
     for count in file_counts {
@@ -383,12 +386,10 @@ fn bench_search_scalability(c: &mut Criterion) {
                 b.iter(|| {
                     let results = FilePicker::fuzzy_search(
                         black_box(subset),
-                        black_box(query),
-                        None,
+                        black_box(&parsed),
                         FuzzySearchOptions {
                             max_threads: 4,
                             current_file: None,
-                            project_path: None,
 
                             last_same_query_match: None,
                             combo_boost_score_multiplier: 100,
@@ -420,19 +421,19 @@ fn bench_search_ordering(c: &mut Criterion) {
     let mut group = c.benchmark_group("ordering");
     group.sample_size(100);
 
-    let query = "controller";
+    let parser = QueryParser::default();
+    let parsed_controller = parser.parse("controller");
+    let parsed_mod = parser.parse("mod");
 
     // Benchmark normal order (descending)
     group.bench_function("normal_order", |b| {
         b.iter(|| {
             let results = FilePicker::fuzzy_search(
                 black_box(&files),
-                black_box(query),
-                None,
+                black_box(&parsed_controller),
                 FuzzySearchOptions {
                     max_threads: 4,
                     current_file: None,
-                    project_path: None,
 
                     last_same_query_match: None,
                     combo_boost_score_multiplier: 100,
@@ -452,12 +453,10 @@ fn bench_search_ordering(c: &mut Criterion) {
         b.iter(|| {
             let results = FilePicker::fuzzy_search(
                 black_box(&files),
-                black_box(query),
-                None,
+                black_box(&parsed_controller),
                 FuzzySearchOptions {
                     max_threads: 4,
                     current_file: None,
-                    project_path: None,
 
                     last_same_query_match: None,
                     combo_boost_score_multiplier: 100,
@@ -477,12 +476,10 @@ fn bench_search_ordering(c: &mut Criterion) {
         b.iter(|| {
             let results = FilePicker::fuzzy_search(
                 black_box(&files),
-                black_box("mod"),
-                None,
+                black_box(&parsed_mod),
                 FuzzySearchOptions {
                     max_threads: 4,
                     current_file: None,
-                    project_path: None,
 
                     last_same_query_match: None,
                     combo_boost_score_multiplier: 100,
@@ -501,12 +498,10 @@ fn bench_search_ordering(c: &mut Criterion) {
         b.iter(|| {
             let results = FilePicker::fuzzy_search(
                 black_box(&files),
-                black_box("mod"),
-                None,
+                black_box(&parsed_mod),
                 FuzzySearchOptions {
                     max_threads: 4,
                     current_file: None,
-                    project_path: None,
 
                     last_same_query_match: None,
                     combo_boost_score_multiplier: 100,
@@ -526,12 +521,10 @@ fn bench_search_ordering(c: &mut Criterion) {
         b.iter(|| {
             let results = FilePicker::fuzzy_search(
                 black_box(&files),
-                black_box("controller"),
-                None,
+                black_box(&parsed_controller),
                 FuzzySearchOptions {
                     max_threads: 4,
                     current_file: None,
-                    project_path: None,
 
                     last_same_query_match: None,
combo_boost_score_multiplier: 100, @@ -550,12 +543,10 @@ fn bench_search_ordering(c: &mut Criterion) { b.iter(|| { let results = FilePicker::fuzzy_search( black_box(&files), - black_box("controller"), - None, + black_box(&parsed_controller), FuzzySearchOptions { max_threads: 4, current_file: None, - project_path: None, last_same_query_match: None, combo_boost_score_multiplier: 100, @@ -587,6 +578,8 @@ fn bench_pagination_performance(c: &mut Criterion) { group.sample_size(100); let query = "mod"; + let parser = QueryParser::default(); + let parsed = parser.parse(query); let page_size = 40; // Benchmark first page (uses partial sort optimization) @@ -594,12 +587,10 @@ fn bench_pagination_performance(c: &mut Criterion) { b.iter(|| { let results = FilePicker::fuzzy_search( black_box(&files), - black_box(query), - None, + black_box(&parsed), FuzzySearchOptions { max_threads: 4, current_file: None, - project_path: None, last_same_query_match: None, combo_boost_score_multiplier: 100, @@ -619,12 +610,10 @@ fn bench_pagination_performance(c: &mut Criterion) { b.iter(|| { let results = FilePicker::fuzzy_search( black_box(&files), - black_box(query), - None, + black_box(&parsed), FuzzySearchOptions { max_threads: 4, current_file: None, - project_path: None, last_same_query_match: None, combo_boost_score_multiplier: 100, @@ -644,12 +633,10 @@ fn bench_pagination_performance(c: &mut Criterion) { b.iter(|| { let results = FilePicker::fuzzy_search( black_box(&files), - black_box(query), - None, + black_box(&parsed), FuzzySearchOptions { max_threads: 4, current_file: None, - project_path: None, last_same_query_match: None, combo_boost_score_multiplier: 100, diff --git a/crates/fff-nvim/src/bin/bench_search_only.rs b/crates/fff-nvim/src/bin/bench_search_only.rs index c2176d04..43eac7c6 100644 --- a/crates/fff-nvim/src/bin/bench_search_only.rs +++ b/crates/fff-nvim/src/bin/bench_search_only.rs @@ -87,8 +87,7 @@ fn main() { let parsed = parser.parse(query); let results = FilePicker::fuzzy_search( &files, - query, - parsed, + &parsed, FuzzySearchOptions { max_threads: 4, current_file: None, diff --git a/crates/fff-nvim/src/bin/fuzzy_grep_test.rs b/crates/fff-nvim/src/bin/fuzzy_grep_test.rs index dd3afb72..7f0ddad2 100644 --- a/crates/fff-nvim/src/bin/fuzzy_grep_test.rs +++ b/crates/fff-nvim/src/bin/fuzzy_grep_test.rs @@ -78,7 +78,7 @@ fn run_fuzzy_query(files: &[FileItem], query: &str, label: &str) { let parsed = parse_grep_query(query); let start = Instant::now(); - let result = grep_search(files, query, parsed.as_ref(), &options); + let result = grep_search(files, &parsed, &options); let elapsed = start.elapsed(); eprintln!("══════════════════════════════════════════════════════════════"); diff --git a/crates/fff-nvim/src/bin/grep_profiler.rs b/crates/fff-nvim/src/bin/grep_profiler.rs index 119b298e..6120177b 100644 --- a/crates/fff-nvim/src/bin/grep_profiler.rs +++ b/crates/fff-nvim/src/bin/grep_profiler.rs @@ -144,7 +144,7 @@ impl<'a> GrepBench<'a> { fn run_once(&self, query: &str) -> (Duration, usize, usize) { let parsed = parse_grep_query(query); let start = Instant::now(); - let result = grep_search(self.files, query, parsed.as_ref(), &self.options); + let result = grep_search(self.files, &parsed, &self.options); let elapsed = start.elapsed(); (elapsed, result.matches.len(), result.total_files_searched) } @@ -452,7 +452,7 @@ fn main() { classify_definitions: false, }; let start = Instant::now(); - let result = grep_search(&files, pagination_query, parsed.as_ref(), &opts); + let result = 
grep_search(&files, &parsed, &opts); let elapsed = start.elapsed(); eprintln!( " {:>6} | {:>12} | {:>8} | {:>6} | {:>12}", diff --git a/crates/fff-nvim/src/bin/grep_vs_rg.rs index 5bdc1179..5d6f97a3 100644 --- a/crates/fff-nvim/src/bin/grep_vs_rg.rs +++ b/crates/fff-nvim/src/bin/grep_vs_rg.rs @@ -210,17 +210,13 @@ fn run_fff_full(files: &[FileItem], query: &str) -> (usize, Duration) { classify_definitions: false, }; let start = Instant::now(); - let result = grep_search(files, query, parsed.as_ref(), &options); + let result = grep_search(files, &parsed, &options); let elapsed = start.elapsed(); (result.matches.len(), elapsed) } #[allow(dead_code)] -fn benchmark_fff_smart_case( - files: &[FileItem], - query: &str, - parsed: Option<FFFQuery<'_>>, -) -> (usize, Duration) { +fn benchmark_fff_smart_case(files: &[FileItem], parsed: &FFFQuery<'_>) -> (usize, Duration) { let options = GrepSearchOptions { max_file_size: 10 * 1024 * 1024, max_matches_per_file: usize::MAX, @@ -234,7 +230,7 @@ fn benchmark_fff_smart_case( classify_definitions: false, }; let start = Instant::now(); - let result = grep_search(files, query, parsed.as_ref(), &options); + let result = grep_search(files, parsed, &options); let elapsed = start.elapsed(); (result.matches.len(), elapsed) } @@ -255,7 +251,7 @@ fn run_fff_page(files: &[FileItem], query: &str) -> (usize, Duration) { classify_definitions: false, }; let start = Instant::now(); - let result = grep_search(files, query, parsed.as_ref(), &options); + let result = grep_search(files, &parsed, &options); let elapsed = start.elapsed(); (result.matches.len(), elapsed) } diff --git a/crates/fff-nvim/src/bin/jemalloc_profile.rs index 12f1235b..12296659 100644 --- a/crates/fff-nvim/src/bin/jemalloc_profile.rs +++ b/crates/fff-nvim/src/bin/jemalloc_profile.rs @@ -90,8 +90,7 @@ fn test_search_memory_pattern( let parsed = parser.parse(&query); let search_result = FilePicker::fuzzy_search( picker.get_files(), - &query, - parsed, + &parsed, FuzzySearchOptions { max_threads: 1 + (i % 4), current_file: None, diff --git a/crates/fff-nvim/src/bin/search_profiler.rs index ca97a4e5..26616d1d 100644 --- a/crates/fff-nvim/src/bin/search_profiler.rs +++ b/crates/fff-nvim/src/bin/search_profiler.rs @@ -126,8 +126,7 @@ fn main() { let parsed = parser.parse(query); let results = FilePicker::fuzzy_search( &files, - query, - parsed, + &parsed, FuzzySearchOptions { max_threads: 4, current_file: None, diff --git a/crates/fff-nvim/src/bin/test_memory_leak.rs index 0dcf10a9..2ee6df9d 100644 --- a/crates/fff-nvim/src/bin/test_memory_leak.rs +++ b/crates/fff-nvim/src/bin/test_memory_leak.rs @@ -200,8 +200,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { let parsed = parser.parse(query); let search_result = FilePicker::fuzzy_search( picker.get_files(), - query, - parsed, + &parsed, FuzzySearchOptions { max_threads, current_file: None, diff --git a/crates/fff-nvim/src/bin/test_watcher.rs index 1b2d2618..472a7a5d 100644 --- a/crates/fff-nvim/src/bin/test_watcher.rs +++ b/crates/fff-nvim/src/bin/test_watcher.rs @@ -155,8 +155,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { let parsed = parser.parse("rs"); let search_results = FilePicker::fuzzy_search( files, - "rs", - parsed, + &parsed, FuzzySearchOptions { max_threads: 2, current_file: None, diff --git a/crates/fff-nvim/src/hex_dump.rs new
file mode 100644 index 00000000..05c11938 --- /dev/null +++ b/crates/fff-nvim/src/hex_dump.rs @@ -0,0 +1,190 @@ +use mlua::prelude::*; +use std::fmt::Write as _; +use std::io::{Read, Seek, SeekFrom}; + +// Byte category colors (matching hexyl's default theme) +const COLOR_OFFSET: &str = "#888888"; +const COLOR_NULL: &str = "#555753"; +const COLOR_ASCII_PRINTABLE: &str = "#06989a"; +const COLOR_ASCII_WHITESPACE: &str = "#4e9a06"; +const COLOR_ASCII_OTHER: &str = "#4e9a06"; +const COLOR_NON_ASCII: &str = "#c4a000"; + +fn byte_color(b: u8) -> &'static str { + match b { + 0x00 => COLOR_NULL, + 0x20 | 0x09 | 0x0a | 0x0d => COLOR_ASCII_WHITESPACE, + 0x21..=0x7e => COLOR_ASCII_PRINTABLE, + 0x01..=0x1f | 0x7f => COLOR_ASCII_OTHER, + _ => COLOR_NON_ASCII, + } +} + +fn byte_char(b: u8) -> char { + match b { + 0x20..=0x7e => b as char, + _ => '.', + } +} + +const BYTES_PER_LINE: usize = 16; + +struct Span { + line: usize, + col_start: usize, + col_end: usize, + color: &'static str, +} + +/// Push a span, merging with the previous one if same line and color. +fn push_span( + spans: &mut Vec<Span>, + line: usize, + col_start: usize, + col_end: usize, + color: &'static str, +) { + if let Some(last) = spans.last_mut() { + // Merge if same line, same color, and adjacent (allow small gaps for spaces between hex pairs) + if last.line == line && std::ptr::eq(last.color, color) && col_start <= last.col_end + 1 { + last.col_end = col_end; + return; + } + } + spans.push(Span { line, col_start, col_end, color, }); } + +/// Format raw bytes into hex dump lines with coalesced highlight spans. +/// +/// Layout per line: +/// ```text +/// XXXXXXXX HH HH HH HH HH HH HH HH HH HH HH HH HH HH HH HH CCCCCCCCCCCCCCCC +/// ``` +fn format_hex_dump(raw_bytes: &[u8], base_offset: u64) -> (Vec<String>, Vec<Span>) { + let mut lines = Vec::new(); + let mut spans = Vec::new(); + + for (chunk_idx, chunk) in raw_bytes.chunks(BYTES_PER_LINE).enumerate() { + let addr = base_offset + (chunk_idx * BYTES_PER_LINE) as u64; + let mut line = format!("{addr:08x} "); + + // Offset label highlight + push_span(&mut spans, chunk_idx, 0, 8, COLOR_OFFSET); + + // Hex pairs with a gap after 8 bytes + for (i, &b) in chunk.iter().enumerate() { + if i == 8 { + line.push(' '); + } + let col = line.len(); + push_span(&mut spans, chunk_idx, col, col + 2, byte_color(b)); + write!(line, "{b:02x} ").unwrap(); + } + + // Pad if the last line is short + if chunk.len() < BYTES_PER_LINE { + let missing = BYTES_PER_LINE - chunk.len(); + let mut pad = missing * 3; + if chunk.len() <= 8 { + pad += 1; + } + for _ in 0..pad { + line.push(' '); + } + } + + // Separator before char panel + line.push(' '); + + // Character panel — consecutive same-color chars merge automatically + let char_start = line.len(); + for (i, &b) in chunk.iter().enumerate() { + let col = char_start + i; + push_span(&mut spans, chunk_idx, col, col + 1, byte_color(b)); + line.push(byte_char(b)); + } + + lines.push(line); + } + + (lines, spans) } +
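The `push_span` coalescing above is what keeps the highlight table small: one extmark-sized range per run of same-colored bytes rather than one per byte. A standalone sketch of the same merge rule follows; the `Span` type is re-declared here purely for illustration, and it compares colors by string equality where the real module uses pointer identity on the interned `&'static str` constants:

```rust
// Illustration of the coalescing rule: a new span is absorbed by the
// previous one when it is on the same line, has the same color, and
// starts at or just past the previous span's end.
struct Span { line: usize, col_start: usize, col_end: usize, color: &'static str }

fn push_span(spans: &mut Vec<Span>, line: usize, col_start: usize, col_end: usize, color: &'static str) {
    if let Some(last) = spans.last_mut() {
        if last.line == line && last.color == color && col_start <= last.col_end + 1 {
            last.col_end = col_end; // extend in place instead of allocating a new span
            return;
        }
    }
    spans.push(Span { line, col_start, col_end, color });
}

fn main() {
    let mut spans = Vec::new();
    // Two adjacent hex pairs of the same byte category (columns 10..12 and 13..15,
    // separated by the single space between pairs) ...
    push_span(&mut spans, 0, 10, 12, "#06989a");
    push_span(&mut spans, 0, 13, 15, "#06989a");
    // ... collapse into one span covering columns 10..15.
    assert_eq!(spans.len(), 1);
    assert_eq!((spans[0].col_start, spans[0].col_end), (10, 15));
}
```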
+/// Generate a hex dump for a binary file with paging support and highlight data. +/// +/// Returns a Lua table: +/// ```text +/// { +/// lines: string[], +/// highlights: {line_0idx, col_start, col_end, color}[], +/// has_more: bool, +/// next_offset: number, +/// } +/// ``` +pub fn hex_dump( + lua: &Lua, + (file_path, offset, length): (String, Option<u64>, Option<u64>), +) -> LuaResult<LuaValue> { + let offset = offset.unwrap_or(0); + let length = length.unwrap_or(4096); + + let file = std::fs::File::open(&file_path) + .map_err(|e| LuaError::RuntimeError(format!("Failed to open file: {e}")))?; + + let file_size = file + .metadata() + .map_err(|e| LuaError::RuntimeError(format!("Failed to get metadata: {e}")))? + .len(); + + let table = lua.create_table()?; + + if offset >= file_size { + table.set("lines", lua.create_table()?)?; + table.set("highlights", lua.create_table()?)?; + table.set("has_more", false)?; + table.set("next_offset", file_size)?; + return Ok(LuaValue::Table(table)); + } + + let mut reader = std::io::BufReader::new(file); + reader + .seek(SeekFrom::Start(offset)) + .map_err(|e| LuaError::RuntimeError(format!("Failed to seek: {e}")))?; + let mut raw_bytes = Vec::with_capacity(length as usize); + reader + .by_ref() + .take(length) + .read_to_end(&mut raw_bytes) + .map_err(|e| LuaError::RuntimeError(format!("Failed to read: {e}")))?; + + let (plain_lines, hl_spans) = format_hex_dump(&raw_bytes, offset); + + let lines_table = lua.create_table()?; + for (i, line) in plain_lines.iter().enumerate() { + lines_table.set(i + 1, line.as_str())?; + } + table.set("lines", lines_table)?; + + let highlights_table = lua.create_table()?; + for (i, span) in hl_spans.iter().enumerate() { + let hl = lua.create_table()?; + hl.raw_set(1, span.line)?; + hl.raw_set(2, span.col_start)?; + hl.raw_set(3, span.col_end)?; + hl.raw_set(4, span.color)?; + highlights_table.raw_set(i + 1, hl)?; + } + table.set("highlights", highlights_table)?; + + let bytes_read = raw_bytes.len() as u64; + let next_offset = offset + bytes_read; + table.set("has_more", next_offset < file_size)?; + table.set("next_offset", next_offset)?; + + Ok(LuaValue::Table(table)) +} diff --git a/crates/fff-nvim/src/lib.rs index 40406cc0..31c68847 100644 --- a/crates/fff-nvim/src/lib.rs +++ b/crates/fff-nvim/src/lib.rs @@ -2,10 +2,11 @@ use crate::path_shortening::shorten_path_with_cache; use error::{IntoCoreError, IntoLuaResult}; use fff_core::file_picker::FilePicker; use fff_core::frecency::FrecencyTracker; +use fff_core::path_utils::expand_tilde; use fff_core::query_tracker::QueryTracker; use fff_core::{ - DbHealthChecker, Error, FFFMode, FuzzySearchOptions, PaginationArgs, QueryParser, - SharedFrecency, SharedPicker, SharedQueryTracker, + DbHealthChecker, Error, FFFMode, FuzzySearchOptions, PaginationArgs, QueryParser, Score, + SearchResult, SharedFrecency, SharedPicker, SharedQueryTracker, }; use mimalloc::MiMalloc; use mlua::prelude::*; @@ -16,6 +17,7 @@ use std::sync::{Arc, RwLock}; use std::time::Duration; mod error; +mod hex_dump; mod log; mod lua_types; mod path_shortening; @@ -248,10 +250,10 @@ pub fn fuzzy_search_files( let parser = QueryParser::default(); let parsed = parser.parse(&query); + let files = picker.get_files(); let results = FilePicker::fuzzy_search( - picker.get_files(), - &query, - parsed, + files, + &parsed, FuzzySearchOptions { max_threads, current_file: current_file.as_deref(), @@ -266,6 +268,34 @@ pub fn fuzzy_search_files( }, ); + if results.items.is_empty() && query.contains(std::path::MAIN_SEPARATOR) { + let pure_query = match &parsed.fuzzy_query {
fff_query_parser::FuzzyQuery::Text(t) => t.trim(), + _ => query.trim(), + }; + + let path = expand_tilde(pure_query); + if path.is_absolute() && path.is_file() { + if let Ok(idx) = files.binary_search_by(|f| f.path.as_path().cmp(&path)) { + let found = SearchResult { + items: vec![&files[idx]], + scores: vec![Score { + exact_match: true, + match_type: "path", + ..Default::default() + }], + total_matched: 1, + total_files: results.total_files, + location: parsed.location, + }; + + return lua_types::SearchResultLua::from(found).into_lua(lua); + } + + return build_file_path_fallback(lua, &path, results.total_files); + } + } + lua_types::SearchResultLua::from(results).into_lua(lua) } @@ -321,11 +351,61 @@ pub fn live_grep( classify_definitions: false, }; - let result = fff_core::grep::grep_search(picker.get_files(), &query, parsed.as_ref(), &options); + let result = fff_core::grep::grep_search(picker.get_files(), &parsed, &options); lua_types::GrepResultLua::from(result).into_lua(lua) } +/// Build a file-picker result for an absolute path that exists on disk but +/// isn't in the picker index (e.g. file from a different project). +fn build_file_path_fallback(lua: &Lua, path: &Path, total_files: usize) -> LuaResult<LuaValue> { + let table = lua.create_table()?; + + let name = path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_default(); + let path_str = path.to_string_lossy().to_string(); + + let item = lua.create_table()?; + item.set("path", path_str.as_str())?; + item.set("relative_path", path_str.as_str())?; + item.set("name", name.as_str())?; + item.set("size", path.metadata().map(|m| m.len()).unwrap_or(0))?; + item.set("modified", 0u64)?; + item.set("access_frecency_score", 0i64)?; + item.set("modification_frecency_score", 0i64)?; + item.set("total_frecency_score", 0i64)?; + item.set("git_status", "")?; + item.set("is_binary", false)?; + + let items_table = lua.create_table()?; + items_table.set(1, item)?; + table.set("items", items_table)?; + + let score = lua.create_table()?; + score.set("total", 0)?; + score.set("base_score", 0)?; + score.set("filename_bonus", 0)?; + score.set("special_filename_bonus", 0)?; + score.set("frecency_boost", 0)?; + score.set("git_status_boost", 0)?; + score.set("distance_penalty", 0)?; + score.set("current_file_penalty", 0)?; + score.set("combo_match_boost", 0)?; + score.set("exact_match", true)?; + score.set("match_type", "path")?; + + let scores_table = lua.create_table()?; + scores_table.set(1, score)?; + table.set("scores", scores_table)?; + + table.set("total_matched", 1)?; + table.set("total_files", total_files)?; + + Ok(LuaValue::Table(table)) +} + pub fn track_access(_: &Lua, file_path: String) -> LuaResult { let file_path = PathBuf::from(&file_path); @@ -827,6 +907,7 @@ fn create_exports(lua: &Lua) -> LuaResult<LuaTable> { )?; exports.set("health_check", lua.create_function(health_check)?)?; exports.set("shorten_path", lua.create_function(shorten_path)?)?; + exports.set("hex_dump", lua.create_function(hex_dump::hex_dump)?)?; Ok(exports) }
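The absolute-path fallback above relies on the picker's file list being sorted by full path, which is what makes the `binary_search_by` lookup valid before the code gives up and synthesizes an out-of-index result. A minimal sketch of that exact-path lookup, with plain `PathBuf`s standing in for the crate's `FileItem`:

```rust
use std::path::{Path, PathBuf};

// Assumes the index is kept sorted by full path, as the binary_search_by
// call in fuzzy_search_files implies.
fn find_exact(files: &[PathBuf], wanted: &Path) -> Option<usize> {
    files.binary_search_by(|f| f.as_path().cmp(wanted)).ok()
}

fn main() {
    let files = vec![
        PathBuf::from("/proj/src/lib.rs"),
        PathBuf::from("/proj/src/main.rs"),
    ];
    assert_eq!(find_exact(&files, Path::new("/proj/src/main.rs")), Some(1));
    // Not in the index: this is the case that falls through to the
    // synthesized single-item result built from disk metadata.
    assert_eq!(find_exact(&files, Path::new("/other/file.rs")), None);
}
```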
diff --git a/crates/fff-nvim/src/lua_types.rs index 9f885820..f94aa067 100644 --- a/crates/fff-nvim/src/lua_types.rs +++ b/crates/fff-nvim/src/lua_types.rs @@ -1,12 +1,7 @@ -//! Lua type conversions for fff-core types -//! -//! This module provides IntoLua implementations for core types. - use fff_core::git::format_git_status; use fff_core::{FileItem, GrepResult, Location, Score, SearchResult}; use mlua::prelude::*; -/// Wrapper for SearchResult that implements IntoLua pub struct SearchResultLua<'a> { inner: SearchResult<'a>, } @@ -17,7 +12,6 @@ impl<'a> From<SearchResult<'a>> for SearchResultLua<'a> { } } -/// Wrapper for GrepResult that implements IntoLua pub struct GrepResultLua<'a> { inner: GrepResult<'a>, } diff --git a/crates/fff-query-parser/src/config.rs index 59d97e89..aa1fed66 100644 --- a/crates/fff-query-parser/src/config.rs +++ b/crates/fff-query-parser/src/config.rs @@ -159,9 +159,17 @@ impl ParserConfig for GrepConfig { return true; } - // Brace expansion → useful for directory alternatives - if bytes.contains(&b'{') && bytes.contains(&b'}') { - return true; + // Brace expansion → useful for directory alternatives. + // Require a comma between `{` and `}` AND at least one letter to + // distinguish real glob expansions like `{src,lib}` or `*.{ts,tsx}` + // from code patterns like `format!("{}")` and regex quantifiers `{2,3}`. + if let Some(open) = bytes.iter().position(|&b| b == b'{') + && let Some(close) = bytes.iter().rposition(|&b| b == b'}') + { + let inner = &bytes[open + 1..close]; + if inner.contains(&b',') && inner.iter().any(|b| b.is_ascii_alphabetic()) { + return true; + } } // Everything else (?, [, bare * without /) → treat as literal text
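The tightened brace heuristic is easy to exercise in isolation. Below is a hedged re-implementation as a free function for illustration, not the crate's `ParserConfig` method (which uses let-chains over the same byte positions):

```rust
// A token counts as a brace expansion only when the text between the
// first '{' and the last '}' contains a comma AND at least one ASCII letter.
fn looks_like_brace_expansion(token: &str) -> bool {
    let bytes = token.as_bytes();
    match (
        bytes.iter().position(|&b| b == b'{'),
        bytes.iter().rposition(|&b| b == b'}'),
    ) {
        (Some(open), Some(close)) if open + 1 <= close => {
            let inner = &bytes[open + 1..close];
            inner.contains(&b',') && inner.iter().any(|b| b.is_ascii_alphabetic())
        }
        _ => false, // no braces, or '}' before '{'
    }
}

fn main() {
    assert!(looks_like_brace_expansion("{src,lib}"));        // directory alternatives
    assert!(looks_like_brace_expansion("*.{ts,tsx}"));       // extension alternatives
    assert!(!looks_like_brace_expansion("format!(\"{}\")")); // empty braces: code, not glob
    assert!(!looks_like_brace_expansion("{2,3}"));           // regex quantifier: comma but no letter
}
```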
diff --git a/crates/fff-query-parser/src/lib.rs index 89864a95..8a81cb7a 100644 --- a/crates/fff-query-parser/src/lib.rs +++ b/crates/fff-query-parser/src/lib.rs @@ -13,12 +13,13 @@ //! //! let parser = QueryParser::default(); //! -//! // Single-token queries return None (no parsing needed) +//! // Single-token queries return FFFQuery with Text fuzzy query and no constraints //! let result = parser.parse("hello"); -//! assert!(result.is_none()); +//! assert!(result.constraints.is_empty()); +//! assert_eq!(result.fuzzy_query, FuzzyQuery::Text("hello")); //! //! // Multi-token queries are parsed -//! let result = parser.parse("name *.rs").expect("Should parse"); +//! let result = parser.parse("name *.rs"); //! match &result.fuzzy_query { //! FuzzyQuery::Text(text) => assert_eq!(*text, "name"), //! _ => panic!("Expected text"), @@ -26,11 +27,11 @@ //! assert!(matches!(result.constraints[0], Constraint::Extension("rs"))); //! //! // Parse glob pattern with text -//! let result = parser.parse("**/*.rs foo").expect("Should parse"); +//! let result = parser.parse("**/*.rs foo"); //! assert!(matches!(result.constraints[0], Constraint::Glob("**/*.rs"))); //! //! // Parse negation -//! let result = parser.parse("!*.rs foo").expect("Should parse"); +//! let result = parser.parse("!*.rs foo"); //! match &result.constraints[0] { //! Constraint::Not(inner) => { //! assert!(matches!(inner.as_ref(), Constraint::Extension("rs"))); @@ -64,32 +65,30 @@ mod tests { fn test_empty_query() { let parser = QueryParser::default(); let result = parser.parse(""); - // Empty query returns None (single-token behavior) - assert!(result.is_none()); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Empty); } #[test] fn test_whitespace_only() { let parser = QueryParser::default(); let result = parser.parse(" "); - // Whitespace-only returns None - assert!(result.is_none()); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Empty); } #[test] fn test_single_token() { let parser = QueryParser::default(); let result = parser.parse("hello"); - // Single token returns None (no parsing needed) - assert!(result.is_none()); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Text("hello")); } #[test] fn test_simple_text() { let parser = QueryParser::default(); - let result = parser - .parse("hello world") - .expect("Should parse multi-token"); + let result = parser.parse("hello world"); match &result.fuzzy_query { FuzzyQuery::Parts(parts) => { @@ -107,9 +106,7 @@ mod tests { fn test_extension_only() { let parser = QueryParser::default(); // Single constraint token - returns Some so constraint can be applied - let result = parser - .parse("*.rs") - .expect("Should parse single constraint"); + let result = parser.parse("*.rs"); assert!(matches!(result.fuzzy_query, FuzzyQuery::Empty)); assert_eq!(result.constraints.len(), 1); assert!(matches!(result.constraints[0], Constraint::Extension("rs"))); @@ -118,9 +115,7 @@ mod tests { #[test] fn test_glob_pattern() { let parser = QueryParser::default(); - let result = parser - .parse("**/*.rs foo") - .expect("Should parse multi-token"); + let result = parser.parse("**/*.rs foo"); assert_eq!(result.constraints.len(), 1); // Glob patterns with ** are treated as globs, not extensions match &result.constraints[0] { @@ -132,7 +127,7 @@ mod tests { #[test] fn test_negation_pattern() { let parser = QueryParser::default(); - let result = parser.parse("!test foo").expect("Should parse multi-token"); + let result = parser.parse("!test foo"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -145,7 +140,7 @@ mod tests { #[test] fn test_path_segment() { let parser = QueryParser::default(); - let result = parser.parse("/src/ foo").expect("Should parse multi-token"); + let result = parser.parse("/src/ foo"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -156,9 +151,7 @@ mod tests { #[test] fn test_git_status() { let parser = QueryParser::default(); - let result = parser - .parse("status:modified foo") - .expect("Should parse multi-token"); + let result = parser.parse("status:modified foo"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -169,9 +162,7 @@ mod tests { #[test] fn test_file_type() { let parser = QueryParser::default(); - let result = parser - .parse("type:rust foo") - .expect("Should parse multi-token"); + let result = parser.parse("type:rust foo"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -182,9 +173,7 @@ mod tests { #[test] fn test_complex_query() { let parser = QueryParser::default(); - let result = parser - .parse("src name *.rs !test /lib/ status:modified") - .expect("Should parse"); + let result = parser.parse("src name *.rs !test /lib/ status:modified"); //
Verify we have fuzzy text match &result.fuzzy_query { FuzzyQuery::Parts(parts) => { @@ -226,9 +215,7 @@ mod tests { #[test] fn test_no_heap_allocation_for_small_queries() { let parser = QueryParser::default(); - let result = parser - .parse("*.rs *.toml !test") - .expect("Should parse multi-token"); + let result = parser.parse("*.rs *.toml !test"); // SmallVec should not have spilled to heap assert!(!result.constraints.spilled()); } @@ -236,9 +223,7 @@ mod tests { #[test] fn test_many_fuzzy_parts() { let parser = QueryParser::default(); - let result = parser - .parse("one two three four five six") - .expect("Should parse"); + let result = parser.parse("one two three four five six"); match &result.fuzzy_query { FuzzyQuery::Parts(parts) => { diff --git a/crates/fff-query-parser/src/parser.rs index 2fe8c430..7fc6da6a 100644 --- a/crates/fff-query-parser/src/parser.rs +++ b/crates/fff-query-parser/src/parser.rs @@ -14,6 +14,8 @@ pub enum FuzzyQuery<'a> { #[derive(Debug, Clone, PartialEq)] pub struct FFFQuery<'a> { + /// The original raw query string before parsing + pub raw_query: &'a str, /// Parsed constraints (stack-allocated for ≤8 constraints) pub constraints: ConstraintVec<'a>, pub fuzzy_query: FuzzyQuery<'a>, @@ -32,8 +34,8 @@ impl<C: ParserConfig> QueryParser<C> { Self { config } } - pub fn parse<'a>(&self, query: &'a str) -> Option<FFFQuery<'a>> { - let query: &'a str = query; + pub fn parse<'a>(&self, query: &'a str) -> FFFQuery<'a> { + let raw_query = query; let config: &C = &self.config; let mut constraints = ConstraintVec::new(); let query = query.trim();
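The signature change above is the core of this patch: `parse` now always yields an `FFFQuery` (carrying the new `raw_query` field), so every call site can pass `&parsed` without unwrapping an `Option`. A simplified sketch of the new contract, using stand-in types for illustration only; the real struct also carries constraints and a location:

```rust
// Simplified stand-ins for the parser types.
#[derive(Debug, PartialEq)]
enum FuzzyQuery<'a> { Empty, Text(&'a str) }

struct FFFQuery<'a> { raw_query: &'a str, fuzzy_query: FuzzyQuery<'a> }

// After the change, even empty and single-token queries produce a value,
// so callers never branch on None before borrowing `&parsed`.
fn parse(query: &str) -> FFFQuery<'_> {
    let trimmed = query.trim();
    FFFQuery {
        raw_query: query,
        fuzzy_query: if trimmed.is_empty() { FuzzyQuery::Empty } else { FuzzyQuery::Text(trimmed) },
    }
}

fn main() {
    let parsed = parse("hello");
    assert_eq!(parsed.fuzzy_query, FuzzyQuery::Text("hello"));
    assert_eq!(parsed.raw_query, "hello");
    assert_eq!(parse("  ").fuzzy_query, FuzzyQuery::Empty);
}
```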
@@ -47,13 +49,26 @@ impl<C: ParserConfig> QueryParser<C> { // Don't treat filename tokens (FilePath) as constraints in single-token // queries — the user is fuzzy-searching, not filtering. FilePath constraints // are only useful as filters in multi-token queries like "score.rs search". - if !matches!(constraint, Constraint::FilePath(_)) { + // + // Also skip PathSegment constraints when the token looks like an absolute + // file path with a location suffix (e.g. /Users/.../file.rs:12). Without + // this, the leading `/` causes the entire path to be consumed as a + // PathSegment, preventing location parsing from running. + let has_location_suffix = matches!(constraint, Constraint::PathSegment(_)) + && query.bytes().any(|b| b == b':') + && query + .bytes() + .rev() + .take_while(|&b| b != b':') + .all(|b| b.is_ascii_digit()); + if !matches!(constraint, Constraint::FilePath(_)) && !has_location_suffix { constraints.push(constraint); - return Some(FFFQuery { + return FFFQuery { + raw_query, constraints, fuzzy_query: FuzzyQuery::Empty, location: None, - }); + }; } } @@ -61,16 +76,26 @@ impl<C: ParserConfig> QueryParser<C> { if config.enable_location() { let (query_without_loc, location) = parse_location(query); if location.is_some() { - return Some(FFFQuery { + return FFFQuery { + raw_query, constraints, fuzzy_query: FuzzyQuery::Text(query_without_loc), location, - }); + }; } } - // Plain text single token - return None (caller handles as simple fuzzy match) - return None; + // Plain text single token + return FFFQuery { + raw_query, + constraints, + fuzzy_query: if query.is_empty() { + FuzzyQuery::Empty + } else { + FuzzyQuery::Text(query) + }, + location: None, + }; } let mut text_parts = TextPartsBuffer::new(); @@ -133,11 +158,12 @@ impl<C: ParserConfig> QueryParser<C> { } }; - Some(FFFQuery { + FFFQuery { + raw_query, constraints, fuzzy_query, location, - }) + } } } @@ -471,9 +497,7 @@ mod tests { #[test] fn test_trailing_slash_in_query() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("www/ test") - .expect("Should parse multi-token query"); + let result = parser.parse("www/ test"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -510,9 +534,7 @@ mod tests { fn test_negation_text() { let parser = QueryParser::new(FilePickerConfig); // Need two tokens for parsing to return Some - let result = parser - .parse("!test foo") - .expect("Should parse multi-token query"); + let result = parser.parse("!test foo"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -525,9 +547,7 @@ mod tests { #[test] fn test_negation_extension() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("!*.rs foo") - .expect("Should parse multi-token query"); + let result = parser.parse("!*.rs foo"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -540,9 +560,7 @@ mod tests { #[test] fn test_negation_path_segment() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("!/src/ foo") - .expect("Should parse multi-token query"); + let result = parser.parse("!/src/ foo"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -555,9 +573,7 @@ mod tests { #[test] fn test_negation_git_status() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("!status:modified foo") - .expect("Should parse multi-token query"); + let result = parser.parse("!status:modified foo"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -573,9 +589,7 @@ mod tests { #[test] fn test_backslash_escape_extension() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("\\*.rs foo") - .expect("Should parse multi-token query"); + let result = parser.parse("\\*.rs foo"); // \*.rs should NOT be parsed as an Extension constraint assert_eq!(result.constraints.len(), 0); // Both tokens should be text @@ -592,9 +606,7 @@ mod tests { #[test] fn test_backslash_escape_path_segment() { let parser = QueryParser::new(FilePickerConfig); -
let result = parser - .parse("\\/src/ foo") - .expect("Should parse multi-token query"); + let result = parser.parse("\\/src/ foo"); assert_eq!(result.constraints.len(), 0); match result.fuzzy_query { FuzzyQuery::Parts(parts) => { @@ -608,117 +620,90 @@ mod tests { #[test] fn test_backslash_escape_negation() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("\\!test foo") - .expect("Should parse multi-token query"); + let result = parser.parse("\\!test foo"); assert_eq!(result.constraints.len(), 0); } #[test] fn test_grep_text_plain_text() { // Multi-token plain text — no constraints - let q = QueryParser::new(GrepConfig) - .parse("name =") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("name ="); assert_eq!(q.grep_text(), "name ="); } #[test] fn test_grep_text_strips_constraint() { - let q = QueryParser::new(GrepConfig) - .parse("name = *.rs someth") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("name = *.rs someth"); assert_eq!(q.grep_text(), "name = someth"); } #[test] fn test_grep_text_leading_constraint() { - let q = QueryParser::new(GrepConfig) - .parse("*.rs name =") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("*.rs name ="); assert_eq!(q.grep_text(), "name ="); } #[test] fn test_grep_text_only_constraints() { - let q = QueryParser::new(GrepConfig) - .parse("*.rs /src/") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("*.rs /src/"); assert_eq!(q.grep_text(), ""); } #[test] fn test_grep_text_path_constraint() { - let q = QueryParser::new(GrepConfig) - .parse("name /src/ value") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("name /src/ value"); assert_eq!(q.grep_text(), "name value"); } #[test] fn test_grep_text_negation_constraint() { - let q = QueryParser::new(GrepConfig) - .parse("name !*.rs value") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("name !*.rs value"); assert_eq!(q.grep_text(), "name value"); } #[test] fn test_grep_text_backslash_escape_stripped() { // \*.rs should be text with the leading \ removed - let q = QueryParser::new(GrepConfig) - .parse("\\*.rs foo") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\*.rs foo"); assert_eq!(q.grep_text(), "*.rs foo"); - let q = QueryParser::new(GrepConfig) - .parse("\\/src/ foo") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\/src/ foo"); assert_eq!(q.grep_text(), "/src/ foo"); - let q = QueryParser::new(GrepConfig) - .parse("\\!test foo") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\!test foo"); assert_eq!(q.grep_text(), "!test foo"); } #[test] fn test_grep_text_question_mark_is_text() { - let q = QueryParser::new(GrepConfig) - .parse("foo? bar") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("foo? bar"); assert_eq!(q.grep_text(), "foo? 
bar"); } #[test] fn test_grep_text_bracket_is_text() { - let q = QueryParser::new(GrepConfig) - .parse("arr[0] more") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("arr[0] more"); assert_eq!(q.grep_text(), "arr[0] more"); } #[test] fn test_grep_text_path_glob_is_constraint() { - let q = QueryParser::new(GrepConfig) - .parse("pattern src/**/*.rs") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("pattern src/**/*.rs"); assert_eq!(q.grep_text(), "pattern"); } #[test] fn test_grep_question_mark_is_text() { let parser = QueryParser::new(GrepConfig); - // Single token "foo?" should return None (treated as plain text by caller) let result = parser.parse("foo?"); - assert!(result.is_none(), "foo? should be plain text in grep mode"); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Text("foo?")); } #[test] fn test_grep_bracket_is_text() { let parser = QueryParser::new(GrepConfig); let result = parser.parse("arr[0] something"); - let result = result.expect("Should parse multi-token query"); // arr[0] should NOT be a glob in grep mode assert_eq!(result.constraints.len(), 0); } @@ -726,9 +711,7 @@ mod tests { #[test] fn test_grep_path_glob_is_constraint() { let parser = QueryParser::new(GrepConfig); - let result = parser - .parse("pattern src/**/*.rs") - .expect("Should parse with path glob"); + let result = parser.parse("pattern src/**/*.rs"); // src/**/*.rs contains / so it should be treated as a glob assert_eq!(result.constraints.len(), 1); assert!(matches!( @@ -740,9 +723,7 @@ mod tests { #[test] fn test_grep_brace_is_constraint() { let parser = QueryParser::new(GrepConfig); - let result = parser - .parse("pattern {src,lib}") - .expect("Should parse with brace expansion"); + let result = parser.parse("pattern {src,lib}"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -755,45 +736,34 @@ mod tests { // Regex patterns like \w+ and \bfoo\b must survive grep_text() // The parser sees \w+ as a text token (not a constraint escape), // but strip_leading_backslash was stripping the \ anyway. - let q = QueryParser::new(GrepConfig) - .parse("pub struct \\w+") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("pub struct \\w+"); assert_eq!( q.grep_text(), "pub struct \\w+", "Backslash-w in regex must be preserved" ); - let q = QueryParser::new(GrepConfig) - .parse("\\bword\\b more") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\bword\\b more"); assert_eq!( q.grep_text(), "\\bword\\b more", "Backslash-b word boundaries must be preserved" ); - // Single-token regex like "fn\\s+\\w+" returns None from parse() - // (single token = no parsing needed, caller uses raw_query directly). 
+ // Single-token regex like "fn\\s+\\w+" returns FFFQuery with Text fuzzy query let result = QueryParser::new(GrepConfig).parse("fn\\s+\\w+"); - assert!( - result.is_none(), - "Single-token regex should return None (no parsing)" - ); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Text("fn\\s+\\w+")); // But the escaped constraint forms SHOULD still be stripped: - let q = QueryParser::new(GrepConfig) - .parse("\\*.rs foo") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\*.rs foo"); assert_eq!( q.grep_text(), "*.rs foo", "Escaped constraint \\*.rs should still have backslash stripped" ); - let q = QueryParser::new(GrepConfig) - .parse("\\/src/ foo") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("\\/src/ foo"); assert_eq!( q.grep_text(), "/src/ foo", @@ -806,7 +776,6 @@ mod tests { let parser = QueryParser::new(GrepConfig); // "a*b" contains * but no / or {} — should be text in grep mode let result = parser.parse("a*b something"); - let result = result.expect("Should parse"); assert_eq!( result.constraints.len(), 0, @@ -817,9 +786,7 @@ mod tests { #[test] fn test_grep_negated_text() { let parser = QueryParser::new(GrepConfig); - let result = parser - .parse("pattern !test") - .expect("Should parse negated text in grep mode"); + let result = parser.parse("pattern !test"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -836,9 +803,7 @@ mod tests { #[test] fn test_grep_negated_path_segment() { let parser = QueryParser::new(GrepConfig); - let result = parser - .parse("pattern !/src/") - .expect("Should parse negated path segment in grep mode"); + let result = parser.parse("pattern !/src/"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -855,9 +820,7 @@ mod tests { #[test] fn test_grep_negated_extension() { let parser = QueryParser::new(GrepConfig); - let result = parser - .parse("pattern !*.rs") - .expect("Should parse negated extension in grep mode"); + let result = parser.parse("pattern !*.rs"); assert_eq!(result.constraints.len(), 1); match &result.constraints[0] { Constraint::Not(inner) => { @@ -877,9 +840,7 @@ mod tests { fn test_ai_grep_detects_file_path() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser - .parse("libswscale/input.c rgba32ToY") - .expect("Should parse"); + let result = parser.parse("libswscale/input.c rgba32ToY"); assert_eq!(result.constraints.len(), 1); assert!( matches!( @@ -896,7 +857,7 @@ mod tests { fn test_ai_grep_detects_nested_file_path() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("src/main.rs fn main").expect("Should parse"); + let result = parser.parse("src/main.rs fn main"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -909,7 +870,7 @@ mod tests { fn test_ai_grep_no_false_positive_trailing_slash() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("src/ pattern").expect("Should parse"); + let result = parser.parse("src/ pattern"); // Should be PathSegment, NOT FilePath assert_eq!(result.constraints.len(), 1); assert!( @@ -923,7 +884,7 @@ mod tests { fn test_ai_grep_bare_filename_is_file_path() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("main.rs pattern").expect("Should parse"); + let result = parser.parse("main.rs 
pattern"); // Bare filename with valid extension → FilePath constraint assert_eq!(result.constraints.len(), 1); assert!( @@ -938,9 +899,7 @@ mod tests { fn test_ai_grep_bare_filename_schema_rs() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser - .parse("schema.rs part_revisions") - .expect("Should parse"); + let result = parser.parse("schema.rs part_revisions"); assert_eq!(result.constraints.len(), 1); assert!( matches!(result.constraints[0], Constraint::FilePath("schema.rs")), @@ -954,7 +913,7 @@ mod tests { fn test_ai_grep_bare_word_no_extension_not_constraint() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("schema pattern").expect("Should parse"); + let result = parser.parse("schema pattern"); // No extension → not a file path, just text assert_eq!(result.constraints.len(), 0); assert_eq!(result.grep_text(), "schema pattern"); @@ -964,7 +923,7 @@ mod tests { fn test_ai_grep_no_false_positive_no_extension() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("src/utils pattern").expect("Should parse"); + let result = parser.parse("src/utils pattern"); // No extension in last component → not a file path, just text assert_eq!(result.constraints.len(), 0); assert_eq!(result.grep_text(), "src/utils pattern"); @@ -974,7 +933,7 @@ mod tests { fn test_ai_grep_wildcard_not_filepath() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("src/**/*.rs pattern").expect("Should parse"); + let result = parser.parse("src/**/*.rs pattern"); // Contains wildcards → should be a Glob, not FilePath assert_eq!(result.constraints.len(), 1); assert!( @@ -988,7 +947,7 @@ mod tests { fn test_ai_grep_star_text_star_is_glob() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("*quote* TODO").expect("Should parse"); + let result = parser.parse("*quote* TODO"); // `*quote*` should be recognised as a glob constraint in AI mode assert_eq!(result.constraints.len(), 1); assert!( @@ -1003,7 +962,7 @@ mod tests { fn test_ai_grep_bare_star_not_glob() { use crate::AiGrepConfig; let parser = QueryParser::new(AiGrepConfig); - let result = parser.parse("* pattern").expect("Should parse"); + let result = parser.parse("* pattern"); // Bare `*` should NOT be treated as a glob (too broad) assert!( result.constraints.is_empty(), @@ -1015,20 +974,15 @@ mod tests { #[test] fn test_grep_no_location_parsing_single_token() { let parser = QueryParser::new(GrepConfig); - // localhost:8080 should NOT be parsed as location — it's a search pattern + // localhost:8080 should NOT be parsed as location -- it's a search pattern let result = parser.parse("localhost:8080"); - assert!( - result.is_none(), - "Single-token grep query with colon-number should return None (plain text), got {:?}", - result - ); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Text("localhost:8080")); } #[test] fn test_grep_no_location_parsing_multi_token() { - let q = QueryParser::new(GrepConfig) - .parse("*.rs localhost:8080") - .expect("should parse"); + let q = QueryParser::new(GrepConfig).parse("*.rs localhost:8080"); assert_eq!( q.grep_text(), "localhost:8080", @@ -1040,11 +994,39 @@ mod tests { ); } + #[test] + fn test_grep_braces_without_comma_is_text() { + let parser = QueryParser::new(GrepConfig); + // Code patterns like format!("{}") should NOT be treated as brace expansion + let 
result = parser.parse(r#"format!("{}\\AppData", home)"#); + assert!( + result.constraints.is_empty(), + "Braces without comma should be text, got {:?}", + result.constraints + ); + assert_eq!(result.grep_text(), r#"format!("{}\\AppData", home)"#); + } + + #[test] + fn test_grep_format_braces_not_glob() { + let parser = QueryParser::new(GrepConfig); + // Code like format!("{}\\path", var) must not have tokens eaten as glob constraints. + // The trailing comma on the first token means both { } and , are present, + // but the comma is outside the braces so it should NOT trigger brace expansion. + let input = "format!(\"{}\\\\AppData\", home)"; + let result = parser.parse(input); + assert!( + result.constraints.is_empty(), + "format! pattern should have no constraints, got {:?}", + result.constraints + ); + } + #[test] fn test_grep_config_star_text_star_not_glob() { use crate::GrepConfig; let parser = QueryParser::new(GrepConfig); - let result = parser.parse("*quote* TODO").expect("Should parse"); + let result = parser.parse("*quote* TODO"); // Regular grep mode should NOT treat `*quote*` as a glob assert!( result.constraints.is_empty(), @@ -1058,9 +1040,7 @@ mod tests { #[test] fn test_file_picker_bare_filename_constraint() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("score.rs file_picker") - .expect("Should parse multi-token query"); + let result = parser.parse("score.rs file_picker"); assert_eq!(result.constraints.len(), 1); assert!( matches!(result.constraints[0], Constraint::FilePath("score.rs")), @@ -1073,9 +1053,7 @@ mod tests { #[test] fn test_file_picker_path_prefixed_filename_constraint() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("libswscale/slice.c lum_convert") - .expect("Should parse multi-token query"); + let result = parser.parse("libswscale/slice.c lum_convert"); assert_eq!(result.constraints.len(), 1); assert!( matches!( @@ -1091,22 +1069,35 @@ mod tests { #[test] fn test_file_picker_single_token_filename_stays_fuzzy() { let parser = QueryParser::new(FilePickerConfig); - // Single-token filename should NOT become a constraint — it should - // return None so the caller uses the raw query for fuzzy matching. + // Single-token filename should NOT become a constraint -- it should + // return FFFQuery with Text fuzzy query so the caller uses it for fuzzy matching. let result = parser.parse("score.rs"); + assert!(result.constraints.is_empty()); + assert_eq!(result.fuzzy_query, FuzzyQuery::Text("score.rs")); + } + + #[test] + fn test_absolute_path_with_location_not_path_segment() { + let parser = QueryParser::new(FilePickerConfig); + // Absolute file path with :line should parse as text + location, + // NOT as a PathSegment constraint (which would eat the whole token). 
+ let result = parser.parse("/Users/neogoose/dev/fframes/src/renderer/concatenator.rs:12"); assert!( - result.is_none(), - "Single-token filename should return None (fuzzy match), got {:?}", - result + result.constraints.is_empty(), + "Absolute path with location should not become a constraint, got {:?}", + result.constraints + ); + assert_eq!( + result.fuzzy_query, + FuzzyQuery::Text("/Users/neogoose/dev/fframes/src/renderer/concatenator.rs") ); + assert_eq!(result.location, Some(Location::Line(12))); } #[test] fn test_file_picker_filename_with_multiple_fuzzy_parts() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("main.rs src components") - .expect("Should parse multi-token query"); + let result = parser.parse("main.rs src components"); assert_eq!(result.constraints.len(), 1); assert!(matches!( result.constraints[0], @@ -1121,9 +1112,7 @@ mod tests { #[test] fn test_file_picker_version_number_not_filename() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("v2.0 release") - .expect("Should parse multi-token query"); + let result = parser.parse("v2.0 release"); // v2.0 extension starts with digit → not a filename constraint assert!( result.constraints.is_empty(), @@ -1135,9 +1124,7 @@ mod tests { #[test] fn test_file_picker_only_one_filepath_constraint() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("main.rs score.rs") - .expect("Should parse multi-token query"); + let result = parser.parse("main.rs score.rs"); // Only first filename becomes a constraint; second is text assert_eq!(result.constraints.len(), 1); assert!(matches!( @@ -1150,9 +1137,7 @@ mod tests { #[test] fn test_file_picker_filename_with_extension_constraint() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("main.rs *.lua") - .expect("Should parse multi-token query"); + let result = parser.parse("main.rs *.lua"); // main.rs → FilePath, *.lua → Extension assert_eq!(result.constraints.len(), 2); assert!(matches!( @@ -1168,9 +1153,7 @@ mod tests { #[test] fn test_file_picker_dotfile_is_filename() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse(".gitignore src") - .expect("Should parse multi-token query"); + let result = parser.parse(".gitignore src"); assert_eq!(result.constraints.len(), 1); assert!( matches!(result.constraints[0], Constraint::FilePath(".gitignore")), @@ -1183,9 +1166,7 @@ mod tests { #[test] fn test_file_picker_no_extension_not_filename() { let parser = QueryParser::new(FilePickerConfig); - let result = parser - .parse("Makefile src") - .expect("Should parse multi-token query"); + let result = parser.parse("Makefile src"); // No dot → not a filename constraint assert!( result.constraints.is_empty(), diff --git a/doc/fff.nvim.txt b/doc/fff.nvim.txt index 231beeb9..b0ae8310 100644 --- a/doc/fff.nvim.txt +++ b/doc/fff.nvim.txt @@ -1,4 +1,4 @@ -*fff.nvim.txt* For Neovim >= 0.10.0 Last change: 2026 March 13 +*fff.nvim.txt* For Neovim >= 0.10.0 Last change: 2026 March 17 ============================================================================== Table of Contents *fff.nvim-table-of-contents* diff --git a/flake.lock b/flake.lock index 96ee3dc5..9d672730 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "crane": { "locked": { - "lastModified": 1767744144, - "narHash": "sha256-9/9ntI0D+HbN4G0TrK3KmHbTvwgswz7p8IEJsWyef8Q=", + "lastModified": 1773189535, + "narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=", "owner": 
"ipetkov", "repo": "crane", - "rev": "2fb033290bf6b23f226d4c8b32f7f7a16b043d7e", + "rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269", "type": "github" }, "original": { @@ -35,11 +35,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1767364772, - "narHash": "sha256-fFUnEYMla8b7UKjijLnMe+oVFOz6HjijGGNS1l7dYaQ=", + "lastModified": 1773597492, + "narHash": "sha256-hQ284SkIeNaeyud+LS0WVLX+WL2rxcVZLFEaK0e03zg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "16c7794d0a28b5a37904d55bcca36003b9109aaa", + "rev": "a07d4ce6bee67d7c838a8a5796e75dff9caa21ef", "type": "github" }, "original": { @@ -64,11 +64,11 @@ ] }, "locked": { - "lastModified": 1770865833, - "narHash": "sha256-oiARqnlvaW6pVGheVi4ye6voqCwhg5hCcGish2ZvQzI=", + "lastModified": 1773716879, + "narHash": "sha256-vXCTasEzzTTd0ZGEuyle20H2hjRom66JeNr7i2ktHD0=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "c8cfbe26238638e2f3a2c0ae7e8d240f5e4ded85", + "rev": "1a9ddeb45c5751b800331363703641b84d1f41f0", "type": "github" }, "original": { diff --git a/lua/fff/file_picker/preview.lua b/lua/fff/file_picker/preview.lua index ecd72d77..955334b5 100644 --- a/lua/fff/file_picker/preview.lua +++ b/lua/fff/file_picker/preview.lua @@ -2,6 +2,7 @@ local utils = require('fff.utils') local file_picker = require('fff.file_picker') local image = require('fff.file_picker.image') local location_utils = require('fff.location_utils') +local rust = require('fff.rust') local M = {} @@ -301,6 +302,8 @@ M.state = { location = nil, -- Current location data for highlighting location_namespace = nil, -- Namespace for location highlighting preview_generation = 0, -- Monotonically increasing token to detect stale async callbacks + is_binary_preview = false, -- Whether the current preview is a hex dump + hex_byte_offset = 0, -- Next byte offset for hex dump paging } --- Setup preview configuration @@ -554,7 +557,69 @@ function M.preview_file(file_path, bufnr) return true end ---- Preview a binary file with async file type detection +-- Hex preview highlight support: dynamically create hl groups from "#rrggbb" +local hex_ns = nil +local hex_hl_cache = {} + +local function ensure_hex_ns() + if not hex_ns then hex_ns = vim.api.nvim_create_namespace('fff_hex_preview') end + return hex_ns +end + +local function get_hex_hl_group(hex_color) + local cached = hex_hl_cache[hex_color] + if cached then return cached end + local group = 'FffHex_' .. 
hex_color:sub(2) + vim.api.nvim_set_hl(0, group, { fg = hex_color }) + hex_hl_cache[hex_color] = group + return group +end + +--- Apply hex highlight spans to a buffer +--- @param bufnr number Buffer number +--- @param highlights table Array of {line_0idx, col_start, col_end, "#rrggbb"} +--- @param line_offset number Lines to add to each highlight line index (for header) +local function apply_hex_highlights(bufnr, highlights, line_offset) + if not highlights or not vim.api.nvim_buf_is_valid(bufnr) then return end + local ns = ensure_hex_ns() + for _, hl in ipairs(highlights) do + pcall(vim.api.nvim_buf_set_extmark, bufnr, ns, hl[1] + line_offset, hl[2], { + end_col = hl[3], + hl_group = get_hex_hl_group(hl[4]), + }) + end +end + +--- Load a page of hex dump content from the Rust backend +--- @param file_path string Path to the binary file +--- @param byte_offset number Byte offset to start reading from +--- @return table|nil Result with lines, highlights, has_more, next_offset +local function load_hex_page(file_path, byte_offset) + local ok, result = pcall(rust.hex_dump, file_path, byte_offset, 4096) + if ok and result then return result end + return nil +end + +--- Load more hex content when scrolling near the end of the buffer +local function load_more_hex_content() + if not M.state.bufnr or not vim.api.nvim_buf_is_valid(M.state.bufnr) then return end + if not M.state.has_more_content or not M.state.current_file then return end + + local current_lines = vim.api.nvim_buf_line_count(M.state.bufnr) + local result = load_hex_page(M.state.current_file, M.state.hex_byte_offset) + if result and result.lines and #result.lines > 0 then + append_buffer_lines(M.state.bufnr, result.lines) + M.state.hex_byte_offset = result.next_offset + M.state.has_more_content = result.has_more + M.state.content_height = vim.api.nvim_buf_line_count(M.state.bufnr) + M.state.loaded_lines = M.state.content_height + apply_hex_highlights(M.state.bufnr, result.highlights, current_lines) + else + M.state.has_more_content = false + end +end + +--- Preview a binary file using hexyl-powered hex dump with paging --- @param file_path string Path to the file --- @param bufnr number Buffer number for preview --- @return boolean Success status @@ -562,49 +627,40 @@ function M.preview_binary_file(file_path, bufnr) local info = M.get_file_info(file_path) local lines = {} + M.state.is_binary_preview = true + M.state.hex_byte_offset = 0 + + -- Build header synchronously (file -b is fast, typically <10ms) + if vim.fn.executable('file') == 1 then + local output = vim.fn.system({ 'file', '-b', file_path }) + if vim.v.shell_error == 0 and output then + local file_type = output:gsub('\n', '') + table.insert(lines, 'Binary file: ' .. file_type) + if info and info.size_formatted then table.insert(lines, 'Size: ' .. 
info.size_formatted) end + table.insert(lines, '') + end + end + + local hex_result = load_hex_page(file_path, 0) + if hex_result and hex_result.lines then + for _, hex_line in ipairs(hex_result.lines) do + table.insert(lines, hex_line) + end + M.state.hex_byte_offset = hex_result.next_offset + M.state.has_more_content = hex_result.has_more + end + set_buffer_lines(bufnr, lines) vim.api.nvim_set_option_value('filetype', 'text', { buf = bufnr }) vim.api.nvim_set_option_value('modifiable', false, { buf = bufnr }) vim.api.nvim_set_option_value('readonly', true, { buf = bufnr }) - if vim.fn.executable('file') == 1 then - local cmd = { 'file', '-b', file_path } - vim.system(cmd, { text = true }, function(result) - vim.schedule(function() - if not vim.api.nvim_buf_is_valid(bufnr) then return end - - if result.code == 0 and result.stdout then - local file_type = result.stdout:gsub('\n', '') - table.insert(lines, 'Binary file: ' .. file_type) - if info and info.size_formatted then table.insert(lines, 'Size: ' .. info.size_formatted) end - - if vim.fn.executable('xxd') == 1 then - table.insert(lines, '') - set_buffer_lines(bufnr, lines) - - local hex_cmd = { 'xxd', '-l', '8192', file_path } - vim.system(hex_cmd, { text = true }, function(hex_result) - vim.schedule(function() - if not vim.api.nvim_buf_is_valid(bufnr) then return end - - if hex_result.code == 0 and hex_result.stdout then - local hex_lines = vim.split(hex_result.stdout, '\n') - for _, line in ipairs(hex_lines) do - if line:match('%S') then table.insert(lines, line) end - end - else - table.insert(lines, 'Use a hex editor or appropriate application to view this file.') - end - set_buffer_lines(bufnr, lines) - end) - end) - else - table.insert(lines, 'Use a hex editor or appropriate application to view this file.') - set_buffer_lines(bufnr, lines) - end - end - end) - end) + M.state.content_height = #lines + M.state.loaded_lines = #lines + + if hex_result and hex_result.highlights then + local header_lines = #lines - (hex_result.lines and #hex_result.lines or 0) + apply_hex_highlights(bufnr, hex_result.highlights, header_lines) end return true @@ -641,6 +697,9 @@ function M.preview(file_path, bufnr, location, is_binary) M.state.total_file_lines = nil M.state.has_more_content = true M.state.is_loading = false + M.state.hex_byte_offset = 0 + + M.state.is_binary_preview = false M.state.current_file = file_path M.state.bufnr = bufnr @@ -672,15 +731,19 @@ function M.scroll(lines) -- If scrolling down and approaching end of loaded content, try to load more if lines > 0 and not M.state.is_loading then local target_line = new_offset + win_height - local buffer_needed = target_line + 20 -- Load a bit ahead + local buffer_needed = target_line + 20 if current_buffer_lines < buffer_needed and M.state.has_more_content then - -- Load more content asynchronously but don't wait for it - ensure_content_loaded_async(target_line) + if M.state.is_binary_preview then + load_more_hex_content() + -- Re-read line count after loading more + current_buffer_lines = vim.api.nvim_buf_line_count(M.state.bufnr) + else + ensure_content_loaded_async(target_line) + end end end - -- Use actual buffer line count for scroll calculations local content_height = current_buffer_lines local half_screen = math.floor(win_height / 2) local max_scroll = math.max(0, content_height + half_screen - win_height) @@ -802,6 +865,8 @@ function M.clear() M.state.scroll_offset = 0 M.state.content_height = 0 M.state.location = nil + M.state.is_binary_preview = false + M.state.hex_byte_offset 
= 0 end --- Apply location highlighting to the preview buffer diff --git a/lua/fff/picker_ui.lua index 31df6681..21219684 100644 --- a/lua/fff/picker_ui.lua +++ b/lua/fff/picker_ui.lua @@ -1700,6 +1700,23 @@ function M.update_preview() -- Check if we need to update the preview (file changed OR location changed) local effective_location = M.state.location + + -- Fallback: if location is nil but query has a :line suffix, parse it directly + if not effective_location and M.state.query and M.state.query ~= '' then + local line_str = M.state.query:match(':(%d+)$') + if line_str then + local line_num = tonumber(line_str) + if line_num and line_num > 0 then + local l, c = M.state.query:match(':(%d+):(%d+)$') + if l then + effective_location = { line = tonumber(l), col = tonumber(c) } + else + effective_location = { line = line_num } + end + end + end + end + -- In grep mode (or when previewing grep suggestions), location comes from the match item local is_grep_item = M.state.mode == 'grep' or M.state.suggestion_source == 'grep' if is_grep_item and item.line_number and item.line_number > 0 then @@ -2293,6 +2310,22 @@ function M.select(action) end end + -- Fallback: if location is nil but query has a :line suffix, parse it directly + if not location and query and query ~= '' then + local line_str = query:match(':(%d+)$') + if line_str then + local line_num = tonumber(line_str) + if line_num and line_num > 0 then + local l, c = query:match(':(%d+):(%d+)$') + if l then + location = { line = tonumber(l), col = tonumber(c) } + else + location = { line = line_num } + end + end + end + end + vim.cmd('stopinsert') M.close() diff --git a/rust-toolchain.toml index ae5308be..8bd63cef 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "nightly-2026-02-10" +channel = "nightly-2026-03-14" components = [ "clippy-preview", "rustfmt-preview",
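One closing illustration of the location plumbing that the Rust `has_location_suffix` check and the Lua fallbacks both implement: a query may carry a trailing `:line` or `:line:col`, where everything after a colon must be digits. The standalone sketch below works under that assumption; it is a simplified mirror, not the crate's `parse_location`, and it deliberately ignores grep mode, where location parsing is disabled so patterns like `localhost:8080` stay literal:

```rust
// Split "path:12" / "path:12:34" into (path, line, optional col).
// Only all-digit tails after the final colon(s) count as a location suffix.
fn split_location(query: &str) -> (&str, Option<(u32, Option<u32>)>) {
    let parse_tail = |s: &str| -> Option<(usize, u32)> {
        let idx = s.rfind(':')?;
        let digits = &s[idx + 1..];
        if digits.is_empty() || !digits.bytes().all(|b| b.is_ascii_digit()) {
            return None;
        }
        Some((idx, digits.parse().ok()?))
    };

    if let Some((i, last)) = parse_tail(query) {
        // Try one more segment: "path:12:34" means line 12, column 34.
        if let Some((j, line)) = parse_tail(&query[..i]) {
            return (&query[..j], Some((line, Some(last))));
        }
        return (&query[..i], Some((last, None)));
    }
    (query, None)
}

fn main() {
    assert_eq!(split_location("src/lib.rs:12"), ("src/lib.rs", Some((12, None))));
    assert_eq!(split_location("src/lib.rs:12:3"), ("src/lib.rs", Some((12, Some(3)))));
    assert_eq!(split_location("Makefile"), ("Makefile", None));
}
```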