Skip to content

Commit 8c4f192

Browse files
committed
style: cargo fmt
1 parent 3db1ae5 commit 8c4f192

2 files changed

Lines changed: 8 additions & 12 deletions

File tree

src/indexer.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@ use std::time::{Duration, Instant};
44

55
use anyhow::{Context, Result, anyhow};
66
use ignore::WalkBuilder;
7-
use sha2::{Digest, Sha256};
87
use indicatif::{ProgressBar, ProgressStyle};
8+
use sha2::{Digest, Sha256};
99
use tracing::info;
1010

1111
use crate::chunker::{chunk_markdown, split_oversized_chunks};

src/llm.rs

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -661,11 +661,7 @@ impl LlamaEmbed {
661661
let model = LlamaModel::load_from_file(&backend, &model_path, &model_params)
662662
.map_err(|e| anyhow::anyhow!("loading GGUF model {}: {e}", model_path.display()))?;
663663

664-
tracing::info!(
665-
"loaded LlamaEmbed from {}, target_dim={}",
666-
uri_str,
667-
dim
668-
);
664+
tracing::info!("loaded LlamaEmbed from {}, target_dim={}", uri_str, dim);
669665

670666
Ok(Self {
671667
model,
@@ -931,8 +927,10 @@ impl LlamaOrchestrator {
931927
let backend =
932928
LlamaBackend::init().map_err(|e| anyhow::anyhow!("initializing llama backend: {e}"))?;
933929
let model_params = LlamaModelParams::default();
934-
let model = LlamaModel::load_from_file(&backend, &model_path, &model_params)
935-
.map_err(|e| anyhow::anyhow!("loading orchestrator model {}: {e}", model_path.display()))?;
930+
let model =
931+
LlamaModel::load_from_file(&backend, &model_path, &model_params).map_err(|e| {
932+
anyhow::anyhow!("loading orchestrator model {}: {e}", model_path.display())
933+
})?;
936934

937935
tracing::info!("loaded LlamaOrchestrator from {}", uri_str);
938936

@@ -966,8 +964,7 @@ impl LlamaOrchestrator {
966964

967965
// Create context per-call (LlamaContext is !Send).
968966
let n_ctx = (tokens.len() + max_tokens + 16) as u32;
969-
let ctx_params =
970-
LlamaContextParams::default().with_n_ctx(std::num::NonZeroU32::new(n_ctx));
967+
let ctx_params = LlamaContextParams::default().with_n_ctx(std::num::NonZeroU32::new(n_ctx));
971968
let mut ctx = self
972969
.model
973970
.new_context(&self.backend, ctx_params)
@@ -1148,8 +1145,7 @@ impl RerankModel for LlamaRerank {
11481145

11491146
// Create context per-call (LlamaContext is !Send).
11501147
let n_ctx = (tokens.len() + 16) as u32;
1151-
let ctx_params =
1152-
LlamaContextParams::default().with_n_ctx(std::num::NonZeroU32::new(n_ctx));
1148+
let ctx_params = LlamaContextParams::default().with_n_ctx(std::num::NonZeroU32::new(n_ctx));
11531149
let mut ctx = self
11541150
.model
11551151
.new_context(&self.backend, ctx_params)

0 commit comments

Comments (0)