Skip to content

Commit 4fcfa97

Browse files
westonpace authored
and claude committed
style: apply rustfmt
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 18a7b66 commit 4fcfa97

1 file changed

Lines changed: 19 additions & 22 deletions

File tree

rust/lance-encoding/src/decoder.rs

Lines changed: 19 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1951,8 +1951,7 @@ impl StructuralBatchDecodeStream {
19511951
let num_rows = b.num_rows() as u64;
19521952
if num_rows > 0 {
19531953
let bpr = batch_data_size(b) / num_rows;
1954-
bytes_per_row_feedback
1955-
.store(bpr.max(1), Ordering::Relaxed);
1954+
bytes_per_row_feedback.store(bpr.max(1), Ordering::Relaxed);
19561955
}
19571956
}
19581957
batch
@@ -2095,9 +2094,7 @@ pub fn create_decode_stream(
20952094
.into_stream())
20962095
} else {
20972096
if batch_size_bytes.is_some() {
2098-
warn!(
2099-
"batch_size_bytes is not supported for v2.0 (legacy) files and will be ignored"
2100-
);
2097+
warn!("batch_size_bytes is not supported for v2.0 (legacy) files and will be ignored");
21012098
}
21022099
let arrow_schema = ArrowSchema::from(schema);
21032100
let root_fields = arrow_schema.fields;
@@ -2932,7 +2929,7 @@ mod tests {
29322929
batch_size: u32,
29332930
batch_size_bytes: Option<u64>,
29342931
) -> Vec<RecordBatch> {
2935-
use crate::encoder::{default_encoding_strategy, encode_batch, EncodingOptions};
2932+
use crate::encoder::{EncodingOptions, default_encoding_strategy, encode_batch};
29362933
use crate::version::LanceFileVersion;
29372934

29382935
let version = LanceFileVersion::V2_1;
@@ -2948,7 +2945,9 @@ mod tests {
29482945

29492946
let io_scheduler =
29502947
Arc::new(BufferScheduler::new(encoded.data.clone())) as Arc<dyn EncodingsIo>;
2951-
let cache = Arc::new(lance_core::cache::LanceCache::with_capacity(128 * 1024 * 1024));
2948+
let cache = Arc::new(lance_core::cache::LanceCache::with_capacity(
2949+
128 * 1024 * 1024,
2950+
));
29522951
let decoder_plugins = Arc::new(DecoderPlugins::default());
29532952

29542953
let mut decode_scheduler = DecodeBatchScheduler::try_new(
@@ -3032,11 +3031,9 @@ mod tests {
30323031

30333032
// Verify roundtrip: concatenate and compare
30343033
let all_batches: Vec<&RecordBatch> = batches.iter().collect();
3035-
let concatenated = arrow_select::concat::concat_batches(
3036-
&batches[0].schema(),
3037-
all_batches.iter().copied(),
3038-
)
3039-
.unwrap();
3034+
let concatenated =
3035+
arrow_select::concat::concat_batches(&batches[0].schema(), all_batches.iter().copied())
3036+
.unwrap();
30403037
assert_eq!(concatenated.num_rows(), num_rows as usize);
30413038
for col in 0..4 {
30423039
assert_eq!(
@@ -3068,8 +3065,7 @@ mod tests {
30683065
let input_batch = RecordBatch::try_new(schema, arrays).unwrap();
30693066

30703067
// batch_size=250, batch_size_bytes=None => 4 batches of 250 rows
3071-
let batches =
3072-
decode_batches_with_byte_limit(&input_batch, /*batch_size=*/ 250, None).await;
3068+
let batches = decode_batches_with_byte_limit(&input_batch, /*batch_size=*/ 250, None).await;
30733069
assert_eq!(batches.len(), 4);
30743070
for (i, batch) in batches.iter().enumerate() {
30753071
assert_eq!(
@@ -3104,17 +3100,18 @@ mod tests {
31043100
// Schema estimate is 64 bytes/row → first batch ~78 rows (overshoot).
31053101
// After feedback kicks in, batches should converge to ~50 rows.
31063102
let target_bytes: u64 = 5000;
3107-
let batches =
3108-
decode_batches_with_byte_limit(&input_batch, /*batch_size=*/ 1024, Some(target_bytes))
3109-
.await;
3103+
let batches = decode_batches_with_byte_limit(
3104+
&input_batch,
3105+
/*batch_size=*/ 1024,
3106+
Some(target_bytes),
3107+
)
3108+
.await;
31103109

31113110
// Verify all data round-trips correctly
31123111
let all_batches: Vec<&RecordBatch> = batches.iter().collect();
3113-
let concatenated = arrow_select::concat::concat_batches(
3114-
&batches[0].schema(),
3115-
all_batches.iter().copied(),
3116-
)
3117-
.unwrap();
3112+
let concatenated =
3113+
arrow_select::concat::concat_batches(&batches[0].schema(), all_batches.iter().copied())
3114+
.unwrap();
31183115
assert_eq!(concatenated.num_rows(), num_rows as usize);
31193116
assert_eq!(
31203117
concatenated.column(0).as_ref(),

0 commit comments

Comments (0)