Skip to content

Commit 6c16df7

Browse files
committed
refactor: use render-order indices natively in keyed reconciliation
Replace the reversed-index hack (middle_count - 1 - fwd_idx) with native render-order indices in apply_keyed's run algorithm. The barrier tracking uses Option<usize> so checked_sub handles the "everything committed" edge case that previously required reverse indexing.
1 parent 8f3873d commit 6c16df7

1 file changed

Lines changed: 37 additions & 35 deletions

File tree

packages/yew/src/dom_bundle/blist.rs

Lines changed: 37 additions & 35 deletions
Original file line number | Diff line number | Diff line change
@@ -299,17 +299,11 @@ impl BList {
299299
}
300300
let (matching_len_start, bundle_middle) = (matching_len_start, bundle_middle);
301301

302-
let middle_count = bundle_middle.len();
303-
304302
// BNode contains js objects that look suspicious to clippy but are harmless
305303
#[allow(clippy::mutable_key_type)]
306-
let mut spare_bundles: HashSet<KeyedEntry> = HashSet::with_capacity(middle_count);
304+
let mut spare_bundles: HashSet<KeyedEntry> = HashSet::with_capacity(bundle_middle.len());
307305
let mut spliced_middle = bundles.splice(bundle_middle, std::iter::empty());
308-
for (fwd_idx, r) in (&mut spliced_middle).enumerate() {
309-
// Assign indices measuring distance from the right (rightmost = 0).
310-
// The run algorithm below processes right-to-left and expects
311-
// increasing indices in that direction.
312-
let idx = middle_count - 1 - fwd_idx;
306+
for (idx, r) in (&mut spliced_middle).enumerate() {
313307
#[cold]
314308
fn duplicate_in_bundle(root: &BSubtree, parent: &Element, r: BNode) {
315309
test_log!("removing: {:?}", r);
@@ -327,16 +321,16 @@ impl BList {
327321
// We handle runs of in-order nodes. When we encounter one out-of-order, we decide whether:
328322
// - to shift all nodes in the current run to the position after the node before of the run,
329323
// or to
330-
// - "commit" to the current run, shift all nodes before the end of the run that we might
324+
// - "commit" to the current run, shift all nodes after the end of the run that we might
331325
// encounter in the future, and then start a new run.
332-
// Example of a run:
333-
// barrier_idx --v v-- end_idx
334-
// spliced_middle [ ... , M , N , C , D , E , F , G , ... ] (original element order)
335-
// ^---^-----------^ the nodes that are part of the current
336-
// run v start_writer
337-
// replacements [ ... , M , C , D , G ] (new element order)
338-
// ^-- start_idx
339-
let mut barrier_idx = 0; // nodes from spliced_middle[..barrier_idx] are shifted unconditionally
326+
//
327+
// Indices are in render order (idx 0 = leftmost). Processing goes right-to-left, so we
328+
// expect decreasing indices for nodes already in the right relative order.
329+
//
330+
// `barrier_idx` is the next expected index when scanning right-to-left.
331+
// `None` means every remaining node is in the committed region and must be shifted.
332+
let middle_count = spare_bundles.len();
333+
let mut barrier_idx: Option<usize> = middle_count.checked_sub(1);
340334
struct RunInformation<'a> {
341335
start_writer: NodeWriter<'a>,
342336
start_idx: usize,
@@ -353,20 +347,21 @@ impl BList {
353347
if let Some(run) = current_run.as_mut() {
354348
if let Some(KeyedEntry(idx, _)) = ancestor {
355349
// If there are only few runs, this is a cold path
356-
if idx < run.end_idx {
350+
if idx > run.end_idx {
357351
// Have to decide whether to shift or commit the current run. A few
358352
// calculations: A perfect estimate of the amount of
359353
// nodes we have to shift if we move this run:
360354
let run_length = replacements.len() - run.start_idx;
361355
// A very crude estimate of the amount of nodes we will have to shift if we
362356
// commit the run: Note nodes of the current run
363357
// should not be counted here!
364-
let estimated_skipped_nodes = run.end_idx - idx.max(barrier_idx);
358+
let barrier_val = barrier_idx.unwrap_or(run.end_idx);
359+
let estimated_skipped_nodes = idx.min(barrier_val) - run.end_idx;
365360
// double run_length to counteract that the run is part of the
366361
// estimated_skipped_nodes
367362
if 2 * run_length > estimated_skipped_nodes {
368363
// less work to commit to this run
369-
barrier_idx = 1 + run.end_idx;
364+
barrier_idx = run.end_idx.checked_sub(1);
370365
} else {
371366
// Less work to shift this run
372367
for r in replacements[run.start_idx..].iter_mut().rev() {
@@ -380,23 +375,30 @@ impl BList {
380375
let bundle = if let Some(KeyedEntry(idx, mut r_bundle)) = ancestor {
381376
match current_run.as_mut() {
382377
// hot path
383-
// We know that idx >= run.end_idx, so this node doesn't need to shift
378+
// We know that idx <= run.end_idx, so this node doesn't need to shift
384379
Some(run) => run.end_idx = idx,
385-
None => match idx.cmp(&barrier_idx) {
386-
// peep hole optimization, don't start a run as the element is already where
387-
// it should be
388-
Ordering::Equal => barrier_idx += 1,
389-
// shift the node unconditionally, don't start a run
390-
Ordering::Less => writer.shift(&r_bundle),
391-
// start a run
392-
Ordering::Greater => {
393-
current_run = Some(RunInformation {
394-
start_writer: writer.clone(),
395-
start_idx: replacements.len(),
396-
end_idx: idx,
397-
})
380+
None => {
381+
if let Some(b) = barrier_idx {
382+
match idx.cmp(&b) {
383+
// peep hole optimization, don't start a run as the element is
384+
// already where it should be
385+
Ordering::Equal => barrier_idx = idx.checked_sub(1),
386+
// shift the node unconditionally, don't start a run
387+
Ordering::Greater => writer.shift(&r_bundle),
388+
// start a run
389+
Ordering::Less => {
390+
current_run = Some(RunInformation {
391+
start_writer: writer.clone(),
392+
start_idx: replacements.len(),
393+
end_idx: idx,
394+
})
395+
}
396+
}
397+
} else {
398+
// Everything to the right has been committed; must shift
399+
writer.shift(&r_bundle);
398400
}
399-
},
401+
}
400402
}
401403
writer = writer.patch(l, &mut r_bundle);
402404
r_bundle

0 commit comments

Comments
 (0)