Skip to content

Commit 5e305fe

Browse files
committed
Improve documentation
1 parent 2061590 commit 5e305fe

1 file changed

Lines changed: 40 additions & 42 deletions

File tree

zjit/src/hir.rs

Lines changed: 40 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -4848,36 +4848,49 @@ impl Function {
48484848
}
48494849
}
48504850

4851-
// TODO: Replace hashmaps with vecs?
4852-
// TODO: Add comments describing load store elimination in the first pass
4853-
// TODO: Link to rails at scale post?
4854-
// TODO: Add comments describing deada store elimination in the second pass
4855-
// TODO: Add master comment about type based alias analysis with specific callouts referencing the overall TBAA comment
4856-
// TODO(Jacob): Spend time with the old noggin to answer the following questions.
4857-
// 1. It is possible to do DSE forwards. Is it possible to do this at the same time as load store?
4858-
// If so, this would allow us to do it in one pass*. We still need to clean up the dead stores afterwards and convert them to
4859-
// NOPs or something. The more I think about it, the more I think it makes sense to have a cleanup pass at the end of all other
4860-
// analysis passes.
4861-
// 2. DSE and load-store-opt seem like duals of each other. If this is true, then they should be symmetric. Why does DSE only handle
4862-
// stores? Can I convince myself that we're not missing some potential dead loads? Is this caught at an SSA level already?
4851+
/// Load store optimization performs two functions:
4852+
/// 1. Elide duplicate LoadField and StoreField instructions, and replace
4853+
/// redundant loads or stores with their values when possible.
4854+
/// 2. Remove "dead stores". A store is dead when it is never used before
4855+
/// getting overwritten by a second store. If this pass proves no use,
4856+
/// the first store is elided.
4857+
///
4858+
/// These two optimizations use very similar abstract interpretations and are
4859+
/// currently combined into one pass. The primary difference is that load
4860+
/// store optimization runs forward (from top to bottom of a block) while
4861+
/// dead store elimination runs in reverse. A subtle secondary difference is
4862+
/// that our use of extended basic blocks induces an asymmetry between these
4863+
/// two optimizations. When scanning in reverse, dead store elimination needs
4864+
/// to consider the "multi-exit" nature of EBBs. Stores that appear to be
4865+
/// dead can sometimes have early exits that make the optimization unsound
4866+
/// if we don't handle this special case. Basic blocks avoid this problem.
4867+
///
4868+
/// Note for future improvements:
4869+
/// Load & store optimizations currently operate at a block level, but could
4870+
/// be expanded to operate on methods. This likely means the two passes
4871+
/// should be split into their own methods, as instead of "one forwards, one
4872+
/// backwards" we need to traverse dominator and post dominator trees and it
4873+
/// becomes tricky enough that splitting may be cleaner.
4874+
///
4875+
/// For further reading, see the following Rails at Scale blog post.
4876+
/// https://railsatscale.com/2026-03-18-how-zjit-removes-redundant-object-loads-and-stores/
48634877
fn optimize_load_store(&mut self) {
48644878
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
48654879
struct HeapKey {
48664880
object: InsnId,
48674881
offset: i32,
48684882
}
48694883

4870-
// This helper function is used while we don't have type based alias
4871-
// analysis. Matching offsets could result in aliased objects that need
4872-
// to be clear.
4873-
// Any code location that uses the remove_aliasing function needs to be replaced
4874-
// when we implement TBAA
4875-
fn remove_aliasing(map: &mut HashMap<HeapKey, InsnId>, offset: i32) {
4884+
// This helper function is used to clear the cache for matching offsets
4885+
// while we don't have type based alias analysis.
4886+
// TBAA will primarily modify any part of this optimization that
4887+
// currently uses this helper function.
4888+
fn flush_aliasing(map: &mut HashMap<HeapKey, InsnId>, offset: i32) {
48764889
map.retain(|HeapKey { object: _, offset: off }, _| *off != offset);
48774890
}
48784891

4892+
// Redundant load and store optimization
48794893
for block in self.rpo() {
4880-
// First, we eliminate loads and stores
48814894
let mut compile_time_heap: HashMap<HeapKey, InsnId> = HashMap::new();
48824895
let old_insns = std::mem::take(&mut self.blocks[block.0].insns);
48834896
let mut new_insns = vec![];
@@ -4887,12 +4900,11 @@ impl Function {
48874900
Insn::StoreField { recv, offset, val, .. } => {
48884901
let key = HeapKey { object: self.chase_insn(recv), offset };
48894902
let heap_entry = compile_time_heap.get(&key).copied();
4890-
// TODO(Jacob): Switch from actual to partial equality
48914903
if Some(val) == heap_entry {
48924904
// If the value is already stored, short circuit and don't add an instruction to the block
48934905
continue
48944906
}
4895-
remove_aliasing(&mut compile_time_heap, offset);
4907+
flush_aliasing(&mut compile_time_heap, offset);
48964908
compile_time_heap.insert(key, val);
48974909
},
48984910
Insn::LoadField { recv, offset, .. } => {
@@ -4901,7 +4913,6 @@ impl Function {
49014913
std::collections::hash_map::Entry::Occupied(entry) => {
49024914
// If the value is stored already, we should short circuit.
49034915
// However, we need to replace insn_id with its representative in the SSA union.
4904-
// TODO: Show an example of why we need this with p little ascii diagram or something
49054916
self.make_equal_to(insn_id, *entry.get());
49064917
continue
49074918
}
@@ -4917,7 +4928,7 @@ impl Function {
49174928
// But flags does not exist in our effects abstract heap modeling and we don't want to add special casing to effects.
49184929
// This special casing in this pass here should be removed once we refine our effects system to provide greater granularity for WriteBarrier.
49194930
let offset = RUBY_OFFSET_RBASIC_FLAGS;
4920-
remove_aliasing(&mut compile_time_heap, offset);
4931+
flush_aliasing(&mut compile_time_heap, offset);
49214932
},
49224933
insn => {
49234934
// If an instruction affects memory and we haven't modeled it, the compile_time_heap is invalidated
@@ -4929,25 +4940,12 @@ impl Function {
49294940
new_insns.push(insn_id);
49304941
}
49314942

4932-
4933-
// get the block
4934-
// load-store optimization
4935-
// reverse updated instructions
4936-
// DSE
4937-
// reverse final updated instructions
4938-
// return
4939-
//
4940-
// get the block
4941-
// load-store optimization, DSE* after (same iteration)
4942-
// instead of eliding the store, mark the earlier store for removal
4943-
// return
4944-
4945-
// After performing scalar replacement on loads and stores where
4946-
// applicable, we scan in reverse to find an eliminate dead stores
4943+
// Set up for dead store elimination
49474944
let mut reversed_insns = std::mem::take(&mut new_insns);
49484945
reversed_insns.reverse();
49494946
compile_time_heap.clear();
49504947

4948+
// Dead store elimination
49514949
for insn_id in reversed_insns {
49524950
match self.find(insn_id) {
49534951
Insn::StoreField { recv, offset, .. } => {
@@ -4957,21 +4955,21 @@ impl Function {
49574955
continue
49584956
}
49594957
else {
4960-
remove_aliasing(&mut compile_time_heap, offset);
4958+
flush_aliasing(&mut compile_time_heap, offset);
49614959
compile_time_heap.insert(key, insn_id);
49624960
}
49634961
}
49644962
Insn::LoadField{ offset, .. } => {
4965-
remove_aliasing(&mut compile_time_heap, offset);
4963+
flush_aliasing(&mut compile_time_heap, offset);
49664964
}
49674965
Insn::WriteBarrier { .. } => {
49684966
let offset = RUBY_OFFSET_RBASIC_FLAGS;
4969-
remove_aliasing(&mut compile_time_heap, offset);
4967+
flush_aliasing(&mut compile_time_heap, offset);
49704968
},
49714969
insn => {
49724970
let insn_reads_memory = insn.effects_of().includes(Effect::read(abstract_heaps::Memory));
49734971
let insn_uses_control_flow = insn.effects_of().includes(effects::Control);
4974-
// TODO: Once we update from extended basic blocks to normal basic blocks, we can remove the control flow check
4972+
// TODO(Jacob): Once we update from extended basic blocks to normal basic blocks, we can remove the control flow check
49754973
if insn_reads_memory || insn_uses_control_flow {
49764974
compile_time_heap.clear();
49774975
}

0 commit comments

Comments
 (0)