// builder_methods.rs (forked from Rust-GPU/rust-gpu)
// HACK(eddyb) avoids rewriting all of the imports (see `lib.rs` and `build.rs`).
use crate::maybe_pqp_cg_ssa as rustc_codegen_ssa;
use super::Builder;
use crate::abi::ConvSpirvType;
use crate::builder_spirv::{BuilderCursor, SpirvConst, SpirvValue, SpirvValueExt, SpirvValueKind};
use crate::codegen_cx::CodegenCx;
use crate::custom_insts::{CustomInst, CustomOp};
use crate::spirv_type::SpirvType;
use itertools::Itertools;
use rspirv::dr::{InsertPoint, Instruction, Operand};
use rspirv::spirv::{Capability, MemoryModel, MemorySemantics, Op, Scope, StorageClass, Word};
use rustc_apfloat::{Float, Round, Status, ieee};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::{
AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
BackendTypes, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods,
LayoutTypeCodegenMethods, OverflowOp,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Ty};
use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
use smallvec::SmallVec;
use std::borrow::Cow;
use std::cell::Cell;
use std::iter::{self, empty};
use std::ops::RangeInclusive;
use tracing::{Level, instrument, span};
use tracing::{trace, warn};
macro_rules! simple_op {
(
$func_name:ident, $inst_name:ident
$(, fold_const {
$(int($fold_int_lhs:ident, $fold_int_rhs:ident) => $fold_int:expr)?
})?
) => {
fn $func_name(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
assert_ty_eq!(self, lhs.ty, rhs.ty);
let result_type = lhs.ty;
$(if let Some(const_lhs) = self.builder.lookup_const(lhs) {
if let Some(const_rhs) = self.builder.lookup_const(rhs) {
match self.lookup_type(result_type) {
$(SpirvType::Integer(bits, signed) => {
let size = Size::from_bits(bits);
let as_u128 = |const_val| {
let x = match const_val {
SpirvConst::Scalar(x) => x,
_ => return None,
};
Some(if signed {
size.sign_extend(x) as u128
} else {
size.truncate(x)
})
};
if let Some($fold_int_lhs) = as_u128(const_lhs) {
if let Some($fold_int_rhs) = as_u128(const_rhs) {
return self.const_uint_big(result_type, $fold_int);
}
}
})?
_ => {}
}
}
})?
self.emit()
.$inst_name(result_type, None, lhs.def(self), rhs.def(self))
.unwrap()
.with_type(result_type)
}
};
}
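// Illustrative sketch (not part of the original source) of how `simple_op!` is
// meant to be invoked; the actual invocations appear later in this file, roughly:
//
//     simple_op! {
//         add, i_add,
//         fold_const {
//             int(a, b) => a.wrapping_add(b)
//         }
//     }
//
// which expands to an `fn add(&mut self, lhs, rhs)` that first tries to fold two
// integer constants via `wrapping_add`, and otherwise emits `OpIAdd` through the
// rspirv `i_add` builder method.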
// shl and shr allow different types as their operands
macro_rules! simple_op_unchecked_type {
($func_name:ident, $inst_name:ident) => {
fn $func_name(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
self.emit()
.$inst_name(lhs.ty, None, lhs.def(self), rhs.def(self))
.unwrap()
.with_type(lhs.ty)
}
};
}
macro_rules! simple_uni_op {
($func_name:ident, $inst_name:ident) => {
fn $func_name(&mut self, val: Self::Value) -> Self::Value {
self.emit()
.$inst_name(val.ty, None, val.def(self))
.unwrap()
.with_type(val.ty)
}
};
}
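/// Broadcasts the memset fill byte into a wider unsigned integer. Worked example
/// (illustrative): `memset_fill_u16(0xAB) == 0xABAB` and
/// `memset_fill_u32(0xAB) == 0xABAB_ABAB`.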
fn memset_fill_u16(b: u8) -> u16 {
b as u16 | ((b as u16) << 8)
}
fn memset_fill_u32(b: u8) -> u32 {
b as u32 | ((b as u32) << 8) | ((b as u32) << 16) | ((b as u32) << 24)
}
fn memset_fill_u64(b: u8) -> u64 {
b as u64
| ((b as u64) << 8)
| ((b as u64) << 16)
| ((b as u64) << 24)
| ((b as u64) << 32)
| ((b as u64) << 40)
| ((b as u64) << 48)
| ((b as u64) << 56)
}
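/// Builds a runtime (non-constant) fill pattern of `byte_width` bytes from a single
/// fill byte `fill_var`: it emits an `OpCompositeConstruct` of a `byte_width`-element
/// `u8` vector (every lane being `fill_var`) and bitcasts it to the matching integer
/// or float scalar. For example (illustrative), `byte_width == 4` with
/// `is_float == false` yields a `u32` whose four bytes all equal `fill_var`.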
fn memset_dynamic_scalar(
builder: &Builder<'_, '_>,
fill_var: Word,
byte_width: usize,
is_float: bool,
) -> Word {
let composite_type = SpirvType::Vector {
element: SpirvType::Integer(8, false).def(builder.span(), builder),
count: byte_width as u32,
}
.def(builder.span(), builder);
let composite = builder
.emit()
.composite_construct(
composite_type,
None,
iter::repeat(fill_var).take(byte_width),
)
.unwrap();
let result_type = if is_float {
SpirvType::Float(byte_width as u32 * 8)
} else {
SpirvType::Integer(byte_width as u32 * 8, false)
};
builder
.emit()
.bitcast(result_type.def(builder.span(), builder), None, composite)
.unwrap()
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
#[instrument(level = "trace", skip(self))]
fn ordering_to_semantics_def(&self, ordering: AtomicOrdering) -> SpirvValue {
let mut invalid_seq_cst = false;
let semantics = match ordering {
AtomicOrdering::Unordered | AtomicOrdering::Relaxed => MemorySemantics::NONE,
// Note: rustc currently has `AtomicOrdering::Consume` commented out; if it is ever
// enabled, it should map to `MAKE_VISIBLE | ACQUIRE`.
AtomicOrdering::Acquire => MemorySemantics::MAKE_VISIBLE | MemorySemantics::ACQUIRE,
AtomicOrdering::Release => MemorySemantics::MAKE_AVAILABLE | MemorySemantics::RELEASE,
AtomicOrdering::AcquireRelease => {
MemorySemantics::MAKE_AVAILABLE
| MemorySemantics::MAKE_VISIBLE
| MemorySemantics::ACQUIRE_RELEASE
}
AtomicOrdering::SequentiallyConsistent => {
let emit = self.emit();
let memory_model = emit.module_ref().memory_model.as_ref().unwrap();
if memory_model.operands[1].unwrap_memory_model() == MemoryModel::Vulkan {
invalid_seq_cst = true;
}
MemorySemantics::MAKE_AVAILABLE
| MemorySemantics::MAKE_VISIBLE
| MemorySemantics::SEQUENTIALLY_CONSISTENT
}
};
let semantics = self.constant_u32(self.span(), semantics.bits());
if invalid_seq_cst {
self.zombie(
semantics.def(self),
"cannot use AtomicOrdering=SequentiallyConsistent on Vulkan memory model \
(check if AcquireRelease fits your needs)",
);
}
semantics
}
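/// Returns the SPIR-V constant used as the store pattern when the memset fill byte
/// is known at compile time. Worked example (illustrative): for `ty = u32` and
/// `fill_byte = 0xAB` this defines the constant `0xABAB_ABAB_u32`; for vectors,
/// matrices, and arrays the element pattern is computed recursively and replicated.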
#[instrument(level = "trace", skip(self))]
fn memset_const_pattern(&self, ty: &SpirvType<'tcx>, fill_byte: u8) -> Word {
match *ty {
SpirvType::Void => self.fatal("memset invalid on void pattern"),
SpirvType::Bool => self.fatal("memset invalid on bool pattern"),
SpirvType::Integer(width, false) => match width {
8 => self.constant_u8(self.span(), fill_byte).def(self),
16 => self
.constant_u16(self.span(), memset_fill_u16(fill_byte))
.def(self),
32 => self
.constant_u32(self.span(), memset_fill_u32(fill_byte))
.def(self),
64 => self
.constant_u64(self.span(), memset_fill_u64(fill_byte))
.def(self),
_ => self.fatal(format!(
"memset on integer width {width} not implemented yet"
)),
},
SpirvType::Integer(width, true) => match width {
8 => self
.constant_i8(self.span(), unsafe {
std::mem::transmute::<u8, i8>(fill_byte)
})
.def(self),
16 => self
.constant_i16(self.span(), unsafe {
std::mem::transmute::<u16, i16>(memset_fill_u16(fill_byte))
})
.def(self),
32 => self
.constant_i32(self.span(), unsafe {
std::mem::transmute::<u32, i32>(memset_fill_u32(fill_byte))
})
.def(self),
64 => self
.constant_i64(self.span(), unsafe {
std::mem::transmute::<u64, i64>(memset_fill_u64(fill_byte))
})
.def(self),
_ => self.fatal(format!(
"memset on integer width {width} not implemented yet"
)),
},
SpirvType::Float(width) => match width {
32 => self
.constant_f32(self.span(), f32::from_bits(memset_fill_u32(fill_byte)))
.def(self),
64 => self
.constant_f64(self.span(), f64::from_bits(memset_fill_u64(fill_byte)))
.def(self),
_ => self.fatal(format!("memset on float width {width} not implemented yet")),
},
SpirvType::Adt { .. } => self.fatal("memset on structs not implemented yet"),
SpirvType::Vector { element, count } | SpirvType::Matrix { element, count } => {
let elem_pat = self.memset_const_pattern(&self.lookup_type(element), fill_byte);
self.constant_composite(
ty.def(self.span(), self),
iter::repeat(elem_pat).take(count as usize),
)
.def(self)
}
SpirvType::Array { element, count } => {
let elem_pat = self.memset_const_pattern(&self.lookup_type(element), fill_byte);
let count = self.builder.lookup_const_scalar(count).unwrap() as usize;
self.constant_composite(
ty.def(self.span(), self),
iter::repeat(elem_pat).take(count),
)
.def(self)
}
SpirvType::RuntimeArray { .. } => {
self.fatal("memset on runtime arrays not implemented yet")
}
SpirvType::Pointer { .. } => self.fatal("memset on pointers not implemented yet"),
SpirvType::Function { .. } => self.fatal("memset on functions not implemented yet"),
SpirvType::Image { .. } => self.fatal("cannot memset image"),
SpirvType::Sampler => self.fatal("cannot memset sampler"),
SpirvType::SampledImage { .. } => self.fatal("cannot memset sampled image"),
SpirvType::InterfaceBlock { .. } => self.fatal("cannot memset interface block"),
SpirvType::AccelerationStructureKhr => {
self.fatal("cannot memset acceleration structure")
}
SpirvType::RayQueryKhr => self.fatal("cannot memset ray query"),
}
}
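/// Runtime counterpart of `memset_const_pattern`: builds the fill pattern from a
/// runtime byte `fill_var`, using `memset_dynamic_scalar` for scalars and
/// `OpCompositeConstruct` for composites. For example (illustrative), filling an
/// `[f32; 2]` constructs the 4-byte scalar pattern once and replicates it into a
/// two-element array.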
#[instrument(level = "trace", skip(self))]
fn memset_dynamic_pattern(&self, ty: &SpirvType<'tcx>, fill_var: Word) -> Word {
match *ty {
SpirvType::Void => self.fatal("memset invalid on void pattern"),
SpirvType::Bool => self.fatal("memset invalid on bool pattern"),
SpirvType::Integer(width, _signedness) => match width {
8 => fill_var,
16 => memset_dynamic_scalar(self, fill_var, 2, false),
32 => memset_dynamic_scalar(self, fill_var, 4, false),
64 => memset_dynamic_scalar(self, fill_var, 8, false),
_ => self.fatal(format!(
"memset on integer width {width} not implemented yet"
)),
},
SpirvType::Float(width) => match width {
32 => memset_dynamic_scalar(self, fill_var, 4, true),
64 => memset_dynamic_scalar(self, fill_var, 8, true),
_ => self.fatal(format!("memset on float width {width} not implemented yet")),
},
SpirvType::Adt { .. } => self.fatal("memset on structs not implemented yet"),
SpirvType::Array { element, count } => {
let elem_pat = self.memset_dynamic_pattern(&self.lookup_type(element), fill_var);
let count = self.builder.lookup_const_scalar(count).unwrap() as usize;
self.emit()
.composite_construct(
ty.def(self.span(), self),
None,
iter::repeat(elem_pat).take(count),
)
.unwrap()
}
SpirvType::Vector { element, count } | SpirvType::Matrix { element, count } => {
let elem_pat = self.memset_dynamic_pattern(&self.lookup_type(element), fill_var);
self.emit()
.composite_construct(
ty.def(self.span(), self),
None,
iter::repeat(elem_pat).take(count as usize),
)
.unwrap()
}
SpirvType::RuntimeArray { .. } => {
self.fatal("memset on runtime arrays not implemented yet")
}
SpirvType::Pointer { .. } => self.fatal("memset on pointers not implemented yet"),
SpirvType::Function { .. } => self.fatal("memset on functions not implemented yet"),
SpirvType::Image { .. } => self.fatal("cannot memset image"),
SpirvType::Sampler => self.fatal("cannot memset sampler"),
SpirvType::SampledImage { .. } => self.fatal("cannot memset sampled image"),
SpirvType::InterfaceBlock { .. } => self.fatal("cannot memset interface block"),
SpirvType::AccelerationStructureKhr => {
self.fatal("cannot memset acceleration structure")
}
SpirvType::RayQueryKhr => self.fatal("cannot memset ray query"),
}
}
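/// Memset with a byte count known at compile time: the count is divided by the size
/// of `pat`'s type and the stores are fully unrolled. For example (illustrative),
/// memsetting 16 bytes with a `u32` pattern emits four stores through
/// `OpInBoundsAccessChain`s at indices `0..4`.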
#[instrument(level = "trace", skip(self))]
fn memset_constant_size(&mut self, ptr: SpirvValue, pat: SpirvValue, size_bytes: u64) {
let size_elem = self
.lookup_type(pat.ty)
.sizeof(self)
.expect("Memset on unsized values not supported");
let count = size_bytes / size_elem.bytes();
if count == 1 {
self.store(pat, ptr, Align::from_bytes(0).unwrap());
} else {
for index in 0..count {
let const_index = self.constant_u32(self.span(), index as u32);
let gep_ptr = self.inbounds_gep(pat.ty, ptr, &[const_index]);
self.store(pat, gep_ptr, Align::from_bytes(0).unwrap());
}
}
}
// TODO: Test that this is correct
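/// Memset with a size only known at runtime: emits a small counted loop over the
/// element-sized pattern. A sketch of the control flow it builds (illustrative
/// pseudocode, not literal SPIR-V):
///
/// ```text
/// count = size_bytes / sizeof(pat)
/// i = 0
/// header: if i < count { goto body } else { goto exit }
/// body:   *gep(ptr, i) = pat; i += 1; goto header
/// exit:
/// ```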
#[instrument(level = "trace", skip(self))]
fn memset_dynamic_size(&mut self, ptr: SpirvValue, pat: SpirvValue, size_bytes: SpirvValue) {
let size_elem = self
.lookup_type(pat.ty)
.sizeof(self)
.expect("Unable to memset a dynamic sized object");
let size_elem_const = self.constant_int(size_bytes.ty, size_elem.bytes().into());
let zero = self.constant_int(size_bytes.ty, 0);
let one = self.constant_int(size_bytes.ty, 1);
let zero_align = Align::from_bytes(0).unwrap();
let header_bb = self.append_sibling_block("memset_header");
let body_bb = self.append_sibling_block("memset_body");
let exit_bb = self.append_sibling_block("memset_exit");
let count = self.udiv(size_bytes, size_elem_const);
let index = self.alloca(self.lookup_type(count.ty).sizeof(self).unwrap(), zero_align);
self.store(zero, index, zero_align);
self.br(header_bb);
self.switch_to_block(header_bb);
let current_index = self.load(count.ty, index, zero_align);
let cond = self.icmp(IntPredicate::IntULT, current_index, count);
self.cond_br(cond, body_bb, exit_bb);
self.switch_to_block(body_bb);
let gep_ptr = self.gep(pat.ty, ptr, &[current_index]);
self.store(pat, gep_ptr, zero_align);
let current_index_plus_1 = self.add(current_index, one);
self.store(current_index_plus_1, index, zero_align);
self.br(header_bb);
self.switch_to_block(exit_bb);
}
#[instrument(level = "trace", skip(self))]
fn zombie_convert_ptr_to_u(&self, def: Word) {
self.zombie(def, "cannot convert pointers to integers");
}
#[instrument(level = "trace", skip(self))]
fn zombie_convert_u_to_ptr(&self, def: Word) {
self.zombie(def, "cannot convert integers to pointers");
}
#[instrument(level = "trace", skip(self))]
fn zombie_ptr_equal(&self, def: Word, inst: &str) {
if !self.builder.has_capability(Capability::VariablePointers) {
self.zombie(
def,
&format!("{inst} without OpCapability VariablePointers"),
);
}
}
/// Convenience wrapper for `adjust_pointer_for_sized_access`, falling back
/// on choosing `ty` as the leaf's type (and casting `ptr` to a pointer to it).
//
// HACK(eddyb) temporary workaround for untyped pointers upstream.
// FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar.
#[instrument(level = "trace", skip(self), fields(ptr, ty = ?self.debug_type(ty)))]
fn adjust_pointer_for_typed_access(
&mut self,
ptr: SpirvValue,
ty: <Self as BackendTypes>::Type,
) -> (SpirvValue, <Self as BackendTypes>::Type) {
self.lookup_type(ty)
.sizeof(self)
.and_then(|size| self.adjust_pointer_for_sized_access(ptr, size))
.unwrap_or_else(|| (self.pointercast(ptr, self.type_ptr_to(ty)), ty))
}
/// If `ptr`'s pointee type contains any prefix field/element of size `size`,
/// i.e. some leaf which can be used for all accesses of size `size`, return
/// `ptr` adjusted to point to the innermost such leaf, and the leaf's type.
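///
/// Illustrative example (hypothetical types): for `ptr: *Foo` with
/// `Foo = struct { v: [f32; 4] }` and `size == 4` bytes, this walks
/// `Foo -> [f32; 4] -> f32` and returns a pointer to `v[0]` together with the
/// `f32` leaf type.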
//
// FIXME(eddyb) technically this duplicates `pointercast`, but the main use
// of `pointercast` is being replaced by this, and this can be more efficient.
//
// HACK(eddyb) temporary workaround for untyped pointers upstream.
// FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar.
#[instrument(level = "trace", skip(self))]
fn adjust_pointer_for_sized_access(
&mut self,
ptr: SpirvValue,
size: Size,
) -> Option<(SpirvValue, <Self as BackendTypes>::Type)> {
let ptr = ptr.strip_ptrcasts();
let mut leaf_ty = match self.lookup_type(ptr.ty) {
SpirvType::Pointer { pointee } => pointee,
other => self.fatal(format!("`ptr` is non-pointer type: {other:?}")),
};
trace!(
"before nested adjust_pointer_for_sized_access. `leaf_ty`: {}",
self.debug_type(leaf_ty)
);
let mut indices = SmallVec::<[_; 8]>::new();
while let Some((inner_indices, inner_ty)) = self.recover_access_chain_from_offset(
leaf_ty,
Size::ZERO,
Some(size)..=Some(size),
None,
) {
indices.extend(inner_indices);
leaf_ty = inner_ty;
}
trace!(
"after nested adjust_pointer_for_sized_access. `leaf_ty`: {}",
self.debug_type(leaf_ty)
);
let leaf_ptr_ty = (self.lookup_type(leaf_ty).sizeof(self) == Some(size))
.then(|| self.type_ptr_to(leaf_ty))?;
let leaf_ptr = if indices.is_empty() {
assert_ty_eq!(self, ptr.ty, leaf_ptr_ty);
ptr
} else {
let indices = indices
.into_iter()
.map(|idx| self.constant_u32(self.span(), idx).def(self))
.collect::<Vec<_>>();
self.emit()
.in_bounds_access_chain(leaf_ptr_ty, None, ptr.def(self), indices)
.unwrap()
.with_type(leaf_ptr_ty)
};
trace!(
"adjust_pointer_for_sized_access returning {} {}",
self.debug_type(leaf_ptr.ty),
self.debug_type(leaf_ty)
);
Some((leaf_ptr, leaf_ty))
}
/// If possible, return the appropriate `OpAccessChain` indices for going
/// from a pointer to `ty`, to a pointer to some leaf field/element having
/// a size that fits `leaf_size_range` (and, optionally, the type `leaf_ty`),
/// while adding `offset` bytes.
///
/// That is, try to turn `((_: *T) as *u8).add(offset) as *Leaf` into a series
/// of struct field and array/vector element accesses.
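///
/// Illustrative example (hypothetical type): starting from
/// `struct S { a: u32, b: [u16; 4] }` with `offset == 6` bytes and a 2-byte leaf
/// size, this would return the indices `[1, 1]` (field `b`, then element `1`),
/// i.e. the access chain for `&s.b[1]`, together with the `u16` leaf type.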
#[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), leaf_size_or_unsized_range, leaf_ty = ?leaf_ty))]
fn recover_access_chain_from_offset(
&self,
mut ty: <Self as BackendTypes>::Type,
mut offset: Size,
// FIXME(eddyb) using `None` for "unsized" is a pretty bad design.
leaf_size_or_unsized_range: RangeInclusive<Option<Size>>,
leaf_ty: Option<<Self as BackendTypes>::Type>,
) -> Option<(SmallVec<[u32; 8]>, <Self as BackendTypes>::Type)> {
assert_ne!(Some(ty), leaf_ty);
if let Some(leaf_ty) = leaf_ty {
trace!(
"recovering access chain: leaf_ty: {:?}",
self.debug_type(leaf_ty)
);
} else {
trace!("recovering access chain: leaf_ty: None");
}
// HACK(eddyb) this has the correct ordering (`Sized(_) < Unsized`).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum MaybeSized {
Sized(Size),
Unsized,
}
let leaf_size_range = {
let r = leaf_size_or_unsized_range;
let [start, end] =
[r.start(), r.end()].map(|x| x.map_or(MaybeSized::Unsized, MaybeSized::Sized));
start..=end
};
trace!("leaf_size_range: {:?}", leaf_size_range);
// NOTE(eddyb) `ty` and `ty_kind`/`ty_size` should be kept in sync.
let mut ty_kind = self.lookup_type(ty);
let mut indices = SmallVec::new();
loop {
let ty_size;
match ty_kind {
SpirvType::Adt {
field_types,
field_offsets,
..
} => {
trace!("recovering access chain from ADT");
let (i, field_ty, field_ty_kind, field_ty_size, offset_in_field) = field_offsets
.iter()
.enumerate()
.find_map(|(i, &field_offset)| {
if field_offset > offset {
return None;
}
// Grab the actual field type to be able to confirm that
// the leaf is somewhere inside the field.
let field_ty = field_types[i];
let field_ty_kind = self.lookup_type(field_ty);
let field_ty_size = field_ty_kind
.sizeof(self).map_or(MaybeSized::Unsized, MaybeSized::Sized);
let offset_in_field = offset - field_offset;
if MaybeSized::Sized(offset_in_field) < field_ty_size
// If the field is a zero sized type, check the
// expected size and type to get the correct entry
|| offset_in_field == Size::ZERO
&& leaf_size_range.contains(&MaybeSized::Sized(Size::ZERO)) && leaf_ty == Some(field_ty)
{
Some((i, field_ty, field_ty_kind, field_ty_size, offset_in_field))
} else {
None
}
})?;
ty = field_ty;
trace!("setting ty = field_ty: {:?}", self.debug_type(field_ty));
ty_kind = field_ty_kind;
trace!("setting ty_kind = field_ty_kind: {:?}", field_ty_kind);
ty_size = field_ty_size;
trace!("setting ty_size = field_ty_size: {:?}", field_ty_size);
indices.push(i as u32);
offset = offset_in_field;
trace!("setting offset = offset_in_field: {:?}", offset_in_field);
}
SpirvType::Vector { element, .. }
| SpirvType::Array { element, .. }
| SpirvType::RuntimeArray { element }
| SpirvType::Matrix { element, .. } => {
trace!("recovering access chain from Vector, Array, RuntimeArray, or Matrix");
ty = element;
trace!("setting ty = element: {:?}", self.debug_type(element));
ty_kind = self.lookup_type(ty);
trace!("looked up ty kind: {:?}", ty_kind);
let stride = ty_kind.sizeof(self)?;
ty_size = MaybeSized::Sized(stride);
indices.push((offset.bytes() / stride.bytes()).try_into().ok()?);
offset = Size::from_bytes(offset.bytes() % stride.bytes());
}
_ => {
trace!("recovering access chain from SOMETHING ELSE, RETURNING NONE");
return None;
}
}
// Avoid digging beyond the point the leaf could actually fit.
if ty_size < *leaf_size_range.start() {
trace!("avoiding digging beyond the point the leaf could actually fit");
return None;
}
if offset == Size::ZERO
&& leaf_size_range.contains(&ty_size)
&& leaf_ty.map_or(true, |leaf_ty| leaf_ty == ty)
{
trace!("returning type: {:?}", self.debug_type(ty));
trace!("returning indices with len: {:?}", indices.len());
return Some((indices, ty));
}
}
}
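/// Lowers a GEP-style request to a single `OpAccessChain`/`OpInBoundsAccessChain`,
/// trying (in order): recovering a structured access chain from a constant byte
/// offset, merging with a prior access chain that defined `ptr`, and finally a
/// pointer cast plus a direct access chain. Illustrative example (hypothetical
/// values): a byte GEP `maybe_inbounds_gep(u8, ptr, &[const 4], true)` where `ptr`'s
/// real pointee is `struct { a: u32, b: u32 }` is recovered into the access chain
/// for `&(*ptr).b` (a `*u32` result) rather than a raw byte-offset chain.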
#[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), ptr, combined_indices = ?combined_indices.iter().map(|x| (self.debug_type(x.ty), x.kind)).collect::<Vec<_>>(), is_inbounds))]
fn maybe_inbounds_gep(
&mut self,
// Represents the type of the element that `ptr` is assumed to point to
// *before* applying the *first* index (`ptr_base_index`). This type is used
// primarily to calculate the size for the initial offset.
ty: Word,
// The base pointer value for the GEP operation.
ptr: SpirvValue,
// A slice of indices used for the GEP calculation. The *first* index is treated
// as the main offset from the base pointer `ptr`, scaled by the size of `ty`.
// Subsequent indices navigate through nested aggregate types (structs, arrays,
// vectors, etc.) starting from `ty`.
combined_indices: &[SpirvValue],
// If true, generate an `OpInBoundsAccessChain`, which has stricter requirements
// but allows more optimization. If false, generate `OpAccessChain`.
is_inbounds: bool,
) -> SpirvValue {
// Separate the first index (used for the base offset) from the rest of the
// indices (used for navigating within the aggregate type). `ptr_base_index` is
// the index applied directly to `ptr`, effectively an offset multiplier based
// on the size of `ty`. `indices` are the subsequent indices used to drill down
// into fields or elements of `ty`.
// https://llvm.org/docs/GetElementPtr.html
// "An OpAccessChain instruction is the equivalent of an LLVM getelementptr instruction where the first index element is zero."
// https://github.com/gpuweb/gpuweb/issues/33
let (&ptr_base_index, indices) = combined_indices.split_first().unwrap();
// Determine if this GEP operation is effectively byte-level addressing.
// This check is based on the *provided* input type `ty`. If `ty` is i8 or u8,
// it suggests the caller intends to perform byte-offset calculations,
// which might allow for more flexible type recovery later.
let is_byte_gep = matches!(self.lookup_type(ty), SpirvType::Integer(8, _));
trace!("Is byte GEP (based on input type): {}", is_byte_gep);
// --- Calculate the final pointee type based on the GEP operation ---
// This loop does the type traversal according to the `indices` (excluding the
// base offset index). It starts with the initial element type `ty` and
// iteratively applies each index to determine the type of the element being
// accessed at each step. The result is the type that the *final* pointer,
// generated by the SPIR-V `AccessChain` instruction, *must* point to according
// to the SPIR-V specification and the provided `indices`.
let mut calculated_pointee_type = ty;
for index_val in indices {
// Lookup the current aggregate type we are indexing into.
calculated_pointee_type = match self.lookup_type(calculated_pointee_type) {
// If it's a struct (ADT), the index must be a constant. Use it to get
// the field type.
SpirvType::Adt { field_types, .. } => {
let const_index = self
.builder
.lookup_const_scalar(*index_val)
.expect("Non-constant struct index for GEP")
as usize;
// Get the type of the specific field.
field_types[const_index]
}
// If it's an array, vector, or matrix, indexing yields the element type.
SpirvType::Array { element, .. }
| SpirvType::RuntimeArray { element }
| SpirvType::Vector { element, .. }
| SpirvType::Matrix { element, .. } => element,
// Special case: If we started with a byte GEP (`is_byte_gep` is true) and
// we are currently indexing into a byte type, the result is still a byte type.
// This prevents errors if `indices` contains non-zero values when `ty` is u8/i8.
SpirvType::Integer(8, signedness) if is_byte_gep => {
// Define the resulting byte type, as it might not exist yet.
SpirvType::Integer(8, signedness).def(self.span(), self)
}
// Any other type cannot be indexed into via GEP.
_ => self.fatal(format!(
"GEP not implemented for indexing into type {}",
self.debug_type(calculated_pointee_type)
)),
};
}
// Construct the SPIR-V pointer type that points to the final calculated pointee
// type. This is the *required* result type for the SPIR-V `AccessChain`
// instruction.
let final_spirv_ptr_type = self.type_ptr_to(calculated_pointee_type);
trace!(
"Calculated final SPIR-V pointee type: {}",
self.debug_type(calculated_pointee_type)
);
trace!(
"Calculated final SPIR-V ptr type: {}",
self.debug_type(final_spirv_ptr_type)
);
// Ensure all the `indices` (excluding the base offset index) are defined in the
// SPIR-V module and get their corresponding SPIR-V IDs. These IDs will be used
// as operands in the AccessChain instruction.
let gep_indices_ids: Vec<_> = indices.iter().map(|index| index.def(self)).collect();
// --- Prepare the base pointer ---
// Remove any potentially redundant pointer casts applied to the input `ptr`.
// GEP operations should ideally work on the "underlying" pointer.
let ptr = ptr.strip_ptrcasts();
// Get the SPIR-V ID for the (potentially stripped) base pointer.
let ptr_id = ptr.def(self);
// Determine the actual pointee type of the base pointer `ptr` *after* stripping casts.
// This might differ from the input `ty` if `ty` was less specific (e.g., u8).
let original_pointee_ty = match self.lookup_type(ptr.ty) {
SpirvType::Pointer { pointee } => pointee,
other => self.fatal(format!("gep called on non-pointer type: {other:?}")),
};
// --- Recovery Path ---
// Try to calculate the byte offset implied by the *first* index
// (`ptr_base_index`) if it's a compile-time constant. This uses the size of the
// *input type* `ty`.
let const_ptr_offset_bytes = self
.builder
.lookup_const_scalar(ptr_base_index) // Check if ptr_base_index is constant scalar
.and_then(|idx| {
let idx_u64 = u64::try_from(idx).ok()?;
// Get the size of the input type `ty`
self.lookup_type(ty)
.sizeof(self)
// Calculate offset in bytes
.map(|size| idx_u64.saturating_mul(size.bytes()))
});
// If we successfully calculated a constant byte offset for the first index...
if let Some(const_ptr_offset_bytes) = const_ptr_offset_bytes {
// Try to reconstruct a more "structured" access chain based on the *original*
// pointee type of the pointer (`original_pointee_ty`) and the calculated byte offset.
// This is useful if the input `ty` was generic (like u8) but the pointer actually
// points to a structured type (like a struct). `recover_access_chain_from_offset`
// attempts to find a sequence of constant indices (`base_indices`) into
// `original_pointee_ty` that matches the `const_ptr_offset_bytes`.
if let Some((base_indices, base_pointee_ty)) = self.recover_access_chain_from_offset(
// Start from the pointer's actual underlying type
original_pointee_ty,
// The target byte offset
Size::from_bytes(const_ptr_offset_bytes),
// Accept any leaf size (from zero-sized up to unsized).
Some(Size::ZERO)..=None,
// Don't require a specific leaf type.
None,
) {
// Recovery successful! Found a structured path (`base_indices`) to the target offset.
trace!(
"`recover_access_chain_from_offset` returned Some with base_pointee_ty: {}",
self.debug_type(base_pointee_ty)
);
// Determine the result type for the `AccessChain` instruction we might
// emit. By default, use the `final_spirv_ptr_type` strictly calculated
// earlier from `ty` and `indices`.
//
// If this is a byte GEP *and* the recovered type is *not* itself a byte
// type, we can instead use the pointer type derived from the *recovered*
// type (`base_pointee_ty`). This helps preserve type information when
// recovery finds real structure behind a byte-addressed pointer.
let result_wrapper_type = if !is_byte_gep
|| matches!(self.lookup_type(base_pointee_ty), SpirvType::Integer(8, _))
{
trace!(
"Using strictly calculated type for wrapper: {}",
// Use type based on input `ty` + `indices`
self.debug_type(calculated_pointee_type)
);
final_spirv_ptr_type
} else {
trace!(
"Byte GEP allowing recovered type for wrapper: {}",
// Use type based on recovery result
self.debug_type(base_pointee_ty)
);
self.type_ptr_to(base_pointee_ty)
};
// Check if we can directly use the recovered path combined with the
// remaining indices. This is possible if:
// 1. The input type `ty` matches the type found by recovery
// (`base_pointee_ty`). This means the recovery didn't fundamentally
// change the type interpretation needed for the *next* steps
// (`indices`).
// OR
// 2. There are no further indices (`gep_indices_ids` is empty). In this
// case, the recovery path already leads to the final destination.
if ty == base_pointee_ty || gep_indices_ids.is_empty() {
// Combine the recovered constant indices with the remaining dynamic/constant indices.
let combined_indices = base_indices
.into_iter()
// Convert recovered `u32` indices to constant SPIR-V IDs.
.map(|idx| self.constant_u32(self.span(), idx).def(self))
// Chain the original subsequent indices (`indices`).
.chain(gep_indices_ids.iter().copied())
.collect();
trace!(
"emitting access chain via recovery path with wrapper type: {}",
self.debug_type(result_wrapper_type)
);
// Emit a single AccessChain using the original pointer `ptr_id` and the fully combined index list.
// Note: We don't pass `ptr_base_index` here because its effect is incorporated into `base_indices`.
return self.emit_access_chain(
result_wrapper_type, // The chosen result pointer type
ptr_id, // The original base pointer ID
None, // No separate base index needed
combined_indices, // The combined structured + original indices
is_inbounds, // Preserve original inbounds request
);
} else {
// Recovery happened, but the recovered type `base_pointee_ty` doesn't match the input `ty`,
// AND there are more `indices` to process. Using the `base_indices` derived from
// `original_pointee_ty` would be incorrect for interpreting the subsequent `indices`
// which were intended to operate relative to `ty`. Fall back to the standard path.
trace!(
"Recovery type mismatch ({}) vs ({}) and GEP indices exist, falling back",
self.debug_type(ty),
self.debug_type(base_pointee_ty)
);
}
} else {
// `recover_access_chain_from_offset` couldn't find a structured path for the constant offset.
trace!("`recover_access_chain_from_offset` returned None, falling back");
}
}
// --- End Recovery Path ---
// --- Attempt GEP Merging Path ---
// Check if the base pointer `ptr` itself was the result of a previous
// AccessChain instruction. Merging is only attempted if the input type `ty`
// matches the pointer's actual underlying pointee type `original_pointee_ty`.
// If they differ, merging could be invalid.
let maybe_original_access_chain = if ty == original_pointee_ty {
// Search the current function's instructions...
// FIXME(eddyb) this could get ridiculously expensive, at the very least
// it could use `.rev()`, hoping the base pointer was recently defined?
let search_result = {
let emit = self.emit();
let module = emit.module_ref();
emit.selected_function().and_then(|func_idx| {
module.functions.get(func_idx).and_then(|func| {
// Find the instruction that defined our base pointer `ptr_id`.
func.all_inst_iter()
.find(|inst| inst.result_id == Some(ptr_id))
.and_then(|ptr_def_inst| {
// Check if that instruction was an `AccessChain` or `InBoundsAccessChain`.
if matches!(
ptr_def_inst.class.opcode,
Op::AccessChain | Op::InBoundsAccessChain
) {
// If yes, extract its base pointer and its indices.
let base_ptr = ptr_def_inst.operands[0].unwrap_id_ref();
let indices = ptr_def_inst.operands[1..]
.iter()
.map(|op| op.unwrap_id_ref())
.collect::<Vec<_>>();
Some((base_ptr, indices))
} else {
// The instruction defining ptr was not an `AccessChain`.
None
}
})
})
})
};
search_result
} else {
// Input type `ty` doesn't match the pointer's actual type, cannot safely merge.
None
};
// If we found that `ptr` was defined by a previous `AccessChain`...
if let Some((original_ptr, mut original_indices)) = maybe_original_access_chain {
trace!("has original access chain, attempting to merge GEPs");
// Check if merging is possible. Requires:
// 1. The original AccessChain had at least one index.
// 2. The *last* index of the original AccessChain is a constant.
// 3. The *first* index (`ptr_base_index`) of the *current* GEP is a constant.
// Merging usually involves adding these two constant indices.
let can_merge = if let Some(&last_original_idx_id) = original_indices.last() {
// Check if both the last original index and the current base index are constant scalars.
self.builder
.lookup_const_scalar(last_original_idx_id.with_type(ptr_base_index.ty))
.is_some()
&& self.builder.lookup_const_scalar(ptr_base_index).is_some()
} else {
// Original access chain had no indices to merge with.
false
};
if can_merge {
let last_original_idx_id = original_indices.last_mut().unwrap();
// Add the current `ptr_base_index` to the last index of the original chain.
// The result becomes the new last index.
*last_original_idx_id = self
.add(
// Ensure types match for add.
last_original_idx_id.with_type(ptr_base_index.ty),
ptr_base_index,
)
// Define the result of the addition.
.def(self);
// Append the remaining indices (`indices`) from the current GEP operation.
original_indices.extend(gep_indices_ids);
trace!(
"emitting merged access chain with pointer to type: {}",
self.debug_type(calculated_pointee_type)
);
// Emit a *single* AccessChain using the *original* base pointer and the *merged* index list.
// The result type *must* be the `final_spirv_ptr_type` calculated earlier based on the full chain of operations.
return self.emit_access_chain(
final_spirv_ptr_type, // Use the strictly calculated final type.
original_ptr, // Base pointer from the *original* AccessChain.
None, // No separate base index; it's merged.
original_indices, // The combined list of indices.
is_inbounds, // Preserve original inbounds request.
);
} else {
// Cannot merge because one or both relevant indices are not constant,
// or the original chain was empty.
trace!(
"Last index or base offset is not constant, or no last index, cannot merge."
);
}
} else {
// The base pointer `ptr` was not the result of an AccessChain, or merging
// wasn't attempted due to type mismatch.
trace!("no original access chain to merge with");
}
// --- End GEP Merging Path ---
// --- Fallback / Default Path ---
// This path is taken if neither the Recovery nor the Merging path succeeded or applied.
// It performs a more direct translation of the GEP request.
// HACK(eddyb): Workaround for potential upstream issues where pointers might lack precise type info.
// FIXME(eddyb): Ideally, this should use untyped memory features if available/necessary.
// Before emitting the AccessChain, explicitly cast the base pointer `ptr` to
// ensure its pointee type matches the input `ty`. This is required because the
// SPIR-V `AccessChain` instruction implicitly uses the size of the base
// pointer's pointee type when applying the *first* index operand (our
// `ptr_base_index`). If `ty` and `original_pointee_ty` mismatched and we
// reached this fallback, this cast ensures SPIR-V validity.
trace!("maybe_inbounds_gep fallback path calling pointercast");
// Cast ptr to point to `ty`.
let ptr = self.pointercast(ptr, self.type_ptr_to(ty));
// Get the ID of the (potentially newly casted) pointer.
let ptr_id = ptr.def(self);
trace!(
"emitting access chain via fallback path with pointer type: {}",
self.debug_type(final_spirv_ptr_type)
);
// Emit the `AccessChain` instruction.
self.emit_access_chain(
final_spirv_ptr_type, // Result *must* be a pointer to the final calculated type.
ptr_id, // Use the (potentially casted) base pointer ID.
Some(ptr_base_index), // Provide the first index separately.
gep_indices_ids, // Provide the rest of the indices.
is_inbounds, // Preserve original inbounds request.
)
}
#[instrument(
level = "trace",
skip(self),