-
Notifications
You must be signed in to change notification settings - Fork 145
Expand file tree
/
Copy patharray.rs
More file actions
464 lines (409 loc) · 17.9 KB
/
array.rs
File metadata and controls
464 lines (409 loc) · 17.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright the Vortex contributors
use std::ops::Range;
use vortex_buffer::Buffer;
use vortex_buffer::BufferMut;
use vortex_error::VortexExpect;
use vortex_error::VortexResult;
use vortex_error::vortex_ensure;
use vortex_error::vortex_err;
use crate::ArrayRef;
use crate::Canonical;
use crate::DynArray;
use crate::ExecutionCtx;
use crate::IntoArray;
use crate::arrays::PrimitiveArray;
use crate::arrays::patched::TransposedPatches;
use crate::arrays::patched::patch_lanes;
use crate::buffer::BufferHandle;
use crate::dtype::IntegerPType;
use crate::dtype::NativePType;
use crate::dtype::PType;
use crate::match_each_native_ptype;
use crate::match_each_unsigned_integer_ptype;
use crate::patches::Patches;
use crate::stats::ArrayStats;
use crate::validity::Validity;
/// An array that partially "patches" another array with new values.
///
/// # Background
///
/// This is meant to be the foundation of a fully data-parallel patching strategy, based on the
/// work published in ["G-ALP" from Hepkema et al.](https://ir.cwi.nl/pub/35205/35205.pdf)
///
/// Patching is common when an encoding almost completely covers an array save a few exceptions.
/// In that case, rather than avoid the encoding entirely, it's preferable to
///
/// * Replace unencodable values with fillers (zeros, frequent values, nulls, etc.)
/// * Wrap the array with a `PatchedArray` signaling that when the original array is executed,
/// some of the decoded values must be overwritten.
///
/// In Vortex, the FastLanes bit-packing encoding is often the terminal node in an encoding tree,
/// and FastLanes has an intrinsic chunking of 1024 elements. Thus, 1024 elements is pervasively
/// a useful unit of chunking throughout Vortex, and so we use 1024 as a chunk size here
/// as well.
///
/// # Details
///
/// To patch an array, we first divide it into a set of chunks of length 1024, and then within
/// each chunk, we assign each position to a lane. The number of lanes depends on the width of
/// the underlying type.
///
/// Thus, rather than sorting patch indices and values by their global offset, they are sorted
/// primarily by their chunk, and then subsequently by their lanes.
///
/// The Patched array layout has 4 children
///
/// * `inner`: the inner array is the one containing encoded values, including the filler values
/// that need to be patched over at execution time
/// * `lane_offsets`: this is an indexing buffer that allows you to see into ranges of the other
/// two children
/// * `indices`: An array of `u16` chunk indices, indicating where within the chunk should the value
/// be overwritten by the patch value
/// * `values`: The child array containing the patch values, which should be inserted over
/// the values of the `inner` at the locations provided by `indices`
///
/// `indices` and `values` are aligned and accessed together.
///
/// ```text
///
/// chunk 0 chunk 0 chunk 0 chunk 0 chunk 0 chunk 0
/// lane 0 lane 1 lane 2 lane 3 lane 4 lane 5
/// ┌────────────┬────────────┬────────────┬────────────┬────────────┬────────────┐
/// lane_offsets │ 0 │ 0 │ 2 │ 2 │ 3 │ 5 │ ...
/// └─────┬──────┴─────┬──────┴─────┬──────┴──────┬─────┴──────┬─────┴──────┬─────┘
/// │ │ │ │ │ │
/// │ │ │ │ │ │
/// ┌─────┴────────────┘ └──────┬──────┘ ┌──────┘ └─────┐
/// │ │ │ │
/// │ │ │ │
/// │ │ │ │
/// ▼────────────┬────────────┬────────────▼────────────▼────────────┬────────────▼
/// indices │ │ │ │ │ │ │
/// │ │ │ │ │ │ │
/// ├────────────┼────────────┼────────────┼────────────┼────────────┼────────────┤
/// values │ │ │ │ │ │ │
/// │ │ │ │ │ │ │
/// └────────────┴────────────┴────────────┴────────────┴────────────┴────────────┘
/// ```
///
/// It turns out that this layout is optimal for executing patching on GPUs, because the
/// `lane_offsets` allows each thread in a warp to seek to its patches in constant time.
/// The inner array containing the base unpatched values.
pub(super) const INNER_SLOT: usize = 0;
/// The lane offsets array for locating patches within lanes.
pub(super) const LANE_OFFSETS_SLOT: usize = 1;
/// The indices of patched (exception) values.
pub(super) const INDICES_SLOT: usize = 2;
/// The patched (exception) values at the corresponding indices.
pub(super) const VALUES_SLOT: usize = 3;
/// Total number of child slots held by a `PatchedArray`.
pub(super) const NUM_SLOTS: usize = 4;
/// Human-readable names for each child slot, indexed by slot ordinal.
pub(super) const SLOT_NAMES: [&str; NUM_SLOTS] =
    ["inner", "lane_offsets", "patch_indices", "patch_values"];
#[derive(Debug, Clone)]
pub struct PatchedArray {
    /// Child arrays stored as slots (see `INNER_SLOT`, `LANE_OFFSETS_SLOT`,
    /// `INDICES_SLOT`, `VALUES_SLOT`):
    /// 0: inner - the inner array being patched
    /// 1: lane_offsets - u32 array for indexing into indices/values
    /// 2: indices - u16 array of chunk indices
    /// 3: values - array of patch values
    ///
    /// Slots are `Option` so that `into_parts` can move them out; they are
    /// populated on construction.
    pub(super) slots: Vec<Option<ArrayRef>>,
    /// Number of lanes the patch indices and values have been split into. Each of the `n_chunks`
    /// of 1024 values is split into `n_lanes` lanes horizontally, each lane having 1024 / n_lanes
    /// values that might be patched.
    pub(super) n_lanes: usize,
    /// The offset into that first chunk that is considered in bounds.
    ///
    /// The patch indices of the first chunk less than `offset` should be skipped, and the offset
    /// should be subtracted out of the remaining offsets to get their final position in the
    /// executed array.
    pub(super) offset: usize,
    /// Length of the array (in logical elements, excluding `offset`).
    pub(super) len: usize,
    /// Lazily-computed statistics for this array.
    pub(super) stats_set: ArrayStats,
}
impl PatchedArray {
    /// Create a new `PatchedArray` from a child array and a set of [`Patches`].
    ///
    /// The patches are transposed into the chunk/lane layout described on the type, and the
    /// resulting buffers are wrapped as non-nullable primitive child arrays.
    ///
    /// # Errors
    ///
    /// The `inner` array must be primitive type, and it must have the same `DType` as the
    /// patches. The patches cannot contain nulls themselves; any nulls must be stored in the
    /// `inner` array's validity. At most `u32::MAX` patch values are supported.
    pub fn from_array_and_patches(
        inner: ArrayRef,
        patches: &Patches,
        ctx: &mut ExecutionCtx,
    ) -> VortexResult<Self> {
        vortex_ensure!(
            inner.dtype().eq_with_nullability_superset(patches.dtype()),
            "array DType must match patches DType"
        );
        vortex_ensure!(
            inner.dtype().is_primitive(),
            "Creating PatchedArray from Patches only supported for primitive arrays"
        );
        vortex_ensure!(
            patches.num_patches() <= u32::MAX as usize,
            "PatchedArray does not support > u32::MAX patch values"
        );
        vortex_ensure!(
            patches.values().all_valid()?,
            "PatchedArray cannot be built from Patches with nulls"
        );

        let values_ptype = patches.dtype().as_ptype();
        let TransposedPatches {
            n_lanes,
            lane_offsets,
            indices,
            values,
        } = transpose_patches(patches, ctx)?;

        // All three transposed buffers become non-nullable primitive child arrays; the
        // construction is identical except for buffer and ptype.
        let make_child = |buffer, ptype| {
            PrimitiveArray::from_buffer_handle(
                BufferHandle::new_host(buffer),
                ptype,
                Validity::NonNullable,
            )
            .into_array()
        };
        let lane_offsets = make_child(lane_offsets, PType::U32);
        let indices = make_child(indices, PType::U16);
        let values = make_child(values, values_ptype);

        let len = inner.len();
        Ok(Self {
            slots: vec![Some(inner), Some(lane_offsets), Some(indices), Some(values)],
            n_lanes,
            offset: 0,
            len,
            stats_set: ArrayStats::default(),
        })
    }
}
/// The owned parts of a [`PatchedArray`], produced by [`PatchedArray::into_parts`].
///
/// Unlike the array itself, every child here is directly owned (no `Option` slots).
pub struct PatchedArrayParts {
    /// The inner array being patched.
    pub inner: ArrayRef,
    /// The lane offsets array (u32).
    pub lane_offsets: ArrayRef,
    /// The patch indices array (u16).
    pub indices: ArrayRef,
    /// The patch values array.
    pub values: ArrayRef,
    /// Number of lanes.
    pub n_lanes: usize,
    /// Offset into the first chunk.
    pub offset: usize,
    /// Logical length.
    pub len: usize,
}
impl PatchedArray {
/// Consume this array into its owned parts.
pub fn into_parts(mut self) -> PatchedArrayParts {
PatchedArrayParts {
inner: self.slots[INNER_SLOT]
.take()
.vortex_expect("PatchedArray inner slot"),
lane_offsets: self.slots[LANE_OFFSETS_SLOT]
.take()
.vortex_expect("PatchedArray lane_offsets slot"),
indices: self.slots[INDICES_SLOT]
.take()
.vortex_expect("PatchedArray indices slot"),
values: self.slots[VALUES_SLOT]
.take()
.vortex_expect("PatchedArray values slot"),
n_lanes: self.n_lanes,
offset: self.offset,
len: self.len,
}
}
}
impl PatchedArray {
    /// Returns a reference to the base array being patched.
    #[inline]
    pub fn base_array(&self) -> &ArrayRef {
        let slot = &self.slots[INNER_SLOT];
        slot.as_ref().vortex_expect("PatchedArray inner slot")
    }

    /// Returns a reference to the lane offsets array (u32).
    #[inline]
    pub fn lane_offsets(&self) -> &ArrayRef {
        let slot = &self.slots[LANE_OFFSETS_SLOT];
        slot.as_ref().vortex_expect("PatchedArray lane_offsets slot")
    }

    /// Returns a reference to the indices array (u16).
    #[inline]
    pub fn patch_indices(&self) -> &ArrayRef {
        let slot = &self.slots[INDICES_SLOT];
        slot.as_ref().vortex_expect("PatchedArray indices slot")
    }

    /// Returns a reference to the patch values array.
    #[inline]
    pub fn patch_values(&self) -> &ArrayRef {
        let slot = &self.slots[VALUES_SLOT];
        slot.as_ref().vortex_expect("PatchedArray values slot")
    }
}
impl PatchedArray {
    /// Get a range of indices that can be used to access the `indices` and `values` children
    /// to retrieve all patches for a specified lane.
    ///
    /// # Panics
    ///
    /// Note that this function will panic if the caller requests out of bounds chunk/lane ordinals.
    pub(crate) fn lane_range(&self, chunk: usize, lane: usize) -> VortexResult<Range<usize>> {
        // Bounds are checked against the padded extent (offset + len), since the first chunk
        // may begin before the logical start of this array.
        assert!(chunk * 1024 <= self.len + self.offset);
        assert!(lane < self.n_lanes);
        // `lane_offsets` stores cumulative offsets, one per (chunk, lane) slot plus a final
        // terminator, so two consecutive entries bracket one lane's patches.
        let start = self.lane_offsets().scalar_at(chunk * self.n_lanes + lane)?;
        let stop = self
            .lane_offsets()
            .scalar_at(chunk * self.n_lanes + lane + 1)?;
        let start = start
            .as_primitive()
            .as_::<usize>()
            .ok_or_else(|| vortex_err!("could not cast lane_offset to usize"))?;
        let stop = stop
            .as_primitive()
            .as_::<usize>()
            .ok_or_else(|| vortex_err!("could not cast lane_offset to usize"))?;
        Ok(start..stop)
    }
    /// Slice the array to just the patches and inner values that are within the chunk range.
    pub(crate) fn slice_chunks(&self, chunks: Range<usize>) -> VortexResult<Self> {
        // Keep one extra lane offset past the last chunk so `lane_range` can still bracket
        // the final lane of the slice.
        let lane_offsets_start = chunks.start * self.n_lanes;
        let lane_offsets_stop = chunks.end * self.n_lanes + 1;
        let sliced_lane_offsets = self
            .lane_offsets()
            .slice(lane_offsets_start..lane_offsets_stop)?;
        // NOTE(review): indices/values are deliberately kept whole (not sliced), since the
        // sliced lane_offsets still index into the full patch buffers.
        let indices = self.patch_indices().clone();
        let values = self.patch_values().clone();
        // Find the new start/end for slicing the inner array.
        // The inner array has already been sliced to start at position `offset` in absolute terms,
        // so we need to convert chunk boundaries to inner-relative coordinates.
        let begin = (chunks.start * 1024).saturating_sub(self.offset);
        let end = (chunks.end * 1024)
            .saturating_sub(self.offset)
            .min(self.len);
        // Only chunk 0 carries the partial-chunk offset; later chunks start on a chunk
        // boundary, so their offset is zero.
        let offset = if chunks.start == 0 { self.offset } else { 0 };
        let inner = self.base_array().slice(begin..end)?;
        let len = end - begin;
        Ok(PatchedArray {
            slots: vec![
                Some(inner),
                Some(sliced_lane_offsets),
                Some(indices),
                Some(values),
            ],
            n_lanes: self.n_lanes,
            offset,
            len,
            // Statistics are not carried over; the slice gets a fresh (empty) stats set.
            stats_set: ArrayStats::default(),
        })
    }
}
/// Transpose a set of patches from the default sorted layout into the data parallel layout.
#[allow(clippy::cognitive_complexity)]
fn transpose_patches(patches: &Patches, ctx: &mut ExecutionCtx) -> VortexResult<TransposedPatches> {
    let len = patches.array_len();
    let start_offset = patches.offset();

    // Canonicalize both children so their raw host buffers can be read directly.
    let idx_prim = patches
        .indices()
        .clone()
        .execute::<Canonical>(ctx)?
        .into_primitive();
    let val_prim = patches
        .values()
        .clone()
        .execute::<Canonical>(ctx)?
        .into_primitive();
    let (idx_ptype, val_ptype) = (idx_prim.ptype(), val_prim.ptype());
    let idx_bytes = idx_prim.buffer_handle().clone().unwrap_host();
    let val_bytes = val_prim.buffer_handle().clone().unwrap_host();

    // Dispatch on the concrete index/value primitive types, then reinterpret the byte
    // buffers as typed slices and hand off to the monomorphized transpose.
    match_each_unsigned_integer_ptype!(idx_ptype, |I| {
        match_each_native_ptype!(val_ptype, |V| {
            let typed_indices: Buffer<I> = Buffer::from_byte_buffer(idx_bytes);
            let typed_values: Buffer<V> = Buffer::from_byte_buffer(val_bytes);
            Ok(transpose(
                typed_indices.as_slice(),
                typed_values.as_slice(),
                start_offset,
                len,
            ))
        })
    })
}
/// Scatter sorted patch `(index, value)` pairs into the chunk/lane-transposed layout
/// described on `PatchedArray`.
///
/// `offset` is subtracted from every incoming index before bucketing, and `array_len`
/// determines the number of 1024-element chunks.
///
/// # Panics
///
/// Panics if `indices_in` and `values_in` have different lengths, if there are more than
/// `u32::MAX` patches, or if the array would need more than `u32::MAX` chunks.
#[allow(clippy::cast_possible_truncation)]
fn transpose<I: IntegerPType, V: NativePType>(
    indices_in: &[I],
    values_in: &[V],
    offset: usize,
    array_len: usize,
) -> TransposedPatches {
    // `indices_in` and `values_in` are consumed in lockstep. Without this check, a length
    // mismatch would silently truncate the `zip` in the scatter loop below, and the
    // subsequent `set_len` calls would expose uninitialized memory (UB).
    assert_eq!(
        indices_in.len(),
        values_in.len(),
        "patch indices and values must have equal length"
    );
    // Lane offsets are stored as u32 counters; guard against overflow of the counts and
    // prefix sums below.
    assert!(
        indices_in.len() <= u32::MAX as usize,
        "cannot transpose more than u32::MAX patches"
    );
    // Total number of slots is number of chunks times number of lanes.
    let n_chunks = array_len.div_ceil(1024);
    assert!(
        n_chunks <= u32::MAX as usize,
        "Cannot transpose patches for array with >= 4 trillion elements"
    );
    let n_lanes = patch_lanes::<V>();
    // We know upfront how many indices and values we'll have.
    let mut indices_buffer = BufferMut::with_capacity(indices_in.len());
    let mut values_buffer = BufferMut::with_capacity(values_in.len());
    // Pass 1: count the patches in each (chunk, lane) slot, shifted one entry to the right
    // so the prefix sum below turns counts into exclusive start offsets.
    let mut lane_offsets: BufferMut<u32> = BufferMut::zeroed(n_chunks * n_lanes + 1);
    for index in indices_in {
        let index = index.as_() - offset;
        let chunk = index / 1024;
        let lane = index % n_lanes;
        lane_offsets[chunk * n_lanes + lane + 1] += 1;
    }
    // Pass 2: prefix-sum sizes -> offsets.
    for index in 1..lane_offsets.len() {
        lane_offsets[index] += lane_offsets[index - 1];
    }
    // Pass 3: scatter each patch to its final position, using `lane_offsets` entries as
    // running write cursors (each slot is temporarily advanced past its start offset).
    let indices_out = indices_buffer.spare_capacity_mut();
    let values_out = values_buffer.spare_capacity_mut();
    for (index, &value) in std::iter::zip(indices_in, values_in) {
        let index = index.as_() - offset;
        let chunk = index / 1024;
        let lane = index % n_lanes;
        let position = &mut lane_offsets[chunk * n_lanes + lane];
        indices_out[*position as usize].write((index % 1024) as u16);
        values_out[*position as usize].write(value);
        *position += 1;
    }
    // SAFETY: indices_in.len() == values_in.len() (asserted above), so the zip visited
    // every patch exactly once. The prefix-summed cursors write each patch to a distinct
    // position in 0..indices_in.len(), so every element below the new length is initialized.
    unsafe {
        indices_buffer.set_len(indices_in.len());
        values_buffer.set_len(values_in.len());
    }
    // Pass 4: undo the cursor increments — each slot was advanced once per patch it
    // received, so decrementing once per patch restores the start offsets.
    for index in indices_in {
        let index = index.as_() - offset;
        let chunk = index / 1024;
        let lane = index % n_lanes;
        lane_offsets[chunk * n_lanes + lane] -= 1;
    }
    TransposedPatches {
        n_lanes,
        lane_offsets: lane_offsets.freeze().into_byte_buffer(),
        indices: indices_buffer.freeze().into_byte_buffer(),
        values: values_buffer.freeze().into_byte_buffer(),
    }
}