# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Iterable, Iterator

import pyarrow as pa


class BatchBuffer:
"""
FIFO buffer of pyarrow Record batches
Not thread-safe.
"""

    def __init__(self):
        self._buffer: list[pa.RecordBatch] = []
        self._buffer_size: int = 0

    def __len__(self):
        return self._buffer_size

    def append_batch(self, batch: pa.RecordBatch) -> None:
        self._buffer.append(batch)
        self._buffer_size += batch.num_rows

    def take_as_batches(self, n: int) -> tuple[pa.RecordBatch, ...]:
        """Remove and return the first n rows as a tuple of RecordBatches."""
        if n > len(self):
            raise ValueError(f"Cannot take {n} rows, only {len(self)} rows in buffer.")
        rows_taken = 0
        sub_batches: list[pa.RecordBatch] = []
        while rows_taken < n:
            batch = self._buffer.pop(0)
            if batch.num_rows > (n - rows_taken):
                # This batch has more rows than we need: take only its head
                # and push the remainder back onto the front of the buffer.
                sub_batches.append(batch.slice(length=n - rows_taken))
                self._buffer.insert(0, batch.slice(offset=n - rows_taken))
                rows_taken += n - rows_taken
            else:
                sub_batches.append(batch)
                rows_taken += batch.num_rows
        self._buffer_size -= n
        return tuple(sub_batches)

    def take_rechunked(self, n: int) -> pa.RecordBatch:
        """Remove the first n rows and return them as a single RecordBatch.

        Assumes n >= 1: pa.Table.from_batches() cannot infer a schema from
        an empty sequence of batches.
        """
        return (
            pa.Table.from_batches(self.take_as_batches(n))
            .combine_chunks()
            .to_batches()[0]
        )
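

# Usage sketch (illustrative, not part of the original module): buffer two
# 3-row batches, then take 4 rows back out as one contiguous batch.
#
#   batch = pa.RecordBatch.from_pydict({"x": [1, 2, 3]})
#   buf = BatchBuffer()
#   buf.append_batch(batch)
#   buf.append_batch(batch)
#   head = buf.take_rechunked(4)  # RecordBatch with "x" == [1, 2, 3, 1]
#   len(buf)                      # 2 rows left in the buffer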


def chunk_by_row_count(
    batches: Iterable[pa.RecordBatch], page_size: int
) -> Iterator[tuple[pa.RecordBatch, ...]]:
    """Regroup a stream of batches into pages of exactly page_size rows.

    The final page may hold fewer rows.
    """
    buffer = BatchBuffer()
    for batch in batches:
        buffer.append_batch(batch)
        while len(buffer) >= page_size:
            yield buffer.take_as_batches(page_size)
    # Emit the final page, which may be smaller than page_size.
    if len(buffer) > 0:
        yield buffer.take_as_batches(len(buffer))
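

# Usage sketch (illustrative): re-page uneven batches into 2-row pages,
# with a smaller final page.
#
#   batches = [pa.RecordBatch.from_pydict({"x": [1, 2, 3]}),
#              pa.RecordBatch.from_pydict({"x": [4, 5]})]
#   pages = list(chunk_by_row_count(batches, page_size=2))
#   [sum(b.num_rows for b in page) for page in pages]  # [2, 2, 1]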


def truncate_pyarrow_iterable(
    batches: Iterable[pa.RecordBatch], max_results: int
) -> Iterator[pa.RecordBatch]:
    """Truncate a stream of batches to at most max_results total rows.

    The batch that crosses the limit is sliced so the row count comes out
    exact; any remaining batches are never consumed.
    """
    total_yielded = 0
    for batch in batches:
        if batch.num_rows >= (max_results - total_yielded):
            # This batch reaches the limit: yield just enough rows and stop.
            yield batch.slice(length=max_results - total_yielded)
            return
        else:
            yield batch
            total_yielded += batch.num_rows
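

# Usage sketch (illustrative): cap a 5-row stream at 4 rows; the second
# batch is sliced down to a single row.
#
#   batches = [pa.RecordBatch.from_pydict({"x": [1, 2, 3]}),
#              pa.RecordBatch.from_pydict({"x": [4, 5]})]
#   capped = list(truncate_pyarrow_iterable(iter(batches), max_results=4))
#   sum(b.num_rows for b in capped)  # 4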


def append_offsets(
    pa_table: pa.Table,
    offsets_col: str,
) -> pa.Table:
    """Append an int64 column of row offsets (0, 1, ..., num_rows - 1)."""
    return pa_table.append_column(
        offsets_col, pa.array(range(pa_table.num_rows), type=pa.int64())
    )
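

# Usage sketch (illustrative): tag each row with its position in the table.
#
#   table = pa.table({"x": ["a", "b", "c"]})
#   append_offsets(table, "offset").column("offset").to_pylist()  # [0, 1, 2]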