-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathtableProvider.ts
More file actions
132 lines (116 loc) · 4.48 KB
/
tableProvider.ts
File metadata and controls
132 lines (116 loc) · 4.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
import { DataFrame, DataFrameEvents, ResolvedValue, checkSignal, createEventTarget, validateFetchParams, validateGetCellParams, validateGetRowNumberParams } from 'hightable'
import type { ColumnData } from 'hyparquet'
import { FileMetaData, ParquetReadOptions, parquetSchema } from 'hyparquet'
import { parquetReadWorker } from './workers/parquetWorkerClient.js'
import type { AsyncBufferFrom } from './workers/types.d.ts'
/**
 * Fetch state of one column within a virtual row group.
 * The 'fetching' variant carries the shared in-flight promise so that
 * concurrent requests for the same column can await the same work
 * instead of issuing a duplicate read.
 */
type GroupStatus = {
  kind: 'unfetched'
} | {
  kind: 'fetching'
  promise: Promise<void>
} | {
  kind: 'fetched'
}
/**
 * A contiguous slice of rows used as the unit of fetching.
 * [groupStart, groupEnd) are absolute row indices; `state` tracks the
 * per-column fetch status for this slice.
 */
interface VirtualRowGroup {
  groupStart: number
  groupEnd: number
  state: Map<string, GroupStatus>
}
/**
 * Convert a parquet file into a dataframe.
 *
 * It fetches data on demand in chunks of 1000 rows within each row group.
 * It's not sortable. You can use sortableDataFrame from hightable to make it sortable.
 *
 * @param from - source of the parquet bytes, forwarded to the read worker.
 * @param metadata - parsed parquet file metadata (schema, row groups, row count).
 * @param options - optional hyparquet read options (only `utf8` is forwarded).
 * @returns a DataFrame backed by an in-memory cell cache, populated lazily by `fetch`.
 */
export function parquetDataFrame(from: AsyncBufferFrom, metadata: FileMetaData, options?: Pick<ParquetReadOptions, 'utf8'>): DataFrame<{parquet: FileMetaData}> {
  const { children } = parquetSchema(metadata)
  const columnDescriptors = children.map(child => ({ name: child.element.name }))
  const eventTarget = createEventTarget<DataFrameEvents>()
  // One sparse array of resolved cells per column, indexed by absolute row number.
  const cellCache = new Map<string, ResolvedValue<unknown>[]>(columnDescriptors.map(({ name }) => [name, []]))
  // Virtual row groups are up to 1000 rows and never cross a physical row group boundary.
  const groups: VirtualRowGroup[] = []
  let groupStart = 0
  for (const rg of metadata.row_groups) {
    // make virtual row groups of size 1000
    for (let j = 0; j < rg.num_rows; j += 1000) {
      const groupSize = Math.min(1000, Number(rg.num_rows) - j)
      const groupEnd = groupStart + groupSize
      groups.push({
        groupStart,
        groupEnd,
        state: new Map(columnDescriptors.map(({ name }) => [name, { kind: 'unfetched' }])),
      })
      groupStart = groupEnd
    }
  }
  /**
   * Ensure the given columns of one virtual row group are cached.
   * Deduplicates in-flight requests: columns already 'fetching' are awaited,
   * columns 'unfetched' trigger a single shared worker read.
   */
  async function fetchVirtualRowGroup({ group, columns }: {
    group: VirtualRowGroup, columns: string[]
  }): Promise<void> {
    const { groupStart, groupEnd, state } = group
    const columnsToFetch = columns.filter(column => state.get(column)?.kind === 'unfetched')
    // Await only the in-flight requests for the columns we were asked for —
    // not every pending fetch in the group, which would delay resolution needlessly.
    const promises: Promise<void>[] = []
    for (const column of columns) {
      const status = state.get(column)
      if (status?.kind === 'fetching') {
        promises.push(status.promise)
      }
    }
    // TODO(SL): pass AbortSignal to the worker?
    if (columnsToFetch.length > 0) {
      const commonPromise = parquetReadWorker({ ...options, from, metadata, rowStart: groupStart, rowEnd: groupEnd, columns: columnsToFetch, onChunk })
        .then(() => {
          // Mark completion as soon as the worker is done, so concurrent
          // callers observe 'fetched' without waiting on unrelated work.
          columnsToFetch.forEach(column => {
            state.set(column, { kind: 'fetched' })
          })
        }, (error: unknown) => {
          // Reset on failure so a later fetch() can retry, instead of leaving the
          // columns stuck on a rejected 'fetching' promise forever.
          columnsToFetch.forEach(column => {
            state.set(column, { kind: 'unfetched' })
          })
          throw error
        })
      columnsToFetch.forEach(column => {
        state.set(column, { kind: 'fetching', promise: commonPromise })
      })
      promises.push(commonPromise)
    }
    await Promise.all(promises)
  }
  /** Store a chunk of column values in the cache and notify listeners. */
  function onChunk(chunk: ColumnData): void {
    const { columnName, columnData, rowStart } = chunk
    const cachedColumn = cellCache.get(columnName)
    if (!cachedColumn) {
      throw new Error(`Column "${columnName}" not found in header`)
    }
    let row = rowStart
    for (const value of columnData) {
      // ??= keeps the first resolved value if this cell was already cached.
      cachedColumn[row] ??= { value }
      row++
    }
    eventTarget.dispatchEvent(new CustomEvent('resolve'))
  }
  const numRows = Number(metadata.num_rows)
  const unsortableDataFrame: DataFrame<{parquet: FileMetaData}> = {
    columnDescriptors,
    numRows,
    metadata: { parquet: metadata },
    eventTarget,
    getRowNumber({ row, orderBy }) {
      validateGetRowNumberParams({ row, orderBy, data: { numRows, columnDescriptors } })
      // Unsorted dataframe: the row number is the row index itself.
      return { value: row }
    },
    getCell({ row, column, orderBy }) {
      validateGetCellParams({ row, column, orderBy, data: { numRows, columnDescriptors } })
      // undefined means "not fetched yet" — callers should fetch() first.
      return cellCache.get(column)?.[row]
    },
    fetch: async ({ rowStart, rowEnd, columns, signal }) => {
      validateFetchParams({ rowStart, rowEnd, columns, data: { numRows, columnDescriptors } })
      checkSignal(signal)
      if (!columns || columns.length === 0) {
        return
      }
      const promises: Promise<void>[] = []
      // Fetch every virtual row group overlapping [rowStart, rowEnd) in parallel.
      groups.forEach((group) => {
        const { groupStart, groupEnd } = group
        if (groupStart < rowEnd && groupEnd > rowStart) {
          promises.push(
            fetchVirtualRowGroup({
              group,
              columns,
            }).then(() => {
              checkSignal(signal)
            })
          )
        }
      })
      await Promise.all(promises)
    },
  }
  return unsortableDataFrame
}