-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path: mock_context.py
More file actions
267 lines (222 loc) · 9.74 KB
/
mock_context.py
File metadata and controls
267 lines (222 loc) · 9.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
from typing import List, Tuple, Iterator, Iterable, Any, Optional, Union
from collections.abc import Sized
from functools import wraps
import pandas as pd
from exasol_udf_mock_python.column import Column
from exasol_udf_mock_python.group import Group
from exasol_udf_mock_python.mock_meta_data import MockMetaData
from exasol_udf_mock_python.udf_context import UDFContext
def check_context(f):
    """
    Decorator for MockContext methods: ensures a valid current group context exists
    before the wrapped method runs, raising a RuntimeError otherwise.
    """
    @wraps(f)
    def guarded(self, *args, **kwargs):
        # Guard clause: only proceed when the group context is valid.
        if not self.no_context:
            return f(self, *args, **kwargs)
        raise RuntimeError('Calling UDFContext interface when the current group context '
                           'is invalid is disallowed')
    return guarded
def validate_emit(row: Tuple, columns: "List[Column]"):
    """
    Validates that a data row to be emitted corresponds to the definition of the output columns.

    The number of elements in the row should match the number of columns and the type of each
    element should match the type of the correspondent column.

    :param row: Data row.
    :param columns: Column definition.
    :raises ValueError: if the row length differs from the number of columns.
    :raises TypeError: if any value's type does not match its column's type.
    """
    if (expected_len := len(columns)) != (actual_len := len(row)):
        # Fixed typo in the message: "missmatch" -> "mismatch".
        raise ValueError(
            f"Row length mismatch: got {actual_len} values but {expected_len} columns are defined.\n"
            f" Expected columns: {[str(c) for c in columns]}. Actual values: {row}\n"
        )
    errors = []
    for i, column in enumerate(columns):
        # None is always accepted (SQL NULL); otherwise the value must match the column type.
        if row[i] is not None and not isinstance(row[i], column.type):
            errors.append(
                f"Type mismatch at column '{column.name}' (index {i})\n"
                f" Expected type: {column.type.__name__}. Actual type {type(row[i]).__name__} with Value: {row[i]}.\n"
            )
    if errors:
        # Report all offending columns at once rather than failing on the first.
        raise TypeError("\n".join(errors))
class MockContext(UDFContext):
    """
    Implementation of generic UDF Mock Context interface for a SET UDF with groups.

    This class allows iterating over groups. The functionality of the UDF Context are applicable
    for the current input group.

    Call `next_group` to iterate over groups. The `output_groups` property provides the emit
    output for all groups iterated so far including the output for the current group.

    Calling any function of the UDFContext interface when the group iterator has passed the end
    or before the first call to the `next_group` is illegal and will cause a RuntimeException.
    """

    def __init__(self, input_groups: Iterator[Group], metadata: MockMetaData):
        """
        :param input_groups: Input groups. Each group object should contain input rows for the group.
        :param metadata:     The mock metadata object.
        """
        self._input_groups = input_groups
        self._metadata = metadata
        """ Mock context for the current group """
        self._current_context: Optional[StandaloneMockContext] = None
        """ Output for all groups """
        self._previous_output: List[Group] = []

    @property
    def no_context(self) -> bool:
        """Returns True if the current group context is invalid"""
        return self._current_context is None

    def next_group(self) -> bool:
        """
        Moves group iterator to the next group.

        Returns False if the iterator gets beyond the last group. Returns True otherwise.
        """
        # Save output of the current group
        if self._current_context is not None:
            self._previous_output.append(Group(self._current_context.output))
            self._current_context = None
        # Try get to the next input group
        try:
            input_group = next(self._input_groups)
        except StopIteration as e:
            return False
        if len(input_group) == 0:
            raise RuntimeError("Empty input groups are not allowed")
        # Create Mock Context for the new input group
        self._current_context = StandaloneMockContext(input_group, self._metadata)
        return True

    @property
    def output_groups(self):
        """
        Output of all groups including the current one.
        """
        if self._current_context is None:
            return self._previous_output
        else:
            # Work on a copy so the stored previous output is not mutated by
            # appending the (still growing) current group's output.
            groups = list(self._previous_output)
            groups.append(Group(self._current_context.output))
            return groups

    @check_context
    def __getattr__(self, name):
        # Only invoked for attributes not found on MockContext itself; forwards
        # the lookup (e.g. input column access) to the current group's context.
        return getattr(self._current_context, name)

    @check_context
    def get_dataframe(self, num_rows: Union[str, int], start_col: int = 0) -> Optional[pd.DataFrame]:
        """Delegates to the current group's context; see StandaloneMockContext.get_dataframe."""
        return self._current_context.get_dataframe(num_rows, start_col)

    @check_context
    def next(self, reset: bool = False) -> bool:
        """Advances the row iterator of the current group. Delegates to the current context."""
        return self._current_context.next(reset)

    @check_context
    def size(self) -> int:
        """Returns the number of input rows in the current group. Delegates to the current context."""
        return self._current_context.size()

    @check_context
    def reset(self) -> None:
        """Resets the row iterator of the current group. Delegates to the current context."""
        self._current_context.reset()

    @check_context
    def emit(self, *args) -> None:
        """Emits an output row for the current group. Delegates to the current context."""
        self._current_context.emit(*args)
def get_scalar_input(inp: Any) -> Iterable[Iterable[Any]]:
    """
    Figures out if the SCALAR parameters are provided as a scalar value or a tuple
    and also if there is a wrapping container around.

    Unless the parameters are already in a wrapping Sized container, returns parameters as an iterable
    wrapped into a one-item list, e.g [(param1, [param2, ...])]. Otherwise, returns the original input.

    :param inp: Input parameters.
    """
    if inp is None:
        return [tuple()]
    # A bare scalar becomes a single one-element row; strings count as scalars here.
    if (not isinstance(inp, Iterable)) or isinstance(inp, str):
        return [(inp,)]
    it = iter(inp)
    # For a one-shot iterator (e.g. a generator) iter() returns the object itself,
    # so peeking at the first item below consumes it from `inp` and it must be
    # re-attached to the result afterwards.
    is_one_shot = it is inp
    try:
        row1 = next(it)
    except StopIteration:
        # Empty container - same result as no input at all.
        return [tuple()]
    if (not isinstance(row1, Iterable)) or isinstance(row1, str):
        # `inp` is a flat collection of scalar parameters - wrap it as a single row.
        # Bug fix: a one-shot iterator previously lost its peeked first element here.
        return [(row1, *it)] if is_one_shot else [inp]
    if not isinstance(inp, Sized):
        # Rows in an unsized iterable - materialize so len() works downstream.
        # Bug fix: re-attach the peeked first row for one-shot iterators.
        return [row1, *it] if is_one_shot else list(inp)
    return inp
class StandaloneMockContext(UDFContext):
    """
    Implementation of generic UDF Mock Context interface a SCALAR UDF or a SET UDF with no groups.

    For Emit UDFs the output in the form of the list of tuples can be
    accessed by reading the `output` property.
    """

    def __init__(self, inp: Any, metadata: MockMetaData):
        """
        :param inp: Input rows for a SET UDF or parameters for a SCALAR one.
            In the former case the input object must be an iterable of rows. This, for example,
            can be a Group object. It must implement the __len__ method. Each data row must be
            an indexable container, e.g. a tuple.
            In the SCALAR case the input can be a scalar value, or tuple. This can also be wrapped
            in an iterable container, similar to the SET case.

        :param metadata: The mock metadata object.
        """
        if metadata.input_type.upper() == 'SCALAR':
            self._input = get_scalar_input(inp)
        else:
            self._input = inp
        self._metadata = metadata
        # Current input row (indexable), or None when not positioned on a row.
        self._data: Optional[Any] = None
        # Iterator over the input rows; (re)created in reset().
        self._iter: Optional[Iterator[Tuple[Any, ...]]] = None
        # Maps an input column name to its position for attribute-style row access.
        self._name_position_map = \
            {column.name: position
             for position, column
             in enumerate(metadata.input_columns)}
        self._output = []
        # Position the context on the first input row.
        self.next(reset=True)

    @property
    def output(self) -> List[Tuple[Any, ...]]:
        """Emitted output so far"""
        return self._output

    @staticmethod
    def _is_positive_integer(value):
        """Returns True if `value` is an int greater than zero."""
        return value is not None and isinstance(value, int) and value > 0

    def get_dataframe(self, num_rows='all', start_col=0):
        """
        Returns up to `num_rows` rows, starting from the current one, as a pandas
        DataFrame, advancing the row iterator as a side effect.

        :param num_rows: 'all' or the maximum number of rows to fetch (int > 0).
        :param start_col: Index of the first column to include (int >= 0).
        :return: A DataFrame, or None if the iterator is already exhausted.
        :raises RuntimeError: if either parameter is invalid.
        """
        if not (num_rows == 'all' or self._is_positive_integer(num_rows)):
            raise RuntimeError("get_dataframe() parameter 'num_rows' must be 'all' or an integer > 0")
        if not (self._is_positive_integer(start_col) or start_col == 0):
            raise RuntimeError("get_dataframe() parameter 'start_col' must be an integer >= 0")
        if self._data is None:
            return None
        columns_ = [column.name for column in self._metadata.input_columns[start_col:]]
        i = 0
        dfs: list[pd.DataFrame] = []
        # One single-row frame per fetched input row; concatenated at the end.
        while num_rows == 'all' or i < num_rows:
            dfs.append(pd.DataFrame.from_records(
                [self._data[start_col:]], columns=columns_))
            if not self.next():
                break
            i += 1
        if dfs:
            df = pd.concat(dfs, ignore_index=True)
            df.reset_index(inplace=True, drop=True)
            return df
        return None

    def __getattr__(self, name):
        # Attribute-style access to the current row's column values, e.g. `ctx.my_column`.
        return None if self._data is None else self._data[self._name_position_map[name]]

    def next(self, reset: bool = False) -> bool:
        """
        Moves to the next input row, or to the first one if `reset` is True.
        Returns True if a row is available, False if the input is exhausted.
        """
        if self._iter is None or reset:
            self.reset()
            # Bug fix: this branch used to fall through and return None regardless
            # of whether a row was available; report the actual state instead.
            return self._data is not None
        try:
            new_data = next(self._iter)
            self._data = new_data
            # Input rows are checked with the same rules as emitted rows.
            validate_emit(self._data, self._metadata.input_columns)
            return True
        except StopIteration:
            self._data = None
            return False

    def size(self) -> int:
        """Returns the total number of input rows."""
        return len(self._input)

    def reset(self) -> None:
        """Restarts the input iterator and positions it on the first row."""
        self._iter = iter(self._input)
        self.next()

    def emit(self, *args) -> None:
        """
        Emits output. A single pandas DataFrame argument is expanded into one output
        row per DataFrame row; any other arguments form a single output row.
        Raises (via validate_emit) if a row does not match the output columns.
        """
        if len(args) == 1 and isinstance(args[0], pd.DataFrame):
            tuples = [tuple(x) for x in args[0].astype('object').values]
        else:
            tuples = [args]
        for row in tuples:
            validate_emit(row, self._metadata.output_columns)
        self._output.extend(tuples)