Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion pyiceberg/conversions.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,6 @@ def _(_: PrimitiveType, b: bytes) -> int:
return _INT_STRUCT.unpack(b)[0]


@from_bytes.register(LongType)
@from_bytes.register(TimeType)
@from_bytes.register(TimestampType)
@from_bytes.register(TimestamptzType)
Expand All @@ -353,13 +352,24 @@ def _(_: PrimitiveType, b: bytes) -> int:
return _LONG_STRUCT.unpack(b)[0]


@from_bytes.register(LongType)
def _(_: PrimitiveType, b: bytes) -> int:
    """Decode a big-endian signed long from its binary representation.

    Args:
        _: The primitive type the bytes were written with (dispatch key only).
        b: The raw bytes; 8 bytes for a long, 4 bytes for a promoted int.

    Returns:
        The decoded integer value.
    """
    if len(b) == 4:
        # If the length is 4 bytes, it is a promoted IntegerType: the value
        # was serialized before the column was promoted to LongType, so it
        # must be unpacked with the 4-byte int struct.
        return _INT_STRUCT.unpack(b)[0]
    return _LONG_STRUCT.unpack(b)[0]


@from_bytes.register(FloatType)
def _(_: FloatType, b: bytes) -> float:
    """Decode a big-endian 4-byte IEEE-754 float from its binary form."""
    (value,) = _FLOAT_STRUCT.unpack(b)
    return value


@from_bytes.register(DoubleType)
def _(_: DoubleType, b: bytes) -> float:
    """Decode a big-endian 8-byte double from its binary form.

    A 4-byte payload indicates the value was written as a FloatType that
    has since been promoted to DoubleType, so it is unpacked as a float.
    """
    unpacker = _FLOAT_STRUCT if len(b) == 4 else _DOUBLE_STRUCT
    return unpacker.unpack(b)[0]


Expand Down
94 changes: 93 additions & 1 deletion tests/expressions/test_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,14 @@
Or,
StartsWith,
)
from pyiceberg.expressions.visitors import _InclusiveMetricsEvaluator, _StrictMetricsEvaluator
from pyiceberg.expressions.visitors import (
ROWS_CANNOT_MATCH,
ROWS_MIGHT_MATCH,
ROWS_MIGHT_NOT_MATCH,
ROWS_MUST_MATCH,
_InclusiveMetricsEvaluator,
_StrictMetricsEvaluator,
)
from pyiceberg.manifest import DataFile, FileFormat
from pyiceberg.schema import Schema
from pyiceberg.typedef import Record
Expand All @@ -50,6 +57,7 @@
FloatType,
IcebergType,
IntegerType,
LongType,
NestedField,
PrimitiveType,
StringType,
Expand Down Expand Up @@ -1463,3 +1471,87 @@ def test_strict_integer_not_in(strict_data_file_schema: Schema, strict_data_file

should_read = _StrictMetricsEvaluator(strict_data_file_schema, NotIn("no_nulls", {"abc", "def"})).eval(strict_data_file_1)
assert not should_read, "Should not match: no_nulls field does not have bounds"


@pytest.mark.parametrize(
    "file_type, evolved_type, lower_bound, upper_bound, op, lit, expected",
    [
        # Int -> Long
        (IntegerType(), LongType(), 30, 79, GreaterThan, 100, ROWS_CANNOT_MATCH),
        (IntegerType(), LongType(), 30, 79, LessThan, 50, ROWS_MIGHT_MATCH),
        # Float -> Double
        (FloatType(), DoubleType(), 30.0, 79.0, GreaterThan, 100.0, ROWS_CANNOT_MATCH),
        (FloatType(), DoubleType(), 30.0, 79.0, LessThan, 50.0, ROWS_MIGHT_MATCH),
    ],
)
def test_inclusive_metrics_evaluator_with_type_promotion(
    file_type: PrimitiveType,
    evolved_type: PrimitiveType,
    lower_bound: Any,
    upper_bound: Any,
    op: Any,
    lit: Any,
    expected: bool,
) -> None:
    """Inclusive evaluation must read bounds serialized with the pre-promotion type.

    The column bounds were written with ``file_type`` (e.g. int/float) while
    the current schema declares the promoted ``evolved_type`` (long/double).
    """
    # Schema defines 'col' with the evolved (promoted) type
    schema = Schema(NestedField(1, "col", evolved_type, required=True))

    # Historical manifest contains bounds encoded with the original file type
    data_file = DataFile.from_args(
        file_path="file_1.parquet",
        file_format=FileFormat.PARQUET,
        partition={},
        record_count=100,
        file_size_in_bytes=1024,
        lower_bounds={1: to_bytes(file_type, lower_bound)},
        upper_bounds={1: to_bytes(file_type, upper_bound)},
    )

    # Predicate refers to 'col'
    evaluator = _InclusiveMetricsEvaluator(schema, op("col", lit))
    assert evaluator.eval(data_file) == expected


@pytest.mark.parametrize(
    "file_type, evolved_type, lower_bound, upper_bound, op, lit, expected",
    [
        # Int -> Long
        (IntegerType(), LongType(), 30, 79, GreaterThan, 20, ROWS_MUST_MATCH),
        (IntegerType(), LongType(), 30, 79, GreaterThan, 100, ROWS_MIGHT_NOT_MATCH),
        (IntegerType(), LongType(), 30, 79, LessThan, 100, ROWS_MUST_MATCH),
        (IntegerType(), LongType(), 30, 79, LessThan, 20, ROWS_MIGHT_NOT_MATCH),
        # Float -> Double
        (FloatType(), DoubleType(), 30.0, 79.0, GreaterThan, 20.0, ROWS_MUST_MATCH),
        (FloatType(), DoubleType(), 30.0, 79.0, GreaterThan, 100.0, ROWS_MIGHT_NOT_MATCH),
        (FloatType(), DoubleType(), 30.0, 79.0, LessThan, 100.0, ROWS_MUST_MATCH),
        (FloatType(), DoubleType(), 30.0, 79.0, LessThan, 20.0, ROWS_MIGHT_NOT_MATCH),
    ],
)
def test_strict_metrics_evaluator_with_type_promotion(
    file_type: PrimitiveType,
    evolved_type: PrimitiveType,
    lower_bound: Any,
    upper_bound: Any,
    op: Any,
    lit: Any,
    expected: bool,
) -> None:
    """Strict evaluation must honor bounds written before type promotion."""
    # The current table schema carries the promoted type for 'col'.
    table_schema = Schema(NestedField(1, "col", evolved_type, required=True))

    # The data file's column bounds were serialized with the original,
    # pre-promotion type.
    manifest_entry = DataFile.from_args(
        file_path="file_1.parquet",
        file_format=FileFormat.PARQUET,
        partition={},
        record_count=100,
        file_size_in_bytes=1024,
        lower_bounds={1: to_bytes(file_type, lower_bound)},
        upper_bounds={1: to_bytes(file_type, upper_bound)},
        null_value_counts={1: 0},
        nan_value_counts={1: 0},
    )

    # Evaluate the predicate against the historical metrics.
    result = _StrictMetricsEvaluator(table_schema, op("col", lit)).eval(manifest_entry)
    assert result == expected
Loading