Skip to content

Commit fe55f7f

Browse files
author
Thierry RAMORASOAVINA
committed
Adapt to pandas 3.0
- For python 3.10, 2.3.3 is still used, but with the new pandas StringDtype enabled. - For python 3.11+, the later 3.0.0+ versions are used.
1 parent a2b0668 commit fe55f7f

File tree

7 files changed

+72
-34
lines changed

7 files changed

+72
-34
lines changed

doc/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,6 @@ ipykernel>=6.9.1
44
nbconvert==6.4.4
55
nbformat==5.3.0
66
numpydoc>=1.5.0
7-
pandas>=0.25.3,<=2.3.3
7+
pandas>=2.3.3,<4.0.0
88
scikit-learn>=1.7.2,<1.9.0
99
sphinx-copybutton>=0.5.0

khiops/sklearn/dataset.py

Lines changed: 53 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313

1414
import numpy as np
1515
import pandas as pd
16-
import sklearn
1716
from scipy import sparse as sp
1817
from sklearn.utils import check_array
1918
from sklearn.utils.validation import column_or_1d
@@ -33,6 +32,13 @@
3332
# pylint --disable=all --enable=invalid-names dataset.py
3433
# pylint: disable=invalid-name
3534

35+
# Set a special pandas option to force the new string data type (`StringDtype`)
36+
# even for version 2.x, which is still required for python 3.10.
37+
# This new string data type no longer maps to a NumPy data type.
38+
# Hence, code assuming NumPy type compatibility will break unless
39+
# this string data type is handled separately.
40+
pd.options.future.infer_string = True
41+
3642

3743
def check_dataset_spec(ds_spec):
3844
"""Checks that a dataset spec is valid
@@ -393,16 +399,11 @@ def write_internal_data_table(dataframe, file_path_or_stream):
393399

394400

395401
def _column_or_1d_with_dtype(y, dtype=None):
396-
# 'dtype' has been introduced on `column_or_1d' since Scikit-learn 1.2;
397-
if sklearn.__version__ < "1.2":
398-
if pd.api.types.is_string_dtype(dtype) and y.isin(["True", "False"]).all():
399-
warnings.warn(
400-
"'y' stores strings restricted to 'True'/'False' values: "
401-
"The predict method may return a bool vector."
402-
)
403-
return column_or_1d(y, warn=True)
404-
else:
405-
return column_or_1d(y, warn=True, dtype=dtype)
402+
"""Checks the data is of the provided `dtype`.
403+
If a problem is detected a warning is printed or an error raised,
404+
otherwise the pandas object is transformed into a numpy.array
405+
"""
406+
return column_or_1d(y, warn=True, dtype=dtype)
406407

407408

408409
class Dataset:
@@ -607,12 +608,37 @@ def _init_target_column(self, y):
607608
# pandas.Series, pandas.DataFrame or numpy.ndarray
608609
else:
609610
if hasattr(y, "dtype"):
611+
if not isinstance(y, np.ndarray):
612+
# Since pandas 3.0, numbers and boolean values in an array
613+
# but with a carriage-return are wrongly inferred first
614+
# respectively as `object` dtype instead of `int64` and
615+
# `object` dtype instead of `bool`.
616+
# Forcing pandas to `infer_objects` fixes the error
617+
if pd.api.types.is_object_dtype(y):
618+
y = y.infer_objects()
619+
warnings.warn(
620+
"The first guess of 'y' dtype is 'object'. "
621+
"This would lead to errors. "
622+
"After a second pass of inferring, "
623+
f"the detected dtype is {y.dtype}"
624+
)
610625
if isinstance(y.dtype, pd.CategoricalDtype):
611626
y_checked = _column_or_1d_with_dtype(
612627
y, dtype=y.dtype.categories.dtype
613628
)
614629
else:
615-
y_checked = _column_or_1d_with_dtype(y, dtype=y.dtype)
630+
# Since pandas 3.0 (and even in 2.0 if the option is activated)
631+
# a new `StringDtype` is used to handle strings.
632+
# It does not match any longer the one recognized by numpy.
633+
# An issue was created on scikit-learn
634+
# https://github.com/scikit-learn/scikit-learn/issues/33383
635+
# Until it is fixed 'y' is not checked when pandas dtype is
636+
# `StringDtype`.
637+
if pd.api.types.is_string_dtype(y.dtype):
638+
dtype = None
639+
else:
640+
dtype = y.dtype
641+
y_checked = _column_or_1d_with_dtype(y, dtype=dtype)
616642
elif hasattr(y, "dtypes"):
617643
if isinstance(y.dtypes.iloc[0], pd.CategoricalDtype):
618644
y_checked = _column_or_1d_with_dtype(
@@ -965,21 +991,19 @@ def __init__(self, name, dataframe, key=None):
965991

966992
# Initialize feature columns and verify their types
967993
self.column_ids = self.data_source.columns.values
968-
if not np.issubdtype(self.column_ids.dtype, np.integer):
969-
if np.issubdtype(self.column_ids.dtype, object):
970-
for i, column_id in enumerate(self.column_ids):
971-
if not isinstance(column_id, str):
972-
raise TypeError(
973-
f"Dataframe column ids must be either all integers or "
974-
f"all strings. Column id at index {i} ('{column_id}') is"
975-
f" of type '{type(column_id).__name__}'"
976-
)
977-
else:
978-
raise TypeError(
979-
f"Dataframe column ids must be either all integers or "
980-
f"all strings. The column index has dtype "
981-
f"'{self.column_ids.dtype}'"
982-
)
994+
# Ensure the feature columns are either all string
995+
# or all numeric but not a mix of both.
996+
# Warning: the new pandas string data type (`StringDtype`)
997+
# - by default in pandas 3.0 or forced in pandas 2.0 -
998+
# cannot be evaluated by `np.issubdtype`, any attempt will raise an error.
999+
if not pd.api.types.is_numeric_dtype(
1000+
self.column_ids
1001+
) and not pd.api.types.is_string_dtype(self.column_ids):
1002+
raise TypeError(
1003+
"Dataframe column ids must be either all integers or "
1004+
"all strings. Columns have the following mixed types: "
1005+
f"{sorted(set([type(cid).__name__ for cid in self.column_ids]))}."
1006+
)
9831007

9841008
# Initialize Khiops types
9851009
self.khiops_types = {}
@@ -988,7 +1012,8 @@ def __init__(self, name, dataframe, key=None):
9881012
column_numpy_type = column.dtype
9891013
column_max_size = None
9901014
if isinstance(column_numpy_type, pd.StringDtype):
991-
column_max_size = column.str.len().max()
1015+
# Warning pandas.Series.str.len() returns a float64
1016+
column_max_size = int(column.str.len().max())
9921017
self.khiops_types[column_id] = get_khiops_type(
9931018
column_numpy_type, column_max_size
9941019
)

khiops/sklearn/estimators.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2021,7 +2021,7 @@ def predict_proba(self, X):
20212021
y_probas, (pd.DataFrame, np.ndarray)
20222022
), "y_probas is not a Pandas DataFrame nor Numpy array"
20232023
y_probas = y_probas.reindex(
2024-
self._sorted_prob_variable_names(), axis=1, copy=False
2024+
self._sorted_prob_variable_names(), axis=1
20252025
).to_numpy(copy=False)
20262026

20272027
assert isinstance(y_probas, (str, np.ndarray)), "Expected str or np.ndarray"
@@ -2265,7 +2265,7 @@ def predict(self, X):
22652265

22662266
# Transform to np.ndarray
22672267
if isinstance(y_pred, pd.DataFrame):
2268-
y_pred = y_pred.astype("float64", copy=False).to_numpy(copy=False).ravel()
2268+
y_pred = y_pred.astype("float64").to_numpy(copy=False).ravel()
22692269

22702270
assert isinstance(y_pred, (str, np.ndarray)), "Expected str or np.array"
22712271
return y_pred

packaging/conda/meta.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ requirements:
2424
run:
2525
- python
2626
- khiops-core =11.0.0
27-
- pandas >=0.25.3,<=2.3.3
27+
- pandas >=2.3.3,<4.0.0
2828
- scikit-learn>=1.7.2,<1.9.0
2929
run_constrained:
3030
# do not necessary use the latest version

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ classifiers = [
105105
requires-python = ">=3.8"
106106
dependencies = [
107107
# do not use the latest versions, to avoid undesired breaking changes
108-
"pandas>=0.25.3,<=2.3.3",
108+
"pandas>=2.3.3,<4.0.0",
109109
"scikit-learn>=1.7.2,<1.9.0",
110110
]
111111

tests/test_dataset_class.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,12 @@ def create_monotable_dataframe(self):
7474
1077,
7575
1077,
7676
],
77+
# Since pandas 3.0 the default precision for parsing a datetime
78+
# is now microseconds (us) instead of nanoseconds (ns)
79+
# unless enough precision is given.
80+
# Unfortunately only the changelog states this, not the docstring.
81+
# To avoid any comparison error in tests
82+
# we need to add the required precision to the datetime
7783
"Date": pd.to_datetime(
7884
[
7985
"2019-03-22",
@@ -503,6 +509,13 @@ def test_out_file_from_dataframe_monotable(self):
503509
ref_table = spec["main_table"][0]
504510
ref_table["class"] = y
505511

512+
# Since pandas 3.0 the default precision for parsing a datetime
513+
# is now microseconds (us) instead of nanoseconds (ns)
514+
# unless enough precision is given.
515+
# Unfortunately only the changelog states this, not the docstring.
516+
# To avoid any comparison error in tests
517+
# we need to set the required precision (ns) for the datetime
518+
ref_table["Date"] = ref_table["Date"].astype("datetime64[ns]")
506519
# Check that the dataframes are equal
507520
assert_frame_equal(
508521
ref_table.sort_values(by="User_ID").reset_index(drop=True),

tests/test_dataset_errors.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -595,6 +595,6 @@ def test_pandas_table_column_ids_must_all_be_int_or_str(self):
595595
output_error_msg = str(context.exception)
596596
expected_msg = (
597597
"Dataframe column ids must be either all integers or all "
598-
"strings. Column id at index 0 ('1') is of type 'int'"
598+
"strings. Columns have the following mixed types: ['int', 'str']."
599599
)
600600
self.assertEqual(output_error_msg, expected_msg)

0 commit comments

Comments
 (0)