diff --git a/src/ydata_profiling/report/presentation/core/scores.py b/src/ydata_profiling/report/presentation/core/scores.py index 62b001bff..7ff70570a 100644 --- a/src/ydata_profiling/report/presentation/core/scores.py +++ b/src/ydata_profiling/report/presentation/core/scores.py @@ -3,6 +3,7 @@ """ from typing import Any, Dict, List, Optional +from ydata_profiling.config import Style from ydata_profiling.report.presentation.core.item_renderer import ItemRenderer @@ -11,15 +12,15 @@ def __init__( self, items: List[Dict], overall_score: float, + style: Style, name: Optional[str], - caption: Optional[str], **kwargs ): content = { "items": items, "overall_score": overall_score, "name": name, - "caption": caption, + "style": style, } super().__init__("scores", content=content, **kwargs) diff --git a/src/ydata_profiling/report/presentation/flavours/html/templates/scores.html b/src/ydata_profiling/report/presentation/flavours/html/templates/scores.html index f0bf8e895..90ad82cda 100644 --- a/src/ydata_profiling/report/presentation/flavours/html/templates/scores.html +++ b/src/ydata_profiling/report/presentation/flavours/html/templates/scores.html @@ -1,44 +1,78 @@
-
-
Overall Data Quality Score
-
{{ overall_score }}
-
+
+
Overall Data Quality Score
+
+ {% for i in range(overall_score | length) %} +
+ {% if overall_score | length > 1 %} +
{{ name[i]}}
+ {% endif %} +
{{ overall_score[i]}}
+
+ {% endfor %} +
+
-
+
{% for metric in items %}
-
-
{{ metric.name }}
-
-
+
+
{{ metric.name }}
+ {% for j in range(metric.submetrics| length) %} + {% if metric.submetrics | length > 1 %} +
{{ name[j] }}
+ {% endif %} +
+
+ {{ metric.submetrics[j].value }}% +
-
-
{{ metric.value }}%
+ {% endfor %}
{% endfor %}
+ + diff --git a/tests/issues/test_issue537.py b/tests/issues/test_issue537.py index 7248a3ac8..f0a2c9f0a 100644 --- a/tests/issues/test_issue537.py +++ b/tests/issues/test_issue537.py @@ -34,7 +34,15 @@ def test_multiprocessing_describe1d(config, summarizer, typeset): def download_and_process_data(): """Downloads and processes the dataset into a Pandas DataFrame.""" - response = requests.get("https://ndownloader.figshare.com/files/5976042") + headers = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/122.0.0.0 Safari/537.36" + } + response = requests.get( + "https://ndownloader.figshare.com/files/5976042", headers=headers + ) + response.raise_for_status()  # Ensure successful download file = decompress(response.content) diff --git a/tests/unit/test_pandas/test_compat.py b/tests/unit/test_pandas/test_compat.py new file mode 100644 index 000000000..b0dd94d39 --- /dev/null +++ b/tests/unit/test_pandas/test_compat.py @@ -0,0 +1,46 @@ +import unittest + +import pandas as pd + +from ydata_profiling.utils.compat import optional_option_context, pandas_version_info + + +class TestCompatUtils(unittest.TestCase): + def test_pandas_version_info_format(self): + version_info = pandas_version_info() + self.assertIsInstance(version_info, tuple) + self.assertTrue(all(isinstance(i, int) for i in version_info)) + + expected_prefix = tuple( + int(x) for x in pd.__version__.split(".")[: len(version_info)] + ) + self.assertEqual(version_info, expected_prefix) + + def test_optional_option_context_with_existing_option(self): + option = "display.max_rows" + original_value = pd.get_option(option) + + with optional_option_context(option, 123): + self.assertEqual(pd.get_option(option), 123) + + self.assertEqual(pd.get_option(option), original_value) + + def test_optional_option_context_with_missing_option(self): + class FakeOptionContext: + def __init__(self, *args, **kwargs): + raise pd.errors.OptionError("Simulated OptionError") + + original_option_context = pd.option_context + pd.option_context = FakeOptionContext + + try: + # Should not raise, even though the option is invalid + with optional_option_context("non.existent.option", 456): + pass + finally: + # Restore the original option_context + pd.option_context = original_option_context + + +if __name__ == "__main__": + unittest.main()