[build-system]
build-backend = "setuptools.build_meta"
requires = [
    "setuptools>=72.0.0,<80.0.0",
    "setuptools-scm>=8.0.0,<9.0.0",
    "wheel>=0.38.4,<1.0.0",
]

# NOTE(review): `[packaging]` is not a table defined by PEP 621 / the
# pyproject spec; strict validators (e.g. validate-pyproject) reject unknown
# top-level tables. Confirm what consumes this key — it may belong under
# `[tool.<name>]` instead.
[packaging]
package_name = "ydata-profiling"
411
[project]
name = "ydata-profiling"
requires-python = ">=3.7,<3.13"
authors = [
    {name = "YData Labs Inc", email = "opensource@ydata.ai"},
]
description = "Generate profile report for pandas DataFrame"
keywords = ["pandas", "data-science", "data-analysis", "python", "jupyter", "ipython"]
readme = "README.md"
# NOTE(review): `license = {file = ...}` replaced the earlier `{text = "MIT"}`
# form; with setuptools this embeds LICENSE.md verbatim — confirm intended.
license = {file = "LICENSE.md"}
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Topic :: Software Development :: Build Tools",
    "License :: OSI Approved :: MIT License",
    # (… remaining classifier entries unchanged and omitted from this diff excerpt …)
# (… earlier `dependencies = [` entries unchanged and omitted from this diff excerpt …)
    "numba>=0.56.0, <1",
]

dynamic = [
    "version",
]
[project.optional-dependencies]
dev = [
    "black>=20.8b1",
    "isort>=5.0.7",
@@ -80,6 +88,22 @@ dev = [
8088 " sphinx-multiversion>=0.2.3" ,
8189 " autodoc_pydantic" ,
8290]
91+
92+ docs = [
93+ " mkdocs>=1.6.0,<1.7.0" ,
94+ " mkdocs-material>=9.0.12,<10.0.0" ,
95+ " mkdocs-material-extensions>=1.1.1,<2.0.0" ,
96+ " mkdocs-table-reader-plugin<=2.2.0" ,
97+ " mike>=2.1.1,<2.2.0" ,
98+ " mkdocstrings[python]>=0.20.0,<1.0.0" ,
99+ " mkdocs-badges" ,
100+ ]
101+
102+ notebook = [
103+ " jupyter>=1.0.0" ,
104+ " ipywidgets>=7.5.1" ,
105+ ]
106+
83107# this provides the recommended pyspark and pyarrow versions for spark to work on pandas-profiling
84108# note that if you are using pyspark 2.3 or 2.4 and pyarrow >= 0.15, you might need to
85109# set ARROW_PRE_0_15_IPC_FORMAT=1 in your conf/spark-env.sh for toPandas functions to work properly
# (… `spark = [` header and earlier entries unchanged and omitted from this diff excerpt …)
    "numpy>=1.16.0,<1.24",
    "visions[type_image_path]>=0.7.5, <0.7.7",
]

test = [
    "pytest",
    "coverage>=6.5, <8",
@@ -100,35 +125,29 @@ test = [
100125 " twine>=3.1.1" ,
101126 " kaggle" ,
102127]
103- notebook = [
104- " jupyter>=1.0.0" ,
105- " ipywidgets>=7.5.1" ,
106- ]
107- docs = [
108- " mkdocs>=1.6.0,<1.7.0" ,
109- " mkdocs-material>=9.0.12,<10.0.0" ,
110- " mkdocs-material-extensions>=1.1.1,<2.0.0" ,
111- " mkdocs-table-reader-plugin<=2.2.0" ,
112- " mike>=2.1.1,<2.2.0" ,
113- " mkdocstrings[python]>=0.20.0,<1.0.0" ,
114- " mkdocs-badges" ,
115- ]
128+
116129unicode = [
117130 " tangled-up-in-unicode==0.2.0" ,
118131]
119132
[project.urls]
Homepage = "https://ydata.ai"
Repository = "https://github.com/ydataai/ydata-profiling"

[project.scripts]
ydata_profiling = "ydata_profiling.controller.console:main"
pandas_profiling = "ydata_profiling.controller.console:main"

# setuptools relative

[tool.setuptools]
include-package-data = true

[tool.setuptools.package-data]
ydata_profiling = ["py.typed"]

# NOTE(review): `universal = true` marks a py2.py3 wheel, but
# requires-python is ">=3.7,<3.13" — a universal wheel looks unintended
# for a Python-3-only project; confirm before release.
[tool.distutils.bdist_wheel]
universal = true

# Maps the package root to src/ (replaces the removed
# [tool.setuptools.packages.find] where = ["src"] stanza).
[tool.setuptools.package-dir]
"" = "src"