Skip to content

Commit bf87a28

Browse files
pre-commit-ci[bot], dpaetzel, and rpreen
authored
update pre-commit and add Ruff linting and formatting (#196)
* [pre-commit.ci] pre-commit autoupdate

  updates:
  - [github.com/pre-commit/mirrors-clang-format: v20.1.7 → v20.1.8](pre-commit/mirrors-clang-format@v20.1.7...v20.1.8)
  - [github.com/astral-sh/ruff-pre-commit: v0.12.2 → v0.12.3](astral-sh/ruff-pre-commit@v0.12.2...v0.12.3)

* Add checks for array contiguity (#199)

  Fixes #198.

* Add `clean` parameter to `json_read` (#202)

* add Ruff Python linting/formatting

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: David Pätzel <david.paetzel@posteo.de>
Co-authored-by: Richard Preen <rpreen@gmail.com>
1 parent 0001f54 commit bf87a28

14 files changed

Lines changed: 156 additions & 71 deletions

.pre-commit-config.yaml

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -30,16 +30,16 @@ repos:
3030
rev: v2.4.1
3131
hooks:
3232
- id: codespell
33-
args: ["-L", "larg"]
3433
exclude: >
3534
(?x)^(
3635
.*\.txt|
36+
.*\.svg|
3737
.*\.ipynb
3838
)$
3939
4040
# Clang format
4141
- repo: https://github.com/pre-commit/mirrors-clang-format
42-
rev: v20.1.7
42+
rev: v20.1.8
4343
hooks:
4444
- id: clang-format
4545
types_or: [c++, c, cuda]
@@ -55,15 +55,16 @@ repos:
5555

5656
# Ruff, the Python auto-correcting linter/formatter written in Rust
5757
- repo: https://github.com/astral-sh/ruff-pre-commit
58-
rev: v0.12.2
58+
rev: v0.12.3
5959
hooks:
6060
- id: ruff
61-
args: ["--fix", "--show-fixes", "--exclude=__init__.py"]
61+
args: ["--fix", "--show-fixes"]
6262
- id: ruff-format
6363

64-
# Black format Python and notebooks
65-
- repo: https://github.com/psf/black
66-
rev: 25.1.0
64+
# Format docstrings
65+
- repo: https://github.com/DanielNoord/pydocstringformatter
66+
rev: v0.7.3
6767
hooks:
68-
- id: black-jupyter
68+
- id: pydocstringformatter
69+
args: ["--style=numpydoc"]
6970
...

pyproject.toml

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,82 @@
11
[build-system]
22
requires = ["setuptools"]
33
build-backend = "setuptools.build_meta"
4+
5+
[tool.ruff]
6+
indent-width = 4
7+
line-length = 88
8+
target-version = "py39"
9+
extend-include = ["*.ipynb"]
10+
11+
lint.select = [
12+
"ANN", # flake8-annotations
13+
"ARG", # flake8-unused-arguments
14+
"B", # flake8-bugbear
15+
"C4", # flake8-comprehensions
16+
"C90", # mccabe
17+
# "D", # pydocstyle
18+
"DTZ", # flake8-datetimez
19+
"E", # pycodestyle
20+
"EM", # flake8-errmsg
21+
"ERA", # eradicate
22+
"F", # Pyflakes
23+
"I", # isort
24+
"ICN", # flake8-import-conventions
25+
"N", # pep8-naming
26+
"PD", # pandas-vet
27+
"PGH", # pygrep-hooks
28+
"PIE", # flake8-pie
29+
"PL", # Pylint
30+
"PLC", # Pylint
31+
"PLE", # Pylint
32+
"PLR", # Pylint
33+
"PLW", # Pylint
34+
"PT", # flake8-pytest-style
35+
"Q", # flake8-quotes
36+
"RET", # flake8-return
37+
"RUF100", # Ruff-specific
38+
"S", # flake8-bandit
39+
"SIM", # flake8-simplify
40+
# "T20", # flake8-print
41+
"TID", # flake8-tidy-imports
42+
"UP", # pyupgrade
43+
"W", # pycodestyle
44+
"YTT", # flake8-2020
45+
]
46+
47+
exclude = [
48+
"__init__.py",
49+
"setup.py",
50+
]
51+
52+
lint.ignore = [
53+
"S301", # pickle
54+
"S311", # Standard pseudo-random generators
55+
]
56+
57+
[tool.ruff.lint.pep8-naming]
58+
extend-ignore-names = [
59+
"X", "X1", "X_train", "X_val", "X_test", "X_predict",
60+
]
61+
62+
[tool.ruff.lint.pydocstyle]
63+
convention = "numpy"
64+
65+
[tool.ruff.format]
66+
docstring-code-format = true
67+
docstring-code-line-length = 80
68+
69+
[tool.ruff.lint.extend-per-file-ignores]
70+
"test/**" = ["S101", "PLR2004", "ANN"]
71+
"python/example_rmux.py" = ["PLR2004"]
72+
"python/example_maze.py" = ["PLR2004"]
73+
"python/notebooks/example_cartpole.ipynb" = ["E501", "SIM115"]
74+
"python/notebooks/example_maze.ipynb" = ["E501", "PLR2004"]
75+
"python/notebooks/example_regression.ipynb" = ["PLR2004"]
76+
"python/notebooks/example_rmux.ipynb" = ["PLR2004"]
77+
"python/notebooks/example_tuning.ipynb" = ["E501", "ANN201"]
78+
79+
[tool.codespell]
80+
ignore-words-list = [
81+
"larg",
82+
]

python/example_cartpole.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@
117117

118118

119119
def replay(replay_size: int = 5000) -> None:
120-
"""Performs experience replay updates"""
120+
"""Performs experience replay updates."""
121121
batch_size: int = min(len(memory), replay_size)
122122
batch = random.sample(memory, batch_size)
123123
for state, action, reward, next_state, done in batch:
@@ -133,7 +133,7 @@ def replay(replay_size: int = 5000) -> None:
133133

134134

135135
def egreedy_action(state: np.ndarray) -> int:
136-
"""Selects an action using an epsilon greedy policy"""
136+
"""Selects an action using an epsilon greedy policy."""
137137
if np.random.rand() < epsilon:
138138
return random.randrange(N_ACTIONS)
139139
prediction_array = xcs.predict(state.reshape(1, -1))[0]
@@ -143,7 +143,7 @@ def egreedy_action(state: np.ndarray) -> int:
143143

144144

145145
def episode() -> tuple[float, int]:
146-
"""Executes a single episode, saving to memory buffer"""
146+
"""Executes a single episode, saving to memory buffer."""
147147
episode_score: float = 0
148148
episode_steps: int = 0
149149
state: np.ndarray = env.reset()[0]

python/example_maze.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,7 @@ def run_experiment(env: Maze) -> None:
283283
bar.close()
284284

285285

286-
def plot_performance(env: Maze):
286+
def plot_performance(env: Maze) -> None:
287287
"""Plots learning performance."""
288288
plt.figure(figsize=(10, 6))
289289
plt.plot(trials, steps)

python/example_rmux.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -101,13 +101,12 @@ def execute(self, act: int) -> tuple[bool, float]:
101101
reward += 0.1
102102
else:
103103
reward = self.max_payoff
104+
elif self.payoff_map:
105+
reward = (pos - self.pos_bits) * 0.2
106+
if answer == 1:
107+
reward += 0.1
104108
else:
105-
if self.payoff_map:
106-
reward = (pos - self.pos_bits) * 0.2
107-
if answer == 1:
108-
reward += 0.1
109-
else:
110-
reward = 0
109+
reward = 0
111110
return correct, reward
112111

113112

python/notebooks/example_cartpole.ipynb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,12 +33,12 @@
3333
"import random\n",
3434
"from collections import deque\n",
3535
"\n",
36-
"from matplotlib import rcParams\n",
37-
"import matplotlib.pyplot as plt\n",
38-
"import imageio\n",
3936
"import gymnasium as gym\n",
37+
"import imageio\n",
38+
"import matplotlib.pyplot as plt\n",
4039
"import numpy as np\n",
41-
"from IPython.display import display, Image\n",
40+
"from IPython.display import Image, display\n",
41+
"from matplotlib import rcParams\n",
4242
"from tqdm import tqdm\n",
4343
"\n",
4444
"import xcsf\n",
@@ -625,12 +625,12 @@
625625
}
626626
],
627627
"source": [
628-
"annotated_frames = list()\n",
628+
"annotated_frames = []\n",
629629
"\n",
630630
"if SAVE_GIF:\n",
631631
" # add score and episode nr\n",
632632
" rcParams[\"font.family\"] = \"monospace\"\n",
633-
" bbox = dict(boxstyle=\"round\", fc=\"0.8\")\n",
633+
" bbox = {\"boxstyle\": \"round\", \"fc\": \"0.8\"}\n",
634634
" bar = tqdm(total=len(frames), position=0, leave=True)\n",
635635
" for i in range(len(frames)):\n",
636636
" fig = plt.figure(dpi=90)\n",

python/notebooks/example_classification.ipynb

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,11 @@
2121
"import matplotlib.pyplot as plt\n",
2222
"import numpy as np\n",
2323
"from sklearn.datasets import fetch_openml\n",
24-
"from sklearn.metrics import ConfusionMatrixDisplay\n",
25-
"from sklearn.metrics import confusion_matrix, classification_report\n",
24+
"from sklearn.metrics import (\n",
25+
" ConfusionMatrixDisplay,\n",
26+
" classification_report,\n",
27+
" confusion_matrix,\n",
28+
")\n",
2629
"from sklearn.model_selection import train_test_split\n",
2730
"from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n",
2831
"\n",

python/notebooks/example_maze.ipynb

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -486,7 +486,7 @@
486486
}
487487
],
488488
"source": [
489-
"def plot_performance(env: Maze):\n",
489+
"def plot_performance(env: Maze) -> None:\n",
490490
" \"\"\"Plots learning performance.\"\"\"\n",
491491
" plt.figure(figsize=(10, 6))\n",
492492
" plt.plot(trials, steps)\n",
@@ -534,9 +534,10 @@
534534
],
535535
"source": [
536536
"import io\n",
537-
"from PIL import Image\n",
538537
"from turtle import Screen, Turtle\n",
538+
"\n",
539539
"from IPython.display import display\n",
540+
"from PIL import Image\n",
540541
"\n",
541542
"GRID_WIDTH: int = maze.x_size\n",
542543
"GRID_HEIGHT: int = maze.y_size\n",

python/notebooks/example_regression.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
"import graphviz\n",
2222
"import matplotlib.pyplot as plt\n",
2323
"import numpy as np\n",
24-
"from IPython.display import display, Image\n",
24+
"from IPython.display import Image, display\n",
2525
"from sklearn.datasets import fetch_openml\n",
2626
"from sklearn.ensemble import RandomForestRegressor\n",
2727
"from sklearn.gaussian_process import GaussianProcessRegressor\n",

python/notebooks/example_rmux.ipynb

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -109,13 +109,12 @@
109109
" reward += 0.1\n",
110110
" else:\n",
111111
" reward = self.max_payoff\n",
112+
" elif self.payoff_map:\n",
113+
" reward = (pos - self.pos_bits) * 0.2\n",
114+
" if answer == 1:\n",
115+
" reward += 0.1\n",
112116
" else:\n",
113-
" if self.payoff_map:\n",
114-
" reward = (pos - self.pos_bits) * 0.2\n",
115-
" if answer == 1:\n",
116-
" reward += 0.1\n",
117-
" else:\n",
118-
" reward = 0\n",
117+
" reward = 0\n",
119118
" return correct, reward"
120119
]
121120
},

0 commit comments

Comments (0)