Skip to content

Commit ac6c4c2

Browse files
XHPlus and claude authored
fix: add missing whitespace around arithmetic operators (#459)
Fixed PEP 8 compliance issues by adding proper spacing around arithmetic operators in f-strings and expressions. This resolves all flake8 E225/E226 formatting errors that were preventing pre-commit checks from passing. Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 06e3ed9 commit ac6c4c2

File tree

8 files changed

+18
-17
lines changed

8 files changed

+18
-17
lines changed

llmc/compression/quantization/quarot.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ def get_orthogonal_matrix(self):
9696
raise ValueError(f'Unsupported mode {self.mode}')
9797

9898
def block_transform(self, block):
99-
logger.info(f'Start transform the {self.block_idx+1}-th block')
99+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
100100

101101
if self.online_rotate:
102102
self.replace_rotate_linears(block)
@@ -108,7 +108,7 @@ def block_transform(self, block):
108108
gc.collect()
109109

110110
logger.info(f'block:{block}')
111-
logger.info(f'End transform the {self.block_idx+1}-th block')
111+
logger.info(f'End transform the {self.block_idx + 1}-th block')
112112

113113
@torch.no_grad()
114114
def subset_transform(self, block, subset):

llmc/compression/quantization/spqr.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ def block_transform_true_sequential(self, block, input_feat):
9090

9191
@torch.no_grad()
9292
def block_transform(self, block, input_feat, *block_kwargs):
93-
logger.info(f'Start transform the {self.block_idx+1}-th block')
93+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
9494

9595
if self.true_sequential:
9696
self.block_transform_true_sequential(block, input_feat)
@@ -103,7 +103,7 @@ def block_transform(self, block, input_feat, *block_kwargs):
103103
self.get_replacement_params(mode='fake_quant', w_only=True),
104104
)
105105

106-
logger.info(f'End transform the {self.block_idx+1}-th block')
106+
logger.info(f'End transform the {self.block_idx + 1}-th block')
107107

108108
@torch.no_grad()
109109
def subset_transform(self, layers_dict):

llmc/compression/quantization/tesseraq.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ def collect_block_qparams(self, block, input_feat):
176176

177177
@torch.no_grad()
178178
def block_transform(self, block, input_feat, block_kwargs):
179-
logger.info(f'Start transform the {self.block_idx+1}-th block')
179+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
180180

181181
with torch.no_grad():
182182
block.float()
@@ -204,7 +204,7 @@ def block_transform(self, block, input_feat, block_kwargs):
204204
if self.reduce_memory:
205205
block.to(self.model_dtype)
206206

207-
logger.info(f'End transform the {self.block_idx+1}-th block')
207+
logger.info(f'End transform the {self.block_idx + 1}-th block')
208208

209209
def tesseraq_train(self, block):
210210
self.set_dynamic_tmp_quant(block, on=True)
@@ -273,8 +273,8 @@ def tesseraq_train(self, block):
273273
norm = loss_scaler(loss, optimizer, parameters=params_r + params_s)
274274

275275
logger.info(
276-
f'block {self.block_idx} iter {i+1} loss:{loss.item():5f} \
277-
norm:{norm.item():4f} HR progress:{(1-thresholds[i])*100:1f}% '
276+
f'block {self.block_idx} iter {i + 1} loss:{loss.item():5f} \
277+
norm:{norm.item():4f} HR progress:{(1 - thresholds[i]) * 100:1f}% '
278278
)
279279
for p in params_r + params_s:
280280
p.requires_grad = False

llmc/compression/sparsification/base_blockwise_sparsification.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ def block_opt(self, block):
154154
self.block_transform(block)
155155

156156
def block_transform(self, block, input_feat, block_kwargs):
157-
logger.info(f'Start transform the {self.block_idx+1}-th block')
157+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
158158
subsets = self.model.get_subsets_in_block(block)
159159
for index, subset in enumerate(subsets):
160160
if not self.filter_subset(subset):
@@ -174,7 +174,7 @@ def block_transform(self, block, input_feat, block_kwargs):
174174
inspect_module,
175175
subset_kwargs
176176
)
177-
logger.info(f'End transform the {self.block_idx+1}-th block')
177+
logger.info(f'End transform the {self.block_idx + 1}-th block')
178178

179179
def filter_subset(self, subset):
180180
return True

llmc/compression/sparsification/dense.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,6 @@ def __init__(self, model, sparsity_config, input, padding_mask, config):
1111
super().__init__(model, sparsity_config, input, padding_mask, config)
1212

1313
def block_transform(self, block):
14-
logger.info(f'Start transform the {self.block_idx+1}-th block')
14+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
1515
logger.info(block)
16-
logger.info(f'End transform the {self.block_idx+1}-th block')
16+
logger.info(f'End transform the {self.block_idx + 1}-th block')

llmc/compression/sparsification/shortgpt.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ def block_opt(self, block):
3030
self.input['data'] = output_feat
3131

3232
def block_transform(self, input_feat, output_feat):
33-
logger.info(f'Start transform the {self.block_idx+1}-th block')
33+
logger.info(f'Start transform the {self.block_idx + 1}-th block')
3434
self.subset_transform(
3535
input_feat,
3636
output_feat

llmc/eval/eval_vqa.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -255,8 +255,8 @@ def _adjust_config(task_dict):
255255
gen_max_mem = torch.cuda.max_memory_allocated() / 1024 / 1024
256256

257257
logger.info(f'peak memory: {gen_max_mem:.1f} MB.')
258-
logger.info(f'prefill average time: {prefill *1000:.1f} ms.')
259-
logger.info(f'decode average time: {decode *1000:.1f} ms.')
258+
logger.info(f'prefill average time: {prefill * 1000:.1f} ms.')
259+
logger.info(f'decode average time: {decode * 1000:.1f} ms.')
260260

261261
if hasattr(lm, '_model'):
262262
del lm._model

llmc/models/mixtral.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,8 @@ def get_subsets_in_block(self, block):
5959
return self._get_subsets_fused(block)
6060

6161
def _get_subsets_legacy(self, block):
62-
"""transformers <5.0: block.block_sparse_moe with ModuleList experts."""
62+
"""Transformers <5.0: block.block_sparse_moe with ModuleList
63+
experts."""
6364
moe = block.block_sparse_moe
6465
return [
6566
{
@@ -106,7 +107,7 @@ def _get_subsets_legacy(self, block):
106107
]
107108

108109
def _get_subsets_fused(self, block):
109-
"""transformers >=5.0: block.mlp with fused MixtralExperts."""
110+
"""Transformers >=5.0: block.mlp with fused MixtralExperts."""
110111
moe = block.mlp
111112
return [
112113
{

0 commit comments

Comments (0)