Commit d0efe55

fix format of coding
1 parent bccfd43 commit d0efe55

3 files changed (+12, -8 lines)

python/ctranslate2/converters/transformers.py

Lines changed: 5 additions & 5 deletions
@@ -148,7 +148,7 @@ def _load(self):
             tokenizer = self.load_tokenizer(
                 tokenizer_class, self._model_name_or_path, **tokenizer_kwargs
             )
-        except:
+        except Exception:
             tokenizer = None
             print("Escape tokenizer, which does not exist.")

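Side note on this hunk: a bare except also catches KeyboardInterrupt and SystemExit, so narrowing it to except Exception keeps the tokenizer fallback from swallowing interpreter-level signals. A minimal sketch of the pattern (load_tokenizer and model_name_or_path are stand-ins here, not the converter's exact call):

    # Minimal sketch; load_tokenizer / model_name_or_path are stand-ins.
    try:
        tokenizer = load_tokenizer(model_name_or_path)
    except Exception:
        # Exception excludes KeyboardInterrupt/SystemExit, so Ctrl-C still propagates.
        tokenizer = None
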
@@ -1081,7 +1081,6 @@ def set_common_layers(self, spec, module):
         self.set_layer_norm(spec.layer_norm, module.layer_norm)
 
 
-
 @register_loader("WavLMConfig")
 class WavLMLoader(BartLoader):
     @property
@@ -1119,7 +1118,7 @@ def set_vocabulary(self, spec, tokens):
 
     def set_feature_extractor(self, spec, feature_extractor):
         spec.feat_layer0.conv.weight = feature_extractor.conv_layers[0].conv.weight
-        # spec.feat_layer0.conv.bias = feature_extractor.conv_layers[0].conv.bias // wavlm has no bias
+        # wavlm has no bias in conv
         self.set_layer_norm(
             spec.feat_layer0.layer_norm, feature_extractor.conv_layers[0].layer_norm
         )
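For context on the comment change: the WavLM feature extractor's conv layers are built without a bias term (conv_bias defaults to False in the Hugging Face config, an assumption drawn from the transformers implementation), so there is no conv.bias to copy into the spec. A hedged sketch:

    import torch.nn as nn

    # Hedged sketch: roughly what the first WavLM feature-extractor conv layer
    # looks like with conv_bias=False; sizes follow the usual wav2vec2-style defaults.
    conv = nn.Conv1d(in_channels=1, out_channels=512, kernel_size=10, stride=5, bias=False)
    assert conv.bias is None  # hence nothing to transfer for feat_layer0.conv.bias
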
@@ -1161,7 +1160,7 @@ def set_wavlm_encoder_layer(self, spec, encoder):
                 layer_spec.self_attention,
                 layer.self_attn,
                 self_attention=True,
-                has_rel_attn_embed=(layer_index==0),
+                has_rel_attn_embed=(layer_index == 0),
             )
             self.set_layer_norm(
                 layer_spec.self_attention.layer_norm,
@@ -1187,7 +1186,8 @@ def set_attention(self, spec, attention, self_attention=False, has_rel_attn_embe
         self.set_linear(spec.linear[-1], attention.out_proj)
 
         self.set_linear(spec.gru_relative_position_linear, attention.gru_rel_pos_linear)
-        spec.gru_relative_position_const = attention.gru_rel_pos_const.data # is torch.nn.parameter.Parameter
+        # which is torch.nn.parameter.Parameter
+        spec.gru_relative_position_const = attention.gru_rel_pos_const.data
 
         if has_rel_attn_embed:
             spec.relative_attention_bias = attention.rel_attn_embed.weight

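On the gru_rel_pos_const lines: the constant is a torch.nn.parameter.Parameter, and taking .data hands the spec the underlying tensor without the Parameter wrapper. A small sketch (the 1 x num_heads x 1 x 1 shape is an assumption based on the Hugging Face WavLM attention):

    import torch

    # gru_rel_pos_const is an nn.Parameter; .data exposes the plain tensor
    # that ends up stored as spec.gru_relative_position_const.
    num_heads = 12  # illustrative value
    const = torch.nn.Parameter(torch.ones(1, num_heads, 1, 1))
    raw = const.data
    assert isinstance(raw, torch.Tensor) and not isinstance(raw, torch.nn.Parameter)
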
python/ctranslate2/specs/wavlm_spec.py

Lines changed: 3 additions & 2 deletions
@@ -65,8 +65,9 @@ def __init__(self, feat_layers, num_layers, num_heads, return_hidden):
         self.pos_conv_embed = WavLMPosEmbedConvLayer()
         self.layer_norm = common_spec.LayerNormSpec()
         self.layer = [
-            transformer_spec.TransformerEncoderLayerSpec(gated_relative_attention_bias=True,
-                                                         relative_attention_bias=(i == 0)) for i in range(num_layers)
+            transformer_spec.TransformerEncoderLayerSpec(
+                gated_relative_attention_bias=True,
+                relative_attention_bias=(i == 0)) for i in range(num_layers)
         ]
         # if not return_hidden:
         #     self.lm_head = common_spec.LinearSpec()

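The reformatted comprehension keeps the original behaviour: every layer spec gets gated_relative_attention_bias=True, but only layer 0 gets relative_attention_bias=True. This mirrors WavLM, where the relative position bias is computed once in the first encoder layer and then reused by the later layers, roughly like the sketch below (illustrative stub classes, not the ctranslate2 runtime API):

    import torch

    # Illustrative only: layer 0 produces the relative position bias,
    # later layers reuse it instead of recomputing it.
    class DummyLayer:
        def compute_position_bias(self, seq_len):
            return torch.zeros(seq_len, seq_len)

        def __call__(self, hidden, position_bias):
            return hidden  # attention and gating omitted in this sketch

    layers = [DummyLayer() for _ in range(3)]
    hidden, position_bias = torch.zeros(4, 8), None
    for i, layer in enumerate(layers):
        if i == 0:
            position_bias = layer.compute_position_bias(hidden.shape[0])
        hidden = layer(hidden, position_bias=position_bias)
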
python/tests/test_transformers.py

Lines changed: 4 additions & 1 deletion
@@ -1116,7 +1116,10 @@ def test_transformers_wavlm(
 
     hg_output = hf_model(input_values.unsqueeze(0))
 
-    similarity = torch.nn.functional.cosine_similarity(last_hidden_state, hg_output.last_hidden_state.flatten(0, -1), dim=0)
+    similarity = torch.nn.functional.cosine_similarity(
+        last_hidden_state,
+        hg_output.last_hidden_state.flatten(0, -1),
+        dim=0)
 
     assert similarity == 1.0

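The reformatted similarity check compares the CTranslate2 hidden states to the Hugging Face output by cosine similarity over flattened 1-D tensors; a value of 1.0 means the two outputs point in exactly the same direction. The same pattern on stand-in tensors:

    import torch

    # Same comparison pattern as the test, on stand-in tensors: flatten both
    # outputs to 1-D and take the cosine similarity along dim=0.
    a = torch.randn(3, 5)
    b = a.clone()
    similarity = torch.nn.functional.cosine_similarity(a.flatten(0, -1), b.flatten(0, -1), dim=0)
    assert torch.isclose(similarity, torch.tensor(1.0))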