Commit 722bc73

Make text generation work with the Ministral model. (#13395)
Needs a template before it works properly.
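
The "template" mentioned here is presumably the model's chat/instruct prompt template, which wraps user text in the control tokens the model was trained on. A minimal sketch, assuming the Mistral-style [INST] ... [/INST] instruct format; the helper name and the exact markers are assumptions, since the commit itself ships no template and the format Ministral expects may differ:

def apply_instruct_template(user_message, system_prompt=None):
    # Hypothetical helper: wraps a prompt in a Mistral-style instruct
    # format. The exact template Ministral expects is an assumption here;
    # the commit message notes the real template is still missing.
    prompt = "<s>[INST] "
    if system_prompt is not None:
        prompt += system_prompt + "\n\n"
    prompt += user_message + " [/INST]"
    return prompt

print(apply_instruct_template("Write a haiku about autumn."))

Without such a template, the raw prompt lacks the instruction markers, so an instruct-tuned model tends to produce unfocused continuations rather than answers.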
1 parent: 402ff1c · commit: 722bc73

1 file changed: comfy/text_encoders/llama.py (2 additions & 1 deletion)
@@ -82,6 +82,7 @@ class Ministral3_3BConfig:
     rope_scale = None
     final_norm: bool = True
     lm_head: bool = False
+    stop_tokens = [2]

 @dataclass
 class Qwen25_3BConfig:
@@ -969,7 +970,7 @@ def __init__(self, config_dict, dtype, device, operations):
         self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
         self.dtype = dtype

-class Ministral3_3B(BaseLlama, torch.nn.Module):
+class Ministral3_3B(BaseLlama, BaseQwen3, BaseGenerate, torch.nn.Module):
     def __init__(self, config_dict, dtype, device, operations):
         super().__init__()
         config = Ministral3_3BConfig(**config_dict)
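
For context on the stop_tokens = [2] addition: token id 2 is the EOS token (</s>) in Mistral-family vocabularies, and a stop-token list tells the sampling loop when to halt. A minimal sketch of how such a list is typically consumed during greedy decoding; the function and argument names are illustrative, not ComfyUI's actual generation API:

import torch

def greedy_decode(model, input_ids, stop_tokens=(2,), max_new_tokens=128):
    # Illustrative greedy decoding loop (hypothetical helper, batch size 1).
    # Generation halts as soon as the sampled token appears in stop_tokens;
    # id 2 is commonly the </s>/EOS token in Mistral-family vocabularies.
    tokens = input_ids
    for _ in range(max_new_tokens):
        logits = model(tokens)                       # (batch, seq_len, vocab_size)
        next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
        tokens = torch.cat([tokens, next_token], dim=-1)
        if next_token.item() in stop_tokens:
            break                                    # EOS reached, stop generating
    return tokens

Declaring the stop ids on the per-model config keeps the shared generation path model-agnostic: each architecture lists its own terminators, and the decoding loop (here presumably provided by the new BaseGenerate mixin) only checks membership.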
