@@ -160,7 +160,7 @@ enum common_speculative_type {
160160 COMMON_SPECULATIVE_TYPE_NONE, // no speculative decoding
161161 COMMON_SPECULATIVE_TYPE_DRAFT_SIMPLE, // standalone draft model speculative decoding
162162 COMMON_SPECULATIVE_TYPE_DRAFT_EAGLE3, // Eagle3 speculative decoding
163- COMMON_SPECULATIVE_TYPE_DRAFT_MTP, // multi -token prediction head loaded from the target GGUF
163+ COMMON_SPECULATIVE_TYPE_DRAFT_MTP, // Multi-token prediction
164164 COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE, // simple self-speculative decoding based on n-grams
165165 COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K, // self-speculative decoding with n-gram keys only
166166 COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V, // self-speculative decoding with n-gram keys and 4 m-gram values
@@ -884,7 +884,7 @@ enum common_context_seq_rm_type {
884884 COMMON_CONTEXT_SEQ_RM_TYPE_NO = 0 , // seq_rm not supported (e.g. no memory module)
885885 COMMON_CONTEXT_SEQ_RM_TYPE_PART = 1 , // can seq_rm partial sequences
886886 COMMON_CONTEXT_SEQ_RM_TYPE_FULL = 2 , // can seq_rm full sequences only
887- COMMON_CONTEXT_SEQ_RM_TYPE_PART_BOUNDED = 3 , // can seq_rm partial sequences, bounded by n_rs_seq
887+ COMMON_CONTEXT_SEQ_RM_TYPE_RS = 3 , // can seq_rm partial sequences, bounded by n_rs_seq
888888};
889889
890890// check if the llama_context can remove sequences
0 commit comments