Skip to content

Commit f749d43

Browse files
committed
fix format
Signed-off-by: Ceng23333 <441651826@qq.com>
1 parent 585a001 commit f749d43

1 file changed

Lines changed: 3 additions & 3 deletions

File tree

src/infinicore/ops/multi_head_attention_varlen/mha_varlen_flashattn.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,15 +80,15 @@ void run(void *planned_meta) {
8080
auto block_table = std::optional<at::Tensor>(infinicore::adaptor::to_aten_tensor(p->block_table));
8181
auto max_seqlen_q = p->max_seqlen_q;
8282
auto max_seqlen_k = p->max_seqlen_k;
83-
auto alibi_slopes =
84-
p->alibi_slopes ? std::optional<at::Tensor>(infinicore::adaptor::to_aten_tensor(*p->alibi_slopes)) : std::nullopt;
83+
auto alibi_slopes = p->alibi_slopes ? std::optional<at::Tensor>(infinicore::adaptor::to_aten_tensor(*p->alibi_slopes)) : std::nullopt;
8584
auto scale = p->scale;
8685

8786
#if defined(ENABLE_METAX_API) && defined(INFINICORE_HPCC_VERSION_MAJOR) && (INFINICORE_HPCC_VERSION_MAJOR >= 3)
8887
std::optional<at::Tensor> flash_attn_mars_ext = std::nullopt;
8988
#endif
9089

91-
INFINICORE_FLASH_OP(mha_varlen_fwd)(
90+
INFINICORE_FLASH_OP(mha_varlen_fwd)
91+
(
9292
q,
9393
k,
9494
v,

0 commit comments

Comments (0)