Skip to content

Commit 87f65fd

Browse files
committed
cpu: x64: fix matmul code formatting
1 parent a79cc1c commit 87f65fd

2 files changed

Lines changed: 45 additions & 44 deletions

File tree

src/cpu/x64/matmul/brgemm_matmul.cpp

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -339,11 +339,11 @@ status_t brgemm_matmul_t<isa>::pd_t::init(engine_t *engine) {
339339
// processing for tail kernel
340340
const auto backup_isa = is_amx && bgmmc_.is_runtime_M && !is_s8s8
341341
? (is_f16 || is_f32_f16 || is_f16_with_int_wei
342-
? avx512_core_fp16
343-
: (is_bf16 || is_f32_bf16 || is_bf16_with_int_wei
344-
? avx512_core_bf16
345-
: (is_int8 ? avx512_core_vnni
346-
: avx512_core)))
342+
? avx512_core_fp16
343+
: (is_bf16 || is_f32_bf16 || is_bf16_with_int_wei
344+
? avx512_core_bf16
345+
: (is_int8 ? avx512_core_vnni
346+
: avx512_core)))
347347
: isa;
348348

349349
const int i_bs_end = bgmmc_.brgemm_batch_tail_size ? 2 : 1;
@@ -767,7 +767,7 @@ void brgemm_matmul_t<isa>::compute_kernel(
767767
void *scratch = is_amx
768768
? static_cast<void *>(wsp_tile)
769769
: static_cast<void *>(brgmm_ctx.get_s8s8_comp_ptr(
770-
ithr, b_idx, n_blk_idx));
770+
ithr, b_idx, n_blk_idx));
771771

772772
const size_t dst_row_logical_off
773773
= brgmm_ctx.get_M_idx(m_blk_idx, true);
@@ -823,7 +823,7 @@ void brgemm_matmul_t<isa>::compute_kernel(
823823
void *scratch = is_amx
824824
? static_cast<void *>(wsp_tile)
825825
: static_cast<void *>(brgmm_ctx.get_s8s8_comp_ptr(
826-
ithr, b_idx, n_blk_idx));
826+
ithr, b_idx, n_blk_idx));
827827

828828
const size_t dst_row_logical_off
829829
= brgmm_ctx.get_M_idx(m_blk_idx, true);
@@ -1379,20 +1379,20 @@ struct brgemm_matmul_t<isa>::brg_matmul_exec_ctx_t {
13791379

13801380
zero_point_a_negative_val_ = src_zero_points
13811381
? -cpu::io::load_int_value(
1382-
pd->attr()->zero_points_.get_data_type(DNNL_ARG_SRC),
1383-
src_zero_points, 0)
1382+
pd->attr()->zero_points_.get_data_type(DNNL_ARG_SRC),
1383+
src_zero_points, 0)
13841384
: 0;
13851385
zero_point_c_val_ = dst_zero_points
13861386
? cpu::io::load_int_value(
1387-
pd->attr()->zero_points_.get_data_type(DNNL_ARG_DST),
1388-
dst_zero_points, 0)
1387+
pd->attr()->zero_points_.get_data_type(DNNL_ARG_DST),
1388+
dst_zero_points, 0)
13891389
: 0;
13901390

13911391
wei_zp_neg_val_ = (-1)
13921392
* (wei_zp_ptr_ ? cpu::io::load_int_value(
1393-
pd->attr()->zero_points_.get_data_type(
1394-
DNNL_ARG_WEIGHTS),
1395-
wei_zp_ptr_, 0)
1393+
pd->attr()->zero_points_.get_data_type(
1394+
DNNL_ARG_WEIGHTS),
1395+
wei_zp_ptr_, 0)
13961396
: 0);
13971397
memory_tracking::grantor_t scratchpad = ctx.get_scratchpad_grantor();
13981398

@@ -1429,35 +1429,35 @@ struct brgemm_matmul_t<isa>::brg_matmul_exec_ctx_t {
14291429

14301430
buf_reduce_ptr_ = bgmmc.use_buffer_reduce
14311431
? scratchpad.template get<char>(
1432-
key_brgemm_primitive_buffer_reduce)
1432+
key_brgemm_primitive_buffer_reduce)
14331433
: nullptr;
14341434

14351435
is_amx_ = is_superset(isa, avx512_core_amx);
14361436
wsp_tile_ptr_ = is_amx_
14371437
? ctx.get_scratchpad_grantor().template get<char>(
1438-
key_conv_amx_tile_buffer)
1438+
key_conv_amx_tile_buffer)
14391439
: nullptr;
14401440

14411441
const dim_t comp_offset = bgmmc_.b_dt_sz
14421442
* (weights_d.size() - weights_d.additional_buffer_size());
14431443
s8s8_compensation_ptr_ = (bgmmc.s8s8_compensation_required)
14441444
? ((bgmmc.use_buffer_b)
1445-
? scratchpad.template get<int32_t>(
1446-
key_brgemm_primitive_buffer_comp)
1447-
: const_cast<int32_t *>(
1448-
reinterpret_cast<const int32_t *>(
1449-
&data_B_ptr_[comp_offset])))
1445+
? scratchpad.template get<int32_t>(
1446+
key_brgemm_primitive_buffer_comp)
1447+
: const_cast<int32_t *>(
1448+
reinterpret_cast<const int32_t *>(
1449+
&data_B_ptr_[comp_offset])))
14501450
: nullptr;
14511451
assert(IMPLICATION(bgmmc.s8s8_compensation_required,
14521452
bgmmc_.b_dt_sz == bgmmc_.tr_b_dt_sz));
14531453

14541454
zero_point_a_compensations_ptr_ = bgmmc.has_zero_point_a
14551455
? scratchpad.template get<int32_t>(
1456-
key_brgemm_primitive_zp_comp_a)
1456+
key_brgemm_primitive_zp_comp_a)
14571457
: nullptr;
14581458
zero_point_b_compensations_ptr_ = bgmmc.has_zero_point_b
14591459
? scratchpad.template get<int32_t>(
1460-
key_brgemm_primitive_zp_comp_b)
1460+
key_brgemm_primitive_zp_comp_b)
14611461
: nullptr;
14621462

14631463
zero_point_mixed_ab_compensation_component_

src/cpu/x64/matmul/brgemm_matmul_utils.cpp

Lines changed: 22 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -467,13 +467,15 @@ status_t brgemm_matmul_conf_utils_t::set_or_check_B_tag(memory_desc_t &B_md,
467467
bgmmc.wei_tag = blocked_B_layouts_allowed && !bgmmc.is_runtime_N
468468
&& !bgmmc.is_int4_weights
469469
? memory_desc_matches_one_of_tag(B_md,
470-
plain_tensor_layout_tag,
471-
transposed_tensor_layout_tag,
472-
blocked_64n_B_layout_tag, blocked_48n_B_layout_tag,
473-
blocked_32n_B_layout_tag, blocked_16n_B_layout_tag)
470+
plain_tensor_layout_tag,
471+
transposed_tensor_layout_tag,
472+
blocked_64n_B_layout_tag,
473+
blocked_48n_B_layout_tag,
474+
blocked_32n_B_layout_tag,
475+
blocked_16n_B_layout_tag)
474476
: memory_desc_matches_one_of_tag(B_md,
475-
plain_tensor_layout_tag,
476-
transposed_tensor_layout_tag, acbd, adbc);
477+
plain_tensor_layout_tag,
478+
transposed_tensor_layout_tag, acbd, adbc);
477479
const bool plain_transposed_matched
478480
= memory_desc_matches_tag(B_md, plain_tensor_layout_tag)
479481
&& memory_desc_matches_tag(
@@ -545,15 +547,16 @@ status_t brgemm_matmul_conf_utils_t::set_or_check_tags(memory_desc_t &A_md,
545547
|| this->is_f16_with_int_wei() || this->is_tf32()
546548
|| this->is_f32_with_int_wei())
547549
&& !xf16_avx2_vnni_2;
548-
bgmmc.src_tag = is_adbc_allowed ? memory_desc_matches_one_of_tag(
549-
A_md, plain_tensor_layout_tag,
550-
transposed_tensor_layout_tag, acbd, adbc)
550+
bgmmc.src_tag = is_adbc_allowed
551+
? memory_desc_matches_one_of_tag(A_md,
552+
plain_tensor_layout_tag,
553+
transposed_tensor_layout_tag, acbd, adbc)
551554
: is_int8_avx512_core
552555
? memory_desc_matches_one_of_tag(A_md,
553-
plain_tensor_layout_tag,
554-
transposed_tensor_layout_tag, acbd)
556+
plain_tensor_layout_tag,
557+
transposed_tensor_layout_tag, acbd)
555558
: memory_desc_matches_one_of_tag(
556-
A_md, plain_tensor_layout_tag, acbd);
559+
A_md, plain_tensor_layout_tag, acbd);
557560
if (bgmmc.src_tag == format_tag::undef
558561
|| (memory_desc_matches_tag(
559562
A_md, transposed_tensor_layout_tag)
@@ -655,8 +658,7 @@ format_tag_t brgemm_matmul_conf_utils_t::pick_blocked_B_layout(
655658
const bool is_amx_or_avx2_vnni_2 = is_superset(bgmmc.isa, avx512_core_amx)
656659
|| is_superset(bgmmc.isa, avx2_vnni_2);
657660
const bool prefer_amx_or_avx2_vnni_2 = is_f16() || is_f32_f16()
658-
|| is_f32_bf16() || is_f16_with_int_wei()
659-
|| is_f32_with_int_wei();
661+
|| is_f32_bf16() || is_f16_with_int_wei() || is_f32_with_int_wei();
660662

661663
if ((prefer_amx_or_avx2_vnni_2 && is_amx_or_avx2_vnni_2) || is_bf16()
662664
|| is_bf16_with_int_wei()) {
@@ -672,8 +674,7 @@ format_tag_t brgemm_matmul_conf_utils_t::pick_blocked_B_layout(
672674
// Note: bf32 assumes f32 blocking
673675
if (is_f32() || is_bf32() || is_f16() || is_f32_f16() || is_f32_bf16()
674676
|| is_f16_with_int_wei() || is_tf32()
675-
|| (is_f32_with_int_wei()
676-
&& is_superset(bgmmc.isa, avx512_core))) {
677+
|| (is_f32_with_int_wei() && is_superset(bgmmc.isa, avx512_core))) {
677678
switch (n_blk) {
678679
case 64: return bgmmc.ndims == 3 ? aCB16b64c : BA16a64b;
679680
case 48: return bgmmc.ndims == 3 ? aCB16b48c : BA16a48b;
@@ -1262,9 +1263,9 @@ status_t compute_blocking_heuristic(brgemm_matmul_conf_t &bgmmc,
12621263

12631264
const float best_imbalance = is_f32
12641265
? compute_blocking_heuristic_avx2_f32(
1265-
bgmmc, bm_conf_utils, matmul, best_blocking)
1266+
bgmmc, bm_conf_utils, matmul, best_blocking)
12661267
: compute_blocking_heuristic_avx2(
1267-
bgmmc, bm_conf_utils, matmul, best_blocking);
1268+
bgmmc, bm_conf_utils, matmul, best_blocking);
12681269

12691270
VCONDCHECK_BG(best_imbalance != 1.f, VERBOSE_BLOCKING_FAIL, "")
12701271

@@ -1748,9 +1749,9 @@ status_t init_brgemm_matmul_conf(cpu_isa_t isa, brgemm_matmul_conf_t &bgmmc,
17481749
bgmmc.N_tail = bgmmc.is_runtime_N ? 0 : bgmmc.N % bgmmc.N_blk;
17491750
bgmmc.K_tail = bgmmc.K > bgmmc.K_blk
17501751
? ((bgmmc.extendable_k || bgmmc.use_fused_copy_a)
1751-
? bgmmc.K % bgmmc.K_blk
1752-
: rnd_up(bgmmc.K % bgmmc.K_blk,
1753-
bgmmc.required_k_granularity))
1752+
? bgmmc.K % bgmmc.K_blk
1753+
: rnd_up(bgmmc.K % bgmmc.K_blk,
1754+
bgmmc.required_k_granularity))
17541755
: 0;
17551756

17561757
bgmmc.LDB = bm_conf_utils.get_actual_LDB();

0 commit comments

Comments
 (0)