diff --git a/kernels/optimized/CMakeLists.txt b/kernels/optimized/CMakeLists.txt
index e47c2293f82..82d73dcc6cd 100644
--- a/kernels/optimized/CMakeLists.txt
+++ b/kernels/optimized/CMakeLists.txt
@@ -76,17 +76,34 @@ target_link_libraries(
 )
 target_compile_options(optimized_kernels PUBLIC ${_common_compile_options})
 
-# op_grid_sampler_2d.cpp uses ARMv8.2-a+fp16 NEON intrinsics
-# (vcvt_f32_f16 / vld1_f16) when compiled for aarch64. Scope the extra
-# `-march` flag to just that source so non-arm64 targets (e.g. x86_64 on
-# Android) are unaffected — the kernel itself has `#ifdef __aarch64__`
-# guards and falls through to the portable kernel otherwise.
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64"
-   OR ANDROID_ABI STREQUAL "arm64-v8a"
+# op_grid_sampler_2d_fp16_hw.cpp uses hardware fp16 NEON intrinsics
+# (vcvt_f32_f16 / vld1_f16). Those are part of the ARMv8.2-a+fp16 extension and
+# raise SIGILL on chips without it. Build it as a separate OBJECT library so the
+# `-march=armv8.2-a+fp16` flag stays strictly scoped to that translation unit
+# and never reaches the dispatcher / fallback code in op_grid_sampler_2d.cpp
+# (which could otherwise auto-vectorize into fp16 NEON instructions). The
+# dispatcher chooses between this entry point and the fp16 software-convert path
+# at runtime via cpuinfo_has_arm_neon_fp16(). Mirrors the buck
+# `grid_sampler_2d_fp16_hw_impl` library.
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64" OR ANDROID_ABI STREQUAL
+                                                     "arm64-v8a"
 )
-  set_source_files_properties(
-    ${EXECUTORCH_ROOT}/kernels/optimized/cpu/op_grid_sampler_2d.cpp
-    PROPERTIES COMPILE_OPTIONS "-march=armv8.2-a+fp16"
+  add_library(
+    grid_sampler_2d_fp16_hw_impl OBJECT
+    ${EXECUTORCH_ROOT}/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.cpp
+  )
+  target_compile_options(
+    grid_sampler_2d_fp16_hw_impl PRIVATE -march=armv8.2-a+fp16
+                                         ${_common_compile_options}
+  )
+  target_link_libraries(grid_sampler_2d_fp16_hw_impl PRIVATE executorch_core)
+  # BUILD_LOCAL_INTERFACE: object files are baked into optimized_kernels.a at
+  # archive time, so this OBJECT target stays out of the install EXPORT set and
+  # downstream consumers of the installed optimized_kernels need no separate
+  # link entry.
+  target_link_libraries(
+    optimized_kernels
+    PRIVATE $<BUILD_LOCAL_INTERFACE:grid_sampler_2d_fp16_hw_impl>
   )
 endif()
 
@@ -98,30 +115,6 @@ gen_operators_lib(
   executorch_core
 )
 
-# On-device verifier for optimized grid_sampler_2d / sum.IntList_out.
-# Opt-in via -DEXECUTORCH_BUILD_OPTIMIZED_VERIFY=ON so it doesn't affect
-# default AAR / library builds. Cross-checks both ops against an fp32
-# reference derived from the portable kernel; non-zero exit on divergence.
-if(EXECUTORCH_BUILD_OPTIMIZED_VERIFY)
-  add_executable(
-    verify_optimized_kernels ${EXECUTORCH_ROOT}/kernels/optimized/verify.cpp
-  )
-  target_link_libraries(
-    verify_optimized_kernels
-    PRIVATE optimized_kernels portable_kernels executorch_core
-  )
-  target_compile_options(
-    verify_optimized_kernels PRIVATE ${_common_compile_options}
-  )
-  if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64"
-     OR ANDROID_ABI STREQUAL "arm64-v8a"
-  )
-    target_compile_options(
-      verify_optimized_kernels PRIVATE -march=armv8.2-a+fp16
-    )
-  endif()
-endif()
-
 install(
   # eigen_blas doesn't export itself, so we have to do our own install to export
   # it.
diff --git a/kernels/optimized/cpu/op_grid_sampler_2d.cpp b/kernels/optimized/cpu/op_grid_sampler_2d.cpp
index e3fe8c49779..aebfd292bab 100644
--- a/kernels/optimized/cpu/op_grid_sampler_2d.cpp
+++ b/kernels/optimized/cpu/op_grid_sampler_2d.cpp
@@ -7,21 +7,37 @@
  */
 
 // Optimized grid_sampler_2d.out for CPU. On aarch64 this is a NEON-vectorized
-// implementation for the common (bilinear + zeros padding) case, processing
-// 4 channels at a time. Other modes — and non-aarch64 targets — fall through
-// to the portable kernel.
+// implementation for the common (bilinear + zeros padding) case. fp16 inputs
+// are promoted to fp32 for weight computation and accumulation and cast back
+// on store — this avoids the fp16 catastrophic cancellation on `ix_se - ix`-
+// style weight subtractions that the portable kernel is exposed to.
 //
-// fp16 inputs: all interior math (interpolation weights and corner
-// accumulation) is done in fp32. Loads/stores stay in the tensor's dtype.
-// Avoids catastrophic cancellation on `ix_se - ix`-style subtractions that
-// would otherwise make fp16 weights meaningless.
+// fp16 support comes in two flavors to avoid SIGILL on ARMv8 chips without
+// the +fp16 extension:
+//
+// * Hardware path (op_grid_sampler_2d_fp16_hw.cpp) — compiled with
+//   `-march=armv8.2-a+fp16`. Uses hardware fp16 NEON instructions
+//   (vld1_f16 / vcvt_f32_f16 / ...). Fast on capable chips; illegal
+//   instructions on older ones.
+//
+// * Software path (below) — plain ARMv8 NEON. Converts fp16<->fp32 in
+//   software via `c10::Half`'s portable conversion. Slower per
+//   conversion but safe on any ARMv8 CPU.
+//
+// A runtime cpuinfo_has_arm_neon_fp16() check picks the right one. Non-aarch64
+// targets, and any unsupported interpolation/padding/layout combination,
+// delegate to the portable kernel.
 
 #include <cmath>
 
 #ifdef __aarch64__
 #include <arm_neon.h>
+#include <cpuinfo.h>
+#include <executorch/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.h>
 #endif
 
+#include <c10/util/Half.h>
+
 #include <executorch/runtime/kernel/kernel_includes.h>
 
 namespace torch {
@@ -44,7 +60,8 @@ Tensor& grid_sampler_2d_out(
 
 #ifdef __aarch64__
 namespace {
-// One output spatial location, all channels. fp32 path.
+// -------------------- fp32 (plain ARMv8 NEON) --------------------
+
 inline void bilinear_all_channels_f32(
     const float* input_n,
     float* output_n,
@@ -95,20 +112,28 @@ inline void bilinear_all_channels_f32(
     float tl[4] = {0}, tr[4] = {0}, bl[4] = {0}, br[4] = {0};
     if (tl_v) {
-      tl[0] = p0[off_tl]; tl[1] = p1[off_tl];
-      tl[2] = p2[off_tl]; tl[3] = p3[off_tl];
+      tl[0] = p0[off_tl];
+      tl[1] = p1[off_tl];
+      tl[2] = p2[off_tl];
+      tl[3] = p3[off_tl];
     }
     if (tr_v) {
-      tr[0] = p0[off_tr]; tr[1] = p1[off_tr];
-      tr[2] = p2[off_tr]; tr[3] = p3[off_tr];
+      tr[0] = p0[off_tr];
+      tr[1] = p1[off_tr];
+      tr[2] = p2[off_tr];
+      tr[3] = p3[off_tr];
     }
     if (bl_v) {
-      bl[0] = p0[off_bl]; bl[1] = p1[off_bl];
-      bl[2] = p2[off_bl]; bl[3] = p3[off_bl];
+      bl[0] = p0[off_bl];
+      bl[1] = p1[off_bl];
+      bl[2] = p2[off_bl];
+      bl[3] = p3[off_bl];
    }
     if (br_v) {
-      br[0] = p0[off_br]; br[1] = p1[off_br];
-      br[2] = p2[off_br]; br[3] = p3[off_br];
+      br[0] = p0[off_br];
+      br[1] = p1[off_br];
+      br[2] = p2[off_br];
+      br[3] = p3[off_br];
     }
 
     float32x4_t result = vmulq_f32(vw_tl, vld1q_f32(tl));
@@ -124,7 +149,6 @@ inline void bilinear_all_channels_f32(
     output_n[(c + 3) * spatial_out + out_off] = res[3];
   }
 
-  // Scalar tail
   const float w_tl = (1.0f - fx) * (1.0f - fy);
   const float w_tr = fx * (1.0f - fy);
   const float w_bl = (1.0f - fx) * fy;
@@ -132,18 +156,27 @@ inline void bilinear_all_channels_f32(
   for (; c < C; ++c) {
     const float* p = input_n + c * spatial_in;
     float v = 0.0f;
-    if (tl_v) v += w_tl * p[off_tl];
-    if (tr_v) v += w_tr * p[off_tr];
-    if (bl_v) v += w_bl * p[off_bl];
-    if (br_v) v += w_br * p[off_br];
+    if (tl_v)
+      v += w_tl * p[off_tl];
+    if (tr_v)
+      v += w_tr * p[off_tr];
+    if (bl_v)
+      v += w_bl * p[off_bl];
+    if (br_v)
+      v += w_br * p[off_br];
     output_n[c * spatial_out + out_off] = v;
   }
 }
 
-// fp16 path: loads/stores fp16, math in fp32.
-inline void bilinear_all_channels_f16(
-    const __fp16* input_n,
-    __fp16* output_n,
+// -------------------- fp16 software-convert path --------------------
+//
+// Uses only plain ARMv8 NEON. fp16 <-> fp32 conversion goes through
+// c10::Half's portable `operator float()` / constructor, which is a
+// software conversion on chips that lack the +fp16 extension.
+
+inline void bilinear_all_channels_f16_sw(
+    const c10::Half* input_n,
+    c10::Half* output_n,
     int C,
     int H_in,
     int W_in,
@@ -184,45 +217,50 @@ inline void bilinear_all_channels_f16(
   int c = 0;
   for (; c + 3 < C; c += 4) {
-    const __fp16* p0 = input_n + (c + 0) * spatial_in;
-    const __fp16* p1 = input_n + (c + 1) * spatial_in;
-    const __fp16* p2 = input_n + (c + 2) * spatial_in;
-    const __fp16* p3 = input_n + (c + 3) * spatial_in;
+    const c10::Half* p0 = input_n + (c + 0) * spatial_in;
+    const c10::Half* p1 = input_n + (c + 1) * spatial_in;
+    const c10::Half* p2 = input_n + (c + 2) * spatial_in;
+    const c10::Half* p3 = input_n + (c + 3) * spatial_in;
 
-    __fp16 tl[4] = {0}, tr[4] = {0}, bl[4] = {0}, br[4] = {0};
+    // SW fp16 -> fp32: use c10::Half's portable conversion on each lane.
+    float tl[4] = {0}, tr[4] = {0}, bl[4] = {0}, br[4] = {0};
     if (tl_v) {
-      tl[0] = p0[off_tl]; tl[1] = p1[off_tl];
-      tl[2] = p2[off_tl]; tl[3] = p3[off_tl];
+      tl[0] = static_cast<float>(p0[off_tl]);
+      tl[1] = static_cast<float>(p1[off_tl]);
+      tl[2] = static_cast<float>(p2[off_tl]);
+      tl[3] = static_cast<float>(p3[off_tl]);
     }
     if (tr_v) {
-      tr[0] = p0[off_tr]; tr[1] = p1[off_tr];
-      tr[2] = p2[off_tr]; tr[3] = p3[off_tr];
+      tr[0] = static_cast<float>(p0[off_tr]);
+      tr[1] = static_cast<float>(p1[off_tr]);
+      tr[2] = static_cast<float>(p2[off_tr]);
+      tr[3] = static_cast<float>(p3[off_tr]);
     }
     if (bl_v) {
-      bl[0] = p0[off_bl]; bl[1] = p1[off_bl];
-      bl[2] = p2[off_bl]; bl[3] = p3[off_bl];
+      bl[0] = static_cast<float>(p0[off_bl]);
+      bl[1] = static_cast<float>(p1[off_bl]);
+      bl[2] = static_cast<float>(p2[off_bl]);
+      bl[3] = static_cast<float>(p3[off_bl]);
     }
     if (br_v) {
-      br[0] = p0[off_br]; br[1] = p1[off_br];
-      br[2] = p2[off_br]; br[3] = p3[off_br];
+      br[0] = static_cast<float>(p0[off_br]);
+      br[1] = static_cast<float>(p1[off_br]);
+      br[2] = static_cast<float>(p2[off_br]);
+      br[3] = static_cast<float>(p3[off_br]);
     }
 
-    const float32x4_t v_tl = vcvt_f32_f16(vld1_f16(tl));
-    const float32x4_t v_tr = vcvt_f32_f16(vld1_f16(tr));
-    const float32x4_t v_bl = vcvt_f32_f16(vld1_f16(bl));
-    const float32x4_t v_br = vcvt_f32_f16(vld1_f16(br));
-
-    float32x4_t result = vmulq_f32(vw_tl, v_tl);
-    result = vfmaq_f32(result, vw_tr, v_tr);
-    result = vfmaq_f32(result, vw_bl, v_bl);
-    result = vfmaq_f32(result, vw_br, v_br);
+    float32x4_t result = vmulq_f32(vw_tl, vld1q_f32(tl));
+    result = vfmaq_f32(result, vw_tr, vld1q_f32(tr));
+    result = vfmaq_f32(result, vw_bl, vld1q_f32(bl));
+    result = vfmaq_f32(result, vw_br, vld1q_f32(br));
 
-    __fp16 res[4];
-    vst1_f16(res, vcvt_f16_f32(result));
-    output_n[(c + 0) * spatial_out + out_off] = res[0];
-    output_n[(c + 1) * spatial_out + out_off] = res[1];
-    output_n[(c + 2) * spatial_out + out_off] = res[2];
-    output_n[(c + 3) * spatial_out + out_off] = res[3];
+    float res[4];
+    vst1q_f32(res, result);
+    // SW fp32 -> fp16 on store.
+    output_n[(c + 0) * spatial_out + out_off] = c10::Half(res[0]);
+    output_n[(c + 1) * spatial_out + out_off] = c10::Half(res[1]);
+    output_n[(c + 2) * spatial_out + out_off] = c10::Half(res[2]);
+    output_n[(c + 3) * spatial_out + out_off] = c10::Half(res[3]);
   }
 
   const float w_tl = (1.0f - fx) * (1.0f - fy);
@@ -230,13 +268,17 @@ inline void bilinear_all_channels_f16(
   const float w_bl = (1.0f - fx) * fy;
   const float w_br = fx * fy;
   for (; c < C; ++c) {
-    const __fp16* p = input_n + c * spatial_in;
+    const c10::Half* p = input_n + c * spatial_in;
     float v = 0.0f;
-    if (tl_v) v += w_tl * static_cast<float>(p[off_tl]);
-    if (tr_v) v += w_tr * static_cast<float>(p[off_tr]);
-    if (bl_v) v += w_bl * static_cast<float>(p[off_bl]);
-    if (br_v) v += w_br * static_cast<float>(p[off_br]);
-    output_n[c * spatial_out + out_off] = static_cast<__fp16>(v);
+    if (tl_v)
+      v += w_tl * static_cast<float>(p[off_tl]);
+    if (tr_v)
+      v += w_tr * static_cast<float>(p[off_tr]);
+    if (bl_v)
+      v += w_bl * static_cast<float>(p[off_bl]);
+    if (br_v)
+      v += w_br * static_cast<float>(p[off_br]);
+    output_n[c * spatial_out + out_off] = c10::Half(v);
   }
 }
 
@@ -276,8 +318,7 @@ void grid_sampler_2d_neon(
         gx = (gx + 1.0f) * W_in * 0.5f - 0.5f;
         gy = (gy + 1.0f) * H_in * 0.5f - 0.5f;
       }
-      sample_fn(
-          input_n, output_n, C, H_in, W_in, H_out, W_out, h, w, gx, gy);
+      sample_fn(input_n, output_n, C, H_in, W_in, H_out, W_out, h, w, gx, gy);
     }
   }
 }
@@ -294,20 +335,25 @@ Tensor& opt_grid_sampler_2d_out(
     int64_t padding_mode,
     bool align_corners,
     Tensor& out) {
-  // The NEON path indexes input/grid/out directly assuming a contiguous NCHW
-  // default-dim-order layout — no use of .strides() or .dim_order(). If the
-  // caller passes anything else, fall back to portable (which does handle
-  // arbitrary strides and dim orders correctly). These are cheap checks.
+  // The NEON paths index input/grid/out directly assuming a contiguous NCHW
+  // default-dim-order layout — no use of .strides() or .dim_order(). Fall
+  // back to portable for anything else.
   const bool fast_eligible = tensor_is_default_dim_order(input) &&
-      tensor_is_default_dim_order(grid) &&
-      tensor_is_default_dim_order(out) &&
-      tensor_is_contiguous(input) &&
-      tensor_is_contiguous(grid) &&
+      tensor_is_default_dim_order(grid) && tensor_is_default_dim_order(out) &&
+      tensor_is_contiguous(input) && tensor_is_contiguous(grid) &&
       tensor_is_contiguous(out);
 
-  // Only the bilinear + zeros-padding combination is accelerated. Everything
-  // else — non-default layout, any non-aarch64 target — delegates to portable.
-  if (interpolation_mode != 0 || padding_mode != 0 || !fast_eligible) {
+  // The fast paths read input/grid and write out as a single dtype: float for
+  // the fp32 NEON path, fp16 for both the fp16 HW path (which raw-casts the
+  // void* pointers to __fp16*) and the SW fp16 NEON path (which uses
+  // data_ptr<c10::Half>(), whose runtime dtype check is not guaranteed in
+  // release builds). Reject any mixed-dtype call up front so none of those
+  // unchecked casts can be reached with a mismatched buffer.
+  const bool dtypes_match = input.scalar_type() == grid.scalar_type() &&
+      input.scalar_type() == out.scalar_type();
+
+  if (interpolation_mode != 0 || padding_mode != 0 || !fast_eligible ||
+      !dtypes_match) {
     return grid_sampler_2d_out(
         ctx, input, grid, interpolation_mode, padding_mode, align_corners, out);
   }
@@ -327,23 +373,49 @@ Tensor& opt_grid_sampler_2d_out(
         input.const_data_ptr<float>(),
         grid.const_data_ptr<float>(),
         out.mutable_data_ptr<float>(),
-        N, C, H_in, W_in, H_out, W_out,
+        N,
+        C,
+        H_in,
+        W_in,
+        H_out,
+        W_out,
         align_corners,
         bilinear_all_channels_f32);
     return out;
   }
 
   if (input.scalar_type() == ScalarType::Half) {
-    static_assert(sizeof(__fp16) == 2, "expected __fp16 == 2 bytes");
-    grid_sampler_2d_neon<__fp16>(
-        reinterpret_cast<const __fp16*>(input.const_data_ptr()),
-        reinterpret_cast<const __fp16*>(grid.const_data_ptr()),
-        reinterpret_cast<__fp16*>(out.mutable_data_ptr()),
-        N, C, H_in, W_in, H_out, W_out,
+    if (cpuinfo_initialize() && cpuinfo_has_arm_neon_fp16()) {
+      // Hardware fp16 path — safe because the CPU supports the +fp16
+      // extension. Defined in op_grid_sampler_2d_fp16_hw.cpp.
+      opt_grid_sampler_2d_internal::grid_sampler_2d_bilinear_fp16_hw(
+          input.const_data_ptr(),
+          grid.const_data_ptr(),
+          out.mutable_data_ptr(),
+          N,
+          C,
+          H_in,
+          W_in,
+          H_out,
+          W_out,
+          align_corners);
+      return out;
+    }
+    // Software fp16<->fp32 conversion path. Works on any ARMv8.
+    grid_sampler_2d_neon<c10::Half>(
+        input.const_data_ptr<c10::Half>(),
+        grid.const_data_ptr<c10::Half>(),
+        out.mutable_data_ptr<c10::Half>(),
+        N,
+        C,
+        H_in,
+        W_in,
+        H_out,
+        W_out,
         align_corners,
-        bilinear_all_channels_f16);
+        bilinear_all_channels_f16_sw);
     return out;
   }
 
-  // Any other dtype (e.g. Double, BFloat16): let portable handle it.
+  // Any other dtype: let portable handle it.
   return grid_sampler_2d_out(
       ctx, input, grid, interpolation_mode, padding_mode, align_corners, out);
 #endif
diff --git a/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.cpp b/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.cpp
new file mode 100644
index 00000000000..e9d88464d32
--- /dev/null
+++ b/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// Hardware-fp16 variant of the NEON grid_sampler_2d.out bilinear + zeros-
+// padding fast path. This translation unit is compiled with
+// `-march=armv8.2-a+fp16`, which lets the compiler emit hardware fp16
+// load/store/convert intrinsics (vld1_f16 / vcvt_f32_f16 / vst1_f16 /
+// vcvt_f16_f32). Those instructions are undefined on ARMv8.0 and ARMv8.1
+// chips without the fp16 extension, so this entry point must only be
+// invoked after a runtime CPU-feature check — see the dispatcher in
+// op_grid_sampler_2d.cpp (cpuinfo_has_arm_neon_fp16).
+//
+// Math happens in fp32 regardless: we load fp16 from memory, convert to
+// fp32 via the hardware instruction, do the weighted-sum FMA chain in
+// fp32, and convert back to fp16 on store. This matches the precision of
+// the portable kernel once #19117 lands.
+
+#ifdef __aarch64__
+
+#include <executorch/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.h>
+
+#include <arm_neon.h>
+#include <cmath>
+
+namespace torch {
+namespace executor {
+namespace native {
+namespace opt_grid_sampler_2d_internal {
+
+namespace {
+
+// One output spatial location, all channels.
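+// gx/gy arrive already unnormalized to input pixel coordinates (the caller
+// applies the align_corners mapping); corners that fall outside the input
+// contribute zero, which is what zeros padding requires.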
+inline void bilinear_all_channels_fp16_hw_sample(
+    const __fp16* input_n,
+    __fp16* output_n,
+    int C,
+    int H_in,
+    int W_in,
+    int H_out,
+    int W_out,
+    int h_out,
+    int w_out,
+    float gx,
+    float gy) {
+  const int x0 = static_cast<int>(std::floor(gx));
+  const int y0 = static_cast<int>(std::floor(gy));
+  const int x1 = x0 + 1;
+  const int y1 = y0 + 1;
+  const float fx = gx - static_cast<float>(x0);
+  const float fy = gy - static_cast<float>(y0);
+
+  const bool tl_v = static_cast<unsigned>(x0) < static_cast<unsigned>(W_in) &&
+      static_cast<unsigned>(y0) < static_cast<unsigned>(H_in);
+  const bool tr_v = static_cast<unsigned>(x1) < static_cast<unsigned>(W_in) &&
+      static_cast<unsigned>(y0) < static_cast<unsigned>(H_in);
+  const bool bl_v = static_cast<unsigned>(x0) < static_cast<unsigned>(W_in) &&
+      static_cast<unsigned>(y1) < static_cast<unsigned>(H_in);
+  const bool br_v = static_cast<unsigned>(x1) < static_cast<unsigned>(W_in) &&
+      static_cast<unsigned>(y1) < static_cast<unsigned>(H_in);
+
+  const int off_tl = y0 * W_in + x0;
+  const int off_tr = y0 * W_in + x1;
+  const int off_bl = y1 * W_in + x0;
+  const int off_br = y1 * W_in + x1;
+  const int spatial_in = H_in * W_in;
+  const int spatial_out = H_out * W_out;
+  const int out_off = h_out * W_out + w_out;
+
+  const float32x4_t vw_tl = vdupq_n_f32((1.0f - fx) * (1.0f - fy));
+  const float32x4_t vw_tr = vdupq_n_f32(fx * (1.0f - fy));
+  const float32x4_t vw_bl = vdupq_n_f32((1.0f - fx) * fy);
+  const float32x4_t vw_br = vdupq_n_f32(fx * fy);
+
+  int c = 0;
+  for (; c + 3 < C; c += 4) {
+    const __fp16* p0 = input_n + (c + 0) * spatial_in;
+    const __fp16* p1 = input_n + (c + 1) * spatial_in;
+    const __fp16* p2 = input_n + (c + 2) * spatial_in;
+    const __fp16* p3 = input_n + (c + 3) * spatial_in;
+
+    __fp16 tl[4] = {0}, tr[4] = {0}, bl[4] = {0}, br[4] = {0};
+    if (tl_v) {
+      tl[0] = p0[off_tl];
+      tl[1] = p1[off_tl];
+      tl[2] = p2[off_tl];
+      tl[3] = p3[off_tl];
+    }
+    if (tr_v) {
+      tr[0] = p0[off_tr];
+      tr[1] = p1[off_tr];
+      tr[2] = p2[off_tr];
+      tr[3] = p3[off_tr];
+    }
+    if (bl_v) {
+      bl[0] = p0[off_bl];
+      bl[1] = p1[off_bl];
+      bl[2] = p2[off_bl];
+      bl[3] = p3[off_bl];
+    }
+    if (br_v) {
+      br[0] = p0[off_br];
+      br[1] = p1[off_br];
+      br[2] = p2[off_br];
+      br[3] = p3[off_br];
+    }
+
+    // Hardware fp16 -> fp32 conversion (requires +fp16 extension).
+    const float32x4_t v_tl = vcvt_f32_f16(vld1_f16(tl));
+    const float32x4_t v_tr = vcvt_f32_f16(vld1_f16(tr));
+    const float32x4_t v_bl = vcvt_f32_f16(vld1_f16(bl));
+    const float32x4_t v_br = vcvt_f32_f16(vld1_f16(br));
+
+    float32x4_t result = vmulq_f32(vw_tl, v_tl);
+    result = vfmaq_f32(result, vw_tr, v_tr);
+    result = vfmaq_f32(result, vw_bl, v_bl);
+    result = vfmaq_f32(result, vw_br, v_br);
+
+    __fp16 res[4];
+    vst1_f16(res, vcvt_f16_f32(result));
+    output_n[(c + 0) * spatial_out + out_off] = res[0];
+    output_n[(c + 1) * spatial_out + out_off] = res[1];
+    output_n[(c + 2) * spatial_out + out_off] = res[2];
+    output_n[(c + 3) * spatial_out + out_off] = res[3];
+  }
+
+  // Scalar tail.
+  const float w_tl = (1.0f - fx) * (1.0f - fy);
+  const float w_tr = fx * (1.0f - fy);
+  const float w_bl = (1.0f - fx) * fy;
+  const float w_br = fx * fy;
+  for (; c < C; ++c) {
+    const __fp16* p = input_n + c * spatial_in;
+    float v = 0.0f;
+    if (tl_v)
+      v += w_tl * static_cast<float>(p[off_tl]);
+    if (tr_v)
+      v += w_tr * static_cast<float>(p[off_tr]);
+    if (bl_v)
+      v += w_bl * static_cast<float>(p[off_bl]);
+    if (br_v)
+      v += w_br * static_cast<float>(p[off_br]);
+    output_n[c * spatial_out + out_off] = static_cast<__fp16>(v);
+  }
+}
+
+} // namespace
+
+// Exposed entry point. Called by op_grid_sampler_2d.cpp's dispatcher only
+// when cpuinfo_has_arm_neon_fp16() reports true.
+// Input/output data are raw uint16_t buffers interpreted as __fp16; the
+// N/C/H/W extents and the grid pointer come precomputed from the dispatcher.
+void grid_sampler_2d_bilinear_fp16_hw(
+    const void* input,
+    const void* grid,
+    void* output,
+    int N,
+    int C,
+    int H_in,
+    int W_in,
+    int H_out,
+    int W_out,
+    bool align_corners) {
+  const __fp16* in = reinterpret_cast<const __fp16*>(input);
+  const __fp16* gd = reinterpret_cast<const __fp16*>(grid);
+  __fp16* out = reinterpret_cast<__fp16*>(output);
+
+  const int spatial_in = H_in * W_in;
+  const int spatial_out = H_out * W_out;
+
+  for (int n = 0; n < N; ++n) {
+    const __fp16* input_n = in + n * C * spatial_in;
+    __fp16* output_n = out + n * C * spatial_out;
+    const __fp16* grid_n = gd + n * H_out * W_out * 2;
+
+    for (int h = 0; h < H_out; ++h) {
+      if (h + 1 < H_out) {
+        __builtin_prefetch(grid_n + (h + 1) * W_out * 2, 0, 1);
+      }
+      for (int w = 0; w < W_out; ++w) {
+        const int grid_off = (h * W_out + w) * 2;
+        float gx = static_cast<float>(grid_n[grid_off]);
+        float gy = static_cast<float>(grid_n[grid_off + 1]);
+        if (align_corners) {
+          gx = (gx + 1.0f) * (W_in - 1) * 0.5f;
+          gy = (gy + 1.0f) * (H_in - 1) * 0.5f;
+        } else {
+          gx = (gx + 1.0f) * W_in * 0.5f - 0.5f;
+          gy = (gy + 1.0f) * H_in * 0.5f - 0.5f;
+        }
+        bilinear_all_channels_fp16_hw_sample(
+            input_n, output_n, C, H_in, W_in, H_out, W_out, h, w, gx, gy);
+      }
+    }
+  }
+}
+
+} // namespace opt_grid_sampler_2d_internal
+} // namespace native
+} // namespace executor
+} // namespace torch
+
+#endif // __aarch64__
diff --git a/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.h b/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.h
new file mode 100644
index 00000000000..59c54a6af91
--- /dev/null
+++ b/kernels/optimized/cpu/op_grid_sampler_2d_fp16_hw.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+#ifdef __aarch64__
+
+namespace torch {
+namespace executor {
+namespace native {
+namespace opt_grid_sampler_2d_internal {
+
+// Hardware-fp16 NEON bilinear + zeros-padding fast path. Defined in
+// op_grid_sampler_2d_fp16_hw.cpp, which is the only translation unit
+// compiled with `-march=armv8.2-a+fp16`. Only safe to call when
+// cpuinfo_has_arm_neon_fp16() reports true — see the runtime dispatcher
+// in op_grid_sampler_2d.cpp.
+//
+// Input/output buffers are passed as void* (raw uint16_t storage
+// interpreted as __fp16) so this header doesn't need <arm_neon.h> and
+// callers don't need the +fp16 march flag just to declare it.
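+//
+// All three buffers are contiguous: input/output in NCHW order, grid as
+// (N, H_out, W_out, 2). The dispatcher rejects non-contiguous or
+// non-default-dim-order tensors before calling in.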
+void grid_sampler_2d_bilinear_fp16_hw(
+    const void* input,
+    const void* grid,
+    void* output,
+    int N,
+    int C,
+    int H_in,
+    int W_in,
+    int H_out,
+    int W_out,
+    bool align_corners);
+
+} // namespace opt_grid_sampler_2d_internal
+} // namespace native
+} // namespace executor
+} // namespace torch
+
+#endif // __aarch64__
diff --git a/kernels/optimized/cpu/op_sum.cpp b/kernels/optimized/cpu/op_sum.cpp
index 826bfb29c98..059153120ab 100644
--- a/kernels/optimized/cpu/op_sum.cpp
+++ b/kernels/optimized/cpu/op_sum.cpp
@@ -66,8 +66,8 @@ inline void sum_innermost(
        acc = acc + Vec::loadu(tmp);
      }
    }
-   float sum = at::vec::vec_reduce_all(
-       [](Vec a, Vec b) { return a + b; }, acc);
+   float sum =
+       at::vec::vec_reduce_all([](Vec a, Vec b) { return a + b; }, acc);
    for (; j < reduce_size; ++j) {
      sum += static_cast<float>(row[j]);
    }
@@ -160,8 +160,7 @@ Tensor& opt_sum_dim_out(
   // Fast path: single reduction dim, matching dtype, non-complex, contiguous.
   // Anything else falls through to the portable kernel.
   const bool fast_eligible = dim_list.has_value() &&
-      dim_list.value().size() == 1 &&
-      in.scalar_type() == out.scalar_type() &&
+      dim_list.value().size() == 1 && in.scalar_type() == out.scalar_type() &&
       !executorch::runtime::isComplexType(in.scalar_type()) &&
       tensor_is_contiguous(in);
 
diff --git a/kernels/optimized/cpu/targets.bzl b/kernels/optimized/cpu/targets.bzl
index 78bbecd9e2c..9da8d67ab38 100644
--- a/kernels/optimized/cpu/targets.bzl
+++ b/kernels/optimized/cpu/targets.bzl
@@ -76,6 +76,26 @@ def define_common_targets():
         ],
     )
 
+    # Hardware fp16 variant of grid_sampler_2d. Needs ARMv8.2-a+fp16 so it
+    # must be a separate translation unit — op_grid_sampler_2d.cpp (the
+    # runtime dispatcher) remains on plain ARMv8 and only calls into this
+    # after cpuinfo_has_arm_neon_fp16() reports true. The scoped compile flag
+    # stays local to this library. Named without the "op_" prefix so the
+    # op_registration_util dependency check (which forbids op_target ->
+    # op_target edges) still lets op_grid_sampler_2d depend on it.
+    runtime.cxx_library(
+        name = "grid_sampler_2d_fp16_hw_impl",
+        srcs = ["op_grid_sampler_2d_fp16_hw.cpp"],
+        visibility = ["PUBLIC"],
+        compiler_flags = select({
+            "DEFAULT": [],
+            "ovr_config//cpu:arm64": ["-march=armv8.2-a+fp16"],
+        }),
+        exported_deps = [
+            "//executorch/runtime/kernel:kernel_includes",
+        ],
+    )
+
     # Used for dtype selective build. Collect source and header files.
     runtime.filegroup(
         name = "optimized_source_files",
diff --git a/shim_et/xplat/executorch/kernels/optimized/op_registration_util.bzl b/shim_et/xplat/executorch/kernels/optimized/op_registration_util.bzl
index 65683625d5b..fe77affcf36 100644
--- a/shim_et/xplat/executorch/kernels/optimized/op_registration_util.bzl
+++ b/shim_et/xplat/executorch/kernels/optimized/op_registration_util.bzl
@@ -221,6 +221,15 @@ OPTIMIZED_ATEN_OPS = (
         name = "op_grid_sampler_2d",
         deps = [
             "//executorch/kernels/portable/cpu:op_grid_sampler_2d",
+            # The hardware fp16 path lives in a separate translation unit so
+            # the ARMv8.2-a+fp16 compile flag can be scoped locally. A runtime
+            # cpuinfo_has_arm_neon_fp16() check in op_grid_sampler_2d.cpp
+            # picks between it and the software-convert fp16 path. Named
+            # without the "op_" prefix so _enforce_deps doesn't reject it
+            # as an op_target-to-op_target edge.
+            ":grid_sampler_2d_fp16_hw_impl",
+            "fbsource//third-party/cpuinfo:cpuinfo",
+            "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
         ],
     ),
     op_target(
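
As an aside on the precision claim in the kernel comments above (interpolation weights such as `ix_se - ix` are computed in fp32 even for fp16 tensors): the rounding loss is easy to reproduce standalone. A minimal sketch, assuming ExecuTorch's bundled <c10/util/Half.h> is on the include path; the coordinate values below are made up for illustration:

    #include <c10/util/Half.h>

    #include <cstdio>

    int main() {
      // An unnormalized grid x coordinate deep inside a wide feature map.
      const float ix = 1023.4f;
      // x coordinate of the south-east corner used for the bilinear weight.
      const float ix_se = 1024.0f;
      // fp16 spacing at this magnitude is 0.5, so 1023.4 rounds to 1023.5 and
      // the interpolation weight collapses from roughly 0.6 to exactly 0.5.
      const c10::Half ix_half(ix);
      std::printf("weight from fp32 coord: %g\n", ix_se - ix);
      std::printf("weight from fp16 coord: %g\n",
                  ix_se - static_cast<float>(ix_half));
      return 0;
    }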