@@ -11,6 +11,7 @@ using namespace ggml_cuda_mma;
 
 #define MMQ_DP4A_MAX_BATCH_SIZE 64 // Max. batch size to use for dp4a MMQ kernels when FP16 tensor cores are available.
 #define MMQ_ITER_K 256
+#define MMQ_ITER_K_Q1_0 128 // For Q1_0: QK1_0=128, QI1_0=4, so threads_per_row = 128/(4*4) = 8
 #define MMQ_ITER_K_MXFP4_FP4 512
 #define MMQ_NWARPS 8
 
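A minimal standalone sketch of the per-warp work split these constants imply, assuming QK1_0 = 128 and QI1_0 = 4 (as stated in the comment above), QK8_1 = 32, a 32-wide warp, and the MMQ_ITER_K = 256 used by the new loader further down; the *_sketch names are illustrative only and not part of this patch:

    // Compile-time check of the Q1_0 tile-loading arithmetic (illustrative sketch).
    constexpr int QK1_0_sketch     = 128; // values per Q1_0 block (one sign bit each)
    constexpr int QI1_0_sketch     = 4;   // 32-bit words of packed bits per block
    constexpr int QK8_1_sketch     = 32;  // values per q8_1 sub-block
    constexpr int warp_size_sketch = 32;
    constexpr int iter_k_sketch    = 256; // MMQ_ITER_K

    constexpr int blocks_per_iter_sketch  = iter_k_sketch / QK1_0_sketch;             // 2 Q1_0 blocks per K iteration
    constexpr int threads_per_row_sketch  = blocks_per_iter_sketch * QI1_0_sketch;    // 8 threads unpack one tile row
    constexpr int nrows_sketch            = warp_size_sketch / threads_per_row_sketch; // 4 tile rows per warp
    constexpr int scales_per_block_sketch = QK1_0_sketch / QK8_1_sketch;              // 4 scale entries share one d

    static_assert(threads_per_row_sketch == 8, "matches the comment on MMQ_ITER_K_Q1_0");
    static_assert(nrows_sketch == 4, "4 tile rows handled per warp per iteration");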
@@ -57,6 +58,8 @@ static_assert(sizeof(block_fp4_mmq) == sizeof(block_q8_1_mmq), "Unexpected b
 
 static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
     switch (type_x) {
+        case GGML_TYPE_Q1_0:
+            return MMQ_Q8_1_DS_LAYOUT_D4;
         case GGML_TYPE_Q4_0:
         case GGML_TYPE_Q4_1:
             return MMQ_Q8_1_DS_LAYOUT_DS4;
@@ -229,6 +232,7 @@ static_assert(MMQ_MMA_TILE_X_K_NVFP4 % 8 == 4, "Wrong padding.");
 
 static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
     switch (type) {
+        case GGML_TYPE_Q1_0: return MMQ_MMA_TILE_X_K_Q8_0;
         case GGML_TYPE_Q4_0: return MMQ_MMA_TILE_X_K_Q8_0;
         case GGML_TYPE_Q4_1: return MMQ_MMA_TILE_X_K_Q8_1;
         case GGML_TYPE_Q5_0: return MMQ_MMA_TILE_X_K_Q8_0;
@@ -302,6 +306,87 @@ static constexpr __device__ int mmq_get_nwarps_device() {
 
 // ------------------------------------------------------------
 
+template <int mmq_y, bool need_check> static __device__ __forceinline__ void load_tiles_q1_0(
+    const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+#if !(defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE))
+    GGML_UNUSED_VARS(x, x_tile, kbx0, i_max, stride, mmq_y, need_check);
+    NO_DEVICE_CODE;
+#else
+    constexpr int nwarps    = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+
+    int   * x_qs = (int   *) x_tile;
+    float * x_df = (float *) (x_qs + 2*MMQ_TILE_NE_K);
+
+    constexpr int blocks_per_iter         = MMQ_ITER_K / QK1_0;
+    constexpr int threads_per_row         = blocks_per_iter * QI1_0;
+    constexpr int nrows                   = warp_size / threads_per_row;
+    constexpr int scale_entries_per_block = QK1_0 / QK8_1;
+    constexpr int scale_entries_per_row   = blocks_per_iter * scale_entries_per_block;
+
+    const int txi  = threadIdx.x % threads_per_row;
+    const int kbx  = txi / QI1_0;
+    const int kqsx = txi % QI1_0;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const block_q1_0 * bxi = (const block_q1_0 *) x + kbx0 + i*stride + kbx;
+        const int qs_offset = 4*kqsx;
+        const int qs0 = bxi->qs[qs_offset + 0] | (bxi->qs[qs_offset + 1] << 8) |
+                        (bxi->qs[qs_offset + 2] << 16) | (bxi->qs[qs_offset + 3] << 24);
+
+        int unpacked_bytes[8];
+#pragma unroll
+        for (int j = 0; j < 8; ++j) {
+            const int shift = j * 4;
+            const int bits4 = (qs0 >> shift) & 0x0F;
+            const int b0 = (bits4 & 0x01) ? 1 : -1;
+            const int b1 = (bits4 & 0x02) ? 1 : -1;
+            const int b2 = (bits4 & 0x04) ? 1 : -1;
+            const int b3 = (bits4 & 0x08) ? 1 : -1;
+            unpacked_bytes[j] = (b0 & 0xFF) | ((b1 & 0xFF) << 8) | ((b2 & 0xFF) << 16) | ((b3 & 0xFF) << 24);
+        }
+
+        const int dst_offset = kbx*(scale_entries_per_block*QI8_0) + kqsx*QI8_0;
+#pragma unroll
+        for (int j = 0; j < 8; ++j) {
+            x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + dst_offset + j] = unpacked_bytes[j];
+        }
+    }
+
+    const int ksx         = threadIdx.x % scale_entries_per_row;
+    const int scale_block = ksx / scale_entries_per_block;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+        int i = i0 + threadIdx.y;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const block_q1_0 * bxi = (const block_q1_0 *) x + kbx0 + i*stride + scale_block;
+
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + ksx] = bxi->d;
+    }
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+}
+
+template <int mmq_x, int mmq_y>
+static __device__ __forceinline__ void vec_dot_q1_mmq_dp4a_disabled(
+    const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) {
+    // Q1_0 intentionally targets the MMA path only.
+    // If DP4A support is needed later for older GPUs, it should be reintroduced and validated separately.
+    GGML_UNUSED_VARS(x, y, sum, k00, mmq_x, mmq_y);
+    NO_DEVICE_CODE;
+}
+
 template <int mmq_y, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
     constexpr int nwarps = mmq_get_nwarps_device();
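For reference, the loader above expands 1-bit weights as bit set -> +1 and bit clear -> -1, four values per nibble and 32 values per 32-bit word, and stores the block scale d once per 32-value q8_1 sub-block. Below is a small host-side sketch of that expansion and of the per-sub-block product the reused q8_0 x q8_1 MMA dot is expected to accumulate; the function names are hypothetical, and the D4-layout behaviour of vec_dot_q8_0_q8_1_mma is assumed to match the other q8_0-tile types:

    #include <cstdint>

    // Reference expansion of one 32-bit word of packed Q1_0 sign bits into
    // 32 int8 values in {-1, +1}; byte order matches the little-endian packing
    // of unpacked_bytes[] in load_tiles_q1_0.
    static void unpack_q1_0_word_ref(uint32_t qs0, int8_t out[32]) {
        for (int j = 0; j < 8; ++j) {
            const uint32_t bits4 = (qs0 >> (4*j)) & 0x0F;
            for (int b = 0; b < 4; ++b) {
                out[4*j + b] = (bits4 & (1u << b)) ? 1 : -1;
            }
        }
    }

    // Scalar reference of one 32-value sub-block's contribution:
    // block scale d_x times activation scale d_y times the int8 dot product.
    static float q1_0_q8_1_subblock_ref(float d_x, const int8_t sx[32], float d_y, const int8_t y[32]) {
        int sumi = 0;
        for (int k = 0; k < 32; ++k) {
            sumi += sx[k] * y[k];
        }
        return d_x * d_y * (float) sumi;
    }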
@@ -3274,6 +3359,14 @@ static __device__ __forceinline__ void mmq_write_back_mma(
 template <int mmq_x, int mmq_y, bool need_check, ggml_type type>
 struct mmq_type_traits;
 
+template <int mmq_x, int mmq_y, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, need_check, GGML_TYPE_Q1_0> {
+    static constexpr int              vdr          = VDR_Q1_0_Q8_1_MMQ;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q1_0<mmq_y, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, MMQ_Q8_1_DS_LAYOUT_D4>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q1_mmq_dp4a_disabled<mmq_x, mmq_y>;
+};
+
 template <int mmq_x, int mmq_y, bool need_check>
 struct mmq_type_traits<mmq_x, mmq_y, need_check, GGML_TYPE_Q4_0> {
     static constexpr int vdr = VDR_Q4_0_Q8_1_MMQ;
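With this specialization, GGML_TYPE_Q1_0 goes through the same templated mul_mat_q pipeline as the other q8_0-tile types: load_tiles_q1_0 expands the sign bits into the Q8_0 tile layout, the tensor-core path reuses vec_dot_q8_0_q8_1_mma with the MMQ_Q8_1_DS_LAYOUT_D4 scale layout, and the dp4a slot points at the stub above, so the type stays MMA-only by construction.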