Skip to content

Commit 6bc9c0a

Browse files
Sayali Patil authored and maddy-kerneldev committed
powerpc: fix KUAP warning in VMX usercopy path
On powerpc with PREEMPT_FULL or PREEMPT_LAZY and function tracing enabled, KUAP warnings can be triggered from the VMX usercopy path under memory stress workloads. KUAP requires that no subfunctions are called once userspace access has been enabled. The existing VMX copy implementation violates this requirement by invoking enter_vmx_usercopy() from the assembly path after userspace access has already been enabled. If preemption occurs in this window, the AMR state may not be preserved correctly, leading to unexpected userspace access state and resulting in KUAP warnings. Fix this by restructuring the VMX usercopy flow so that VMX selection and VMX state management are centralized in raw_copy_tofrom_user(), which is invoked by the raw_copy_{to,from,in}_user() wrappers. The new flow is: - raw_copy_{to,from,in}_user() calls raw_copy_tofrom_user() - raw_copy_tofrom_user() decides whether to use the VMX path based on size and CPU capability - Call enter_vmx_usercopy() before enabling userspace access - Enable userspace access as per the copy direction and perform the VMX copy - Disable userspace access as per the copy direction - Call exit_vmx_usercopy() - Fall back to the base copy routine if the VMX copy faults With this change, the VMX assembly routines no longer perform VMX state management or call helper functions; they only implement the copy operations. The previous feature-section based VMX selection inside __copy_tofrom_user_power7() is removed, and a dedicated __copy_tofrom_user_power7_vmx() entry point is introduced. This ensures correct KUAP ordering, avoids subfunction calls while KUAP is unlocked, and eliminates the warnings while preserving the VMX fast path. 
Fixes: de78a9c ("powerpc: Add a framework for Kernel Userspace Access Protection")
Reported-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Closes: https://lore.kernel.org/all/20260109064917.777587-2-sshegde@linux.ibm.com/
Suggested-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Co-developed-by: Aboorva Devarajan <aboorvad@linux.ibm.com>
Signed-off-by: Aboorva Devarajan <aboorvad@linux.ibm.com>
Signed-off-by: Sayali Patil <sayalip@linux.ibm.com>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20260304122201.153049-1-sayalip@linux.ibm.com
1 parent e9bbfb4 commit 6bc9c0a

File tree

4 files changed

+63
-50
lines changed

4 files changed

+63
-50
lines changed

arch/powerpc/include/asm/uaccess.h

Lines changed: 45 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,9 @@
1515
#define TASK_SIZE_MAX TASK_SIZE_USER64
1616
#endif
1717

18+
/* Threshold above which VMX copy path is used */
19+
#define VMX_COPY_THRESHOLD 3328
20+
1821
#include <asm-generic/access_ok.h>
1922

2023
/*
@@ -326,40 +329,62 @@ do { \
326329
extern unsigned long __copy_tofrom_user(void __user *to,
327330
const void __user *from, unsigned long size);
328331

329-
#ifdef __powerpc64__
330-
static inline unsigned long
331-
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
332+
unsigned long __copy_tofrom_user_base(void __user *to,
333+
const void __user *from, unsigned long size);
334+
335+
unsigned long __copy_tofrom_user_power7_vmx(void __user *to,
336+
const void __user *from, unsigned long size);
337+
338+
static __always_inline bool will_use_vmx(unsigned long n)
339+
{
340+
return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) &&
341+
n > VMX_COPY_THRESHOLD;
342+
}
343+
344+
static __always_inline unsigned long
345+
raw_copy_tofrom_user(void __user *to, const void __user *from,
346+
unsigned long n, unsigned long dir)
332347
{
333348
unsigned long ret;
334349

335-
barrier_nospec();
336-
allow_user_access(to, KUAP_READ_WRITE);
350+
if (will_use_vmx(n) && enter_vmx_usercopy()) {
351+
allow_user_access(to, dir);
352+
ret = __copy_tofrom_user_power7_vmx(to, from, n);
353+
prevent_user_access(dir);
354+
exit_vmx_usercopy();
355+
356+
if (unlikely(ret)) {
357+
allow_user_access(to, dir);
358+
ret = __copy_tofrom_user_base(to, from, n);
359+
prevent_user_access(dir);
360+
}
361+
return ret;
362+
}
363+
364+
allow_user_access(to, dir);
337365
ret = __copy_tofrom_user(to, from, n);
338-
prevent_user_access(KUAP_READ_WRITE);
366+
prevent_user_access(dir);
339367
return ret;
340368
}
341-
#endif /* __powerpc64__ */
342369

343-
static inline unsigned long raw_copy_from_user(void *to,
344-
const void __user *from, unsigned long n)
370+
#ifdef CONFIG_PPC64
371+
static inline unsigned long
372+
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
345373
{
346-
unsigned long ret;
374+
barrier_nospec();
375+
return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE);
376+
}
377+
#endif /* CONFIG_PPC64 */
347378

348-
allow_user_access(NULL, KUAP_READ);
349-
ret = __copy_tofrom_user((__force void __user *)to, from, n);
350-
prevent_user_access(KUAP_READ);
351-
return ret;
379+
static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
380+
{
381+
return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ);
352382
}
353383

354384
static inline unsigned long
355385
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
356386
{
357-
unsigned long ret;
358-
359-
allow_user_access(to, KUAP_WRITE);
360-
ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
361-
prevent_user_access(KUAP_WRITE);
362-
return ret;
387+
return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE);
363388
}
364389

365390
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

arch/powerpc/lib/copyuser_64.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -562,3 +562,4 @@ exc; std r10,32(3)
562562
li r5,4096
563563
b .Ldst_aligned
564564
EXPORT_SYMBOL(__copy_tofrom_user)
565+
EXPORT_SYMBOL(__copy_tofrom_user_base)

arch/powerpc/lib/copyuser_power7.S

Lines changed: 15 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -5,13 +5,9 @@
55
*
66
* Author: Anton Blanchard <anton@au.ibm.com>
77
*/
8+
#include <linux/export.h>
89
#include <asm/ppc_asm.h>
910

10-
#ifndef SELFTEST_CASE
11-
/* 0 == don't use VMX, 1 == use VMX */
12-
#define SELFTEST_CASE 0
13-
#endif
14-
1511
#ifdef __BIG_ENDIAN__
1612
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
1713
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
@@ -47,10 +43,14 @@
4743
ld r15,STK_REG(R15)(r1)
4844
ld r14,STK_REG(R14)(r1)
4945
.Ldo_err3:
50-
bl CFUNC(exit_vmx_usercopy)
46+
ld r6,STK_REG(R31)(r1) /* original destination pointer */
47+
ld r5,STK_REG(R29)(r1) /* original number of bytes */
48+
subf r7,r6,r3 /* #bytes copied */
49+
subf r3,r7,r5 /* #bytes not copied in r3 */
5150
ld r0,STACKFRAMESIZE+16(r1)
5251
mtlr r0
53-
b .Lexit
52+
addi r1,r1,STACKFRAMESIZE
53+
blr
5454
#endif /* CONFIG_ALTIVEC */
5555

5656
.Ldo_err2:
@@ -74,20 +74,13 @@
7474

7575
_GLOBAL(__copy_tofrom_user_power7)
7676
cmpldi r5,16
77-
cmpldi cr1,r5,3328
7877

7978
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
8079
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
8180
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
8281

8382
blt .Lshort_copy
8483

85-
#ifdef CONFIG_ALTIVEC
86-
test_feature = SELFTEST_CASE
87-
BEGIN_FTR_SECTION
88-
bgt cr1,.Lvmx_copy
89-
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
90-
#endif
9184

9285
.Lnonvmx_copy:
9386
/* Get the source 8B aligned */
@@ -263,23 +256,14 @@ err1; stb r0,0(r3)
263256
15: li r3,0
264257
blr
265258

266-
.Lunwind_stack_nonvmx_copy:
267-
addi r1,r1,STACKFRAMESIZE
268-
b .Lnonvmx_copy
269-
270-
.Lvmx_copy:
271259
#ifdef CONFIG_ALTIVEC
260+
_GLOBAL(__copy_tofrom_user_power7_vmx)
272261
mflr r0
273262
std r0,16(r1)
274263
stdu r1,-STACKFRAMESIZE(r1)
275-
bl CFUNC(enter_vmx_usercopy)
276-
cmpwi cr1,r3,0
277-
ld r0,STACKFRAMESIZE+16(r1)
278-
ld r3,STK_REG(R31)(r1)
279-
ld r4,STK_REG(R30)(r1)
280-
ld r5,STK_REG(R29)(r1)
281-
mtlr r0
282264

265+
std r3,STK_REG(R31)(r1)
266+
std r5,STK_REG(R29)(r1)
283267
/*
284268
* We prefetch both the source and destination using enhanced touch
285269
* instructions. We use a stream ID of 0 for the load side and
@@ -300,8 +284,6 @@ err1; stb r0,0(r3)
300284

301285
DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
302286

303-
beq cr1,.Lunwind_stack_nonvmx_copy
304-
305287
/*
306288
* If source and destination are not relatively aligned we use a
307289
* slower permute loop.
@@ -478,7 +460,8 @@ err3; lbz r0,0(r4)
478460
err3; stb r0,0(r3)
479461

480462
15: addi r1,r1,STACKFRAMESIZE
481-
b CFUNC(exit_vmx_usercopy) /* tail call optimise */
463+
li r3,0
464+
blr
482465

483466
.Lvmx_unaligned_copy:
484467
/* Get the destination 16B aligned */
@@ -681,5 +664,7 @@ err3; lbz r0,0(r4)
681664
err3; stb r0,0(r3)
682665

683666
15: addi r1,r1,STACKFRAMESIZE
684-
b CFUNC(exit_vmx_usercopy) /* tail call optimise */
667+
li r3,0
668+
blr
669+
EXPORT_SYMBOL(__copy_tofrom_user_power7_vmx)
685670
#endif /* CONFIG_ALTIVEC */

arch/powerpc/lib/vmx-helper.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ int enter_vmx_usercopy(void)
2727

2828
return 1;
2929
}
30+
EXPORT_SYMBOL(enter_vmx_usercopy);
3031

3132
/*
3233
* This function must return 0 because we tail call optimise when calling
@@ -49,6 +50,7 @@ int exit_vmx_usercopy(void)
4950
set_dec(1);
5051
return 0;
5152
}
53+
EXPORT_SYMBOL(exit_vmx_usercopy);
5254

5355
int enter_vmx_ops(void)
5456
{

0 commit comments

Comments (0)