File tree Expand file tree Collapse file tree 1 file changed +10
-13
lines changed
Expand file tree Collapse file tree 1 file changed +10
-13
lines changed Original file line number Diff line number Diff line change @@ -336,24 +336,21 @@ noreturn void _dd_solib_bootstrap(void *stack_top) {
336336 // Restore sp to the original kernel stack and jump to ld.so entry. The
337337 // function is noreturn - we transfer control via inline asm.
338338
339- #ifdef __x86_64__
340- // Restore the original kernel stack and jump to ld.so's entry point.
341- // rdx must be 0 at ld.so startup (x86-64 ABI: rdx = rtld finalizer, 0 = none).
342- // The "a" constraint pins ldso_entry to rax, guaranteeing it is never in rdx
343- // (which the xor would clobber). Using a clobber on "rdx" alone is not
344- // sufficient: GCC is permitted to allocate inputs into clobbered registers
345- // because inputs are consumed before the asm fires. A specific constraint
346- // ("a" = rax) is the correct solution and is safe at any optimisation level.
347339 uintptr_t ldso_entry = bs_ldso .entry ;
340+
341+ // Restore the original kernel stack and jump to ld.so's entry point.
342+ #ifdef __x86_64__
 343+ // On 32-bit x86, the kernel may pass the rtld_fini function in edx.
 344+ // NOTE(review): the x86-64 SysV ABI also reserves rdx for rtld_fini at
 345+ // entry; this relies on Linux zeroing registers at execve — confirm before
 346+ // dropping the explicit "xor %edx, %edx".
348345 __asm__ volatile (
349346 "mov %[sp], %%rsp\n"
350- "xor %%edx, %%edx\n"
351- "jmpq *%[entry]\n"
352- :: [sp ] "r" (stack_top ), [entry ] "a" (ldso_entry )
353- : "rdx" , "memory"
347+ "jmp *%[entry]\n"
348+ :
349+ : [sp ] "r" (stack_top ),
350+ [entry ] "r" (ldso_entry )
351+ : "memory"
354352 );
355353#elif defined(__aarch64__ )
356- uintptr_t ldso_entry = bs_ldso .entry ;
357354 __asm__ volatile (
358355 "mov sp, %0\n"
359356 "br %1\n"
You can’t perform that action at this time.
0 commit comments