Mirror of https://github.com/FEX-Emu/linux.git
commit d10d89ec78
It's really a pretty ugly thing to need, and some day it will hopefully be
obviated by teaching gcc about the magic calling conventions for the
low-level system call code, but in the meantime we can at least add big
honking comments about why we need these insane and strange macros.

I took my comments from my version of the macro, but I ended up deciding
to just pick Roland's version of the actual code instead (with his
prettier syntax that uses vararg macros). Thus the previous two commits
that actually implement it.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
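As context for the macro this commit documents, here is a hedged sketch of
a 32-bit syscall-style call site. The names sys_example and do_example_work
are hypothetical; the real call sites are the kernel's system-call wrappers.

/* Hypothetical 32-bit syscall body illustrating asmlinkage_protect():
 * with asmlinkage, the three arguments live in the *caller's* stack
 * frame, so they must be kept "live" until we return, or gcc may
 * reuse their slots or tail-call through them. */
asmlinkage long sys_example(unsigned int fd, char __user *buf, size_t count)
{
	long ret = do_example_work(fd, buf, count);	/* hypothetical helper */
	asmlinkage_protect(3, ret, fd, buf, count);
	return ret;
}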
59 lines · 2.0 KiB · C
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H

#ifdef CONFIG_X86_64
#define __ALIGN .p2align 4,,15
#define __ALIGN_STR ".p2align 4,,15"
#endif

#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
/*
 * For 32-bit UML - mark functions implemented in assembly that use
 * regparm input parameters:
 */
#define asmregparm __attribute__((regparm(3)))
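
A minimal userspace illustration of what these two regparm attributes do,
as a sketch assuming gcc -m32 (this is not part of the header):

/* regparm(0) forces all arguments onto the stack, matching what
 * hand-written assembly callers expect; regparm(3) passes the first
 * three integer arguments in %eax, %edx and %ecx instead. */
#include <stdio.h>

__attribute__((regparm(0))) int stack_args(int a, int b, int c)
{
	return a + b + c;	/* a, b, c all arrive on the stack */
}

__attribute__((regparm(3))) int reg_args(int a, int b, int c)
{
	return a + b + c;	/* a, b, c arrive in %eax, %edx, %ecx */
}

int main(void)
{
	printf("%d %d\n", stack_args(1, 2, 3), reg_args(4, 5, 6));
	return 0;
}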

/*
 * Make sure the compiler doesn't do anything stupid with the
 * arguments on the stack - they are owned by the *caller*, not
 * the callee. This just fools gcc into not spilling into them,
 * and keeps it from doing tailcall recursion and/or using the
 * stack slots for temporaries, since they are live and "used"
 * all the way to the end of the function.
 *
 * NOTE! On x86-64, all the arguments are in registers, so this
 * only matters on a 32-bit kernel.
 */
#define asmlinkage_protect(n, ret, args...) \
	__asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect0(ret) \
	__asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
	__asmlinkage_protect_n(ret, "g" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
			      "g" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
			      "g" (arg4), "g" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
			      "g" (arg4), "g" (arg5), "g" (arg6))

#endif /* CONFIG_X86_32 */
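
For completeness, a standalone sketch of the inline-asm trick above,
assuming gcc -m32 in userspace (protect2 and add2 are hypothetical names
standing in for the real macros):

/* The empty asm lists each argument as a "g" input, so gcc must treat
 * every stack slot as read right before the function returns - it
 * cannot clobber them or turn the call into a tail call. */
#include <stdio.h>

#define protect2(ret, a1, a2) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "g" (a1), "g" (a2))

__attribute__((regparm(0))) long add2(long a, long b)
{
	long ret = a + b;
	protect2(ret, a, b);	/* a and b stay live in the caller's frame */
	return ret;
}

int main(void)
{
	printf("%ld\n", add2(2, 3));
	return 0;
}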

#ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
#endif
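
For context, these __ALIGN overrides are consumed by the generic
include/linux/linkage.h, roughly as follows (a simplified sketch from
memory, not a verbatim copy of that file):

/* If the architecture did not override __ALIGN, fall back to a
 * generic choice; assembly entry points then pick it up via ENTRY(). */
#ifndef __ALIGN
#define __ALIGN		.align 4,0x90
#define __ALIGN_STR	".align 4,0x90"
#endif

#ifdef __ASSEMBLY__
#define ALIGN		__ALIGN

#define ENTRY(name) \
	.globl name; \
	ALIGN; \
	name:
#endif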

#endif /* __ASM_LINKAGE_H */