Diffstat (limited to 'arch/x86/include/asm/calling.h')
-rw-r--r--	arch/x86/include/asm/calling.h	228
1 files changed, 125 insertions, 103 deletions
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 30af5a83216..cb4c73bfeb4 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,110 +46,96 @@ For 32-bit we have the following conventions - kernel is built with
  */
+#include <asm/dwarf2.h>
+
+#ifdef CONFIG_X86_64
 /*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
-#define R15		  (0)
-#define R14		  (8)
-#define R13		 (16)
-#define R12		 (24)
-#define RBP		 (32)
-#define RBX		 (40)
+#define R15		  0
+#define R14		  8
+#define R13		 16
+#define R12		 24
+#define RBP		 32
+#define RBX		 40
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11		 (48)
-#define R10		 (56)
-#define R9		 (64)
-#define R8		 (72)
-#define RAX		 (80)
-#define RCX		 (88)
-#define RDX		 (96)
-#define RSI		(104)
-#define RDI		(112)
-#define ORIG_RAX	(120)       /* + error_code */
+#define R11		 48
+#define R10		 56
+#define R9		 64
+#define R8		 72
+#define RAX		 80
+#define RCX		 88
+#define RDX		 96
+#define RSI		104
+#define RDI		112
+#define ORIG_RAX	120       /* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP		(128)
-#define CS		(136)
-#define EFLAGS		(144)
-#define RSP		(152)
-#define SS		(160)
+#define RIP		128
+#define CS		136
+#define EFLAGS		144
+#define RSP		152
+#define SS		160
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
 
-	.macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
+	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
 	subq  $9*8+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
-	movq  %rdi, 8*8(%rsp)
-	CFI_REL_OFFSET	rdi, 8*8
-	movq  %rsi, 7*8(%rsp)
-	CFI_REL_OFFSET	rsi, 7*8
-	movq  %rdx, 6*8(%rsp)
-	CFI_REL_OFFSET	rdx, 6*8
-	.if \norcx
-	.else
-	movq  %rcx, 5*8(%rsp)
-	CFI_REL_OFFSET	rcx, 5*8
+	movq_cfi rdi, 8*8
+	movq_cfi rsi, 7*8
+	movq_cfi rdx, 6*8
+
+	.if \save_rcx
+	movq_cfi rcx, 5*8
 	.endif
-	movq  %rax, 4*8(%rsp)
-	CFI_REL_OFFSET	rax, 4*8
-	.if \nor891011
-	.else
-	movq  %r8, 3*8(%rsp)
-	CFI_REL_OFFSET	r8,  3*8
-	movq  %r9, 2*8(%rsp)
-	CFI_REL_OFFSET	r9,  2*8
-	movq  %r10, 1*8(%rsp)
-	CFI_REL_OFFSET	r10, 1*8
-	movq  %r11, (%rsp)
-	CFI_REL_OFFSET	r11, 0*8
+
+	movq_cfi rax, 4*8
+
+	.if \save_r891011
+	movq_cfi r8,  3*8
+	movq_cfi r9,  2*8
+	movq_cfi r10, 1*8
+	movq_cfi r11, 0*8
 	.endif
+
 	.endm
 
 #define ARG_SKIP	(9*8)
 
-	.macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
-			    skipr8910=0, skiprdx=0
-	.if \skipr11
-	.else
-	movq (%rsp), %r11
-	CFI_RESTORE r11
+	.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+			    rstor_r8910=1, rstor_rdx=1
+	.if \rstor_r11
+	movq_cfi_restore 0*8, r11
 	.endif
-	.if \skipr8910
-	.else
-	movq 1*8(%rsp), %r10
-	CFI_RESTORE r10
-	movq 2*8(%rsp), %r9
-	CFI_RESTORE r9
-	movq 3*8(%rsp), %r8
-	CFI_RESTORE r8
+
+	.if \rstor_r8910
+	movq_cfi_restore 1*8, r10
+	movq_cfi_restore 2*8, r9
+	movq_cfi_restore 3*8, r8
 	.endif
-	.if \skiprax
-	.else
-	movq 4*8(%rsp), %rax
-	CFI_RESTORE rax
+
+	.if \rstor_rax
+	movq_cfi_restore 4*8, rax
 	.endif
-	.if \skiprcx
-	.else
-	movq 5*8(%rsp), %rcx
-	CFI_RESTORE rcx
+
+	.if \rstor_rcx
+	movq_cfi_restore 5*8, rcx
 	.endif
-	.if \skiprdx
-	.else
-	movq 6*8(%rsp), %rdx
-	CFI_RESTORE rdx
+
+	.if \rstor_rdx
+	movq_cfi_restore 6*8, rdx
 	.endif
-	movq 7*8(%rsp), %rsi
-	CFI_RESTORE rsi
-	movq 8*8(%rsp), %rdi
-	CFI_RESTORE rdi
+
+	movq_cfi_restore 7*8, rsi
+	movq_cfi_restore 8*8, rdi
+
 	.if ARG_SKIP+\addskip > 0
 	addq $ARG_SKIP+\addskip, %rsp
 	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
@@ -176,33 +162,21 @@ For 32-bit we have the following conventions - kernel is built with
 	.macro SAVE_REST
 	subq $REST_SKIP, %rsp
 	CFI_ADJUST_CFA_OFFSET	REST_SKIP
-	movq %rbx, 5*8(%rsp)
-	CFI_REL_OFFSET	rbx, 5*8
-	movq %rbp, 4*8(%rsp)
-	CFI_REL_OFFSET	rbp, 4*8
-	movq %r12, 3*8(%rsp)
-	CFI_REL_OFFSET	r12, 3*8
-	movq %r13, 2*8(%rsp)
-	CFI_REL_OFFSET	r13, 2*8
-	movq %r14, 1*8(%rsp)
-	CFI_REL_OFFSET	r14, 1*8
-	movq %r15, (%rsp)
-	CFI_REL_OFFSET	r15, 0*8
+	movq_cfi rbx, 5*8
+	movq_cfi rbp, 4*8
+	movq_cfi r12, 3*8
+	movq_cfi r13, 2*8
+	movq_cfi r14, 1*8
+	movq_cfi r15, 0*8
 	.endm
 
 	.macro RESTORE_REST
-	movq (%rsp),     %r15
-	CFI_RESTORE r15
-	movq 1*8(%rsp),  %r14
-	CFI_RESTORE r14
-	movq 2*8(%rsp),  %r13
-	CFI_RESTORE r13
-	movq 3*8(%rsp),  %r12
-	CFI_RESTORE r12
-	movq 4*8(%rsp),  %rbp
-	CFI_RESTORE rbp
-	movq 5*8(%rsp),  %rbx
-	CFI_RESTORE rbx
+	movq_cfi_restore 0*8, r15
+	movq_cfi_restore 1*8, r14
+	movq_cfi_restore 2*8, r13
+	movq_cfi_restore 3*8, r12
+	movq_cfi_restore 4*8, rbp
+	movq_cfi_restore 5*8, rbx
 	addq $REST_SKIP, %rsp
 	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
 	.endm
@@ -214,9 +188,57 @@ For 32-bit we have the following conventions - kernel is built with
 	.macro RESTORE_ALL addskip=0
 	RESTORE_REST
-	RESTORE_ARGS 0, \addskip
+	RESTORE_ARGS 1, \addskip
 	.endm
 
 	.macro icebp
 	.byte 0xf1
 	.endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
+ * are different from the entry_32.S versions in not changing the segment
+ * registers. So only suitable for in kernel use, not when transitioning
+ * from or to user space. The resulting stack frame is not a standard
+ * pt_regs frame. The main use case is calling C code from assembler
+ * when all the registers need to be preserved.
+ */
+
+	.macro SAVE_ALL
+	pushl_cfi %eax
+	CFI_REL_OFFSET eax, 0
+	pushl_cfi %ebp
+	CFI_REL_OFFSET ebp, 0
+	pushl_cfi %edi
+	CFI_REL_OFFSET edi, 0
+	pushl_cfi %esi
+	CFI_REL_OFFSET esi, 0
+	pushl_cfi %edx
+	CFI_REL_OFFSET edx, 0
+	pushl_cfi %ecx
+	CFI_REL_OFFSET ecx, 0
+	pushl_cfi %ebx
+	CFI_REL_OFFSET ebx, 0
+	.endm
+
+	.macro RESTORE_ALL
+	popl_cfi %ebx
+	CFI_RESTORE ebx
+	popl_cfi %ecx
+	CFI_RESTORE ecx
+	popl_cfi %edx
+	CFI_RESTORE edx
+	popl_cfi %esi
+	CFI_RESTORE esi
+	popl_cfi %edi
+	CFI_RESTORE edi
+	popl_cfi %ebp
+	CFI_RESTORE ebp
+	popl_cfi %eax
+	CFI_RESTORE eax
+	.endm
+
+#endif /* CONFIG_X86_64 */
+
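The core simplification above is replacing each open-coded movq plus CFI annotation pair with a single helper. The helpers come from the newly included <asm/dwarf2.h>; they are not shown in this diff, but at the time they expanded to roughly the following (a sketch, not part of the patch):

	.macro movq_cfi reg offset=0
	movq %\reg, \offset(%rsp)	/* spill the register into the frame */
	CFI_REL_OFFSET \reg, \offset	/* tell the DWARF unwinder where it is */
	.endm

	.macro movq_cfi_restore offset reg
	movq \offset(%rsp), %\reg	/* reload the register from the frame */
	CFI_RESTORE \reg		/* register again holds its own value */
	.endm

Note also that the macro flags flip sense: the old skip-style parameters (norcx, skiprax, ...) defaulted to 0 and needed empty .if/.else bodies, while the new save/restore-style parameters default to 1 and guard the positive action directly. A caller that previously wrote, say, SAVE_ARGS 8, 1 to skip %rcx would now write SAVE_ARGS 8, 0 (addskip=8, save_rcx=0); these call sites are illustrative, not taken from the patch.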

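The comment on the new 32-bit SAVE_ALL/RESTORE_ALL spells out the intended use: wrapping a call into C code from assembly while preserving all general-purpose registers, without touching segment registers. A minimal, hypothetical call site (my_c_helper is a placeholder, not a function from the patch):

	SAVE_ALL		# push all GP registers, with CFI annotations
	call my_c_helper	# C code may clobber caller-saved registers
	RESTORE_ALL		# pop them back in reverse order

Because the resulting frame is not a pt_regs layout, such a sequence is only valid on in-kernel paths, never on a transition to or from user space.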