Diffstat (limited to 'arch/x86/include/asm/xsave.h')
-rw-r--r--	arch/x86/include/asm/xsave.h	59
1 file changed, 30 insertions, 29 deletions
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index c6ce2452f10..d949ef28c48 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -6,11 +6,18 @@
 
 #define XSTATE_CPUID		0x0000000d
 
-#define XSTATE_FP	0x1
-#define XSTATE_SSE	0x2
-#define XSTATE_YMM	0x4
+#define XSTATE_FP		0x1
+#define XSTATE_SSE		0x2
+#define XSTATE_YMM		0x4
+#define XSTATE_BNDREGS		0x8
+#define XSTATE_BNDCSR		0x10
+#define XSTATE_OPMASK		0x20
+#define XSTATE_ZMM_Hi256	0x40
+#define XSTATE_Hi16_ZMM		0x80
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
+/* Bit 63 of XCR0 is reserved for future expansion */
+#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))
 
 #define FXSAVE_SIZE	512
 
@@ -20,10 +27,15 @@
 #define XSAVE_YMM_SIZE	    256
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
-/*
- * These are the features that the OS can handle currently.
- */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
+			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
@@ -34,17 +46,14 @@
 extern unsigned int xstate_size;
 extern u64 pcntxt_mask;
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+extern struct xsave_struct *init_xstate_buf;
 
 extern void xsave_init(void);
 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
 extern int init_fpu(struct task_struct *child);
-extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
-			    void __user *fpstate,
-			    struct _fpx_sw_bytes *sw);
 
-static inline int fpu_xrstor_checking(struct fpu *fpu)
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
 {
-	struct xsave_struct *fx = &fpu->state->xsave;
 	int err;
 
 	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -69,27 +78,21 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 	 * Clear the xsave header first, so that reserved fields are
 	 * initialized to zero.
 	 */
-	err = __clear_user(&buf->xsave_hdr,
-			   sizeof(struct xsave_hdr_struct));
+	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
 			     ".previous\n"
-			     ".section __ex_table,\"a\"\n"
-			     _ASM_ALIGN "\n"
-			     _ASM_PTR "1b,3b\n"
-			     ".previous"
+			     _ASM_EXTABLE(1b,3b)
 			     : [err] "=r" (err)
 			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
 			     : "memory");
-	if (unlikely(err) && __clear_user(buf, xstate_size))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
 	return err;
 }
 
@@ -100,16 +103,14 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
 			     ".previous\n"
-			     ".section __ex_table,\"a\"\n"
-			     _ASM_ALIGN "\n"
-			     _ASM_PTR "1b,3b\n"
-			     ".previous"
+			     _ASM_EXTABLE(1b,3b)
 			     : [err] "=r" (err)
 			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
 			     : "memory");	/* memory required? */
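
For reference, a small stand-alone sketch (illustrative only, not part of this patch; the macro values are copied from the first two hunks above) showing how the new XCR0 component bits combine into XSTATE_LAZY, XSTATE_EAGER and XCNTXT_MASK:

/* Illustrative sketch -- values copied from the diff above, not kernel code. */
#include <stdio.h>

#define XSTATE_FP		0x1
#define XSTATE_SSE		0x2
#define XSTATE_YMM		0x4
#define XSTATE_BNDREGS		0x8
#define XSTATE_BNDCSR		0x10
#define XSTATE_OPMASK		0x20
#define XSTATE_ZMM_Hi256	0x40
#define XSTATE_Hi16_ZMM		0x80

#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)

int main(void)
{
	/* Prints lazy=0xe7 eager=0x18 all=0xff */
	printf("lazy=%#x eager=%#x all=%#x\n",
	       XSTATE_LAZY, XSTATE_EAGER, XCNTXT_MASK);
	return 0;
}

Per the comments added in the second hunk, the MPX bits (XSTATE_BNDREGS, XSTATE_BNDCSR) are grouped as features requiring eager state saving, while the FP/SSE/AVX/AVX-512 bits remain eligible for lazy saving.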
