Diffstat (limited to 'lib')
-rw-r--r--   lib/Target/X86/X86InstrSSE.td        | 29
-rw-r--r--   lib/Target/X86/X86MCCodeEmitter.cpp  | 12
2 files changed, 26 insertions, 15 deletions
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index c8f2d3e06b..e34e45e4d3 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2019,6 +2019,8 @@ defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
            sse1_fp_unop_p<0x53, "rcp", X86frcp, int_x86_sse_rcp_ps>;
 
+// There is no f64 version of the reciprocal approximation instructions.
+
 //===----------------------------------------------------------------------===//
 // SSE 1 & 2 - Non-temporal stores
 //===----------------------------------------------------------------------===//
@@ -2111,7 +2113,7 @@ def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                      TB, Requires<[HasSSE2]>;
 
 //===----------------------------------------------------------------------===//
-// SSE 1 & 2 - Misc Instructions
+// SSE 1 & 2 - Misc Instructions (No AVX form)
 //===----------------------------------------------------------------------===//
 
 // Prefetch intrinsic.
@@ -2128,12 +2130,6 @@ def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
               "sfence", [(int_x86_sse_sfence)]>, TB, Requires<[HasSSE1]>;
 
-// MXCSR register
-def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
-                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
-def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
-                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
-
 // Alias instructions that map zero vector to pxor / xorp* for sse.
 // We set canFoldAsLoad because this can be converted to a constant-pool
 // load of an all-zeros value if folding it would be beneficial.
@@ -2156,13 +2152,26 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
           (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
 
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Load/Store XCSR register
+//===----------------------------------------------------------------------===//
+
+let isAsmParserOnly = 1 in {
+  def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+                      "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
+  def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+                      "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
+}
+
+def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
+def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
+
 //===---------------------------------------------------------------------===//
 // SSE2 Instructions
 //===---------------------------------------------------------------------===//
 
-
-// There is no f64 version of the reciprocal approximation instructions.
-
 //===---------------------------------------------------------------------===//
 // SSE integer instructions
 let ExeDomain = SSEPackedInt in {
diff --git a/lib/Target/X86/X86MCCodeEmitter.cpp b/lib/Target/X86/X86MCCodeEmitter.cpp
index 3b9b20a665..5dd668400b 100644
--- a/lib/Target/X86/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/X86MCCodeEmitter.cpp
@@ -455,15 +455,17 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   unsigned NumOps = MI.getNumOperands();
   unsigned CurOp = 0;
 
-  if ((TSFlags & X86II::FormMask) == X86II::MRMDestMem)
-    NumOps = CurOp = X86AddrNumOperands;
-
   switch (TSFlags & X86II::FormMask) {
   case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
-  case X86II::MRMSrcMem:
+  case X86II::MRM0m: case X86II::MRM1m:
+  case X86II::MRM2m: case X86II::MRM3m:
+  case X86II::MRM4m: case X86II::MRM5m:
+  case X86II::MRM6m: case X86II::MRM7m:
   case X86II::MRMDestMem:
+    NumOps = CurOp = X86AddrNumOperands;
+  case X86II::MRMSrcMem:
   case X86II::MRMSrcReg:
-    if (MI.getOperand(CurOp).isReg() &&
+    if (MI.getNumOperands() > CurOp && MI.getOperand(CurOp).isReg() &&
       X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
       VEX_R = 0x0;
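
For context, the X86MCCodeEmitter.cpp hunk makes all memory-form instructions (MRM0m through MRM7m and MRMDestMem) skip their address operands before the extended-register check that decides the VEX.R bit, and adds a bounds check so instructions like VLDMXCSR/VSTMXCSR, whose only operands are the memory reference itself, do not read past the operand list. Below is a minimal, self-contained sketch of that control flow, not the LLVM sources: Form, Operand, AddrNumOperands, and isExtendedReg are simplified stand-ins for the X86II form flags, MCInst operands, X86AddrNumOperands, and X86InstrInfo::isX86_64ExtendedReg, and the real function also resets NumOps for the VEX fields it computes further down.

    // vex_r_sketch.cpp -- illustrative only; simplified stand-ins, not the LLVM API.
    #include <cstdio>
    #include <vector>

    // Stand-ins for the X86II::FormMask instruction forms used in the patch.
    enum Form { MRMDestMem, MRMSrcMem, MRMSrcReg,
                MRM0m, MRM1m, MRM2m, MRM3m, MRM4m, MRM5m, MRM6m, MRM7m };

    struct Operand { bool IsReg; unsigned Reg; };

    // Stand-in for X86AddrNumOperands: an x86 memory reference is 5 MI operands.
    static const unsigned AddrNumOperands = 5;

    // Stand-in for X86InstrInfo::isX86_64ExtendedReg: registers 8..15 need
    // VEX.R cleared (the VEX prefix stores this extension bit inverted).
    static bool isExtendedReg(unsigned Reg) { return Reg >= 8; }

    // Mirrors the patched switch: memory forms first step past the address
    // operands, then fall through to the register check shared with
    // MRMSrcMem/MRMSrcReg.
    unsigned computeVEX_R(Form F, const std::vector<Operand> &Ops) {
      unsigned CurOp = 0;
      unsigned VEX_R = 0x1;                  // 1 = "no extension" by default

      switch (F) {
      case MRM0m: case MRM1m: case MRM2m: case MRM3m:
      case MRM4m: case MRM5m: case MRM6m: case MRM7m:
      case MRMDestMem:
        CurOp = AddrNumOperands;             // skip the memory reference
        // deliberate fall-through, as in the patch
      case MRMSrcMem:
      case MRMSrcReg:
        // The added bounds check: MRM2m/MRM3m instructions such as
        // VLDMXCSR/VSTMXCSR have only the 5 address operands, so
        // CurOp == Ops.size() and no operand is touched.
        if (Ops.size() > CurOp && Ops[CurOp].IsReg && isExtendedReg(Ops[CurOp].Reg))
          VEX_R = 0x0;
        break;
      }
      return VEX_R;
    }

    int main() {
      // vldmxcsr [mem]: only the 5 address operands -> VEX_R stays 1.
      std::vector<Operand> MemOnly(5, Operand{false, 0});
      std::printf("vldmxcsr: VEX_R = %u\n", computeVEX_R(MRM2m, MemOnly));

      // A hypothetical reg-reg instruction whose first operand is xmm9
      // (an extended register) -> VEX_R is cleared to 0.
      std::vector<Operand> RegReg = { {true, 9}, {true, 2} };
      std::printf("reg-reg : VEX_R = %u\n", computeVEX_R(MRMSrcReg, RegReg));
      return 0;
    }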