-rw-r--r--   lib/Target/X86/X86InstrSSE.td             35
-rw-r--r--   test/MC/AsmParser/X86/x86_64-encoding.s   32
2 files changed, 62 insertions, 5 deletions
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index d8bb435c29..75c1b2ca47 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -1391,12 +1391,25 @@ let isAsmParserOnly = 1 in {
   defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                         "movmskpd", SSEPackedDouble>, OpSize,
                                         VEX;
 
+  // FIXME: merge with multiclass above when the intrinsics come.
+  def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+             "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+  def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+             "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+             VEX;
+
   def VMOVMSKPSYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
              "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
   def VMOVMSKPDYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
              "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
-             VEX;
+             VEX;
+
+  def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+             "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+  def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+             "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+             VEX;
 }
 
 //===----------------------------------------------------------------------===//
@@ -2715,8 +2728,13 @@ def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                                                 imm:$src2))]>;
 
 // Insert
-let isAsmParserOnly = 1, Predicates = [HasAVX] in
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
   defm PINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
+  def PINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
+       (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
+       "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+       []>, OpSize, VEX_4V;
+}
 
 let Constraints = "$src1 = $dst" in
   defm VPINSRW : sse2_pinsrw, TB, OpSize;
@@ -2729,10 +2747,13 @@ let Constraints = "$src1 = $dst" in
 
 let ExeDomain = SSEPackedInt in {
 
-let isAsmParserOnly = 1 in
-def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+let isAsmParserOnly = 1 in {
+def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
            "pmovmskb\t{$src, $dst|$dst, $src}",
            [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
+def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+           "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
+}
 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
            "pmovmskb\t{$src, $dst|$dst, $src}",
            [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
@@ -4019,8 +4040,12 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
   // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
 }
 
-let isAsmParserOnly = 1, Predicates = [HasAVX] in
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
   defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
+  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
+       (ins VR128:$src1, i32i8imm:$src2),
+       "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
+}
 
 defm PEXTRB      : SS41I_extract8<0x14, "pextrb">;
 
diff --git a/test/MC/AsmParser/X86/x86_64-encoding.s b/test/MC/AsmParser/X86/x86_64-encoding.s
index f709bcdf41..d60bde1be4 100644
--- a/test/MC/AsmParser/X86/x86_64-encoding.s
+++ b/test/MC/AsmParser/X86/x86_64-encoding.s
@@ -3400,3 +3400,35 @@ pshufb CPI1_0(%rip), %xmm1
 // CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xe1]
           vmovd  %xmm4, %rcx
 
+// CHECK: vmovmskpd %xmm4, %rcx
+// CHECK: encoding: [0xc5,0xf9,0x50,0xcc]
+          vmovmskpd %xmm4, %rcx
+
+// CHECK: vmovmskpd %ymm4, %rcx
+// CHECK: encoding: [0xc5,0xfd,0x50,0xcc]
+          vmovmskpd %ymm4, %rcx
+
+// CHECK: vmovmskps %xmm4, %rcx
+// CHECK: encoding: [0xc5,0xf8,0x50,0xcc]
+          vmovmskps %xmm4, %rcx
+
+// CHECK: vmovmskps %ymm4, %rcx
+// CHECK: encoding: [0xc5,0xfc,0x50,0xcc]
+          vmovmskps %ymm4, %rcx
+
+// CHECK: vpextrb $7, %xmm4, %rcx
+// CHECK: encoding: [0xc4,0xe3,0x79,0x14,0xe1,0x07]
+          vpextrb $7, %xmm4, %rcx
+
+// CHECK: vpinsrw $7, %r8, %xmm15, %xmm8
+// CHECK: encoding: [0xc4,0x41,0x01,0xc4,0xc0,0x07]
+          vpinsrw $7, %r8, %xmm15, %xmm8
+
+// CHECK: vpinsrw $7, %rcx, %xmm4, %xmm6
+// CHECK: encoding: [0xc5,0xd9,0xc4,0xf1,0x07]
+          vpinsrw $7, %rcx, %xmm4, %xmm6
+
+// CHECK: vpmovmskb %xmm4, %rcx
+// CHECK: encoding: [0xc5,0xf9,0xd7,0xcc]
+          vpmovmskb %xmm4, %rcx
+
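The added CHECK lines follow the conventions already used in this test: each instruction is assembled by llvm-mc with --show-encoding and FileCheck matches both the printed mnemonic and the emitted bytes. As a rough sketch of how one of the new cases is exercised on its own (the RUN line here is the usual form for MC encoding tests and is an assumption; this file's actual RUN line lies outside the hunk shown above):

// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s

// VEX.128 movmskpd with a 64-bit GPR destination, matching the VMOVMSKPDr64r
// form added in this patch; the assembler still emits the 2-byte VEX prefix,
// so the bytes start with 0xc5,0xf9.
// CHECK: vmovmskpd %xmm4, %rcx
// CHECK: encoding: [0xc5,0xf9,0x50,0xcc]
          vmovmskpd %xmm4, %rcx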