author    Sean Callanan <scallanan@apple.com>    2009-12-18 00:01:26 +0000
committer Sean Callanan <scallanan@apple.com>    2009-12-18 00:01:26 +0000
commit    108934c65d4cba18f08ed4fab0cae506c20fd212
tree      693abb3580c9939943d0ee2b300d8f8242f1c931
parent    a6923131032be5e47b5e1155e69b23aa4c5e65ac
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-column violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
  Removed tabs and fixed 80-column violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
  (CALL, CMOV) Added qualifiers (see the AsmString note before the
               diff below)
(JMP) Added PC-relative jump instruction
  (POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to PUSHFQ64 to
                 indicate that it is 64-bit only (the bare name is
                 ambiguous, since the instruction has no REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
  (MOV) Changed immediates to offsets, which is more correct;
        also fixed MOV64o64a to take a 64-bit offset
(MOV) Fixed qualifiers
  (MOV) Added debug-register and control-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
  (offset8, offset16, offset32, offset64) Added for MOV (see the
            operand sketch after the diff excerpt)
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
  (NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifiers
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
  (MOV) Added debug-register and control-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
  (MMX_MOVD, MMX_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and control register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
24 files changed, 1433 insertions, 620 deletions
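A note on the recurring "qualifier" entries: in the X86 .td files each
AsmString carries both assembler dialects. Inside braces, text to the left of
a "|" is printed in AT&T syntax and text to the right in Intel syntax, so
"{q}" adds the AT&T-only q size suffix and "{$src, $dst|$dst, $src}" swaps
operand order between the dialects. The fragment below is an illustrative
sketch rather than a verbatim excerpt of this patch (MOV64rr paraphrases the
existing table; MOV64rr_REV matches the def added in the diff), showing a
qualified AsmString next to the kind of reverse-encoded register form this
patch adds for the disassembler:

    // Ordinary register-to-register move: store-direction opcode 0x89,
    // MRMDestReg encoding. "{q}" prints only in AT&T syntax; "$src, $dst"
    // (left of the "|") is the AT&T operand order, "$dst, $src" the Intel one.
    def MOV64rr     : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                         "mov{q}\t{$src, $dst|$dst, $src}", []>;

    // The same architectural move via the load-direction opcode 0x8B with a
    // MRMSrcReg encoding. CodeGen never needs to emit it, so it carries no
    // selection pattern; it exists so the disassembler can round-trip both
    // encodings of "movq %reg, %reg".
    def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                         "mov{q}\t{$src, $dst|$dst, $src}", []>;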
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index da467fe6aa..a6e1ca3128 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -63,7 +63,7 @@ def FeatureSSE4A   : SubtargetFeature<"sse4a", "HasSSE4A", "true",
 def FeatureAVX     : SubtargetFeature<"avx", "HasAVX", "true",
                                       "Enable AVX instructions">;
 def FeatureFMA3    : SubtargetFeature<"fma3", "HasFMA3", "true",
-                                     "Enable three-operand fused multiple-add">;
+                                      "Enable three-operand fused multiple-add">;
 def FeatureFMA4    : SubtargetFeature<"fma4", "HasFMA4", "true",
                                       "Enable four-operand fused multiple-add">;
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 0751b9da8e..65fbbdae9a 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -111,6 +111,9 @@ def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           Requires<[In64BitMode]>;
 }
 
+// Interrupt Instructions
+def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>;
+
 //===----------------------------------------------------------------------===//
 //  Call Instructions...
 //
@@ -131,20 +134,21 @@ let isCall = 1 in
     // the 32-bit pcrel field that we have.
     def CALL64pcrel32 : Ii32<0xE8, RawFrm,
                              (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
-                             "call\t$dst", []>,
+                             "call{q}\t$dst", []>,
                            Requires<[In64BitMode, NotWin64]>;
     def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
-                          "call\t{*}$dst", [(X86call GR64:$dst)]>,
+                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                         Requires<[NotWin64]>;
     def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
-                          "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
+                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                         Requires<[NotWin64]>;
 
     def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                          "lcall{q}\t{*}$dst", []>;
   }
 
-  // FIXME: We need to teach codegen about single list of call-clobbered registers.
+  // FIXME: We need to teach codegen about single list of call-clobbered
+  // registers.
 let isCall = 1 in
   // All calls clobber the non-callee saved registers. RSP is marked as
   // a use to prevent stack-pointer assignments that appear immediately
@@ -162,9 +166,10 @@ let isCall = 1 in
     def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                              "call\t{*}$dst", [(X86call GR64:$dst)]>,
                            Requires<[IsWin64]>;
-    def WINCALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
-                             "call\t{*}$dst",
-                             [(X86call (loadi64 addr:$dst))]>, Requires<[IsWin64]>;
+    def WINCALL64m       : I<0xFF, MRM2m, (outs),
+                             (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
+                             [(X86call (loadi64 addr:$dst))]>,
+                           Requires<[IsWin64]>;
   }
 
@@ -188,6 +193,8 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
 
 // Branches
 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
+  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+                       "jmp{q}\t$dst", []>;
   def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                      [(brind GR64:$dst)]>;
   def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
@@ -210,6 +217,12 @@ def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
 //===----------------------------------------------------------------------===//
 //  Miscellaneous Instructions...
 //
+
+def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
+def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
+
 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
 def LEAVE64  : I<0xC9, RawFrm, (outs), (ins), "leave", []>;
@@ -238,9 +251,9 @@ def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
 }
 
 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
-def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
+def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf{q}", []>, REX_W;
 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
-def PUSHFQ   : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
+def PUSHFQ64 : I<0x9C, RawFrm, (outs), (ins), "pushf{q}", []>;
 
 def LEA64_32r : I<0x8D, MRMSrcMem,
                   (outs GR32:$dst), (ins lea64_32mem:$src),
@@ -309,6 +322,9 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                       [(set GR64:$dst, i64immSExt32:$src)]>;
 }
 
+def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "mov{q}\t{$src, $dst|$dst, $src}",
@@ -321,24 +337,36 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "mov{q}\t{$src, $dst|$dst, $src}",
                       [(store i64immSExt32:$src, addr:$dst)]>;
 
-def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins i8imm:$src),
+def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                     "mov{q}\t{$src, %rax|%rax, $src}", []>;
-def MOV64o32a : RIi32<0xA1, RawFrm, (outs), (ins i32imm:$src),
+def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
-def MOV64ao8 : RIi8<0xA2, RawFrm, (outs i8imm:$dst), (ins),
+def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
-def MOV64ao32 : RIi32<0xA3, RawFrm, (outs i32imm:$dst), (ins),
+def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                     "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
 
 // Moves to and from segment registers
 def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
-                 "mov{w}\t{$src, $dst|$dst, $src}", []>;
+                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
 def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
-                 "mov{w}\t{$src, $dst|$dst, $src}", []>;
+                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
 def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
-                 "mov{w}\t{$src, $dst|$dst, $src}", []>;
+                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
 def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
-                 "mov{w}\t{$src, $dst|$dst, $src}", []>;
+                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+// Moves to and from debug registers
+def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
+                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
+                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+// Moves to and from control registers
+def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG_64:$src),
+                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_64:$dst), (ins GR64:$src),
+                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
 
 // Sign/Zero extenders
 
@@ -365,6 +393,16 @@ def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                     "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                     [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
 
+// movzbq and movzwq encodings for the disassembler
+def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
+                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
+                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+
 // Use movzbl instead of movzbq when the destination is a register; it's
 // equivalent due to implicit zero-extending, and it has a smaller encoding.
 def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
@@ -430,31 +468,36 @@ let isTwoAddress = 1 in {
 let isConvertibleToThreeAddress = 1 in {
 let isCommutable = 1 in
 // Register-Register Addition
-def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "add{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
                    (implicit EFLAGS)]>;
 
 // Register-Integer Addition
-def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
+                     (ins GR64:$src1, i64i8imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
                       (implicit EFLAGS)]>;
-def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
+                      (ins GR64:$src1, i64i32imm:$src2),
                       "add{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
                        (implicit EFLAGS)]>;
 } // isConvertibleToThreeAddress
 
 // Register-Memory Addition
-def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
+                     (ins GR64:$src1, i64mem:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
                       (implicit EFLAGS)]>;
 
 // Register-Register Addition - Equivalent to the normal rr form (ADD64rr), but
 //   differently encoded.
-def ADD64mrmrr  : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def ADD64mrmrr  : RI<0x03, MRMSrcReg, (outs GR64:$dst),
+                     (ins GR64:$src1, GR64:$src2),
                      "add{l}\t{$src2, $dst|$dst, $src2}", []>;
 
 } // isTwoAddress
@@ -480,18 +523,26 @@ def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src),
 
 let isTwoAddress = 1 in {
 let isCommutable = 1 in
-def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "adc{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
-def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR32:$dst),
+                     (ins GR64:$src1, GR64:$src2),
+                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
+
+def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "adc{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
-def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
+                    (ins GR64:$src1, i64i8imm:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
-def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
+                      (ins GR64:$src1, i64i32imm:$src2),
                       "adc{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
 } // isTwoAddress
@@ -501,21 +552,29 @@ def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}",
-                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
+                      addr:$dst)]>;
 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                       "adc{q}\t{$src2, $dst|$dst, $src2}",
-                      [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+                      [(store (adde (load addr:$dst), i64immSExt8:$src2),
+                        addr:$dst)]>;
 } // Uses = [EFLAGS]
 
 let isTwoAddress = 1 in {
 // Register-Register Subtraction
-def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "sub{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
                    (implicit EFLAGS)]>;
 
+def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
+                     (ins GR64:$src1, GR64:$src2),
+                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
+
 // Register-Memory Subtraction
-def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "sub{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
                    (implicit EFLAGS)]>;
@@ -556,18 +615,26 @@ def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
 
 let Uses = [EFLAGS] in {
 let isTwoAddress = 1 in {
-def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
+                    (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
 
-def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
+                     (ins GR64:$src1, GR64:$src2),
+                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
+
+def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "sbb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
-def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
-def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
+                      (ins GR64:$src1, i64i32imm:$src2),
                       "sbb{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
 } // isTwoAddress
@@ -652,15 +719,19 @@ def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                      // GR64 = [mem64]*I32
 
 // Unsigned division / remainder
 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
-def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),        // RDX:RAX/r64 = RAX,RDX
+// RDX:RAX/r64 = RAX,RDX
+def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                 "div{q}\t$src", []>;
 // Signed division / remainder
-def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),        // RDX:RAX/r64 = RAX,RDX
+// RDX:RAX/r64 = RAX,RDX
+def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                 "idiv{q}\t$src", []>;
 let mayLoad = 1 in {
-def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),      // RDX:RAX/[mem64] = RAX,RDX
+// RDX:RAX/[mem64] = RAX,RDX
+def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                 "div{q}\t$src", []>;
-def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),      // RDX:RAX/[mem64] = RAX,RDX
+// RDX:RAX/[mem64] = RAX,RDX
+def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                 "idiv{q}\t$src", []>;
 }
 }
@@ -694,19 +765,23 @@ def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
 // In 64-bit mode, single byte INC and DEC cannot be encoded.
 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
 // Can transform into LEA.
-def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
+def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
+                  "inc{w}\t$dst",
                   [(set GR16:$dst, (add GR16:$src, 1)),
                    (implicit EFLAGS)]>,
                 OpSize, Requires<[In64BitMode]>;
-def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
+def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
+                  "inc{l}\t$dst",
                   [(set GR32:$dst, (add GR32:$src, 1)),
                    (implicit EFLAGS)]>,
                 Requires<[In64BitMode]>;
-def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
+def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
+                  "dec{w}\t$dst",
                   [(set GR16:$dst, (add GR16:$src, -1)),
                    (implicit EFLAGS)]>,
                 OpSize, Requires<[In64BitMode]>;
-def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
+def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
+                  "dec{l}\t$dst",
                   [(set GR32:$dst, (add GR32:$src, -1)),
                    (implicit EFLAGS)]>,
                 Requires<[In64BitMode]>;
@@ -743,13 +818,14 @@ def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                   "shl{q}\t{%cl, $dst|$dst, %CL}",
                   [(set GR64:$dst, (shl GR64:$src, CL))]>;
 let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
-def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
+def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
+                    (ins GR64:$src1, i8imm:$src2),
                     "shl{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
 // NOTE: We don't include patterns for shifts of a register by one, because
 // 'add reg,reg' is cheaper.
 def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
-                 "shr{q}\t$dst", []>;
+                 "shl{q}\t$dst", []>;
 } // isTwoAddress
 
 let Uses = [CL] in
@@ -792,9 +868,10 @@ let Uses = [CL] in
 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                   "sar{q}\t{%cl, $dst|$dst, %CL}",
                   [(set GR64:$dst, (sra GR64:$src, CL))]>;
-def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
-                    "sar{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
+def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
+                    (ins GR64:$src1, i8imm:$src2),
+                    "sar{q}\t{$src2, $dst|$dst, $src2}",
+                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
 def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
@@ -826,7 +903,8 @@ def RCL64mCL : RI<0xD3, MRM2m, (outs i64mem:$dst), (ins i64mem:$src),
 }
 def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                    "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt),
+def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst),
+                   (ins i64mem:$src, i8imm:$cnt),
                    "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
 
 def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
@@ -841,7 +919,8 @@ def RCR64mCL : RI<0xD3, MRM3m, (outs i64mem:$dst), (ins i64mem:$src),
 }
 def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                    "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt),
+def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst),
+                   (ins i64mem:$src, i8imm:$cnt),
                    "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
 }
 
@@ -850,7 +929,8 @@ let Uses = [CL] in
 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                   "rol{q}\t{%cl, $dst|$dst, %CL}",
                   [(set GR64:$dst, (rotl GR64:$src, CL))]>;
-def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
+def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
+                    (ins GR64:$src1, i8imm:$src2),
                     "rol{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
 def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
@@ -859,9 +939,9 @@ def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
 } // isTwoAddress
 
 let Uses = [CL] in
-def ROL64mCL :  I<0xD3, MRM0m, (outs), (ins i64mem:$dst),
-                  "rol{q}\t{%cl, $dst|$dst, %CL}",
-                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
+def ROL64mCL :  RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
+                   "rol{q}\t{%cl, $dst|$dst, %CL}",
+                   [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
 def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                     "rol{q}\t{$src, $dst|$dst, $src}",
                     [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
@@ -874,7 +954,8 @@ let Uses = [CL] in
 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                   "ror{q}\t{%cl, $dst|$dst, %CL}",
                   [(set GR64:$dst, (rotr GR64:$src, CL))]>;
-def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
+def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
+                    (ins GR64:$src1, i8imm:$src2),
                     "ror{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
 def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
@@ -896,23 +977,29 @@ def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
 // Double shift instructions (generalizations of rotate)
 let isTwoAddress = 1 in {
 let Uses = [CL] in {
-def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
+                    (ins GR64:$src1, GR64:$src2),
                     "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
-                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
-def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
+                    TB;
+def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
+                    (ins GR64:$src1, GR64:$src2),
                     "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
-                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
+                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
+                    TB;
 }
 
 let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
-                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
+                      (outs GR64:$dst),
+                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                       "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                       [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                  TB;
 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
-                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
+                      (outs GR64:$dst),
+                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                       "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                       [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
@@ -965,6 +1052,9 @@ def AND64rr  : RI<0x21, MRMDestReg,
                   "and{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
                    (implicit EFLAGS)]>;
+def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
+                     (ins GR64:$src1, GR64:$src2),
+                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
 def AND64rm  : RI<0x23, MRMSrcMem,
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "and{q}\t{$src2, $dst|$dst, $src2}",
@@ -1000,19 +1090,26 @@ def AND64mi32  : RIi32<0x81, MRM4m,
 
 let isTwoAddress = 1 in {
 let isCommutable = 1 in
-def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "or{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
                    (implicit EFLAGS)]>;
-def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
+                    (ins GR64:$src1, GR64:$src2),
+                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
+def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "or{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
                    (implicit EFLAGS)]>;
-def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)),
                      (implicit EFLAGS)]>;
-def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
+                     (ins GR64:$src1, i64i32imm:$src2),
                      "or{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
@@ -1036,15 +1133,21 @@ def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i32imm:$src),
 
 let isTwoAddress = 1 in {
 let isCommutable = 1 in
-def XOR64rr  : RI<0x31, MRMDestReg,  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+def XOR64rr  : RI<0x31, MRMDestReg,  (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "xor{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
                    (implicit EFLAGS)]>;
-def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
+                     (ins GR64:$src1, GR64:$src2),
+                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
+def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "xor{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
                    (implicit EFLAGS)]>;
-def XOR64ri8 : RIi8<0x83, MRM6r,  (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+def XOR64ri8 : RIi8<0x83, MRM6r,  (outs GR64:$dst),
+                    (ins GR64:$src1, i64i8imm:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
                      (implicit EFLAGS)]>;
@@ -1148,10 +1251,12 @@ def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
 
 // Unlike with the register+register form, the memory+register form of the
 // bt instruction does not ignore the high bits of the index. From ISel's
 // perspective, this is pretty bizarre. Disable these instructions for now.
-//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
-//               "bt{q}\t{$src2, $src1|$src1, $src2}",
+def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+               "bt{q}\t{$src2, $src1|$src1, $src2}",
 //               [(X86bt (loadi64 addr:$src1), GR64:$src2),
-//                (implicit EFLAGS)]>, TB;
+//                (implicit EFLAGS)]
+               []
+               >, TB;
 
 def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
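The offset8/offset16/offset32/offset64 operand types named above for
X86InstrInfo.td fall outside this excerpt, which is cut off before that file.
As a rough sketch under stated assumptions (the PrintMethod name here is
hypothetical, not copied from the patch), such a moffs-style operand class
would look something like:

    // Hypothetical sketch of an absolute-offset operand for the MOV forms
    // that move between %rax and a plain memory offset (opcodes 0xA0-0xA3).
    def offset64 : Operand<i64> {
      let PrintMethod = "printMemOffs64";  // assumed AsmPrinter hook name
    }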