Diffstat (limited to 'lib')
-rwxr-xr-x  lib/Target/X86/X86ATTAsmPrinter.cpp   |    8
-rw-r--r--  lib/Target/X86/X86CodeEmitter.cpp     |    6
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp    |    6
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp    |   24
-rw-r--r--  lib/Target/X86/X86InstrInfo.td        | 1488
-rw-r--r--  lib/Target/X86/X86InstrMMX.td         |    2
-rw-r--r--  lib/Target/X86/X86InstrSSE.td         |  114
-rwxr-xr-x  lib/Target/X86/X86IntelAsmPrinter.cpp |    8
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp    |   30
-rw-r--r--  lib/Target/X86/X86RegisterInfo.td     |   22
10 files changed, 854 insertions(+), 854 deletions(-)
diff --git a/lib/Target/X86/X86ATTAsmPrinter.cpp b/lib/Target/X86/X86ATTAsmPrinter.cpp
index 7adc1c5472..7d40c8842a 100755
--- a/lib/Target/X86/X86ATTAsmPrinter.cpp
+++ b/lib/Target/X86/X86ATTAsmPrinter.cpp
@@ -360,14 +360,14 @@ void X86ATTAsmPrinter::printMachineInstruction(const MachineInstr *MI) {
// See if a truncate instruction can be turned into a nop.
switch (MI->getOpcode()) {
default: break;
- case X86::TRUNC_R32_R16:
- case X86::TRUNC_R32_R8:
- case X86::TRUNC_R16_R8: {
+ case X86::TRUNC_GR32_GR16:
+ case X86::TRUNC_GR32_GR8:
+ case X86::TRUNC_GR16_GR8: {
const MachineOperand &MO0 = MI->getOperand(0);
const MachineOperand &MO1 = MI->getOperand(1);
unsigned Reg0 = MO0.getReg();
unsigned Reg1 = MO1.getReg();
- if (MI->getOpcode() == X86::TRUNC_R32_R16)
+ if (MI->getOpcode() == X86::TRUNC_GR32_GR16)
Reg1 = getX86SubSuperRegister(Reg1, MVT::i16);
else
Reg1 = getX86SubSuperRegister(Reg1, MVT::i8);
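
For reference, the check these renamed cases feed into: once Reg1 has been narrowed by getX86SubSuperRegister to the width of the destination, the truncate is a nop exactly when both operands name the same physical register. A minimal sketch, not part of the patch:

    // Hedged sketch: getX86SubSuperRegister maps a register to its
    // alias at another width, e.g. (EAX, MVT::i16) -> AX.
    bool truncateIsNop(unsigned Reg0, unsigned Reg1Narrowed) {
      // TRUNC_GR32_GR16 with operands (AX, EAX): EAX narrows to AX,
      // which equals the destination, so no mov is printed at all.
      return Reg0 == Reg1Narrowed;
    }
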
diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index 25063fc09e..dd5efaa113 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -393,9 +393,9 @@ void Emitter::emitInstruction(const MachineInstr &MI) {
assert(0 && "pseudo instructions should be removed before code emission");
case X86::IMPLICIT_USE:
case X86::IMPLICIT_DEF:
- case X86::IMPLICIT_DEF_R8:
- case X86::IMPLICIT_DEF_R16:
- case X86::IMPLICIT_DEF_R32:
+ case X86::IMPLICIT_DEF_GR8:
+ case X86::IMPLICIT_DEF_GR16:
+ case X86::IMPLICIT_DEF_GR32:
case X86::IMPLICIT_DEF_FR32:
case X86::IMPLICIT_DEF_FR64:
case X86::IMPLICIT_DEF_VR64:
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index e82e3a7b27..40b69dcec3 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -509,7 +509,7 @@ SDOperand X86DAGToDAGISel::getGlobalBaseReg() {
SSARegMap *RegMap = BB->getParent()->getSSARegMap();
// FIXME: when we get to LP64, we will need to create the appropriate
// type of register here.
- GlobalBaseReg = RegMap->createVirtualRegister(X86::R32RegisterClass);
+ GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
BuildMI(FirstMBB, MBBI, X86::MovePCtoStack, 0);
BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg);
}
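
The two BuildMI calls materialize the PIC base with the classic i386 call/pop idiom; a sketch of what MovePCtoStack and POP32r expand to (inferred, shown here as comments):

    // i386 has no PC-relative mov, so reading EIP takes a call/pop pair:
    //
    //   call 1f      // call the next instruction: pushes the PC
    // 1:
    //   popl %reg    // %reg (the fresh GR32 virtual register) = PIC base
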
@@ -801,12 +801,12 @@ void X86DAGToDAGISel::Select(SDOperand &Result, SDOperand N) {
case MVT::i16:
Opc = X86::MOV16to16_;
VT = MVT::i16;
- Opc2 = X86::TRUNC_R16_R8;
+ Opc2 = X86::TRUNC_GR16_GR8;
break;
case MVT::i32:
Opc = X86::MOV32to32_;
VT = MVT::i32;
- Opc2 = X86::TRUNC_R32_R8;
+ Opc2 = X86::TRUNC_GR32_GR8;
break;
}
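
A note on the MOV16to16_/MOV32to32_ pairing (a sketch based on the GR16_/GR32_ operand classes of the TRUNC_* pseudos below): only EAX, EBX, ECX and EDX have an addressable low byte, so truncating to i8 first copies the value into that restricted class:

    // Assumed lowering of (trunc i32 -> i8) when the source lives in
    // %esi, which has no 8-bit subregister:
    //
    //   movl %esi, %eax   // MOV32to32_: copy into the GR32_ class
    //   movb %al,  %cl    // TRUNC_GR32_GR8: plain mov of the low byte
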
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 8fc591b5d1..3c4dd2b60e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -67,9 +67,9 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
addLegalAddressScale(3);
// Set up the register classes.
- addRegisterClass(MVT::i8, X86::R8RegisterClass);
- addRegisterClass(MVT::i16, X86::R16RegisterClass);
- addRegisterClass(MVT::i32, X86::R32RegisterClass);
+ addRegisterClass(MVT::i8, X86::GR8RegisterClass);
+ addRegisterClass(MVT::i16, X86::GR16RegisterClass);
+ addRegisterClass(MVT::i32, X86::GR32RegisterClass);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
@@ -940,33 +940,33 @@ X86TargetLowering::PreprocessFastCCArguments(std::vector<SDOperand>Args,
case MVT::i1:
case MVT::i8:
Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
- X86::R8RegisterClass);
+ X86::GR8RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i8;
break;
case MVT::i16:
Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
- X86::R16RegisterClass);
+ X86::GR16RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i16;
break;
case MVT::i32:
Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::R32RegisterClass);
+ X86::GR32RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i32;
break;
case MVT::i64:
Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::R32RegisterClass);
+ X86::GR32RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i32;
if (ObjIntRegs == 2) {
- Reg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
+ Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
Loc.second.Kind = FALocInfo::LiveInRegLoc;
Loc.second.Loc = Reg;
Loc.second.Typ = MVT::i32;
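
A hypothetical source-level view of the fastcc convention this switch implements: the first integer argument arrives in EAX (AL/AX for the narrow types), the second in EDX, and an i64 is split across the pair:

    // Illustration only; the signatures are made up.
    int       add(int a, int b);   // fastcc: a in EAX, b in EDX
    long long pass(long long x);   // fastcc: low half EAX, high half EDX
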
@@ -1563,7 +1563,7 @@ X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
// Load the old value of the high byte of the control word...
unsigned OldCW =
- F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
+ F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
// Set the high part to be round to zero...
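
This block implements the standard x87 truncation idiom; a sketch of the whole sequence (the exact immediates and frame references in the patch are not reproduced here):

    // Assumed shape of the emitted code. Bits 10-11 of the x87 control
    // word select the rounding mode; 11b means round toward zero, which
    // FP-to-integer conversion requires.
    //
    //   fnstcw  OldCW     // spill the current control word to the frame
    //   ...               // set RC (bits 10-11) of a copy to 11b
    //   fldcw   TruncCW   // switch to round-toward-zero
    //   fistp   Dest      // store the truncated integer
    //   fldcw   OldCW     // restore the caller's rounding mode
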
@@ -2558,7 +2558,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
}
}
- // Take advantage of the fact R32 to VR128 scalar_to_vector (i.e. movd)
+ // Take advantage of the fact that GR32 to VR128 scalar_to_vector (i.e. movd)
// clears the upper bits.
// FIXME: we can do the same for v4f32 case when we know both parts of
// the lower half come from scalar_to_vector (loadf32). We should do
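
The fact relied on here is architectural: movd writes its 32-bit source into the low element and zeroes bits 32-127 of the XMM register, so no separate zeroing of the upper lanes is needed:

    // Assumed illustration of the GR32 -> VR128 scalar_to_vector form:
    //
    //   movd %eax, %xmm0   // xmm0 = { eax, 0, 0, 0 }; upper 96 bits cleared
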
@@ -2899,7 +2899,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
- // Transform it so it match pinsrw which expects a 16-bit value in a R32
+ // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
// as its second argument.
MVT::ValueType VT = Op.getValueType();
MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
@@ -2930,7 +2930,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
Idx <<= 1;
if (MVT::isFloatingPoint(N1.getValueType())) {
if (N1.getOpcode() == ISD::LOAD) {
- // Just load directly from f32mem to R32.
+ // Just load directly from f32mem to GR32.
N1 = DAG.getLoad(MVT::i32, N1.getOperand(0), N1.getOperand(1),
N1.getOperand(2));
} else {
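
pinsrw reads only the low 16 bits of its GR32 source, which is why even an f32 element can be routed through an integer register (loaded directly as i32 above) before insertion; a sketch:

    // Assumed form of the selected code for inserting word 2:
    //
    //   movl   value, %eax       // 16 meaningful bits in a GR32
    //   pinsrw $2, %eax, %xmm0   // write them into element 2 of xmm0
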
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 6729e7db7b..d98e88e461 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -97,7 +97,7 @@ def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
class X86MemOperand<string printMethod> : Operand<i32> {
let PrintMethod = printMethod;
let NumMIOperands = 4;
- let MIOperandInfo = (ops R32, i8imm, R32, i32imm);
+ let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
def i8mem : X86MemOperand<"printi8mem">;
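
The four MI operands spell out the full x86 addressing form base + scale*index + disp, in that order; a worked decomposition:

    // Hypothetical AT&T-syntax memory reference:
    //
    //   movl 8(%esi,%ecx,4), %eax
    //
    //   base  = ESI   (GR32)
    //   scale = 4     (i8imm)
    //   index = ECX   (GR32)
    //   disp  = 8     (i32imm)
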
@@ -343,27 +343,27 @@ def ADJCALLSTACKUP : I<0, Pseudo, (ops i32imm:$amt1, i32imm:$amt2),
[(X86callseq_end imm:$amt1, imm:$amt2)]>;
def IMPLICIT_USE : I<0, Pseudo, (ops variable_ops), "#IMPLICIT_USE", []>;
def IMPLICIT_DEF : I<0, Pseudo, (ops variable_ops), "#IMPLICIT_DEF", []>;
-def IMPLICIT_DEF_R8 : I<0, Pseudo, (ops R8:$dst),
+def IMPLICIT_DEF_GR8 : I<0, Pseudo, (ops GR8:$dst),
"#IMPLICIT_DEF $dst",
- [(set R8:$dst, (undef))]>;
-def IMPLICIT_DEF_R16 : I<0, Pseudo, (ops R16:$dst),
+ [(set GR8:$dst, (undef))]>;
+def IMPLICIT_DEF_GR16 : I<0, Pseudo, (ops GR16:$dst),
"#IMPLICIT_DEF $dst",
- [(set R16:$dst, (undef))]>;
-def IMPLICIT_DEF_R32 : I<0, Pseudo, (ops R32:$dst),
+ [(set GR16:$dst, (undef))]>;
+def IMPLICIT_DEF_GR32 : I<0, Pseudo, (ops GR32:$dst),
"#IMPLICIT_DEF $dst",
- [(set R32:$dst, (undef))]>;
+ [(set GR32:$dst, (undef))]>;
// Nop
def NOOP : I<0x90, RawFrm, (ops), "nop", []>;
// Truncate
-def TRUNC_R32_R8 : I<0x88, MRMDestReg, (ops R8:$dst, R32_:$src),
+def TRUNC_GR32_GR8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR32_:$src),
"mov{b} {${src:trunc8}, $dst|$dst, ${src:trunc8}", []>;
-def TRUNC_R16_R8 : I<0x88, MRMDestReg, (ops R8:$dst, R16_:$src),
+def TRUNC_GR16_GR8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR16_:$src),
"mov{b} {${src:trunc8}, $dst|$dst, ${src:trunc8}}", []>;
-def TRUNC_R32_R16 : I<0x89, MRMDestReg, (ops R16:$dst, R32:$src),
+def TRUNC_GR32_GR16 : I<0x89, MRMDestReg, (ops GR16:$dst, GR32:$src),
"mov{w} {${src:trunc16}, $dst|$dst, ${src:trunc16}}",
- [(set R16:$dst, (trunc R32:$src))]>;
+ [(set GR16:$dst, (trunc GR32:$src))]>;
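
These pseudos exist because x86 needs no dedicated truncate opcode: each narrow register aliases the low bits of a wide one, so a plain mov of the sub-register suffices, and when source and destination already alias, the printers above emit nothing at all:

    // Assumed expansions:
    //
    //   TRUNC_GR32_GR16  CX, EAX  ->  movw %ax, %cx   // %ax = eax[15:0]
    //   TRUNC_GR32_GR16  AX, EAX  ->  (no instruction; it is a nop)
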
//===----------------------------------------------------------------------===//
// Control Flow Instructions...
@@ -387,8 +387,8 @@ let isBarrier = 1 in
def JMP : IBr<0xE9, (ops brtarget:$dst), "jmp $dst", [(br bb:$dst)]>;
let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in {
- def JMP32r : I<0xFF, MRM4r, (ops R32:$dst), "jmp{l} {*}$dst",
- [(brind R32:$dst)]>;
+ def JMP32r : I<0xFF, MRM4r, (ops GR32:$dst), "jmp{l} {*}$dst",
+ [(brind GR32:$dst)]>;
def JMP32m : I<0xFF, MRM4m, (ops i32mem:$dst), "jmp{l} {*}$dst",
[(brind (loadi32 addr:$dst))]>;
}
@@ -438,8 +438,8 @@ let isCall = 1, noResults = 1 in
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7] in {
def CALLpcrel32 : I<0xE8, RawFrm, (ops i32imm:$dst), "call ${dst:call}",
[]>;
- def CALL32r : I<0xFF, MRM2r, (ops R32:$dst), "call {*}$dst",
- [(X86call R32:$dst)]>;
+ def CALL32r : I<0xFF, MRM2r, (ops GR32:$dst), "call {*}$dst",
+ [(X86call GR32:$dst)]>;
def CALL32m : I<0xFF, MRM2m, (ops i32mem:$dst), "call {*}$dst",
[(X86call (loadi32 addr:$dst))]>;
}
@@ -448,7 +448,7 @@ let isCall = 1, noResults = 1 in
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, noResults = 1 in
def TAILJMPd : IBr<0xE9, (ops i32imm:$dst), "jmp ${dst:call} # TAIL CALL", []>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, noResults = 1 in
- def TAILJMPr : I<0xFF, MRM4r, (ops R32:$dst), "jmp {*}$dst # TAIL CALL", []>;
+ def TAILJMPr : I<0xFF, MRM4r, (ops GR32:$dst), "jmp {*}$dst # TAIL CALL", []>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, noResults = 1 in
def TAILJMPm : I<0xFF, MRM4m, (ops i32mem:$dst),
"jmp {*}$dst # TAIL CALL", []>;
@@ -459,7 +459,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, noResults = 1 in
// this until we have a more accurate way of tracking where the stack pointer is
// within a function.
let isTerminator = 1, isTwoAddress = 1 in
- def ADJSTACKPTRri : Ii32<0x81, MRM0r, (ops R32:$dst, R32:$src1, i32imm:$src2),
+ def ADJSTACKPTRri : Ii32<0x81, MRM0r, (ops GR32:$dst, GR32:$src1, i32imm:$src2),
"add{l} {$src2, $dst|$dst, $src2}", []>;
//===----------------------------------------------------------------------===//
@@ -468,53 +468,53 @@ let isTerminator = 1, isTwoAddress = 1 in
def LEAVE : I<0xC9, RawFrm,
(ops), "leave", []>, Imp<[EBP,ESP],[EBP,ESP]>;
def POP32r : I<0x58, AddRegFrm,
- (ops R32:$reg), "pop{l} $reg", []>, Imp<[ESP],[ESP]>;
+ (ops GR32:$reg), "pop{l} $reg", []>, Imp<[ESP],[ESP]>;
def MovePCtoStack : I<0, Pseudo, (ops piclabel:$label),
"call $label", []>;
-let isTwoAddress = 1 in // R32 = bswap R32
+let isTwoAddress = 1 in // GR32 = bswap GR32
def BSWAP32r : I<0xC8, AddRegFrm,
- (ops R32:$dst, R32:$src),
+ (ops GR32:$dst, GR32:$src),
"bswap{l} $dst",
- [(set R32:$dst, (bswap R32:$src))]>, TB;
+ [(set GR32:$dst, (bswap GR32:$src))]>, TB;
-def XCHG8rr : I<0x86, MRMDestReg, // xchg R8, R8
- (ops R8:$src1, R8:$src2),
+def XCHG8rr : I<0x86, MRMDestReg, // xchg GR8, GR8
+ (ops GR8:$src1, GR8:$src2),
"xchg{b} {$src2|$src1}, {$src1|$src2}", []>;
-def XCHG16rr : I<0x87, MRMDestReg, // xchg R16, R16
- (ops R16:$src1, R16:$src2),
+def XCHG16rr : I<0x87, MRMDestReg, // xchg GR16, GR16
+ (ops GR16:$src1, GR16:$src2),
"xchg{w} {$src2|$src1}, {$src1|$src2}", []>, OpSize;
-def XCHG32rr : I<0x87, MRMDestReg, // xchg R32, R32
- (ops R32:$src1, R32:$src2),
+def XCHG32rr : I<0x87, MRMDestReg, // xchg GR32, GR32
+ (ops GR32:$src1, GR32:$src2),
"xchg{l} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG8mr : I<0x86, MRMDestMem,
- (ops i8mem:$src1, R8:$src2),
+ (ops i8mem:$src1, GR8:$src2),
"xchg{b} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG16mr : I<0x87, MRMDestMem,
- (ops i16mem:$src1, R16:$src2),
+ (ops i16mem:$src1, GR16:$src2),
"xchg{w} {$src2|$src1}, {$src1|$src2}", []>, OpSize;
def XCHG32mr : I<0x87, MRMDestMem,
- (ops i32mem:$src1, R32:$src2),
+ (ops i32mem:$src1, GR32:$src2),
"xchg{l} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG8rm : I<0x86, MRMSrcMem,
- (ops R8:$src1, i8mem:$src2),
+ (ops GR8:$src1, i8mem:$src2),
"xchg{b} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG16rm : I<0x87, MRMSrcMem,
- (ops R16:$src1, i16mem:$src2),
+ (ops GR16:$src1, i16mem:$src2),
"xchg{w} {$src2|$src1}, {$src1|$src2}", []>, OpSize;
def XCHG32rm : I<0x87, MRMSrcMem,
- (ops R32:$src1, i32mem:$src2),
+ (ops GR32:$src1, i32mem:$src2),
"xchg{l} {$src2|$src1}, {$src1|$src2}", []>;
def LEA16r : I<0x8D, MRMSrcMem,
- (ops R16:$dst, i32mem:$src),
+ (ops GR16:$dst, i32mem:$src),
"lea{w} {$src|$dst}, {$dst|$src}", []>, OpSize;
def LEA32r : I<0x8D, MRMSrcMem,
- (ops R32:$dst, i32mem:$src),
+ (ops GR32:$dst, i32mem:$src),
"lea{l} {$src|$dst}, {$dst|$src}",
- [(set R32:$dst, leaaddr:$src)]>;
+ [(set GR32:$dst, leaaddr:$src)]>;
def REP_MOVSB : I<0xA4, RawFrm, (ops), "{rep;movsb|rep movsb}",
[(X86rep_movs i8)]>,
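
Unlike LEA16r, LEA32r carries a selection pattern ([(set GR32:$dst, leaaddr:$src)]): lea evaluates the whole base + scale*index + disp expression without touching memory, so the selector can also use it for plain arithmetic. A sketch:

    // Assumed use of lea as three-operand arithmetic:
    //
    //   leal 4(%eax,%ecx,2), %edx   // edx = eax + 2*ecx + 4, no load
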
@@ -589,21 +589,21 @@ def OUT32ir : Ii8<0xE7, RawFrm, (ops i16i8imm:$port),
//===----------------------------------------------------------------------===//
// Move Instructions...
//
-def MOV8rr : I<0x88, MRMDestReg, (ops R8 :$dst, R8 :$src),
+def MOV8rr : I<0x88, MRMDestReg, (ops GR8 :$dst, GR8 :$src),
"mov{b} {$src, $dst|$dst, $src}", []>;
-def MOV16rr : I<0x89, MRMDestReg, (ops R16:$dst, R16:$src),
+def MOV16rr : I<0x89, MRMDestReg, (ops GR16:$dst, GR16:$src),
"mov{w} {$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32rr : I<0x89, MRMDestReg, (ops R32:$dst, R32:$src),
+def MOV32rr : I<0x89, MRMDestReg, (ops GR32:$dst, GR32:$src),
"mov{l} {$src, $dst|$dst, $src}", []>;
-def MOV8ri : Ii8 <0xB0, AddRegFrm, (ops R8 :$dst, i8imm :$src),
+def MOV8ri : Ii8 <0xB0, AddRegFrm, (ops GR8 :$dst, i8imm :$src),
"mov{b} {$src, $dst|$dst, $src}",
- [(set R8:$dst, imm:$src)]>;
-def MOV16ri : Ii16<0xB8, AddRegFrm, (ops R16:$dst, i16imm:$src),
+ [(set GR8:$dst, imm:$src)]>;
+def MOV16ri : Ii16<0xB8, AddRegFrm, (ops GR16:$dst, i16imm:$src),
"mov{w} {$src, $dst|$dst, $src}",
- [(set R16:$dst, imm:$src)]>, OpSize;
-def MOV32ri : Ii32<0xB8, AddRegFrm, (ops R32:$dst, i32imm:$src),
+ [(set GR16:$dst, imm:$src)]>, OpSize;
+def MOV32ri : Ii32<0xB8, AddRegFrm, (ops GR32:$dst, i32imm:$src),
"mov{l} {$src, $dst|$dst, $src}",
- [(set R32:$dst, imm:$src)]>;
+ [(set GR32:$dst, imm:$src)]>;
def MOV8mi : Ii8 <0xC6, MRM0m, (ops i8mem :$dst, i8imm :$src),
"mov{b} {$src, $dst|$dst, $src}",
[(store (i8 imm:$src), addr:$dst)]>;
@@ -614,41 +614,41 @@ def MOV32mi : Ii32<0xC7, MRM0m, (ops i32mem:$dst, i32imm:$src),
"mov{l} {$src, $dst|$dst, $src}",
[(store (i32 imm:$src), addr:$dst)]>;
-def MOV8rm : I<0x8A, MRMSrcMem, (ops R8 :$dst, i8mem :$src),
+def MOV8rm : I<0x8A, MRMSrcMem, (ops GR8 :$dst, i8mem :$src),
"mov{b} {$src, $dst|$dst, $src}",
- [(set R8:$dst, (load addr:$src))]>;
-def MOV16rm : I<0x8B, MRMSrcMem, (ops R16:$dst, i16mem:$src),
+ [(set GR8:$dst, (load addr:$src))]>;
+def MOV16rm : I<0x8B, MRMSrcMem, (ops GR16:$dst, i16mem:$src),
"mov{w} {$src, $dst|$dst, $src}",
- [(set R16:$dst, (load addr:$src))]>, OpSize;
-def MOV32rm : I<0x8B, MRMSrcMem, (ops R32:$dst, i32mem:$src),
+ [(set GR16:$dst, (load addr:$src))]>, OpSize;
+def MOV32rm : I<0x8B, MRMSrcMem, (ops GR32:$dst, i32mem:$src),
"mov{l} {$src, $dst|$dst, $src}",
- [(set R32:$dst, (load addr:$src))]>;
+ [(set GR32:$dst, (load addr:$src))]>;
-def MOV8mr : I<0x88, MRMDestMem, (ops i8mem :$dst, R8 :$src),
+def MOV8mr : I<0x88, MRMDestMem, (ops i8mem :$dst, GR8 :$src),
"mov{b} {$src, $dst|$dst, $src}",
- [(store R8:$src, addr:$dst)]>;
-def MOV16mr : I<0x89, MRMDestMem, (ops i16mem:$dst, R16:$src),
+ [(store GR8:$src, addr:$dst)]>;
+def MOV16mr : I<0x89, MRMDestMem, (ops i16mem:$dst, GR16:$src),
"mov{w} {$src, $dst|$dst, $src}",
- [(store R16:$src, addr:$dst)]>, OpSize;
-def MOV32mr : I<0x89, MRMDestMem, (ops i32mem:$dst, R32:$src),
+ [(store GR16:$src, addr:$dst)]>, OpSize;
+def MOV32mr : I<0x89, MRMDestMem, (ops i32mem:$dst, GR32:$src),
"mov{l} {$src, $dst|$dst, $src}",
- [(store R32:$src, addr:$dst)]>;
+ [(store GR32:$src, addr:$dst)]>;
//===----------------------------------------------------------------------===//
// Fixed-Register Multiplication and Division Instructions...
//
// Extra precision multiplication
-def MUL8r : I<0xF6, MRM4r, (ops R8:$src), "mul{b} $src",
+def MUL8r : I<0xF6, MRM4r, (ops GR8:$src), "mul{b} $src",
// FIXME: Used for 8-bit mul, ignore result upper 8 bits.
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
- [(set AL, (mul AL, R8:$src))]>,
- Imp<[AL],[AX]>; // AL,AH = AL*R8
-def MUL16r : I<0xF7, MRM4r, (ops R16:$src), "mul{w} $src", []>,
- Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*R16
-def MUL32r : I<0xF7, MRM4r, (ops R32:$src), "mul{l} $src", []>,
- Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*R32
+ [(set AL, (mul AL, GR8:$src))]>,
+ Imp<[AL],[AX]>; // AL,AH = AL*GR8
+def MUL16r : I<0xF7, MRM4r, (ops GR16:$src), "mul{w} $src", []>,
+ Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*GR16
+def MUL32r : I<0xF7, MRM4r, (ops GR32:$src), "mul{l} $src", []>,
+ Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*GR32
def MUL8m : I<0xF6, MRM4m, (ops i8mem :$src),
"mul{b} $src",
// FIXME: Used for 8-bit mul, ignore result upper 8 bits.
@@ -662,12 +662,12 @@ def MUL16m : I<0xF7, MRM4m, (ops i16mem:$src),
def MUL32m : I<0xF7, MRM4m, (ops i32mem:$src),
"mul{l} $src", []>, Imp<[EAX],[EAX,EDX]>;// EAX,EDX = EAX*[mem32]
-def IMUL8r : I<0xF6, MRM5r, (ops R8:$src), "imul{b} $src", []>,
- Imp<[AL],[AX]>; // AL,AH = AL*R8
-def IMUL16r : I<0xF7, MRM5r, (ops R16:$src), "imul{w} $src", []>,
- Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*R16
-def IMUL32r : I<0xF7, MRM5r, (ops R32:$src), "imul{l} $src", []>,
- Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*R32
+def IMUL8r : I<0xF6, MRM5r, (ops GR8:$src), "imul{b} $src", []>,
+ Imp<[AL],[AX]>; // AL,AH = AL*GR8
+def IMUL16r : I<0xF7, MRM5r, (ops GR16:$src), "imul{w} $src", []>,
+ Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*GR16
+def IMUL32r : I<0xF7, MRM5r, (ops GR32:$src), "imul{l} $src", []>,
+ Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*GR32
def IMUL8m : I<0xF6, MRM5m, (ops i8mem :$src),
"imul{b} $src", []>, Imp<[AL],[AX]>; // AL,AH = AL*[mem8]
def IMUL16m : I<0xF7, MRM5m, (ops i16mem:$src),
@@ -678,11 +678,11 @@ def IMUL32m : I<0xF7, MRM5m, (ops i32mem:$src),
Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*[mem32]
// unsigned division/remainder
-def DIV8r : I<0xF6, MRM6r, (ops R8:$src), // AX/r8 = AL,AH
+def DIV8r : I<0xF6, MRM6r, (ops GR8:$src), // AX/r8 = AL,AH
"div{b} $src", []>, Imp<[AX],[AX]>;
-def DIV16r : I<0xF7, MRM6r, (ops R16:$src), // DX:AX/r16 = AX,DX
+def DIV16r : I<0xF7, MRM6r, (ops GR16:$src), // DX:AX/r16 = AX,DX
"div{w} $src", []>, Imp<[AX,DX],[AX,DX]>, OpSize;
-def DIV32r : I<0xF7, MRM6r, (ops R32:$src), // EDX:EAX/r32 = EAX,EDX
+def DIV32r : I<0xF7, MRM6r, (ops GR32:$src), // EDX:EAX/r32 = EAX,EDX
"div{l} $src", []>, Imp<[EAX,EDX],[EAX,EDX]>;
def DIV8m : I<0xF6, MRM6m, (ops i8mem:$src), // AX/[mem8] = AL,AH
"div{b} $src", []>, Imp<[AX],[AX]>;
@@ -692,11 +692,11 @@ def DIV32m : I<0xF7, MRM6m, (ops i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX
"div{l} $src", []>, Imp<[EAX,EDX],[EAX,EDX]>;
// Signed division/remainder.
-def IDIV8r : I<0xF6, MRM7r, (ops R8:$src), // AX/r8 = AL,AH
+def IDIV8r : I<0xF6, MRM7r, (ops GR8:$src), // AX/r8 = AL,AH
"idiv{b} $src", []>, Imp<[AX],[AX]>;
-def IDIV16r: I<0xF7, MRM7r, (ops R16:$src), // DX:AX/r16 = AX,DX
+def IDIV16r: I<0xF7, MRM7r, (ops GR16:$src), // DX:AX/r16 = AX,DX
"idiv{w} $src", []>, Imp<[AX,DX],[AX,DX]>, OpSize;
-def IDIV32r: I<0xF7, MRM7r, (ops R32:$src), // EDX:EAX/r32 = EAX,EDX
+def IDIV32r: I<0xF7, MRM7r, (ops GR32:$src), // EDX:EAX/r32 = EAX,EDX
"idiv{l} $src", []>, Imp<[EAX,EDX],[EAX,EDX]>;
def IDIV8m : I<0xF6, MRM7m, (ops i8mem:$src), // AX/[mem8] = AL,AH
"idiv{b} $src", []>, Imp<[AX],[AX]>;
@@ -720,364 +720,364 @@ def CDQ : I<0x99, RawFrm, (ops),
let isTwoAddress = 1 in {
// Conditional moves
-def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_B))]>,
TB, OpSize;
-def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_B))]>,
TB, OpSize;
-def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_B))]>,
TB;
-def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_B))]>,
TB;
-def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_AE))]>,
TB, OpSize;
-def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_AE))]>,
TB, OpSize;
-def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_AE))]>,
TB;
-def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_AE))]>,
TB;
-def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmove {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_E))]>,
TB, OpSize;
-def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmove {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_E))]>,
TB, OpSize;
-def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmove {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_E))]>,
TB;
-def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmove {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_E))]>,
TB;
-def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_NE))]>,
TB, OpSize;
-def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_NE))]>,
TB, OpSize;
-def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_NE))]>,
TB;
-def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_NE))]>,
TB;
-def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_BE))]>,
TB, OpSize;
-def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_BE))]>,
TB, OpSize;
-def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_BE))]>,
TB;
-def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_BE))]>,
TB;
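
Each CMOVcc def pairs one condition code with the X86cmov select node, replacing a compare-and-branch with a single conditional move; a hypothetical source-level example for the X86_COND_B (unsigned <) patterns above:

    // Illustration only: an unsigned minimum compiled without a branch.
    unsigned umin(unsigned a, unsigned b) {
      // cmp b, a ; cmovb ... -- selected through CMOVB32rr / X86_COND_B
      return a < b ? a : b;
    }
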
-def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, R16 = R16
- (ops R16:$dst, R16:$src1, R16:$src2),
+def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
+ (ops GR16:$dst, GR16:$src1, GR16:$src2),
"cmova {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
X86_COND_A))]>,
TB, OpSize;
-def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, R16 = [mem16]
- (ops R16:$dst, R16:$src1, i16mem:$src2),
+def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
+ (ops GR16:$dst, GR16:$src1, i16mem:$src2),
"cmova {$src2, $dst|$dst, $src2}",
- [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
X86_COND_A))]>,
TB, OpSize;
-def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, R32 = R32
- (ops R32:$dst, R32:$src1, R32:$src2),
+def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
+ (ops GR32:$dst, GR32:$src1, GR32:$src2),
"cmova {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
X86_COND_A))]>,
TB;
-def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, R32 = [mem32]
- (ops R32:$dst, R32:$src1, i32mem:$src2),
+def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
+ (ops GR32:$dst, GR32:$src1, i32mem:$src2),
"cmova {$src2, $dst|$dst, $src2}",
- [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_A))]>,
TB;
-def CMOVL16rr : I<0x4C, MRMSrcReg,