author    Craig Topper <craig.topper@gmail.com>  2012-04-20 06:31:50 +0000
committer Craig Topper <craig.topper@gmail.com>  2012-04-20 06:31:50 +0000
commit    c909950c384e8234a7b3c5a76b7f79e3f7012ceb (patch)
tree      13d664ac45697b97c21bc451faf97986080c2863
parent    eece9dc81cedf0cda5c356c180f60d91c99920fc (diff)
Convert some uses of XXXRegisterClass to &XXXRegClass. No functional change since they are equivalent.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155186 91177308-0d34-0410-b5e6-96231b3b80d8
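The two spellings name the same object: for each register class, TableGen of this era emitted both the TargetRegisterClass instance itself (e.g. GPRCRegClass) and a pointer constant aliasing it (GPRCRegisterClass), so comparing against the alias or against the address of the instance tests the same pointer value. A minimal standalone sketch of that relationship (illustrative declarations, not the actual TableGen output):

    #include <cassert>

    struct TargetRegisterClass { int ID; };

    namespace SPU {
      // TableGen emits the register class object itself...
      const TargetRegisterClass GPRCRegClass = { 0 };
      // ...and, in older trees, a pointer constant aliasing that object.
      const TargetRegisterClass * const GPRCRegisterClass = &GPRCRegClass;
    }

    int main() {
      const TargetRegisterClass *RC = &SPU::GPRCRegClass;
      // Both comparisons test the same pointer value, which is why the
      // rewrite in the diff below is not a functional change.
      assert(RC == SPU::GPRCRegisterClass);
      assert(RC == &SPU::GPRCRegClass);
      return 0;
    }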
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.cpp       76
-rw-r--r--  lib/Target/PowerPC/PPCFrameLowering.cpp   22
-rw-r--r--  lib/Target/PowerPC/PPCISelDAGToDAG.cpp     4
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp    38
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.cpp       32
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp     8
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.cpp       12
-rw-r--r--  lib/Target/X86/X86FastISel.cpp            57
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp       214
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp           22
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp         2
-rw-r--r--  lib/Target/X86/X86VZeroUpper.cpp           2
12 files changed, 244 insertions(+), 245 deletions(-)
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index 759923d7bb..a30de4688e 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -140,29 +140,27 @@ SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
- } else {
+ if (RC == SPU::GPRCRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128;
+ else if (RC == SPU::R64CRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == SPU::R64FPRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == SPU::R32CRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == SPU::R32FPRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == SPU::R16CRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16;
+ else if (RC == SPU::R8CRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8;
+ else if (RC == SPU::VECREGRegisterClass)
+ opc = isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8;
+ else
llvm_unreachable("Unknown regclass!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -175,29 +173,27 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
- } else {
+ if (RC == SPU::GPRCRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128;
+ else if (RC == SPU::R64CRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == SPU::R64FPRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == SPU::R32CRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == SPU::R32FPRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == SPU::R16CRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16;
+ else if (RC == SPU::R8CRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8;
+ else if (RC == SPU::VECREGRegisterClass)
+ opc = isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8;
+ else
llvm_unreachable("Unknown regclass in loadRegFromStackSlot!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index b77a80bbf3..5950290ca2 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -492,7 +492,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
// subregisters of CR2. We just need to emit a move of CR2.
- if (PPC::CRBITRCRegisterClass->contains(Reg))
+ if (PPC::CRBITRCRegClass.contains(Reg))
continue;
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
@@ -817,7 +817,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::GPRCRegisterClass->contains(Reg)) {
+ if (PPC::GPRCRegClass.contains(Reg)) {
HasGPSaveArea = true;
GPRegs.push_back(CSI[i]);
@@ -825,7 +825,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinGPR) {
MinGPR = Reg;
}
- } else if (PPC::G8RCRegisterClass->contains(Reg)) {
+ } else if (PPC::G8RCRegClass.contains(Reg)) {
HasG8SaveArea = true;
G8Regs.push_back(CSI[i]);
@@ -833,7 +833,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinG8R) {
MinG8R = Reg;
}
- } else if (PPC::F8RCRegisterClass->contains(Reg)) {
+ } else if (PPC::F8RCRegClass.contains(Reg)) {
HasFPSaveArea = true;
FPRegs.push_back(CSI[i]);
@@ -842,12 +842,12 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
MinFPR = Reg;
}
// FIXME SVR4: Disable CR save area for now.
- } else if (PPC::CRBITRCRegisterClass->contains(Reg)
- || PPC::CRRCRegisterClass->contains(Reg)) {
+ } else if (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)) {
// HasCRSaveArea = true;
- } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ } else if (PPC::VRSAVERCRegClass.contains(Reg)) {
HasVRSAVESaveArea = true;
- } else if (PPC::VRRCRegisterClass->contains(Reg)) {
+ } else if (PPC::VRRCRegClass.contains(Reg)) {
HasVRSaveArea = true;
VRegs.push_back(CSI[i]);
@@ -932,8 +932,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::CRBITRCRegisterClass->contains(Reg) ||
- PPC::CRRCRegisterClass->contains(Reg)) {
+ if (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -950,7 +950,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ if (PPC::VRSAVERCRegClass.contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 5a04888dd4..4303ae1ce2 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -238,11 +238,11 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
DebugLoc dl;
if (PPCLowering.getPointerTy() == MVT::i32) {
- GlobalBaseReg = RegInfo->createVirtualRegister(PPC::GPRCRegisterClass);
+ GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
} else {
- GlobalBaseReg = RegInfo->createVirtualRegister(PPC::G8RCRegisterClass);
+ GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RCRegClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
}
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index ec1c14c115..b0b3c17f25 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -76,9 +76,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);
// Set up the register classes.
- addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
- addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
- addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
+ addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
+ addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
+ addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -292,7 +292,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
// 64-bit PowerPC implementations can support i64 types directly
- addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
+ addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
// 64-bit PowerPC wants to expand i128 shifts itself.
@@ -370,10 +370,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
setOperationAction(ISD::STORE , MVT::v4i32, Legal);
- addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
+ addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
@@ -1721,19 +1721,19 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
default:
llvm_unreachable("ValVT not supported by formal arguments Lowering");
case MVT::i32:
- RC = PPC::GPRCRegisterClass;
+ RC = &PPC::GPRCRegClass;
break;
case MVT::f32:
- RC = PPC::F4RCRegisterClass;
+ RC = &PPC::F4RCRegClass;
break;
case MVT::f64:
- RC = PPC::F8RCRegisterClass;
+ RC = &PPC::F8RCRegClass;
break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v4f32:
- RC = PPC::VRRCRegisterClass;
+ RC = &PPC::VRRCRegClass;
break;
}
@@ -5612,18 +5612,18 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'b': // R1-R31
case 'r': // R0-R31
if (VT == MVT::i64 && PPCSubTarget.isPPC64())
- return std::make_pair(0U, PPC::G8RCRegisterClass);
- return std::make_pair(0U, PPC::GPRCRegisterClass);
+ return std::make_pair(0U, &PPC::G8RCRegClass);
+ return std::make_pair(0U, &PPC::GPRCRegClass);
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, PPC::F4RCRegisterClass);
- else if (VT == MVT::f64)
- return std::make_pair(0U, PPC::F8RCRegisterClass);
+ return std::make_pair(0U, &PPC::F4RCRegClass);
+ if (VT == MVT::f64)
+ return std::make_pair(0U, &PPC::F8RCRegClass);
break;
case 'v':
- return std::make_pair(0U, PPC::VRRCRegisterClass);
+ return std::make_pair(0U, &PPC::VRRCRegClass);
case 'y': // crrc
- return std::make_pair(0U, PPC::CRRCRegisterClass);
+ return std::make_pair(0U, &PPC::CRRCRegClass);
}
}
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index b45ada9db3..c0b9e92c2d 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -354,7 +354,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
DebugLoc DL;
- if (PPC::GPRCRegisterClass->hasSubClassEq(RC)) {
+ if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
if (SrcReg != PPC::LR) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
.addReg(SrcReg,
@@ -370,7 +370,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::G8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
if (SrcReg != PPC::LR8) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
.addReg(SrcReg,
@@ -386,17 +386,17 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::F8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFD))
.addReg(SrcReg,
getKillRegState(isKill)),
FrameIdx));
- } else if (PPC::F4RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFS))
.addReg(SrcReg,
getKillRegState(isKill)),
FrameIdx));
- } else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
(!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CR))
@@ -438,7 +438,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::CRBITRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
// FIXME: We use CRi here because there is no mtcrf on a bit. Since the
// backend currently only uses CR1EQ as an individual bit, this should
// not cause any bug. If we need other uses of CR bits, the following
@@ -470,9 +470,9 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
Reg = PPC::CR7;
return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
- PPC::CRRCRegisterClass, NewMIs);
+ &PPC::CRRCRegClass, NewMIs);
- } else if (PPC::VRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// STVX VAL, 0, R0
@@ -522,7 +522,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs)const{
- if (PPC::GPRCRegisterClass->hasSubClassEq(RC)) {
+ if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
if (DestReg != PPC::LR) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
DestReg), FrameIdx));
@@ -531,7 +531,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::R11), FrameIdx));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR)).addReg(PPC::R11));
}
- } else if (PPC::G8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
if (DestReg != PPC::LR8) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
FrameIdx));
@@ -540,13 +540,13 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::X11), FrameIdx));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR8)).addReg(PPC::X11));
}
- } else if (PPC::F8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
FrameIdx));
- } else if (PPC::F4RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
FrameIdx));
- } else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
(!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
@@ -578,7 +578,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::MTCRF8 : PPC::MTCRF), DestReg)
.addReg(ScratchReg));
}
- } else if (PPC::CRBITRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
unsigned Reg = 0;
if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
@@ -607,9 +607,9 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
Reg = PPC::CR7;
return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
- PPC::CRRCRegisterClass, NewMIs);
+ &PPC::CRRCRegClass, NewMIs);
- } else if (PPC::VRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// Dest = LVX 0, R0
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index c3e6f16067..d58ef5d067 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -689,9 +689,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the register classes.
- addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
- addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
- addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
+ addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
+ addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
+ addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
// Turn FP extload into load/fextend
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
@@ -1259,7 +1259,7 @@ SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
- return std::make_pair(0U, SP::IntRegsRegisterClass);
+ return std::make_pair(0U, &SP::IntRegsRegClass);
}
}
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index faff468a58..f8674d0bd6 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -303,13 +303,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (I != MBB.end()) DL = I->getDebugLoc();
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
- if (RC == SP::IntRegsRegisterClass)
+ if (RC == &SP::IntRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
- else if (RC == SP::FPRegsRegisterClass)
+ else if (RC == &SP::FPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
- else if (RC == SP::DFPRegsRegisterClass)
+ else if (RC == &SP::DFPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
else
@@ -324,11 +324,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
- if (RC == SP::IntRegsRegisterClass)
+ if (RC == &SP::IntRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0);
- else if (RC == SP::FPRegsRegisterClass)
+ else if (RC == &SP::FPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0);
- else if (RC == SP::DFPRegsRegisterClass)
+ else if (RC == &SP::DFPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0);
else
llvm_unreachable("Can't load this register from stack slot");
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 69752c5c5d..eca0e78082 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -183,37 +183,37 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
case MVT::i1:
case MVT::i8:
Opc = X86::MOV8rm;
- RC = X86::GR8RegisterClass;
+ RC = &X86::GR8RegClass;
break;
case MVT::i16:
Opc = X86::MOV16rm;
- RC = X86::GR16RegisterClass;
+ RC = &X86::GR16RegClass;
break;
case MVT::i32:
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
break;
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
break;
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
- RC = X86::RFP32RegisterClass;
+ RC = &X86::RFP32RegClass;
}
break;
case MVT::f64:
if (X86ScalarSSEf64) {
Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
- RC = X86::FR64RegisterClass;
+ RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
- RC = X86::RFP64RegisterClass;
+ RC = &X86::RFP64RegClass;
}
break;
case MVT::f80:
@@ -240,7 +240,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
default: return false;
case MVT::i1: {
// Mask out all but lowest bit.
- unsigned AndResult = createResultReg(X86::GR8RegisterClass);
+ unsigned AndResult = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
@@ -547,13 +547,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
if (Subtarget->isPICStyleRIPRel())
StubAM.Base.Reg = X86::RIP;
} else {
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
}
LoadReg = createResultReg(RC);
@@ -1258,7 +1258,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
if (V->getType()->isFloatTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
+ unsigned ResultReg = createResultReg(&X86::FR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::CVTSS2SDrr), ResultReg)
.addReg(OpReg);
@@ -1277,7 +1277,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
if (V->getType()->isDoubleTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
+ unsigned ResultReg = createResultReg(&X86::FR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::CVTSD2SSrr), ResultReg)
.addReg(OpReg);
@@ -1314,8 +1314,9 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (!Subtarget->is64Bit()) {
// If we're on x86-32; we can't extract an i8 from a general register.
// First issue a copy to GR16_ABCD or GR32_ABCD.
- const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
- ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
+ const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
+ (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
+ (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
unsigned CopyReg = createResultReg(CopyRC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
CopyReg).addReg(InputReg);
@@ -1903,7 +1904,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
RVLocs[i].getLocReg() == X86::ST1)) {
if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
CopyVT = MVT::f80;
- CopyReg = createResultReg(X86::RFP80RegisterClass);
+ CopyReg = createResultReg(&X86::RFP80RegClass);
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
CopyReg);
@@ -2001,37 +2002,37 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
default: return false;
case MVT::i8:
Opc = X86::MOV8rm;
- RC = X86::GR8RegisterClass;
+ RC = &X86::GR8RegClass;
break;
case MVT::i16:
Opc = X86::MOV16rm;
- RC = X86::GR16RegisterClass;
+ RC = &X86::GR16RegClass;
break;
case MVT::i32:
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
break;
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
break;
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
- RC = X86::RFP32RegisterClass;
+ RC = &X86::RFP32RegClass;
}
break;
case MVT::f64:
if (X86ScalarSSEf64) {
Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
- RC = X86::FR64RegisterClass;
+ RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
- RC = X86::RFP64RegisterClass;
+ RC = &X86::RFP64RegClass;
}
break;
case MVT::f80:
@@ -2124,19 +2125,19 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = X86::FsFLD0SS;
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp032;
- RC = X86::RFP32RegisterClass;
+ RC = &X86::RFP32RegClass;
}
break;
case MVT::f64:
if (X86ScalarSSEf64) {
Opc = X86::FsFLD0SD;
- RC = X86::FR64RegisterClass;
+ RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp064;
- RC = X86::RFP64RegisterClass;
+ RC = &X86::RFP64RegClass;
}
break;
case MVT::f80:
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 04299f3008..48ec8a2f35 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -215,11 +215,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// Set up the register classes.
- addRegisterClass(MVT::i8, X86::GR8RegisterClass);
- addRegisterClass(MVT::i16, X86::GR16RegisterClass);
- addRegisterClass(MVT::i32, X86::GR32RegisterClass);
+ addRegisterClass(MVT::i8, &X86::GR8RegClass);
+ addRegisterClass(MVT::i16, &X86::GR16RegClass);
+ addRegisterClass(MVT::i32, &X86::GR32RegClass);
if (Subtarget->is64Bit())
- addRegisterClass(MVT::i64, X86::GR64RegisterClass);
+ addRegisterClass(MVT::i64, &X86::GR64RegClass);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -567,8 +567,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, X86::FR32RegisterClass);
- addRegisterClass(MVT::f64, X86::FR64RegisterClass);
+ addRegisterClass(MVT::f32, &X86::FR32RegClass);
+ addRegisterClass(MVT::f64, &X86::FR64RegClass);
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS , MVT::f64, Custom);
@@ -599,8 +599,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
} else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, X86::FR32RegisterClass);
- addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+ addRegisterClass(MVT::f32, &X86::FR32RegClass);
+ addRegisterClass(MVT::f64, &X86::RFP64RegClass);
// Use ANDPS to simulate FABS.
setOperationAction(ISD::FABS , MVT::f32, Custom);
@@ -632,8 +632,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
} else if (!TM.Options.UseSoftFloat) {
// f32 and f64 in x87.
// Set up the FP register classes.
- addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
- addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
+ addRegisterClass(MVT::f64, &X86::RFP64RegClass);
+ addRegisterClass(MVT::f32, &X86::RFP32RegClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
setOperationAction(ISD::UNDEF, MVT::f32, Expand);
@@ -660,7 +660,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Long double always uses X87.
if (!TM.Options.UseSoftFloat) {
- addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
+ addRegisterClass(MVT::f80, &X86::RFP80RegClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
@@ -776,7 +776,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
- addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
+ addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
@@ -813,7 +813,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
- addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
@@ -830,14 +830,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
- addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
// FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
// registers cannot be used even for integer operations.
- addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
- addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
- addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
- addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
+ addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
+ addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
+ addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
setOperationAction(ISD::ADD, MVT::v16i8, Legal);
setOperationAction(ISD::ADD, MVT::v8i16, Legal);
@@ -1011,12 +1011,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
- addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
- addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
- addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
- addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
- addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
- addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
+ addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
+ addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
+ addRegisterClass