-rw-r--r--  include/llvm/Target/MRegisterInfo.h     |   52
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h   |   54
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.cpp         |  113
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.h           |   14
-rw-r--r--  lib/Target/ARM/ARMRegisterInfo.cpp      |  196
-rw-r--r--  lib/Target/ARM/ARMRegisterInfo.h        |   15
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.cpp     |   37
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.h       |   11
-rw-r--r--  lib/Target/Alpha/AlphaRegisterInfo.cpp  |   37
-rw-r--r--  lib/Target/Alpha/AlphaRegisterInfo.h    |   10
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.cpp     |   39
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.h       |   14
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.cpp  |   48
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.h    |   10
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.cpp       |   31
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.h         |   11
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.cpp    |   31
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.h      |   10
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.cpp     |   79
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.h       |   15
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.cpp  |   79
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.h    |   15
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.cpp     |   38
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.h       |   10
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.cpp  |   38
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.h    |   10
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp         | 1031
-rw-r--r--  lib/Target/X86/X86InstrInfo.h           |   56
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp      | 1070
-rw-r--r--  lib/Target/X86/X86RegisterInfo.h        |   55
30 files changed, 1599 insertions, 1630 deletions
diff --git a/include/llvm/Target/MRegisterInfo.h b/include/llvm/Target/MRegisterInfo.h
index 1faa0b5012..671ce0a197 100644
--- a/include/llvm/Target/MRegisterInfo.h
+++ b/include/llvm/Target/MRegisterInfo.h
@@ -484,58 +484,6 @@ public:
unsigned DestReg,
const MachineInstr *Orig) const = 0;
- /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
- /// slot into the specified machine instruction for the specified operand(s).
- /// If this is possible, a new instruction is returned with the specified
- /// operand folded, otherwise NULL is returned. The client is responsible for
- /// removing the old instruction and adding the new one in the instruction
- /// stream.
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
- return 0;
- }
-
- /// foldMemoryOperand - Same as the previous version except it allows folding
- /// of any load and store from / to any address, not just from a specific
- /// stack slot.
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
- /// canFoldMemoryOperand - Returns true if the specified load / store is
- /// folding is possible.
- virtual
- bool canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const{
- return false;
- }
-
- /// unfoldMemoryOperand - Separate a single instruction which folded a load or
- /// a store or a load and a store into two or more instruction. If this is
- /// possible, returns true as well as the new instructions by reference.
- virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const{
- return false;
- }
-
- virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode*> &NewNodes) const {
- return false;
- }
-
- /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
- /// instruction after load / store are unfolded from an instruction of the
- /// specified opcode. It returns zero if the specified unfolding is not
- /// possible.
- virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore) const {
- return 0;
- }
-
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).
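
These hooks are not dropped here; they reappear verbatim on TargetInstrInfo in the next file, so only the class that clients query changes. A minimal caller-side sketch of that migration, assuming a spiller-style client; the helper name, variables, and includes are illustrative and not part of this patch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

// Illustrative only: the folding hook is now reached through TargetInstrInfo,
// where it previously lived on MRegisterInfo.
static MachineInstr *tryFoldIntoStackSlot(MachineFunction &MF, MachineInstr *MI,
                                          unsigned OpNum, int FrameIndex) {
  // Before this patch: MF.getTarget().getRegisterInfo()->foldMemoryOperand(...)
  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpNum);
  return TII->foldMemoryOperand(MI, Ops, FrameIndex);  // NULL if not foldable
}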
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index f2a091b9e4..f7e7b3c24d 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -27,6 +27,8 @@ class TargetMachine;
class TargetRegisterClass;
class LiveVariables;
class CalleeSavedInfo;
+class SDNode;
+class SelectionDAG;
template<class T> class SmallVectorImpl;
@@ -540,6 +542,58 @@ public:
return false;
}
+ /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
+ /// slot into the specified machine instruction for the specified operand(s).
+ /// If this is possible, a new instruction is returned with the specified
+ /// operand folded, otherwise NULL is returned. The client is responsible for
+ /// removing the old instruction and adding the new one in the instruction
+ /// stream.
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ return 0;
+ }
+
+ /// foldMemoryOperand - Same as the previous version except it allows folding
+ /// of any load and store from / to any address, not just from a specific
+ /// stack slot.
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
+ /// canFoldMemoryOperand - Returns true if the specified load / store
+ /// folding is possible.
+ virtual
+ bool canFoldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &Ops) const{
+ return false;
+ }
+
+ /// unfoldMemoryOperand - Separate a single instruction which folded a load or
+ /// a store or a load and a store into two or more instructions. If this is
+ /// possible, returns true as well as the new instructions by reference.
+ virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const{
+ return false;
+ }
+
+ virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode*> &NewNodes) const {
+ return false;
+ }
+
+ /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
+ /// instruction after load / store are unfolded from an instruction of the
+ /// specified opcode. It returns zero if the specified unfolding is not
+ /// possible.
+ virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore) const {
+ return 0;
+ }
+
/// BlockHasNoFallThrough - Return true if the specified block does not
/// fall-through into its successor block. This is primarily used when a
/// branch is unanalyzable. It is useful for things like unconditional
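
The unfolding hooks added above keep their always-fail defaults on most targets; judging by the diffstat, only X86 provides real implementations in this patch. A sketch of how a client might drive them, with illustrative names that are not part of this patch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Illustrative only: split a folded load back out of MI if the target supports
// it; the caller is responsible for inserting the instructions in NewMIs.
static bool tryUnfoldLoad(MachineFunction &MF, MachineInstr *MI, unsigned Reg,
                          const TargetInstrInfo *TII,
                          SmallVectorImpl<MachineInstr*> &NewMIs) {
  // Zero means this opcode has no load-only unfolded form.
  if (!TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
                                       /*UnfoldLoad=*/true,
                                       /*UnfoldStore=*/false))
    return false;
  return TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad=*/true,
                                  /*UnfoldStore=*/false, NewMIs);
}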
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index aa0109f8b9..f40be584ee 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -643,6 +643,119 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
+MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &Ops,
+ int FI) const {
+ if (Ops.size() != 1) return NULL;
+
+ unsigned OpNum = Ops[0];
+ unsigned Opc = MI->getOpcode();
+ MachineInstr *NewMI = NULL;
+ switch (Opc) {
+ default: break;
+ case ARM::MOVr: {
+ if (MI->getOperand(4).getReg() == ARM::CPSR)
+ // If it is updating CPSR, then it cannot be folded.
+ break;
+ unsigned Pred = MI->getOperand(2).getImm();
+ unsigned PredReg = MI->getOperand(3).getReg();
+ if (OpNum == 0) { // move -> store
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ NewMI = BuildMI(get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
+ .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
+ } else { // move -> load
+ unsigned DstReg = MI->getOperand(0).getReg();
+ NewMI = BuildMI(get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
+ .addImm(0).addImm(Pred).addReg(PredReg);
+ }
+ break;
+ }
+ case ARM::tMOVr: {
+ if (OpNum == 0) { // move -> store
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
+ // tSpill cannot take a high register operand.
+ break;
+ NewMI = BuildMI(get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
+ .addImm(0);
+ } else { // move -> load
+ unsigned DstReg = MI->getOperand(0).getReg();
+ if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
+ // tRestore cannot target a high register operand.
+ break;
+ NewMI = BuildMI(get(ARM::tRestore), DstReg).addFrameIndex(FI)
+ .addImm(0);
+ }
+ break;
+ }
+ case ARM::FCPYS: {
+ unsigned Pred = MI->getOperand(2).getImm();
+ unsigned PredReg = MI->getOperand(3).getReg();
+ if (OpNum == 0) { // move -> store
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ NewMI = BuildMI(get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred).addReg(PredReg);
+ } else { // move -> load
+ unsigned DstReg = MI->getOperand(0).getReg();
+ NewMI = BuildMI(get(ARM::FLDS), DstReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred).addReg(PredReg);
+ }
+ break;
+ }
+ case ARM::FCPYD: {
+ unsigned Pred = MI->getOperand(2).getImm();
+ unsigned PredReg = MI->getOperand(3).getReg();
+ if (OpNum == 0) { // move -> store
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ NewMI = BuildMI(get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred).addReg(PredReg);
+ } else { // move -> load
+ unsigned DstReg = MI->getOperand(0).getReg();
+ NewMI = BuildMI(get(ARM::FLDD), DstReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred).addReg(PredReg);
+ }
+ break;
+ }
+ }
+
+ if (NewMI)
+ NewMI->copyKillDeadInfo(MI);
+ return NewMI;
+}
+
+bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &Ops) const {
+ if (Ops.size() != 1) return false;
+
+ unsigned OpNum = Ops[0];
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ default: break;
+ case ARM::MOVr:
+ // If it is updating CPSR, then it cannot be folded.
+ return MI->getOperand(4).getReg() != ARM::CPSR;
+ case ARM::tMOVr: {
+ if (OpNum == 0) { // move -> store
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
+ // tSpill cannot take a high register operand.
+ return false;
+ } else { // move -> load
+ unsigned DstReg = MI->getOperand(0).getReg();
+ if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
+ // tRestore cannot target a high register operand.
+ return false;
+ }
+ return true;
+ }
+ case ARM::FCPYS:
+ case ARM::FCPYD:
+ return true;
+ }
+
+ return false;
+}
+
bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
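
The ARM override handles exactly the four register-copy opcodes above: folding operand 0 (the definition) turns a copy into the matching store, while folding operand 1 turns it into the load. A sketch of the client contract described in the foldMemoryOperand comment, with an illustrative helper name and insert/erase details that are not part of this patch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Illustrative only: fold the def operand of a copy into a store to a stack
// slot (MOVr -> STR, tMOVr -> tSpill, FCPYS -> FSTS, FCPYD -> FSTD).
static bool foldCopyIntoStore(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const TargetInstrInfo *TII, int FrameIndex) {
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(0);                             // operand 0 = the definition
  MachineInstr *NewMI = TII->foldMemoryOperand(&*MI, Ops, FrameIndex);
  if (!NewMI)              // rejected, e.g. a MOVr that also updates CPSR
    return false;
  MBB.insert(MI, NewMI);   // the client adds the new instruction...
  MBB.erase(MI);           // ...and removes the old one itself
  return true;
}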
diff --git a/lib/Target/ARM/ARMInstrInfo.h b/lib/Target/ARM/ARMInstrInfo.h
index a1cd821619..60d9640b8d 100644
--- a/lib/Target/ARM/ARMInstrInfo.h
+++ b/lib/Target/ARM/ARMInstrInfo.h
@@ -190,6 +190,20 @@ public:
virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const;
+
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
+
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
+ virtual bool canFoldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &Ops) const;
+
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
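
Because ARMInstrInfo also overrides canFoldMemoryOperand, a client that only needs a yes/no answer, for example when weighing which value to spill, can pre-screen without building the folded instruction. A minimal sketch under that assumption, with illustrative names:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Illustrative only: OpNum 0 asks whether the copy could become a store,
// OpNum 1 whether it could become a load.
static bool copyIsFoldable(MachineInstr *MI, unsigned OpNum,
                           const TargetInstrInfo *TII) {
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpNum);
  return TII->canFoldMemoryOperand(MI, Ops);
}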
diff --git a/lib/Target/ARM/ARMRegisterInfo.cpp b/lib/Target/ARM/ARMRegisterInfo.cpp
index 316026234e..82aa74ea58 100644
--- a/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -136,7 +136,7 @@ void ARMRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
/// isLowRegister - Returns true if the register is low register r0-r7.
///
-static bool isLowRegister(unsigned Reg) {
+bool ARMRegisterInfo::isLowRegister(unsigned Reg) const {
using namespace ARM;
switch (Reg) {
case R0: case R1: case R2: case R3:
@@ -147,119 +147,6 @@ static bool isLowRegister(unsigned Reg) {
}
}
-MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
- int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = NULL;
- switch (Opc) {
- default: break;
- case ARM::MOVr: {
- if (MI->getOperand(4).getReg() == ARM::CPSR)
- // If it is updating CPSR, then it cannot be foled.
- break;
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- NewMI = BuildMI(TII.get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
- .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(TII.get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
- .addImm(0).addImm(Pred).addReg(PredReg);
- }
- break;
- }
- case ARM::tMOVr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- break;
- NewMI = BuildMI(TII.get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- break;
- NewMI = BuildMI(TII.get(ARM::tRestore), DstReg).addFrameIndex(FI)
- .addImm(0);
- }
- break;
- }
- case ARM::FCPYS: {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- NewMI = BuildMI(TII.get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- }
- break;
- }
- case ARM::FCPYD: {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- NewMI = BuildMI(TII.get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- }
- break;
- }
- }
-
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
- return NewMI;
-}
-
-bool ARMRegisterInfo::canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default: break;
- case ARM::MOVr:
- // If it is updating CPSR, then it cannot be foled.
- return MI->getOperand(4).getReg() != ARM::CPSR;
- case ARM::tMOVr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- return false;
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- return false;
- }
- return true;
- }
- case ARM::FCPYS:
- case ARM::FCPYD:
- return true;
- }
-
- return false;
-}
-
const unsigned*
ARMRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
static const unsigned CalleeSavedRegs[] = {
@@ -426,12 +313,13 @@ static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
/// constpool entry.
static
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, bool CanChangeCC,
- const TargetInstrInfo &TII) {
- bool isHigh = !isLowRegister(DestReg) ||
- (BaseReg != 0 && !isLowRegister(BaseReg));
+ MachineBasicBlock::iterator &MBBI,
+ unsigned DestReg, unsigned BaseReg,
+ int NumBytes, bool CanChangeCC,
+ const TargetInstrInfo &TII,
+ const ARMRegisterInfo& MRI) {
+ bool isHigh = !MRI.isLowRegister(DestReg) ||
+ (BaseReg != 0 && !MRI.isLowRegister(BaseReg));
bool isSub = false;
// Subtract doesn't have high register version. Load the negative value
// if either base or dest register is a high register. Also, if do not
@@ -476,7 +364,8 @@ static
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
- int NumBytes, const TargetInstrInfo &TII) {
+ int NumBytes, const TargetInstrInfo &TII,
+ const ARMRegisterInfo& MRI) {
bool isSub = NumBytes < 0;
unsigned Bytes = (unsigned)NumBytes;
if (isSub) Bytes = -NumBytes;
@@ -522,12 +411,12 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
if (NumMIs > Threshold) {
// This will expand into too many instructions. Load the immediate from a
// constpool entry.
- emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII);
+ emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII, MRI);
return;
}
if (DstNotEqBase) {
- if (isLowRegister(DestReg) && isLowRegister(BaseReg)) {
+ if (MRI.isLowRegister(DestReg) && MRI.isLowRegister(BaseReg)) {
// If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
unsigned Chunk = (1 << 3) - 1;
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
@@ -577,9 +466,10 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
int NumBytes, ARMCC::CondCodes Pred, unsigned PredReg,
- bool isThumb, const TargetInstrInfo &TII) {
+ bool isThumb, const TargetInstrInfo &TII,
+ const ARMRegisterInfo& MRI) {
if (isThumb)
- emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
+ emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII, MRI);
else
emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes,
Pred, PredReg, TII);
@@ -610,12 +500,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
unsigned PredReg = isThumb ? 0 : Old->getOperand(2).getReg();
- emitSPUpdate(MBB, I, -Amount, Pred, PredReg, isThumb, TII);
+ emitSPUpdate(MBB, I, -Amount, Pred, PredReg, isThumb, TII, *this);
} else {
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
unsigned PredReg = isThumb ? 0 : Old->getOperand(3).getReg();
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
- emitSPUpdate(MBB, I, Amount, Pred, PredReg, isThumb, TII);
+ emitSPUpdate(MBB, I, Amount, Pred, PredReg, isThumb, TII, *this);
}
}
}
@@ -627,7 +517,8 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
static void emitThumbConstant(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, int Imm,
- const TargetInstrInfo &TII) {
+ const TargetInstrInfo &TII,
+ const ARMRegisterInfo& MRI) {
bool isSub = Imm < 0;
if (isSub) Imm = -Imm;
@@ -636,7 +527,7 @@ static void emitThumbConstant(MachineBasicBlock &MBB,
Imm -= ThisVal;
BuildMI(MBB, MBBI, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
if (Imm > 0)
- emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII);
+ emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI);
if (isSub)
BuildMI(MBB, MBBI, TII.get(ARM::tNEG), DestReg)
.addReg(DestReg, false, false, true);
@@ -770,7 +661,7 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// MI would expand into a large number of instructions. Don't try to
// simplify the immediate.
if (NumMIs > 2) {
- emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII);
+ emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII, *this);
MBB.erase(II);
return;
}
@@ -783,12 +674,12 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i+1).ChangeToImmediate(Mask);
Offset = (Offset - Mask * Scale);
MachineBasicBlock::iterator NII = next(II);
- emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII);
+ emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII, *this);
} else {
// Translate r0 = add sp, -imm to
// r0 = -imm (this is then translated into a series of instructions)
// r0 = add r0, sp
- emitThumbConstant(MBB, II, DestReg, Offset, TII);
+ emitThumbConstant(MBB, II, DestReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tADDhirr));
MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
@@ -891,13 +782,14 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
bool UseRR = false;
if (Opcode == ARM::tRestore) {
if (FrameReg == ARM::SP)
- emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
+ emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
+ Offset, false, TII, *this);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
- emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
+ emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tLDR));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR)
@@ -927,13 +819,14 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
.addReg(ARM::R3, false, false, true);
if (Opcode == ARM::tSpill) {
if (FrameReg == ARM::SP)
- emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
+ emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
+ Offset, false, TII, *this);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
- emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
+ emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tSTR));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR) // Use [reg, reg] addrmode.
@@ -1266,11 +1159,11 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
int FramePtrSpillFI = 0;
if (VARegSaveSize)
- emitSPUpdate(MBB, MBBI, -VARegSaveSize, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, -VARegSaveSize, ARMCC::AL, 0, isThumb, TII, *this);
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
return;
}
@@ -1310,7 +1203,7 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 1.
- emitSPUpdate(MBB, MBBI, -GPRCS1Size, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, -GPRCS1Size, ARMCC::AL, 0, isThumb, TII, *this);
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 1, STI);
} else if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH)
++MBBI;
@@ -1326,11 +1219,11 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 2.
- emitSPUpdate(MBB, MBBI, -GPRCS2Size, ARMCC::AL, 0, false, TII);
+ emitSPUpdate(MBB, MBBI, -GPRCS2Size, ARMCC::AL, 0, false, TII, *this);
// Build the new SUBri to adjust SP for FP callee-save spill area.
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 2, STI);
- emitSPUpdate(MBB, MBBI, -DPRCSSize, ARMCC::AL, 0, false, TII);
+ emitSPUpdate(MBB, MBBI, -DPRCSSize, ARMCC::AL, 0, false, TII, *this);
}
// Determine starting offsets of spill areas.
@@ -1347,7 +1240,7 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
// Insert it after all the callee-save spills.
if (!isThumb)
movePastCSLoadStoreOps(MBB, MBBI, ARM::FSTD, 3, STI);
- emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
}
if(STI.isTargetELF() && hasFP(MF)) {
@@ -1390,7 +1283,7 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
} else {
// Unwind MBBI to point to first LDR / FLDD.
const unsigned *CSRegs = getCalleeSavedRegs();
@@ -1412,7 +1305,8 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
// Reset SP based on frame pointer only if the stack frame extends beyond
// frame pointer stack slot or target is ELF and the function has FP.
if (NumBytes)
- emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes, TII);
+ emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
+ TII, *this);
else
BuildMI(MBB, MBBI, TII.get(ARM::tMOVr), ARM::SP).addReg(FramePtr);
} else {
@@ -1420,9 +1314,9 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
&MBB.front() != MBBI &&
prior(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = prior(MBBI);
- emitSPUpdate(MBB, PMBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, PMBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
} else
- emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
}
} else {
// Darwin ABI requires FP to point to the stack slot that contains the
@@ -1443,23 +1337,23 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
BuildMI(MBB, MBBI, TII.get(ARM::MOVr), ARM::SP).addReg(FramePtr)
.addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
} else if (NumBytes) {
- emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, false, TII);
+ emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, false, TII, *this);
}
// Move SP to start of integer callee save spill area 2.
movePastCSLoadStoreOps(MBB, MBBI, ARM::FLDD, 3, STI);
emitSPUpdate(MBB, MBBI, AFI->getDPRCalleeSavedAreaSize(), ARMCC::AL, 0,
- false, TII);
+ false, TII, *this);
// Move SP to start of integer callee save spill area 1.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 2, STI);
emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea2Size(), ARMCC::AL, 0,
- false, TII);
+ false, TII, *this);
// Move SP to SP upon entry to the function.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 1, STI);
emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea1Size(), ARMCC::AL, 0,
- false, TII);
+ false, TII, *this);
}
}
@@ -1469,7 +1363,7 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
// FIXME: Verify this is still ok when R3 is no longer being reserved.
BuildMI(MBB, MBBI, TII.get(ARM::tPOP)).addReg(ARM::R3);
- emitSPUpdate(MBB, MBBI, VARegSaveSize, ARMCC::AL, 0, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, VARegSaveSize, ARMCC::AL, 0, isThumb, TII, *this);
if (isThumb) {
BuildMI(MBB, MBBI, T