author      Evan Cheng <evan.cheng@apple.com>   2007-12-02 08:30:39 +0000
committer   Evan Cheng <evan.cheng@apple.com>   2007-12-02 08:30:39 +0000
commit      aee4af68ae2016afc5b4ec0c430e539c5810a766 (patch)
tree        2d4d78114af12fce6b3f610409ad525ee261bb60 /lib/Target
parent      0465fb5663a0108399df4c19db1afb4516328964 (diff)
Remove redundant foldMemoryOperand variants and other code clean up.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44517 91177308-0d34-0410-b5e6-96231b3b80d8
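
The patch drops the per-target single-index overloads of foldMemoryOperand and keeps only the operand-list form; targets that can fold just one operand now check Ops.size() themselves, as the hunks below show. A rough caller-side sketch of what this means in practice (the helper below is hypothetical and not part of the patch; only the foldMemoryOperand signature comes from the diff, and it assumes the 2007-era MRegisterInfo headers):

// Hypothetical caller sketch: old call sites did
//   RI.foldMemoryOperand(MI, OpNum, FrameIndex);
// after this patch they wrap the single index in a vector instead.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/MRegisterInfo.h"
using namespace llvm;

static MachineInstr *foldOneOperand(const MRegisterInfo &RI, MachineInstr *MI,
                                    unsigned OpNum, int FrameIndex) {
  SmallVector<unsigned, 2> Ops;
  Ops.push_back(OpNum);                            // single operand to fold
  return RI.foldMemoryOperand(MI, Ops, FrameIndex); // NULL if the target cannot fold
}

Only X86 actually does anything with more than one requested operand; every other target returns NULL unless Ops.size() == 1.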
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMRegisterInfo.cpp        6
-rw-r--r--  lib/Target/ARM/ARMRegisterInfo.h          16
-rw-r--r--  lib/Target/Alpha/AlphaRegisterInfo.cpp    6
-rw-r--r--  lib/Target/Alpha/AlphaRegisterInfo.h      16
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.cpp      9
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.h        16
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.cpp    7
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.h      16
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.cpp    7
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.h      16
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp        87
-rw-r--r--  lib/Target/X86/X86RegisterInfo.h          19
12 files changed, 78 insertions(+), 143 deletions(-)
diff --git a/lib/Target/ARM/ARMRegisterInfo.cpp b/lib/Target/ARM/ARMRegisterInfo.cpp
index b5c04bab0f..f1665dc0b4 100644
--- a/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -347,7 +347,11 @@ static bool isLowRegister(unsigned Reg) {
}
MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
- unsigned OpNum, int FI) const {
+ SmallVectorImpl<unsigned> &Ops,
+ int FI) const {
+ if (Ops.size() != 1) return NULL;
+
+ unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch (Opc) {
diff --git a/lib/Target/ARM/ARMRegisterInfo.h b/lib/Target/ARM/ARMRegisterInfo.h
index 97be04f421..ed53e4e2da 100644
--- a/lib/Target/ARM/ARMRegisterInfo.h
+++ b/lib/Target/ARM/ARMRegisterInfo.h
@@ -74,22 +74,12 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
- MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- int FrameIndex) const;
-
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
- int FrameIndex) const {
- return 0;
- }
-
- MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const {
- return 0;
- }
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/Alpha/AlphaRegisterInfo.cpp b/lib/Target/Alpha/AlphaRegisterInfo.cpp
index 3d1747e8cf..b8e2c26827 100644
--- a/lib/Target/Alpha/AlphaRegisterInfo.cpp
+++ b/lib/Target/Alpha/AlphaRegisterInfo.cpp
@@ -153,8 +153,10 @@ void AlphaRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
}
MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
- unsigned OpNum,
+ SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
+ if (Ops.size() != 1) return NULL;
+
// Make sure this is a reg-reg copy.
unsigned Opc = MI->getOpcode();
@@ -166,7 +168,7 @@ MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
case Alpha::CPYSS:
case Alpha::CPYST:
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
- if (OpNum == 0) { // move -> store
+ if (Ops[0] == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
diff --git a/lib/Target/Alpha/AlphaRegisterInfo.h b/lib/Target/Alpha/AlphaRegisterInfo.h
index 97d3280a09..04565f7bb2 100644
--- a/lib/Target/Alpha/AlphaRegisterInfo.h
+++ b/lib/Target/Alpha/AlphaRegisterInfo.h
@@ -48,22 +48,12 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
- int FrameIndex) const;
-
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
- int FrameIndex) const {
- return 0;
- }
-
- MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const {
- return 0;
- }
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index 94cf59bb8b..37c09331d5 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -176,8 +176,11 @@ void MipsRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
}
MachineInstr *MipsRegisterInfo::
-foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
+foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops, int FI) const
{
+ if (Ops.size() != 1) return NULL;
+
MachineInstr *NewMI = NULL;
switch (MI->getOpcode())
@@ -188,10 +191,10 @@ foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
(MI->getOperand(1).getReg() == Mips::ZERO) &&
(MI->getOperand(2).isRegister()))
{
- if (OpNum == 0) // COPY -> STORE
+ if (Ops[0] == 0) // COPY -> STORE
NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
.addImm(0).addReg(MI->getOperand(2).getReg());
- else // COPY -> LOAD
+ else // COPY -> LOAD
NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
.getReg()).addImm(0).addFrameIndex(FI);
}
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 123f6e87dc..4ebb736975 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -55,22 +55,12 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
- MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- int FrameIndex) const;
-
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
- int FrameIndex) const {
- return 0;
- }
-
- MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const {
- return 0;
- }
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 28c1fcba29..09b0e51579 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -555,11 +555,14 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
- unsigned OpNum,
- int FrameIndex) const {
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ if (Ops.size() != 1) return NULL;
+
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
// it takes more than one instruction to store it.
unsigned Opc = MI->getOpcode();
+ unsigned OpNum = Ops[0];
MachineInstr *NewMI = NULL;
if ((Opc == PPC::OR &&
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index 3fce8924d1..8647a33919 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -65,22 +65,12 @@ public:
/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
/// copy instructions, turning them into load/store instructions.
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- int FrameIndex) const;
-
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
- int FrameIndex) const {
- return 0;
- }
-
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const {
- return 0;
- }
+ SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index f3e2ff8860..fcd0dfcca2 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -148,8 +148,11 @@ void SparcRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
}
MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
- unsigned OpNum,
- int FI) const {
+ SmallVectorImpl<unsigned> &Ops,
+ int FI) const {
+ if (Ops.size() != 1) return NULL;
+
+ unsigned OpNum = Ops[0];
bool isFloat = false;
MachineInstr *NewMI = NULL;
switch (MI->getOpcode()) {
diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h
index cecbc8a87d..347b631ecf 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/lib/Target/Sparc/SparcRegisterInfo.h
@@ -59,23 +59,11 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
unsigned DestReg, const MachineInstr *Orig) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- unsigned OpNum,
+ SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
- int FrameIndex) const {
- return 0;
- }
-
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- unsigned OpNum,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
- virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 29f401ab7b..122dd9ed75 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -1140,73 +1140,58 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
}
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
- int FrameIndex) const {
- // Check switch flag
- if (NoFusing) return NULL;
- SmallVector<MachineOperand,4> MOs;
- MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
- return foldMemoryOperand(MI, OpNum, MOs);
-}
-
MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
// Check switch flag
if (NoFusing) return NULL;
- if (UseOps.size() == 1)
- return foldMemoryOperand(MI, UseOps[0], FrameIndex);
- else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ } else if (Ops.size() != 1)
return NULL;
- unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
- default: return NULL;
- case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
- }
- // Change to CMPXXri r, 0 first.
- MI->setInstrDescriptor(TII.get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
- return foldMemoryOperand(MI, 0, FrameIndex);
-}
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
- MachineInstr *LoadMI) const {
- // Check switch flag
- if (NoFusing) return NULL;
SmallVector<MachineOperand,4> MOs;
- unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
- for (unsigned i = NumOps - 4; i != NumOps; ++i)
- MOs.push_back(LoadMI->getOperand(i));
- return foldMemoryOperand(MI, OpNum, MOs);
+ MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+ return foldMemoryOperand(MI, Ops[0], MOs);
}
MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
// Check switch flag
if (NoFusing) return NULL;
- if (UseOps.size() == 1)
- return foldMemoryOperand(MI, UseOps[0], LoadMI);
- else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ } else if (Ops.size() != 1)
return NULL;
- unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
- default: return NULL;
- case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
- }
- // Change to CMPXXri r, 0 first.
- MI->setInstrDescriptor(TII.get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
- return foldMemoryOperand(MI, 0, LoadMI);
+
+ SmallVector<MachineOperand,4> MOs;
+ unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
+ for (unsigned i = NumOps - 4; i != NumOps; ++i)
+ MOs.push_back(LoadMI->getOperand(i));
+ return foldMemoryOperand(MI, Ops[0], MOs);
}
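
The two-operand path in the X86 hunks above handles exactly one pattern: TEST r, r, where both requested operand indices refer to the same register. The instruction is first rewritten in place to CMP r, 0, which leaves a single register use, and the ordinary single-operand folding then replaces that use with the frame index or the load address. A condensed restatement of that rewrite step (same opcodes and calls as the patch; the helper name is invented, and it is assumed to live inside lib/Target/X86 where the X86:: opcode enum, MachineInstr and TargetInstrInfo are already visible):

// Illustrative helper, restating the TEST->CMP rewrite from the diff above.
static bool rewriteTestToCmpZero(MachineInstr *MI, const TargetInstrInfo &TII) {
  unsigned NewOpc;
  switch (MI->getOpcode()) {
  default:                  return false;            // only TEST r,r folds this way
  case X86::TEST8rr:        NewOpc = X86::CMP8ri;    break;
  case X86::TEST16rr:       NewOpc = X86::CMP16ri;   break;
  case X86::TEST32rr:       NewOpc = X86::CMP32ri;   break;
  case X86::TEST64rr:       NewOpc = X86::CMP64ri32; break;
  }
  MI->setInstrDescriptor(TII.get(NewOpc));  // TEST r,r  ->  CMP r,0, in place
  MI->getOperand(1).ChangeToImmediate(0);   // second register use becomes the 0 immediate
  return true;                              // caller then folds operand 0 into memory
}

With the second use turned into an immediate, only operand 0 remains, so both overloads can fall through to the shared MOs-based folding path at the end of each function.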
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index 53f08440a3..c74d2e769d 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -133,32 +133,19 @@ public:
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
- /// specified operand. If this is possible, the target should perform the
+ /// specified operand(s). If this is possible, the target should perform the
/// folding and return true, otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstruction the iterator
/// references has been changed.
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- unsigned OpNum,
- int FrameIndex) const;
-
- /// foldMemoryOperand - Same as previous except it tries to fold instruction
- /// with multiple uses of the same register.
- MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr* foldMemoryOperand(MachineInstr* MI,
- unsigned OpNum,
- MachineInstr* LoadMI) const;
-
- /// foldMemoryOperand - Same as the previous version except it allows folding
- /// of any load and store from / to any address, not just from a specific
- /// stack slot.
- MachineInstr* foldMemoryOperand(MachineInstr* MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
/// getOpcodeAfterMemoryFold - Returns the opcode of the would be new
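
For the LoadMI overload kept at the end of the header, the X86 implementation in the .cpp hunk above copies the last four operands of the load (its address) into MOs and folds them in place of the chosen register use. A hypothetical caller sketch, mirroring the stack-slot variant shown earlier (only foldMemoryOperand and its arguments come from the diff; the helper name and include path are assumptions):

#include "llvm/ADT/SmallVector.h"
#include "X86RegisterInfo.h"   // assumed to be built inside lib/Target/X86
using namespace llvm;

// Fold a load directly into one operand of MI instead of going through a
// stack slot; returns NULL when the target cannot perform the fold.
static MachineInstr *foldLoadInto(const X86RegisterInfo &RI, MachineInstr *MI,
                                  unsigned OpNum, MachineInstr *LoadMI) {
  SmallVector<unsigned, 2> Ops;
  Ops.push_back(OpNum);                          // fold the load into this operand
  return RI.foldMemoryOperand(MI, Ops, LoadMI);
}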