| author | Chris Lattner <sabre@nondot.org> | 2005-09-19 06:56:21 +0000 |
|---|---|---|
| committer | Chris Lattner <sabre@nondot.org> | 2005-09-19 06:56:21 +0000 |
| commit | cea8688ee4637e0f3cffc75e47f15be756f043bf (patch) | |
| tree | 1b0e41edebe492610040333cd7d060e16536e2ea /lib/CodeGen/VirtRegMap.cpp | |
| parent | a92aab74dd303c81537169cb53935665199c3afc (diff) | |
Teach the local spiller to turn stack slot loads into register-register copies
when possible, avoiding the load (and avoiding the copy if the value is already
in the right register).
This patch came about when I noticed code like the following being generated:
store R17 -> [SS1]
...blah...
R4 = load [SS1]
This was causing an LSU reject on the G5. The problem was that the register
allocator had folded spill code into a reg-reg copy (producing the load), which
prevented the spiller from rewriting the load into a copy, despite the fact
that the value was already available in a register. In the case above, we now
rip out the R4 load and replace it with an R4 = R17 copy.
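With this patch, the sequence above becomes:
store R17 -> [SS1]
...blah...
R4 = R17
(and if the load's destination had already been the register holding the
value, the copy itself would be dropped as well).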
This speeds up several programs on X86 (which spills a lot :) ), e.g.
smg2k from 22.39->20.60s, povray from 12.93->12.66s, 168.wupwise from
68.54->53.83s (!), 197.parser from 7.33->6.62s (!), etc. This may have a larger
impact in some cases on the G5 (by avoiding LSU rejects), though it probably
won't trigger as often (less spilling in general).
Targets that implement folding of loads/stores into copies should implement
the isLoadFromStackSlot hook to get this.
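For illustration, such a hook might look like the minimal sketch below. The
target name Foo, the Foo::LW opcode, and the operand layout are hypothetical,
not from this patch; the contract (visible at the call site in the diff below)
is to return the destination register and set FrameIndex when the instruction
is a simple load from a stack slot, and to return 0 otherwise.

```cpp
// Sketch only: "Foo" and Foo::LW are hypothetical stand-ins for a real
// target and its load opcode; the operand order is assumed to be
// (dest-reg, offset-immediate, frame-index).
unsigned FooRegisterInfo::isLoadFromStackSlot(MachineInstr *MI,
                                              int &FrameIndex) const {
  if (MI->getOpcode() == Foo::LW &&
      MI->getOperand(1).isImmediate() &&
      MI->getOperand(1).getImmedValue() == 0 &&  // no offset into the slot
      MI->getOperand(2).isFrameIndex()) {
    FrameIndex = MI->getOperand(2).getFrameIndex();
    return MI->getOperand(0).getReg();           // the register being loaded
  }
  return 0;  // 0 means "not a simple stack slot load"
}
```

The spiller queries this through MRI->isLoadFromStackSlot(&MI, FrameIdx) in the
hunk below; a nonzero result for an available spill slot is what lets it
promote the load to a copy.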
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@23388 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/VirtRegMap.cpp')
-rw-r--r-- | lib/CodeGen/VirtRegMap.cpp | 78
1 file changed, 52 insertions, 26 deletions
```diff
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index b68f36b664..c958143d66 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -466,36 +466,61 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM) {
                       << I->second.second);
       unsigned VirtReg = I->second.first;
       VirtRegMap::ModRef MR = I->second.second;
-      if (VRM.hasStackSlot(VirtReg)) {
-        int SS = VRM.getStackSlot(VirtReg);
-        DEBUG(std::cerr << " - StackSlot: " << SS << "\n");
-
-        // If this reference is not a use, any previous store is now dead.
-        // Otherwise, the store to this stack slot is not dead anymore.
-        std::map<int, MachineInstr*>::iterator MDSI = MaybeDeadStores.find(SS);
-        if (MDSI != MaybeDeadStores.end()) {
-          if (MR & VirtRegMap::isRef)   // Previous store is not dead.
-            MaybeDeadStores.erase(MDSI);
-          else {
-            // If we get here, the store is dead, nuke it now.
-            assert(MR == VirtRegMap::isMod && "Can't be modref!");
-            MBB.erase(MDSI->second);
-            MaybeDeadStores.erase(MDSI);
-            ++NumDSE;
+      if (!VRM.hasStackSlot(VirtReg)) {
+        DEBUG(std::cerr << ": No stack slot!\n");
+        continue;
+      }
+      int SS = VRM.getStackSlot(VirtReg);
+      DEBUG(std::cerr << " - StackSlot: " << SS << "\n");
+
+      // If this folded instruction is just a use, check to see if it's a
+      // straight load from the virt reg slot.
+      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
+        int FrameIdx;
+        if (unsigned DestReg = MRI->isLoadFromStackSlot(&MI, FrameIdx)) {
+          // If this spill slot is available, insert a copy for it!
+          std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
+          if (FrameIdx == SS && It != SpillSlotsAvailable.end()) {
+            DEBUG(std::cerr << "Promoted Load To Copy: " << MI);
+            MachineFunction &MF = *MBB.getParent();
+            if (DestReg != It->second) {
+              MRI->copyRegToReg(MBB, &MI, DestReg, It->second,
+                                MF.getSSARegMap()->getRegClass(VirtReg));
+              // Revisit the copy if the destination is a vreg.
+              if (MRegisterInfo::isVirtualRegister(DestReg)) {
+                NextMII = &MI;
+                --NextMII;  // backtrack to the copy.
+              }
+            }
+            MBB.erase(&MI);
+            goto ProcessNextInst;
           }
         }
+      }
 
-        // If the spill slot value is available, and this is a new definition of
-        // the value, the value is not available anymore.
-        if (MR & VirtRegMap::isMod) {
-          std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
-          if (It != SpillSlotsAvailable.end()) {
-            PhysRegsAvailable.erase(It->second);
-            SpillSlotsAvailable.erase(It);
-          }
+      // If this reference is not a use, any previous store is now dead.
+      // Otherwise, the store to this stack slot is not dead anymore.
+      std::map<int, MachineInstr*>::iterator MDSI = MaybeDeadStores.find(SS);
+      if (MDSI != MaybeDeadStores.end()) {
+        if (MR & VirtRegMap::isRef)   // Previous store is not dead.
+          MaybeDeadStores.erase(MDSI);
+        else {
+          // If we get here, the store is dead, nuke it now.
+          assert(MR == VirtRegMap::isMod && "Can't be modref!");
+          MBB.erase(MDSI->second);
+          MaybeDeadStores.erase(MDSI);
+          ++NumDSE;
+        }
+      }
+
+      // If the spill slot value is available, and this is a new definition of
+      // the value, the value is not available anymore.
+      if (MR & VirtRegMap::isMod) {
+        std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
+        if (It != SpillSlotsAvailable.end()) {
+          PhysRegsAvailable.erase(It->second);
+          SpillSlotsAvailable.erase(It);
         }
-      } else {
-        DEBUG(std::cerr << ": No stack slot!\n");
       }
     }
 
@@ -575,6 +600,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM) {
         }
       }
     }
+  ProcessNextInst:
     MII = NextMII;
   }
 }
```