author | Jakob Stoklund Olesen <stoklund@2pi.dk> | 2012-03-01 01:43:25 +0000
committer | Jakob Stoklund Olesen <stoklund@2pi.dk> | 2012-03-01 01:43:25 +0000
commit | 66c994c2dbd1a76418fdd0acb138aa029538ffe5 (patch)
tree | e55062e6f93cf35a758d49e316c92dcda5607933 /lib/CodeGen/InlineSpiller.cpp
parent | 5c2b4ea73c8f48bb5f96c86fe437385b8fb3dcda (diff)
Make InlineSpiller bundle-aware.
Simply treat bundles as instructions. Spill code is inserted between
bundles, never inside a bundle. Rewrite all operands in a bundle at
once.
Don't attempt memory operand folding inside bundles.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@151787 91177308-0d34-0410-b5e6-96231b3b80d8
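Illustration (not part of the commit): a minimal sketch of the bundle-wide operand scan this change switches to, using the MIBundleOperands::analyzeVirtReg API that appears in the diff below. The helper name rewriteRegInBundle and its surrounding logic are assumptions made for illustration, not code from the tree.

    // Sketch: analyze and rewrite a virtual register across a whole bundle at
    // once, the pattern InlineSpiller adopts in this commit.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineInstrBundle.h"

    using namespace llvm;

    // Hypothetical helper; not part of InlineSpiller.cpp.
    static bool rewriteRegInBundle(MachineInstr *MI, unsigned OldReg,
                                   unsigned NewReg) {
      // Collect (instruction, operand index) pairs for OldReg over the bundle.
      SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
      MIBundleOperands::RegInfo RI =
        MIBundleOperands(MI).analyzeVirtReg(OldReg, &Ops);

      // A tied operand needs the same register for its use and def, so a
      // simple rewrite is not safe; the spiller bails out in this case too.
      if (RI.Tied)
        return false;

      // Rewrite every operand in the bundle in one pass.
      for (unsigned i = 0, e = Ops.size(); i != e; ++i)
        Ops[i].first->getOperand(Ops[i].second).setReg(NewReg);

      // Reloads and spills would be inserted before/after the whole bundle,
      // based on RI.Reads and RI.Writes, never inside it.
      return RI.Reads || RI.Writes;
    }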
Diffstat (limited to 'lib/CodeGen/InlineSpiller.cpp')
-rw-r--r-- | lib/CodeGen/InlineSpiller.cpp | 93
1 file changed, 48 insertions, 45 deletions
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 85e2572504..d596d8b665 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -22,6 +22,7 @@
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -173,8 +174,7 @@ private:
   void reMaterializeAll();
 
   bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
-  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
-                         const SmallVectorImpl<unsigned> &Ops,
+  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
                          MachineInstr *LoadMI = 0);
   void insertReload(LiveInterval &NewLI, SlotIndex,
                     MachineBasicBlock::iterator MI);
@@ -864,24 +864,19 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // If the instruction also writes VirtReg.reg, it had better not require the
   // same register for uses and defs.
-  bool Reads, Writes;
-  SmallVector<unsigned, 8> Ops;
-  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
-  if (Writes) {
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      MachineOperand &MO = MI->getOperand(Ops[i]);
-      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
-        markValueUsed(&VirtReg, ParentVNI);
-        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
-        return false;
-      }
-    }
+  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+  MIBundleOperands::RegInfo RI =
+    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
+  if (RI.Tied) {
+    markValueUsed(&VirtReg, ParentVNI);
+    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
+    return false;
   }
 
   // Before rematerializing into a register for a single instruction, try to
   // fold a load into the instruction. That avoids allocating a new register.
   if (RM.OrigMI->canFoldAsLoad() &&
-      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+      foldMemoryOperand(Ops, RM.OrigMI)) {
     Edit->markRematerialized(RM.ParentVNI);
     ++NumFoldedLoads;
     return true;
@@ -899,7 +894,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // Replace operands
   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-    MachineOperand &MO = MI->getOperand(Ops[i]);
+    MachineOperand &MO = MI->getOperand(Ops[i].second);
     if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
       MO.setReg(NewLI.reg);
       MO.setIsKill();
@@ -930,7 +925,7 @@ void InlineSpiller::reMaterializeAll() {
     LiveInterval &LI = LIS.getInterval(Reg);
     for (MachineRegisterInfo::use_nodbg_iterator
          RI = MRI.use_nodbg_begin(Reg);
-         MachineInstr *MI = RI.skipInstruction();)
+         MachineInstr *MI = RI.skipBundle();)
       anyRemat |= reMaterializeFor(LI, MI);
   }
   if (!anyRemat)
@@ -1009,14 +1004,22 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
   return true;
 }
 
-/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// @param MI Instruction using or defining the current register.
-/// @param Ops Operand indices from readsWritesVirtualRegister().
+/// foldMemoryOperand - Try folding stack slot references in Ops into their
+/// instructions.
+///
+/// @param Ops Operand indices from analyzeVirtReg().
 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
-/// @return True on success, and MI will be erased.
-bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr *LoadMI) {
+/// @return True on success.
+bool InlineSpiller::
+foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
+                  MachineInstr *LoadMI) {
+  if (Ops.empty())
+    return false;
+  // Don't attempt folding in bundles.
+  MachineInstr *MI = Ops.front().first;
+  if (Ops.back().first != MI || MI->isBundled())
+    return false;
+
   bool WasCopy = MI->isCopy();
   unsigned ImpReg = 0;
@@ -1024,7 +1027,7 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
   // operands.
   SmallVector<unsigned, 8> FoldOps;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-    unsigned Idx = Ops[i];
+    unsigned Idx = Ops[i].second;
     MachineOperand &MO = MI->getOperand(Idx);
     if (MO.isImplicit()) {
       ImpReg = MO.getReg();
@@ -1064,7 +1067,7 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                << *FoldMI);
   if (!WasCopy)
     ++NumFolded;
-  else if (Ops.front() == 0)
+  else if (Ops.front().second == 0)
     ++NumSpills;
   else
     ++NumReloads;
@@ -1106,8 +1109,8 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
   LiveInterval &OldLI = LIS.getInterval(Reg);
 
   // Iterate over instructions using Reg.
-  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
-       MachineInstr *MI = RI.skipInstruction();) {
+  for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
+       MachineInstr *MI = RegI.skipBundle();) {
 
     // Debug values are not allowed to affect codegen.
     if (MI->isDebugValue()) {
@@ -1136,9 +1139,9 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
       continue;
 
     // Analyze instruction.
-    bool Reads, Writes;
-    SmallVector<unsigned, 8> Ops;
-    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);
+    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+    MIBundleOperands::RegInfo RI =
+      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
 
     // Find the slot index where this instruction reads and writes OldLI.
     // This is usually the def slot, except for tied early clobbers.
@@ -1156,7 +1159,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
         SnippetCopies.insert(MI);
         continue;
       }
-      if (Writes) {
+      if (RI.Writes) {
         // Hoist the spill of a sib-reg copy.
         if (hoistSpill(OldLI, MI)) {
           // This COPY is now dead, the value is already in the stack slot.
@@ -1173,7 +1176,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     }
 
     // Attempt to fold memory ops.
-    if (foldMemoryOperand(MI, Ops))
+    if (foldMemoryOperand(Ops))
       continue;
 
     // Allocate interval around instruction.
@@ -1181,16 +1184,16 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
     NewLI.markNotSpillable();
 
-    if (Reads)
+    if (RI.Reads)
       insertReload(NewLI, Idx, MI);
 
     // Rewrite instruction operands.
     bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      MachineOperand &MO = MI->getOperand(Ops[i]);
+      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
       MO.setReg(NewLI.reg);
       if (MO.isUse()) {
-        if (!MI->isRegTiedToDefOperand(Ops[i]))
+        if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
@@ -1200,15 +1203,15 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);
 
     // FIXME: Use a second vreg if instruction has no tied ops.
-    if (Writes) {
-     if (hasLiveDef)
-      insertSpill(NewLI, OldLI, Idx, MI);
-     else {
-       // This instruction defines a dead value. We don't need to spill it,
-       // but do create a live range for the dead value.
-       VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
-       NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
-     }
+    if (RI.Writes) {
+      if (hasLiveDef)
+        insertSpill(NewLI, OldLI, Idx, MI);
+      else {
+        // This instruction defines a dead value. We don't need to spill it,
+        // but do create a live range for the dead value.
+        VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
+        NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
+      }
     }
 
     DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');