| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Jakob Stoklund Olesen <stoklund@2pi.dk>          | 2010-03-11 00:11:33 +0000 |
| committer | Jakob Stoklund Olesen <stoklund@2pi.dk>          | 2010-03-11 00:11:33 +0000 |
| commit    | 2cb4202f0748ed1e1a40a85dc9cc805629262abf (patch) |                           |
| tree      | b722b6d187d005dee9d4f2b10dccc5d0e645e045         |                           |
| parent    | bcc5cb406172edcb68598e9578bab9ed9bd0de85 (diff)  |                           |
VirtRegRewriter spring cleaning. No functional change.
Move methods out of line and M-x whitespace-cleanup.
Promote common method arguments to member variables.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98207 91177308-0d34-0410-b5e6-96231b3b80d8
| mode       | path                            | lines changed |
|------------|---------------------------------|---------------|
| -rw-r--r-- | lib/CodeGen/VirtRegRewriter.cpp | 2547          |

1 file changed, 1292 insertions, 1255 deletions
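The patch below is mechanical, so the pattern behind it is easy to lose in the noise: arguments that every helper received (the `VirtRegMap`, the basic block currently being rewritten) become `LocalRewriter` member variables, and the method bodies move out of the class definition. Here is a minimal sketch of that refactoring under invented `Context`/`Rewriter` names — an illustration of the pattern, not code from this patch:

```cpp
#include <cstddef>
#include <string>
#include <vector>

struct Context { std::vector<std::string> Blocks; };

// Before: shared state is threaded through every helper's signature, and
// the helper bodies live inline in the class definition.
class RewriterBefore {
public:
  int run(Context &Ctx) {
    int Changed = 0;
    for (size_t I = 0; I != Ctx.Blocks.size(); ++I)
      Changed += rewriteBlock(Ctx, I); // Ctx threaded through every call.
    return Changed;
  }

private:
  int rewriteBlock(Context &Ctx, size_t Idx) {
    return Ctx.Blocks[Idx].empty() ? 0 : 1;
  }
};

// After: the common arguments are promoted to member variables, set up once
// in run(), and the method bodies move out of line.
class RewriterAfter {
  Context *Ctx;    // Promoted from a parameter.
  size_t CurBlock; // Block currently being processed.

public:
  int run(Context &C);

private:
  int rewriteBlock(); // Signature shrinks.
};

int RewriterAfter::run(Context &C) {
  Ctx = &C; // Capture the shared state once.
  int Changed = 0;
  for (CurBlock = 0; CurBlock != Ctx->Blocks.size(); ++CurBlock)
    Changed += rewriteBlock();
  return Changed;
}

int RewriterAfter::rewriteBlock() {
  return Ctx->Blocks[CurBlock].empty() ? 0 : 1;
}
```

The same payoff is visible throughout the diff: every private helper loses the threaded parameters (compare the old `OptimizeByUnfold2(unsigned, int, MachineBasicBlock&, ..., VirtRegMap&)` with the new declaration), and the bodies drop one indentation level when they move out of line.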
```diff
diff --git a/lib/CodeGen/VirtRegRewriter.cpp b/lib/CodeGen/VirtRegRewriter.cpp
index 7aa0a91535..69a4d20d37 100644
--- a/lib/CodeGen/VirtRegRewriter.cpp
+++ b/lib/CodeGen/VirtRegRewriter.cpp
@@ -98,7 +98,7 @@ struct TrivialRewriter : public VirtRegRewriter {
   bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                             LiveIntervals* LIs) {
     DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
-    DEBUG(dbgs() << "********** Function: " 
+    DEBUG(dbgs() << "********** Function: "
           << MF.getFunction()->getName() << '\n');
     DEBUG(dbgs() << "**** Machine Instrs"
                  << "(NOTE! Does not include spills and reloads!) ****\n");
@@ -135,10 +135,10 @@ struct TrivialRewriter : public VirtRegRewriter {
         changed |= !reglist.empty();
       }
     }
-    
+
     DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
     DEBUG(MF.dump());
-    
+
     return changed;
   }

@@ -208,7 +208,7 @@ public:
   /// in the specified physreg.  If CanClobber is true, the physreg can be
   /// modified at any time without changing the semantics of the program.
   void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
-    // If this stack slot is thought to be available in some other physreg, 
+    // If this stack slot is thought to be available in some other physreg,
     // remove its record.
     ModifyStackSlotOrReMat(SlotOrReMat);

@@ -364,7 +364,7 @@ struct ReusedOp {

   // AssignedPhysReg - The physreg that was assigned for use by the reload.
   unsigned AssignedPhysReg;
-  
+
   // VirtReg - The virtual register itself.
   unsigned VirtReg;

@@ -384,11 +384,11 @@ public:
   ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
     PhysRegsClobbered.resize(tri->getNumRegs());
   }
-  
+
   bool hasReuses() const {
     return !Reuses.empty();
   }
-  
+
   /// addReuse - If we choose to reuse a virtual register that is already
   /// available instead of reloading it, remember that we did so.
   void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
@@ -397,9 +397,9 @@ public:
     // If the reload is to the assigned register anyway, no undo will be
     // required.
     if (PhysRegReused == AssignedPhysReg) return;
-    
+
    // Otherwise, remember this.
-    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused, 
+    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                               AssignedPhysReg, VirtReg));
   }

@@ -410,10 +410,10 @@ public:
   bool isClobbered(unsigned PhysReg) const {
     return PhysRegsClobbered.test(PhysReg);
   }
-  
+
   /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
   /// is some other operand that is using the specified register, either pick
-  /// a new register to use, or evict the previous reload and use this reg. 
+  /// a new register to use, or evict the previous reload and use this reg.
   unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                            MachineFunction &MF, MachineInstr *MI,
                            AvailableSpills &Spills,
@@ -525,7 +525,7 @@ static void InvalidateKills(MachineInstr &MI,
 /// reference.
 static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                              MachineInstr &NewDef, unsigned Reg,
-                             bool &HasLiveDef, 
+                             bool &HasLiveDef,
                              const TargetRegisterInfo *TRI) {
   // Due to remat, it's possible this reg isn't being reused. That is,
   // the def of this reg (by prev MI) is now dead.
@@ -579,7 +579,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
     unsigned Reg = MO.getReg();
     if (Reg == 0)
       continue;
-    
+
     if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
       // That can't be right. Register is killed but not re-defined and it's
       // being reused. Let's fix that.
@@ -597,7 +597,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
       }
     } else {
       // Check for subreg kills as well.
-      // d4 = 
+      // d4 =
       // store d4, fi#0
       // ...
       //    = s8<kill>
@@ -802,7 +802,7 @@ void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
   if (It == SpillSlotsOrReMatsAvailable.end()) return;
   unsigned Reg = It->second >> 1;
   SpillSlotsOrReMatsAvailable.erase(It);
-  
+
   // This register may hold the value of multiple stack slots, only remove this
   // stack slot from the set of values the register contains.
   std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
@@ -832,7 +832,7 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                     VirtRegMap &VRM) {
   const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
   const TargetRegisterInfo *TRI = Spills.getRegInfo();
-  
+
   if (Reuses.empty()) return PhysReg;  // This is most often empty.

   for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
@@ -853,7 +853,7 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
     } else {
       // Otherwise, we might also have a problem if a previously reused
       // value aliases the new register. If so, codegen the previous reload
-      // and use this one. 
+      // and use this one.
       unsigned PRRU = Op.PhysRegReused;
       if (TRI->regsOverlap(PRRU, PhysReg)) {
         // Okay, we found out that an alias of a reused register
@@ -900,13 +900,13 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
       if (DoReMat) {
         ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                       TRI, VRM);
-      } else { 
+      } else {
         TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                   NewOp.StackSlotOrReMat, AliasRC);
         MachineInstr *LoadMI = prior(InsertLoc);
         VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
         // Any stores to this stack slot are not dead anymore.
-        MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; 
+        MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
         ++NumLoads;
       }
       Spills.ClobberPhysReg(NewPhysReg);
@@ -919,10 +919,10 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
       Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
       UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
       DEBUG(dbgs() << '\t' << *prior(InsertLoc));
-      
+
       DEBUG(dbgs() << "Reuse undone!\n");
       --NumReused;
-      
+
       // Finally, PhysReg is now available, go ahead and use it.
      return PhysReg;
    }
@@ -1037,1410 +1037,1447 @@ void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
 }

 namespace {
-  struct RefSorter {
-    bool operator()(const std::pair<MachineInstr*, int> &A,
-                    const std::pair<MachineInstr*, int> &B) {
-      return A.second < B.second;
-    }
-  };
-}
+
+struct RefSorter {
+  bool operator()(const std::pair<MachineInstr*, int> &A,
+                  const std::pair<MachineInstr*, int> &B) {
+    return A.second < B.second;
+  }
+};

 // ***************************** //
 // Local Spiller Implementation  //
 // ***************************** //

-namespace {
-
 class LocalRewriter : public VirtRegRewriter {
-  MachineRegisterInfo *RegInfo;
+  MachineRegisterInfo *MRI;
   const TargetRegisterInfo *TRI;
   const TargetInstrInfo *TII;
+  VirtRegMap *VRM;
   BitVector AllocatableRegs;
   DenseMap<MachineInstr*, unsigned> DistanceMap;
-public:
-
-  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
-                            LiveIntervals* LIs) {
-    RegInfo = &MF.getRegInfo();
-    TRI = MF.getTarget().getRegisterInfo();
-    TII = MF.getTarget().getInstrInfo();
-    AllocatableRegs = TRI->getAllocatableSet(MF);
-    DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
-          << MF.getFunction()->getName() << "':\n");
-    DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
-          " reloads!) ****\n");
-    DEBUG(MF.dump());
-
-    // Spills - Keep track of which spilled values are available in physregs
-    // so that we can choose to reuse the physregs instead of emitting
-    // reloads. This is usually refreshed per basic block.
-    AvailableSpills Spills(TRI, TII);
-
-    // Keep track of kill information.
-    BitVector RegKills(TRI->getNumRegs());
-    std::vector<MachineOperand*> KillOps;
-    KillOps.resize(TRI->getNumRegs(), NULL);
-
-    // SingleEntrySuccs - Successor blocks which have a single predecessor.
-    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
-    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
-
-    // Traverse the basic blocks depth first.
-    MachineBasicBlock *Entry = MF.begin();
-    SmallPtrSet<MachineBasicBlock*,16> Visited;
-    for (df_ext_iterator<MachineBasicBlock*,
-                         SmallPtrSet<MachineBasicBlock*,16> >
-           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
-         DFI != E; ++DFI) {
-      MachineBasicBlock *MBB = *DFI;
-      if (!EarlyVisited.count(MBB))
-        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
-
-      // If this MBB is the only predecessor of a successor. Keep the
-      // availability information and visit it next.
-      do {
-        // Keep visiting single predecessor successor as long as possible.
-        SinglePredSuccs.clear();
-        findSinglePredSuccessor(MBB, SinglePredSuccs);
-        if (SinglePredSuccs.empty())
-          MBB = 0;
-        else {
-          // FIXME: More than one successors, each of which has MBB has
-          // the only predecessor.
-          MBB = SinglePredSuccs[0];
-          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
-            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
-            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
-          }
-        }
-      } while (MBB);
-      // Clear the availability info.
-      Spills.clear();
-    }
-
-    DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
-    DEBUG(MF.dump());
+  MachineBasicBlock *MBB;       // Basic block currently being processed.

-    // Mark unused spill slots.
-    MachineFrameInfo *MFI = MF.getFrameInfo();
-    int SS = VRM.getLowSpillSlot();
-    if (SS != VirtRegMap::NO_STACK_SLOT)
-      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
-        if (!VRM.isSpillSlotUsed(SS)) {
-          MFI->RemoveStackObject(SS);
-          ++NumDSS;
-        }
+public:

-    return true;
-  }
+  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
+                            LiveIntervals* LIs);

 private:
-  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
-  /// a scratch register is available.
-  ///     xorq  %r12<kill>, %r13
-  ///     addq  %rax, -184(%rbp)
-  ///     addq  %r13, -184(%rbp)
-  /// ==>
-  ///     xorq  %r12<kill>, %r13
-  ///     movq  -184(%rbp), %r12
-  ///     addq  %rax, %r12
-  ///     addq  %r13, %r12
-  ///     movq  %r12, -184(%rbp)
   bool OptimizeByUnfold2(unsigned VirtReg, int SS,
-                         MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          std::vector<MachineInstr*> &MaybeDeadStores,
                          AvailableSpills &Spills,
                          BitVector &RegKills,
-                         std::vector<MachineOperand*> &KillOps,
-                         VirtRegMap &VRM) {
+                         std::vector<MachineOperand*> &KillOps);

-    MachineBasicBlock::iterator NextMII = llvm::next(MII);
-    if (NextMII == MBB.end())
-      return false;
+  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
+                        std::vector<MachineInstr*> &MaybeDeadStores,
+                        AvailableSpills &Spills,
+                        BitVector &RegKills,
+                        std::vector<MachineOperand*> &KillOps);

-    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
-      return false;
+  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
+                           unsigned VirtReg, unsigned SrcReg, int SS,
+                           AvailableSpills &Spills,
+                           BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps,
+                           const TargetRegisterInfo *TRI);

-    // Now let's see if the last couple of instructions happens to have freed up
-    // a register.
-    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
-    if (!PhysReg)
-      return false;
+  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
+                           int Idx, unsigned PhysReg, int StackSlot,
+                           const TargetRegisterClass *RC,
+                           bool isAvailable, MachineInstr *&LastStore,
+                           AvailableSpills &Spills,
+                           SmallSet<MachineInstr*, 4> &ReMatDefs,
+                           BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps);

-    MachineFunction &MF = *MBB.getParent();
-    TRI = MF.getTarget().getRegisterInfo();
-    MachineInstr &MI = *MII;
-    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
-      return false;
+  void TransferDeadness(unsigned CurDist,
+                        unsigned Reg, BitVector &RegKills,
+                        std::vector<MachineOperand*> &KillOps);

-    // If the next instruction also folds the same SS modref and can be unfoled,
-    // then it's worthwhile to issue a load from SS into the free register and
-    // then unfold these instructions.
-    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
-      return false;
+  void RewriteMBB(LiveIntervals *LIs,
+                  AvailableSpills &Spills, BitVector &RegKills,
+                  std::vector<MachineOperand*> &KillOps);
+};
+}

-    // Back-schedule reloads and remats.
-    ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);
+bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
+                                         LiveIntervals* LIs) {
+  MRI = &MF.getRegInfo();
+  TRI = MF.getTarget().getRegisterInfo();
+  TII = MF.getTarget().getInstrInfo();
+  VRM = &vrm;
+  AllocatableRegs = TRI->getAllocatableSet(MF);
+  DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
+        << MF.getFunction()->getName() << "':\n");
+  DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
****\n"); + DEBUG(MF.dump()); + + // Spills - Keep track of which spilled values are available in physregs + // so that we can choose to reuse the physregs instead of emitting + // reloads. This is usually refreshed per basic block. + AvailableSpills Spills(TRI, TII); + + // Keep track of kill information. + BitVector RegKills(TRI->getNumRegs()); + std::vector<MachineOperand*> KillOps; + KillOps.resize(TRI->getNumRegs(), NULL); + + // SingleEntrySuccs - Successor blocks which have a single predecessor. + SmallVector<MachineBasicBlock*, 4> SinglePredSuccs; + SmallPtrSet<MachineBasicBlock*,16> EarlyVisited; + + // Traverse the basic blocks depth first. + MachineBasicBlock *Entry = MF.begin(); + SmallPtrSet<MachineBasicBlock*,16> Visited; + for (df_ext_iterator<MachineBasicBlock*, + SmallPtrSet<MachineBasicBlock*,16> > + DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited); + DFI != E; ++DFI) { + MBB = *DFI; + if (!EarlyVisited.count(MBB)) + RewriteMBB(LIs, Spills, RegKills, KillOps); + + // If this MBB is the only predecessor of a successor. Keep the + // availability information and visit it next. + do { + // Keep visiting single predecessor successor as long as possible. + SinglePredSuccs.clear(); + findSinglePredSuccessor(MBB, SinglePredSuccs); + if (SinglePredSuccs.empty()) + MBB = 0; + else { + // FIXME: More than one successors, each of which has MBB has + // the only predecessor. + MBB = SinglePredSuccs[0]; + if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) { + Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps); + RewriteMBB(LIs, Spills, RegKills, KillOps); + } + } + } while (MBB); - // Load from SS to the spare physical register. - TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC); - // This invalidates Phys. - Spills.ClobberPhysReg(PhysReg); - // Remember it's available. - Spills.addAvailable(SS, PhysReg); - MaybeDeadStores[SS] = NULL; + // Clear the availability info. + Spills.clear(); + } - // Unfold current MI. - SmallVector<MachineInstr*, 4> NewMIs; - if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs)) + DEBUG(dbgs() << "**** Post Machine Instrs ****\n"); + DEBUG(MF.dump()); + + // Mark unused spill slots. + MachineFrameInfo *MFI = MF.getFrameInfo(); + int SS = VRM->getLowSpillSlot(); + if (SS != VirtRegMap::NO_STACK_SLOT) + for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) + if (!VRM->isSpillSlotUsed(SS)) { + MFI->RemoveStackObject(SS); + ++NumDSS; + } + + return true; +} + +/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if +/// a scratch register is available. +/// xorq %r12<kill>, %r13 +/// addq %rax, -184(%rbp) +/// addq %r13, -184(%rbp) +/// ==> +/// xorq %r12<kill>, %r13 +/// movq -184(%rbp), %r12 +/// addq %rax, %r12 +/// addq %r13, %r12 +/// movq %r12, -184(%rbp) +bool LocalRewriter:: +OptimizeByUnfold2(unsigned VirtReg, int SS, + MachineBasicBlock::iterator &MII, + std::vector<MachineInstr*> &MaybeDeadStores, + AvailableSpills &Spills, + BitVector &RegKills, + std::vector<MachineOperand*> &KillOps) { + + MachineBasicBlock::iterator NextMII = llvm::next(MII); + if (NextMII == MBB->end()) + return false; + + if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0) + return false; + + // Now let's see if the last couple of instructions happens to have freed up + // a register. 
+  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
+  if (!PhysReg)
+    return false;
+
+  MachineFunction &MF = *MBB->getParent();
+  TRI = MF.getTarget().getRegisterInfo();
+  MachineInstr &MI = *MII;
+  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
+    return false;
+
+  // If the next instruction also folds the same SS modref and can be unfoled,
+  // then it's worthwhile to issue a load from SS into the free register and
+  // then unfold these instructions.
+  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
+    return false;
+
+  // Back-schedule reloads and remats.
+  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
+
+  // Load from SS to the spare physical register.
+  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC);
+  // This invalidates Phys.
+  Spills.ClobberPhysReg(PhysReg);
+  // Remember it's available.
+  Spills.addAvailable(SS, PhysReg);
+  MaybeDeadStores[SS] = NULL;
+
+  // Unfold current MI.
+  SmallVector<MachineInstr*, 4> NewMIs;
+  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
+    llvm_unreachable("Unable unfold the load / store folding instruction!");
+  assert(NewMIs.size() == 1);
+  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
+  VRM->transferRestorePts(&MI, NewMIs[0]);
+  MII = MBB->insert(MII, NewMIs[0]);
+  InvalidateKills(MI, TRI, RegKills, KillOps);
+  VRM->RemoveMachineInstrFromMaps(&MI);
+  MBB->erase(&MI);
+  ++NumModRefUnfold;
+
+  // Unfold next instructions that fold the same SS.
+  do {
+    MachineInstr &NextMI = *NextMII;
+    NextMII = llvm::next(NextMII);
+    NewMIs.clear();
+    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
       llvm_unreachable("Unable unfold the load / store folding instruction!");
     assert(NewMIs.size() == 1);
     AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
-    VRM.transferRestorePts(&MI, NewMIs[0]);
-    MII = MBB.insert(MII, NewMIs[0]);
-    InvalidateKills(MI, TRI, RegKills, KillOps);
-    VRM.RemoveMachineInstrFromMaps(&MI);
-    MBB.erase(&MI);
+    VRM->transferRestorePts(&NextMI, NewMIs[0]);
+    MBB->insert(NextMII, NewMIs[0]);
+    InvalidateKills(NextMI, TRI, RegKills, KillOps);
+    VRM->RemoveMachineInstrFromMaps(&NextMI);
+    MBB->erase(&NextMI);
     ++NumModRefUnfold;
+    if (NextMII == MBB->end())
+      break;
+  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));

-    // Unfold next instructions that fold the same SS.
-    do {
-      MachineInstr &NextMI = *NextMII;
-      NextMII = llvm::next(NextMII);
-      NewMIs.clear();
-      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
-        llvm_unreachable("Unable unfold the load / store folding instruction!");
-      assert(NewMIs.size() == 1);
-      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
-      VRM.transferRestorePts(&NextMI, NewMIs[0]);
-      MBB.insert(NextMII, NewMIs[0]);
-      InvalidateKills(NextMI, TRI, RegKills, KillOps);
-      VRM.RemoveMachineInstrFromMaps(&NextMI);
-      MBB.erase(&NextMI);
-      ++NumModRefUnfold;
-      if (NextMII == MBB.end())
-        break;
-    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));
-
-    // Store the value back into SS.
-    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
-    MachineInstr *StoreMI = prior(NextMII);
-    VRM.addSpillSlotUse(SS, StoreMI);
-    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+  // Store the value back into SS.
+  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC);
+  MachineInstr *StoreMI = prior(NextMII);
+  VRM->addSpillSlotUse(SS, StoreMI);
+  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

-    return true;
-  }
+  return true;
+}

-  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
-  /// instruction. e.g.
-  ///     xorl  %edi, %eax
-  ///     movl  %eax, -32(%ebp)
-  ///     movl  -36(%ebp), %eax
-  ///     orl   %eax, -32(%ebp)
-  /// ==>
-  ///     xorl  %edi, %eax
-  ///     orl   -36(%ebp), %eax
-  ///     mov   %eax, -32(%ebp)
-  /// This enables unfolding optimization for a subsequent instruction which will
-  /// also eliminate the newly introduced store instruction.
-  bool OptimizeByUnfold(MachineBasicBlock &MBB,
-                        MachineBasicBlock::iterator &MII,
-                        std::vector<MachineInstr*> &MaybeDeadStores,
-                        AvailableSpills &Spills,
-                        BitVector &RegKills,
-                        std::vector<MachineOperand*> &KillOps,
-                        VirtRegMap &VRM) {
-    MachineFunction &MF = *MBB.getParent();
-    MachineInstr &MI = *MII;
-    unsigned UnfoldedOpc = 0;
-    unsigned UnfoldPR = 0;
-    unsigned UnfoldVR = 0;
-    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
-    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
-    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
-      // Only transform a MI that folds a single register.
-      if (UnfoldedOpc)
-        return false;
-      UnfoldVR = I->second.first;
-      VirtRegMap::ModRef MR = I->second.second;
-      // MI2VirtMap be can updated which invalidate the iterator.
-      // Increment the iterator first.
-      ++I;
-      if (VRM.isAssignedReg(UnfoldVR))
+/// OptimizeByUnfold - Turn a store folding instruction into a load folding
+/// instruction. e.g.
+///     xorl  %edi, %eax
+///     movl  %eax, -32(%ebp)
+///     movl  -36(%ebp), %eax
+///     orl   %eax, -32(%ebp)
+/// ==>
+///     xorl  %edi, %eax
+///     orl   -36(%ebp), %eax
+///     mov   %eax, -32(%ebp)
+/// This enables unfolding optimization for a subsequent instruction which will
+/// also eliminate the newly introduced store instruction.
+bool LocalRewriter::
+OptimizeByUnfold(MachineBasicBlock::iterator &MII,
+                 std::vector<MachineInstr*> &MaybeDeadStores,
+                 AvailableSpills &Spills,
+                 BitVector &RegKills,
+                 std::vector<MachineOperand*> &KillOps) {
+  MachineFunction &MF = *MBB->getParent();
+  MachineInstr &MI = *MII;
+  unsigned UnfoldedOpc = 0;
+  unsigned UnfoldPR = 0;
+  unsigned UnfoldVR = 0;
+  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
+  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
+  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
+    // Only transform a MI that folds a single register.
+    if (UnfoldedOpc)
+      return false;
+    UnfoldVR = I->second.first;
+    VirtRegMap::ModRef MR = I->second.second;
+    // MI2VirtMap be can updated which invalidate the iterator.
+    // Increment the iterator first.
+    ++I;
+    if (VRM->isAssignedReg(UnfoldVR))
+      continue;
+    // If this reference is not a use, any previous store is now dead.
+    // Otherwise, the store to this stack slot is not dead anymore.
+    FoldedSS = VRM->getStackSlot(UnfoldVR);
+    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
+    if (DeadStore && (MR & VirtRegMap::isModRef)) {
+      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
+      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
         continue;
-      // If this reference is not a use, any previous store is now dead.
-      // Otherwise, the store to this stack slot is not dead anymore.
-      FoldedSS = VRM.getStackSlot(UnfoldVR);
-      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
-      if (DeadStore && (MR & VirtRegMap::isModRef)) {
-        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
-        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
-          continue;
-        UnfoldPR = PhysReg;
-        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
-                                                      false, true);
-      }
+      UnfoldPR = PhysReg;
+      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
+                                                    false, true);
     }
+  }

-    if (!UnfoldedOpc) {
-      if (!UnfoldVR)
-        return false;
+  if (!UnfoldedOpc) {
+    if (!UnfoldVR)
+      return false;

-      // Look for other unfolding opportunities.
-      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
-                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
-    }
+    // Look for other unfolding opportunities.
+    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
+                             RegKills, KillOps);
+  }

-    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
-      MachineOperand &MO = MI.getOperand(i);
-      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
-        continue;
-      unsigned VirtReg = MO.getReg();
-      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
-        continue;
-      if (VRM.isAssignedReg(VirtReg)) {
-        unsigned PhysReg = VRM.getPhys(VirtReg);
-        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
-          return false;
-      } else if (VRM.isReMaterialized(VirtReg))
-        continue;
-      int SS = VRM.getStackSlot(VirtReg);
-      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
-      if (PhysReg) {
-        if (TRI->regsOverlap(PhysReg, UnfoldPR))
-          return false;
+  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI.getOperand(i);
+    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
+      continue;
+    unsigned VirtReg = MO.getReg();
+    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
+      continue;
+    if (VRM->isAssignedReg(VirtReg)) {
+      unsigned PhysReg = VRM->getPhys(VirtReg);
+      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
+        return false;
+    } else if (VRM->isReMaterialized(VirtReg))
+      continue;
+    int SS = VRM->getStackSlot(VirtReg);
+    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+    if (PhysReg) {
+      if (TRI->regsOverlap(PhysReg, UnfoldPR))
+        return false;
+      continue;
+    }
+    if (VRM->hasPhys(VirtReg)) {
+      PhysReg = VRM->getPhys(VirtReg);
+      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
         continue;
-      }
-      if (VRM.hasPhys(VirtReg)) {
-        PhysReg = VRM.getPhys(VirtReg);
-        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
-          continue;
-      }
+    }

-      // Ok, we'll need to reload the value into a register which makes
-      // it impossible to perform the store unfolding optimization later.
-      // Let's see if it is possible to fold the load if the store is
-      // unfolded. This allows us to perform the store unfolding
-      // optimization.
-      SmallVector<MachineInstr*, 4> NewMIs;
-      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
-        assert(NewMIs.size() == 1);
-        MachineInstr *NewMI = NewMIs.back();
-        NewMIs.clear();
-        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
-        assert(Idx != -1);
-        SmallVector<unsigned, 1> Ops;
-        Ops.push_back(Idx);
-        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
-        if (FoldedMI) {
-          VRM.addSpillSlotUse(SS, FoldedMI);
-          if (!VRM.hasPhys(UnfoldVR))
-            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
-          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
-          MII = MBB.insert(MII, FoldedMI);
-          InvalidateKills(MI, TRI, RegKills, KillOps);
-          VRM.RemoveMachineInstrFromMaps(&MI);
-          MBB.erase(&MI);
-          MF.DeleteMachineInstr(NewMI);
-          return true;
-        }
+    // Ok, we'll need to reload the value into a register which makes
+    // it impossible to perform the store unfolding optimization later.
+    // Let's see if it is possible to fold the load if the store is
+    // unfolded. This allows us to perform the store unfolding
+    // optimization.
+    SmallVector<MachineInstr*, 4> NewMIs;
+    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
+      assert(NewMIs.size() == 1);
+      MachineInstr *NewMI = NewMIs.back();
+      NewMIs.clear();
+      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
+      assert(Idx != -1);
+      SmallVector<unsigned, 1> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
+      if (FoldedMI) {
+        VRM->addSpillSlotUse(SS, FoldedMI);
+        if (!VRM->hasPhys(UnfoldVR))
+          VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
+        VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+        MII = MBB->insert(MII, FoldedMI);
+        InvalidateKills(MI, TRI, RegKills, KillOps);
+        VRM->RemoveMachineInstrFromMaps(&MI);
+        MBB->erase(&MI);
         MF.DeleteMachineInstr(NewMI);
+        return true;
       }
+      MF.DeleteMachineInstr(NewMI);
     }
+  }

+  return false;
+}
+
+/// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
+/// where SrcReg is r1 and it is tied to r0. Return true if after
+/// commuting this instruction it will be r0 = op r2, r1.
+static bool CommuteChangesDestination(MachineInstr *DefMI,
+                                      const TargetInstrDesc &TID,
+                                      unsigned SrcReg,
+                                      const TargetInstrInfo *TII,
+                                      unsigned &DstIdx) {
+  if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
+    return false;
+  if (!DefMI->getOperand(1).isReg() ||
+      DefMI->getOperand(1).getReg() != SrcReg)
     return false;
+  unsigned DefIdx;
+  if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
+    return false;
+  unsigned SrcIdx1, SrcIdx2;
+  if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
+    return false;
+  if (SrcIdx1 == 1 && SrcIdx2 == 2) {
+    DstIdx = 2;
+    return true;
   }
+  return false;
+}

-  /// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
-  /// where SrcReg is r1 and it is tied to r0. Return true if after
-  /// commuting this instruction it will be r0 = op r2, r1.
-  static bool CommuteChangesDestination(MachineInstr *DefMI,
-                                        const TargetInstrDesc &TID,
-                                        unsigned SrcReg,
-                                        const TargetInstrInfo *TII,
-                                        unsigned &DstIdx) {
-    if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
+/// CommuteToFoldReload -
+/// Look for
+///   r1 = load fi#1
+///   r1 = op r1, r2<kill>
+///   store r1, fi#1
+///
+/// If op is commutable and r2 is killed, then we can xform these to
+///   r2 = op r2, fi#1
+///   store r2, fi#1
+bool LocalRewriter::
+CommuteToFoldReload(MachineBasicBlock::iterator &MII,
+                    unsigned VirtReg, unsigned SrcReg, int SS,
+                    AvailableSpills &Spills,
+                    BitVector &RegKills,
+                    std::vector<MachineOperand*> &KillOps,
+                    const TargetRegisterInfo *TRI) {
+  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
+    return false;
+
+  MachineFunction &MF = *MBB->getParent();
+  MachineInstr &MI = *MII;
+  MachineBasicBlock::iterator DefMII = prior(MII);
+  MachineInstr *DefMI = DefMII;
+  const TargetInstrDesc &TID = DefMI->getDesc();
+  unsigned NewDstIdx;
+  if (DefMII != MBB->begin() &&
+      TID.isCommutable() &&
+      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
+    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
+    unsigned NewReg = NewDstMO.getReg();
+    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
       return false;
-    if (!DefMI->getOperand(1).isReg() ||
-        DefMI->getOperand(1).getReg() != SrcReg)
+    MachineInstr *ReloadMI = prior(DefMII);
+    int FrameIdx;
+    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
+    if (DestReg != SrcReg || FrameIdx != SS)
       return false;
-    unsigned DefIdx;
-    if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
+    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
+    if (UseIdx == -1)
       return false;
-    unsigned SrcIdx1, SrcIdx2;
-    if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
+    unsigned DefIdx;
+    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
       return false;
-    if (SrcIdx1 == 1 && SrcIdx2 == 2) {
-      DstIdx = 2;
-      return true;
-    }
-    return false;
-  }
+    assert(DefMI->getOperand(DefIdx).isReg() &&
+           DefMI->getOperand(DefIdx).getReg() == SrcReg);

-  /// CommuteToFoldReload -
-  /// Look for
-  ///   r1 = load fi#1
-  ///   r1 = op r1, r2<kill>
-  ///   store r1, fi#1
-  ///
-  /// If op is commutable and r2 is killed, then we can xform these to
-  ///   r2 = op r2, fi#1
-  ///   store r2, fi#1
-  bool CommuteToFoldReload(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator &MII,
-                           unsigned VirtReg, unsigned SrcReg, int SS,
-                           AvailableSpills &Spills,
-                           BitVector &RegKills,
-                           std::vector<MachineOperand*> &KillOps,
-                           const TargetRegisterInfo *TRI,
-                           VirtRegMap &VRM) {
-    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
+    // Now commute def instruction.
+    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
+    if (!CommutedMI)
+      return false;
+    SmallVector<unsigned, 1> Ops;
+    Ops.push_back(NewDstIdx);
+    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+    // Not needed since foldMemoryOperand returns new MI.
```