author    | Dan Gohman <gohman@apple.com>                                        | 2008-07-07 23:14:23 +0000
committer | Dan Gohman <gohman@apple.com>                                        | 2008-07-07 23:14:23 +0000
commit    | 8e5f2c6f65841542e2a7092553fe42a00048e4c7 (patch)                     |
tree      | 24fe54b796f3f450ba6aff12b7357068ca66e341 /lib/CodeGen/VirtRegMap.cpp |
parent    | 0e5f1306b059b62d7725f324e087efbc8e7a782d (diff)                      |
Pool-allocation for MachineInstrs, MachineBasicBlocks, and
MachineMemOperands. The pools are owned by MachineFunctions.
This drastically reduces the number of calls to malloc/free made
during the "Emit" phase of scheduling, as well as later phases
in CodeGen. Combined with other changes, this speeds up the
"instruction selection" phase of CodeGen by 10% in some cases.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@53212 91177308-0d34-0410-b5e6-96231b3b80d8
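The commit message describes the technique only at a high level. The sketch below is a generic, self-contained illustration of that idea in plain C++, not LLVM's implementation: all names here (ObjectPool, Slot, Instr, SlabSize) are hypothetical. Objects are carved out of large slabs owned by the pool, and "deleted" objects go back onto a free list for reuse instead of being returned to the heap one at a time, which is what cuts the malloc/free traffic the message refers to.

```cpp
// Minimal pool-allocation sketch (hypothetical names, not LLVM code).
#include <cstddef>
#include <iostream>
#include <new>
#include <utility>
#include <vector>

template <typename T, std::size_t SlabSize = 64>
class ObjectPool {
  union Slot {
    Slot *Next;                                // link while on the free list
    alignas(T) unsigned char Raw[sizeof(T)];   // storage while the object is live
  };
  std::vector<Slot *> Slabs;  // every slab ever allocated; released in the destructor
  Slot *FreeList = nullptr;   // slots available for reuse

  void grow() {
    // One heap allocation provides SlabSize slots.
    Slot *Slab = new Slot[SlabSize];
    Slabs.push_back(Slab);
    for (std::size_t i = 0; i != SlabSize; ++i) {
      Slab[i].Next = FreeList;
      FreeList = &Slab[i];
    }
  }

public:
  ObjectPool() = default;
  ObjectPool(const ObjectPool &) = delete;
  ObjectPool &operator=(const ObjectPool &) = delete;

  template <typename... Args> T *create(Args &&...A) {
    if (!FreeList)
      grow();
    Slot *S = FreeList;
    FreeList = S->Next;
    return new (S->Raw) T(std::forward<Args>(A)...);  // construct in recycled storage
  }

  void destroy(T *Obj) {
    Obj->~T();                                 // run the destructor, keep the memory
    Slot *S = reinterpret_cast<Slot *>(Obj);
    S->Next = FreeList;
    FreeList = S;
  }

  ~ObjectPool() {
    // The owning object frees all slabs at once.
    for (Slot *Slab : Slabs)
      delete[] Slab;
  }
};

// Hypothetical stand-in for an instruction object.
struct Instr {
  int Opcode;
  explicit Instr(int Op) : Opcode(Op) {}
};

int main() {
  ObjectPool<Instr> Pool;       // in LLVM, the MachineFunction plays this owner role
  Instr *A = Pool.create(1);
  Instr *B = Pool.create(2);
  Pool.destroy(A);              // slot is recycled, not handed back to the heap
  Instr *C = Pool.create(3);    // likely reuses A's slot
  std::cout << B->Opcode + C->Opcode << '\n';
  Pool.destroy(B);
  Pool.destroy(C);
  return 0;
}
```

This ownership shift is also why the diff below replaces plain `delete` calls with `MF.DeleteMachineInstr(...)`: once the MachineFunction owns the storage, only it can legitimately reclaim it.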
Diffstat (limited to 'lib/CodeGen/VirtRegMap.cpp')
-rw-r--r-- | lib/CodeGen/VirtRegMap.cpp | 13
1 file changed, 7 insertions, 6 deletions
```diff
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 30ed107668..7696d55efa 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -604,7 +604,7 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
     return false;
 
   bool FoundUse = false, Done = false;
-  MachineBasicBlock::iterator E = NewDef;
+  MachineBasicBlock::iterator E = &NewDef;
   ++I; ++E;
   for (; !Done && I != E; ++I) {
     MachineInstr *NMI = I;
@@ -973,7 +973,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
         MBB.erase(&MI);
         return true;
       }
-      delete NewMI;
+      MF.DeleteMachineInstr(NewMI);
     }
   }
   return false;
@@ -1032,7 +1032,8 @@ bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
     SmallVector<unsigned, 2> Ops;
     Ops.push_back(NewDstIdx);
     MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
-    delete CommutedMI;  // Not needed since foldMemoryOperand returns new MI.
+    // Not needed since foldMemoryOperand returns new MI.
+    MF.DeleteMachineInstr(CommutedMI);
     if (!FoldedMI)
       return false;
 
@@ -1040,7 +1041,7 @@ bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
     VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
     // Insert new def MI and spill MI.
     const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
-    TII->storeRegToStackSlot(MBB, MI, NewReg, true, SS, RC);
+    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
     MII = prior(MII);
     MachineInstr *StoreMI = MII;
     VRM.addSpillSlotUse(SS, StoreMI);
@@ -1341,7 +1342,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
         unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
         MI.getOperand(i).setReg(RReg);
         if (VRM.isImplicitlyDefined(VirtReg))
-          BuildMI(MBB, MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
+          BuildMI(MBB, &MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
         continue;
       }
 
@@ -1814,7 +1815,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 ProcessNextInst:
     DistanceMap.insert(std::make_pair(&MI, Dist++));
     if (!Erased && !BackTracked) {
-      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
+      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
         UpdateKills(*II, RegKills, KillOps);
     }
     MII = NextMII;
```