author    Evan Cheng <evan.cheng@apple.com>  2009-05-03 18:32:42 +0000
committer Evan Cheng <evan.cheng@apple.com>  2009-05-03 18:32:42 +0000
commit    c781a243a3d17e7e763515794168d8fa6043f565 (patch)
tree      dcaf1b9201cdb221cb2206858891575d2b241bbb /lib/CodeGen/RegAllocLinearScan.cpp
parent    87e3caf81939db20d2359bd38df2ed206040026d (diff)
In some rare cases, the register allocator can spill registers but end up not utilizing some registers at all. The fundamental problem is that linear scan's backtracking can end up freeing more than one allocated register. However, reloads and restores might be folded into uses / defs, so the freed registers might not be used at all.
VirtRegMap keeps track of allocations, so it knows which registers are not used. As a horrible hack, stack coloring can color spill slots with *free* registers. That is, it replaces reloads and spills with copies from and to the free register. It unfolds instructions that load from and store to the spill slot and replaces them with register-using variants. Not yet enabled. This is part 1. More coming.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@70787 91177308-0d34-0410-b5e6-96231b3b80d8
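
The core idea can be sketched as a small, self-contained C++ toy (an illustration of the technique described above, not LLVM code; the instruction model and the helper colorSlotWithFreeReg are invented for this example): once the allocator knows a physical register was never handed out, reloads from and spills to a slot colored with that register become plain register copies.

#include <cstdio>
#include <vector>

// Toy instruction model: reloads/spills name a stack slot, copies name a
// physical register.
enum Op { Reload, Spill, CopyFromPhys, CopyToPhys };
struct Inst { Op op; int vreg; int slotOrReg; };

// Rewrite every reload/spill of slot SS into a copy using PhysReg, a
// register the allocator left completely unused.
static void colorSlotWithFreeReg(std::vector<Inst> &Code, int SS, int PhysReg) {
  for (size_t i = 0; i != Code.size(); ++i) {
    Inst &I = Code[i];
    if (I.op == Reload && I.slotOrReg == SS) {
      I.op = CopyFromPhys;      // vreg = load SS   ->  vreg = copy PhysReg
      I.slotOrReg = PhysReg;
    } else if (I.op == Spill && I.slotOrReg == SS) {
      I.op = CopyToPhys;        // store vreg, SS   ->  PhysReg = copy vreg
      I.slotOrReg = PhysReg;
    }
  }
}

int main() {
  // Store %v1 to slot 0, then reload it; color slot 0 with free physreg 7.
  std::vector<Inst> Code = {{Spill, 1, 0}, {Reload, 1, 0}};
  colorSlotWithFreeReg(Code, /*SS=*/0, /*PhysReg=*/7);
  for (size_t i = 0; i != Code.size(); ++i)
    std::printf("op=%d vreg=%d arg=%d\n", Code[i].op, Code[i].vreg,
                Code[i].slotOrReg);
  return 0;
}

Instructions whose memory operand was folded would first be unfolded into the register-using form of the instruction and then rewritten the same way, which is what the commit message refers to.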
Diffstat (limited to 'lib/CodeGen/RegAllocLinearScan.cpp')
-rw-r--r--  lib/CodeGen/RegAllocLinearScan.cpp  52
1 file changed, 35 insertions(+), 17 deletions(-)
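
The new FindUnusedRegisters call in the linearScan() hunk below relies on VirtRegMap knowing which physical registers were never handed out. A rough, self-contained sketch of that bookkeeping (the map type, register count, and findUnusedRegs are assumptions for illustration, not the VirtRegMap interface):

#include <cstdio>
#include <bitset>
#include <map>

static const unsigned NumPhysRegs = 32;   // assumed target register count

// Start from "every register is unused" and clear each physical register
// that some virtual register was mapped to; whatever survives was never
// allocated and is a candidate for coloring a spill slot.
static std::bitset<NumPhysRegs>
findUnusedRegs(const std::map<unsigned, unsigned> &VirtToPhys) {
  std::bitset<NumPhysRegs> Unused;
  Unused.set();
  for (std::map<unsigned, unsigned>::const_iterator I = VirtToPhys.begin(),
       E = VirtToPhys.end(); I != E; ++I)
    Unused.reset(I->second);
  return Unused;
}

int main() {
  std::map<unsigned, unsigned> VirtToPhys;
  VirtToPhys[1024] = 3;   // virtual register 1024 was assigned physreg 3
  std::printf("%zu registers never used\n",
              findUnusedRegs(VirtToPhys).count());
  return 0;
}
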
diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp
index 83c1cbb385..b5f581cc59 100644
--- a/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/lib/CodeGen/RegAllocLinearScan.cpp
@@ -216,6 +216,18 @@ namespace {
}
void finalizeRegUses() {
+#ifndef NDEBUG
+ // Verify all the registers are "freed".
+ bool Error = false;
+ for (unsigned i = 0, e = tri_->getNumRegs(); i != e; ++i) {
+ if (regUse_[i] != 0) {
+ cerr << tri_->getName(i) << " is still in use!\n";
+ Error = true;
+ }
+ }
+ if (Error)
+ abort();
+#endif
regUse_.clear();
regUseBackUp_.clear();
}
@@ -514,6 +526,13 @@ void RALinScan::linearScan()
}
DOUT << *vrm_;
+
+ // Look for physical registers that end up not being allocated even though
+ // register allocator had to spill other registers in its register class.
+ if (ls_->getNumIntervals() == 0)
+ return;
+ if (!vrm_->FindUnusedRegisters(tri_, li_))
+ return;
}
/// processActiveIntervals - expire old intervals and move non-overlapping ones
@@ -630,9 +649,9 @@ void RALinScan::updateSpillWeights(std::vector<float> &Weights,
// bl should get the same spill weight otherwise it will be choosen
// as a spill candidate since spilling bh doesn't make ebx available.
for (unsigned i = 0, e = Supers.size(); i != e; ++i) {
- for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
- if (!Processed.count(*sr))
- Weights[*sr] += weight;
+ for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
+ if (!Processed.count(*sr))
+ Weights[*sr] += weight;
}
}
@@ -658,13 +677,14 @@ static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, unsigned Point){
/// addStackInterval - Create a LiveInterval for stack if the specified live
/// interval has been spilled.
static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
- LiveIntervals *li_, float &Weight,
- VirtRegMap &vrm_) {
+ LiveIntervals *li_,
+ MachineRegisterInfo* mri_, VirtRegMap &vrm_) {
int SS = vrm_.getStackSlot(cur->reg);
if (SS == VirtRegMap::NO_STACK_SLOT)
return;
- LiveInterval &SI = ls_->getOrCreateInterval(SS);
- SI.weight += Weight;
+
+ const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
+ LiveInterval &SI = ls_->getOrCreateInterval(SS, RC);
VNInfo *VNI;
if (SI.hasAtLeastOneValue())
@@ -679,10 +699,10 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
/// getConflictWeight - Return the number of conflicts between cur
/// live interval and defs and uses of Reg weighted by loop depthes.
-static float getConflictWeight(LiveInterval *cur, unsigned Reg,
- LiveIntervals *li_,
- MachineRegisterInfo *mri_,
- const MachineLoopInfo *loopInfo) {
+static
+float getConflictWeight(LiveInterval *cur, unsigned Reg, LiveIntervals *li_,
+ MachineRegisterInfo *mri_,
+ const MachineLoopInfo *loopInfo) {
float Conflicts = 0;
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(Reg),
E = mri_->reg_end(); I != E; ++I) {
@@ -1072,12 +1092,11 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
// linearscan.
if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
DOUT << "\t\t\tspilling(c): " << *cur << '\n';
- float SSWeight;
SmallVector<LiveInterval*, 8> spillIs;
std::vector<LiveInterval*> added =
- li_->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm_, SSWeight);
+ li_->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm_);
std::sort(added.begin(), added.end(), LISorter());
- addStackInterval(cur, ls_, li_, SSWeight, *vrm_);
+ addStackInterval(cur, ls_, li_, mri_, *vrm_);
if (added.empty())
return; // Early exit if all spills were folded.
@@ -1149,10 +1168,9 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
spillIs.pop_back();
DOUT << "\t\t\tspilling(a): " << *sli << '\n';
earliestStart = std::min(earliestStart, sli->beginNumber());
- float SSWeight;
std::vector<LiveInterval*> newIs =
- li_->addIntervalsForSpills(*sli, spillIs, loopInfo, *vrm_, SSWeight);
- addStackInterval(sli, ls_, li_, SSWeight, *vrm_);
+ li_->addIntervalsForSpills(*sli, spillIs, loopInfo, *vrm_);
+ addStackInterval(sli, ls_, li_, mri_, *vrm_);
std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(sli->reg);
}