author     Jakob Stoklund Olesen <stoklund@2pi.dk>   2010-12-18 03:04:14 +0000
committer  Jakob Stoklund Olesen <stoklund@2pi.dk>   2010-12-18 03:04:14 +0000
commit     83d1ba572815c92a74817cc68e2bb48c59a87b5d (patch)
tree       9d544dbcba21ef2fd384174d3c6b607917181ff6 /lib
parent     87c6d25c71b028695641642d86d0cf4a3ff22096 (diff)
Teach the inline spiller to attempt folding a load instruction into its single
use before rematerializing the load.

This allows us to produce:

    addps   LCPI0_1(%rip), %xmm2

Instead of:

    movaps  LCPI0_1(%rip), %xmm3
    addps   %xmm3, %xmm2

Saving a register and an instruction. The standard spiller already knows how
to do this.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122133 91177308-0d34-0410-b5e6-96231b3b80d8
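For illustration only (not part of the commit): a hypothetical C++ fragment of
the kind that exhibits this fold. The function name and constant are invented;
the point is that the vector constant is emitted into the constant pool (e.g.
as LCPI0_1), and in a larger function under register pressure the spiller can
now fold that constant-pool load straight into the addps instead of first
rematerializing it into a fresh register.

    #include <xmmintrin.h>

    // Hypothetical example: K lands in the constant pool. With this patch,
    // a spilled copy of K is folded into the addps as a memory operand
    // rather than reloaded into a register of its own.
    __m128 addConst(__m128 X) {
      const __m128 K = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
      return _mm_add_ps(X, K);
    }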
Diffstat (limited to 'lib')
-rw-r--r--   lib/CodeGen/InlineSpiller.cpp   26
-rw-r--r--   lib/CodeGen/LiveRangeEdit.h      6
2 files changed, 27 insertions, 5 deletions
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 462ca6dbc9..443b2d077a 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -85,7 +85,8 @@ private:
   bool coalesceStackAccess(MachineInstr *MI);
   bool foldMemoryOperand(MachineBasicBlock::iterator MI,
-                         const SmallVectorImpl<unsigned> &Ops);
+                         const SmallVectorImpl<unsigned> &Ops,
+                         MachineInstr *LoadMI = 0);
   void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
   void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
 };
@@ -141,6 +142,14 @@ bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
     }
   }
 
+  // Before rematerializing into a register for a single instruction, try to
+  // fold a load into the instruction. That avoids allocating a new register.
+  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
+      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+    edit_->markRematerialized(RM.ParentVNI);
+    return true;
+  }
+
   // Allocate a new register for the remat.
   LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
   NewLI.markNotSpillable();
@@ -243,9 +252,13 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
 }
 
 /// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// Return true on success, and MI will be erased.
+/// @param MI     Instruction using or defining the current register.
+/// @param Ops    Operand indices from readsWritesVirtualRegister().
+/// @param LoadMI Load instruction to use instead of stack slot when non-null.
+/// @return       True on success, and MI will be erased.
 bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                      const SmallVectorImpl<unsigned> &Ops) {
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      MachineInstr *LoadMI) {
   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
   // operands.
   SmallVector<unsigned, 8> FoldOps;
@@ -262,11 +275,14 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
     FoldOps.push_back(Idx);
   }
 
-  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
+  MachineInstr *FoldMI =
+                LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
+                       : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
   if (!FoldMI)
     return false;
   lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
-  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
+  if (!LoadMI)
+    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
   MI->eraseFromParent();
   DEBUG(dbgs() << "\tfolded: " << *FoldMI);
   return true;
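
For context, a sketch of the two TargetInstrInfo::foldMemoryOperand overloads
the ternary above selects between (paraphrased from TargetInstrInfo.h of this
era; not part of this diff):

    // Fold a load/store of a stack slot into MI for the given operands.
    MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex) const;

    // Fold an arbitrary load instruction into MI instead of a stack slot.
    MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    MachineInstr *LoadMI) const;

Passing LoadMI reuses the same folding machinery, but since no stack slot is
referenced in that case, the VirtRegMap must not record a spill slot use;
hence the new if (!LoadMI) guard above.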
diff --git a/lib/CodeGen/LiveRangeEdit.h b/lib/CodeGen/LiveRangeEdit.h
index ad248bf400..37b58279b1 100644
--- a/lib/CodeGen/LiveRangeEdit.h
+++ b/lib/CodeGen/LiveRangeEdit.h
@@ -117,6 +117,12 @@ public:
                            const TargetInstrInfo&,
                            const TargetRegisterInfo&);
 
+  /// markRematerialized - explicitly mark a value as rematerialized after doing
+  /// it manually.
+  void markRematerialized(VNInfo *ParentVNI) {
+    rematted_.insert(ParentVNI);
+  }
+
   /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
   bool didRematerialize(VNInfo *ParentVNI) const {
     return rematted_.count(ParentVNI);
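
A minimal usage sketch (hypothetical caller, not part of the diff): after
folding the original load by hand, the spiller records the remat so that later
queries see it, which is exactly what InlineSpiller::reMaterializeFor now does:

    // Assumed context: 'edit' is a LiveRangeEdit and ParentVNI is the value
    // whose defining load was just folded into its single user, outside of
    // LiveRangeEdit's own rematerialization path.
    edit.markRematerialized(ParentVNI);

    // Later passes can now treat the original def like any other remat:
    if (edit.didRematerialize(ParentVNI)) {
      // the original load may be dead and eligible for removal
    }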