Diffstat (limited to 'lib/CodeGen/SelectionDAG/FastISel.cpp')
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 219
1 file changed, 150 insertions, 69 deletions
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 230368f2fa..02d11bbc2a 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,6 +57,25 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
+/// startNewBlock - Set the current block to which generated machine
+/// instructions will be appended, and clear the local CSE map.
+///
+void FastISel::startNewBlock() {
+ LocalValueMap.clear();
+
+ // Start out as null, meaning no local-value instructions have
+ // been emitted.
+ LastLocalValue = 0;
+
+ // Advance the last local value past any EH_LABEL instructions.
+ MachineBasicBlock::iterator
+ I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
+ while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
+ LastLocalValue = I;
+ ++I;
+ }
+}
+
bool FastISel::hasTrivialKill(const Value *V) const {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
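
[Annotation, not part of the patch] startNewBlock() seeds LastLocalValue past any EH_LABELs so that materialized local values land after the labels but before the selected code. A minimal sketch of the intended calling pattern, assuming a per-block driver loop like SelectionDAGISel's (the loop and the MBBMap lookup are assumptions, not code from this patch):

    for (Function::const_iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
      FuncInfo.MBB = FuncInfo.MBBMap[&*BB]; // block about to be selected
      FastIS->startNewBlock();              // clear LocalValueMap, reset LastLocalValue
      // ... select the instructions of *BB, materializing constants into
      // the local-value area at the top of FuncInfo.MBB ...
    }
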
@@ -101,24 +120,30 @@ unsigned FastISel::getRegForValue(const Value *V) {
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
- if (I != FuncInfo.ValueMap.end())
- return I->second;
+ if (I != FuncInfo.ValueMap.end()) {
+ unsigned Reg = I->second;
+ return Reg;
+ }
unsigned Reg = LocalValueMap[V];
if (Reg != 0)
return Reg;
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
- if (IsBottomUp) {
- Reg = createResultReg(TLI.getRegClassFor(VT));
- if (isa<Instruction>(V))
- FuncInfo.ValueMap[V] = Reg;
- else
- LocalValueMap[V] = Reg;
- return Reg;
- }
+ if (isa<Instruction>(V) &&
+ (!isa<AllocaInst>(V) ||
+ !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
+ return FuncInfo.InitializeRegForValue(V);
+
- return materializeRegForValue(V, VT);
+ MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
+
+ // Materialize the value in a register. Emit any instructions in the
+ // local value area.
+ Reg = materializeRegForValue(V, VT);
+
+ leaveLocalValueArea(SaveInsertPt);
+ return Reg;
}
/// materializeRegForValue - Helper for getRegForValue. This function is
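
[Annotation, not part of the patch] The IsBottomUp special case disappears here because bottom-up is now the only mode (see the constructor hunk below): an Instruction's value is assigned a virtual register up front via InitializeRegForValue and defined when the instruction itself is selected, while constants, arguments, and static allocas are materialized into the local-value area of the using block. A hedged restatement of the new dispatch as a predicate; the helper name is hypothetical:

    static bool valueIsDefinedBySelection(const Value *V,
                                          const FunctionLoweringInfo &FuncInfo) {
      // Instructions are selected in their own blocks, so just reserve a
      // vreg now -- except static allocas, which are rematerialized locally
      // as frame-index computations wherever they are used.
      if (!isa<Instruction>(V))
        return false;
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
        return !FuncInfo.StaticAllocaMap.count(AI);
      return true;
    }
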
@@ -169,7 +194,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
// If target-independent code couldn't handle the value, give target-specific
@@ -179,8 +205,10 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
- if (Reg != 0)
+ if (Reg != 0) {
LocalValueMap[V] = Reg;
+ LastLocalValue = MRI.getVRegDef(Reg);
+ }
return Reg;
}
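
[Annotation, not part of the patch] Recording LastLocalValue here is what keeps the local-value area contiguous: MRI.getVRegDef(Reg) is the MachineInstr that defines the just-materialized value, and recomputeInsertPt() (added below) positions FuncInfo.InsertPt right past it. Illustrative block layout (the MOV32ri is an x86 example, an assumption):

    // EH_LABEL ...           <- skipped by startNewBlock/recomputeInsertPt
    // %vreg1 = MOV32ri 42    <- LastLocalValue == MRI.getVRegDef(%vreg1)
    // <selected code>        <- FuncInfo.InsertPt for ordinary instructions
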
@@ -209,12 +237,15 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
+ // Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
- const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
- TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
- Reg, RegClass, RegClass, DL);
+ // Arrange for uses of AssignedReg to be replaced by uses of Reg.
+ FuncInfo.RegFixups[AssignedReg] = Reg;
+
+ AssignedReg = Reg;
}
+
return AssignedReg;
}
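
[Annotation, not part of the patch] Instead of emitting a register-to-register copy when an IR value is re-selected into a different vreg, the patch records the pair in FuncInfo.RegFixups and defers the rewrite. A hedged sketch of how such a map can be resolved, assuming a cleanup loop elsewhere in the selector (not shown in this patch; the helper is hypothetical):

    // Follow fixup chains to the final register.
    static unsigned resolveFixups(const DenseMap<unsigned, unsigned> &RegFixups,
                                  unsigned Reg) {
      DenseMap<unsigned, unsigned>::const_iterator I = RegFixups.find(Reg);
      while (I != RegFixups.end()) { // a fixup may itself have been fixed up
        Reg = I->second;
        I = RegFixups.find(Reg);
      }
      return Reg;
    }
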
@@ -242,6 +273,33 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
+void FastISel::recomputeInsertPt() {
+ if (getLastLocalValue()) {
+ FuncInfo.InsertPt = getLastLocalValue();
+ ++FuncInfo.InsertPt;
+ } else
+ FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
+
+ // Now skip past any EH_LABELs, which must remain at the beginning.
+ while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
+ FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
+ ++FuncInfo.InsertPt;
+}
+
+MachineBasicBlock::iterator FastISel::enterLocalValueArea() {
+ MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
+ recomputeInsertPt();
+ return OldInsertPt;
+}
+
+void FastISel::leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt) {
+ if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
+ LastLocalValue = llvm::prior(FuncInfo.InsertPt);
+
+ // Restore the previous insert position.
+ FuncInfo.InsertPt = OldInsertPt;
+}
+
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
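
[Annotation, not part of the patch] These three helpers bracket local-value emission: enterLocalValueArea() saves the insert position and jumps to the top of the block, leaveLocalValueArea() records the grown area and restores it. A hedged usage sketch, where C and VT are assumed to be a constant and its legalized type:

    MachineBasicBlock::iterator SavedPt = enterLocalValueArea();
    // InsertPt now sits just past LastLocalValue (and past any EH_LABELs),
    // so anything emitted here lands in the local-value area.
    unsigned CReg = materializeRegForValue(C, VT);
    leaveLocalValueArea(SavedPt); // record new LastLocalValue, restore InsertPt
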
@@ -434,23 +492,28 @@ bool FastISel::SelectCall(const User *I) {
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addImm(CI->getZExtValue()).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addFPImm(CF).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
// Insert an undef so we can see what we dropped.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
}
return true;
}
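
[Annotation, not part of the patch] All four DBG_VALUE forms now insert at FuncInfo.InsertPt rather than appending to the block, so debug info interleaves correctly with code that may already follow the insert point. Illustrative output of the ConstantInt arm for a hypothetical llvm.dbg.value of the constant 42 (variable name assumed):

    // DBG_VALUE 42, <offset>, !"var"
    // emitted at FuncInfo.InsertPt, not at the end of FuncInfo.MBB
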
@@ -459,12 +522,13 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
- assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
+ assert(FuncInfo.MBB->isLandingPad() &&
+ "Call to eh.exception not in landing pad!");
unsigned Reg = TLI.getExceptionAddressRegister();
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Reg, RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, Reg, RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
UpdateValueMap(I, ResultReg);
@@ -478,23 +542,23 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- if (MBB->isLandingPad())
- AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
+ if (FuncInfo.MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
else {
#ifndef NDEBUG
FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) MBB->addLiveIn(Reg);
+ if (Reg) FuncInfo.MBB->addLiveIn(Reg);
}
unsigned Reg = TLI.getExceptionSelectorRegister();
EVT SrcVT = TLI.getPointerTy();
const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, Reg, RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
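
[Annotation, not part of the patch] Both EH intrinsics expand to a copy out of a fixed physical register; in bottom-up selection the end of the block may already hold selected code, so the copy must go to FuncInfo.InsertPt instead of MBB->end(). A hedged consolidation of the shape shared by the two expansions above:

    unsigned PhysReg = TLI.getExceptionAddressRegister(); // or the selector register
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    if (TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                         ResultReg, PhysReg, RC, RC, DL))
      UpdateValueMap(I, ResultReg); // the exception value now lives in a vreg
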
@@ -613,8 +677,9 @@ bool FastISel::SelectBitCast(const User *I) {
TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
ResultReg = createResultReg(DstClass);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Op0, DstClass, SrcClass, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, Op0,
+ DstClass, SrcClass, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -662,13 +727,14 @@ FastISel::SelectInstruction(const Instruction *I) {
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
- if (MBB->isLayoutSuccessor(MSucc)) {
+ if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
+ TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
+ SmallVector<MachineOperand, 0>(), DL);
}
- MBB->addSuccessor(MSucc);
+ FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
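
[Annotation, not part of the patch] FastEmitBranch touches only block-level state, so the change is a straight MBB -> FuncInfo.MBB substitution. A hedged usage sketch for lowering an unconditional br instruction; BI and the MBBMap lookup are assumptions:

    MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
    FastEmitBranch(MSucc, BI->getDebugLoc()); // emits nothing on fall-through
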
@@ -727,11 +793,19 @@ FastISel::SelectLoad(const User *I) {
BasicBlock::iterator ScanFrom = LI;
if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
LI->getParent(), ScanFrom)) {
+ if (!V->use_empty() &&
+ (!isa<Instruction>(V) ||
+ cast<Instruction>(V)->getParent() == LI->getParent() ||
+ (isa<AllocaInst>(V) &&
+ FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) &&
+ (!isa<Argument>(V) ||
+ LI->getParent() == &LI->getParent()->getParent()->getEntryBlock())) {
unsigned ResultReg = getRegForValue(V);
if (ResultReg != 0) {
UpdateValueMap(I, ResultReg);
return true;
}
+ }
}
}
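
[Annotation, not part of the patch] The new guard limits load-to-value forwarding to values whose register can be obtained here without emitting code somewhere it no longer belongs. Illustrative cases under those assumptions:

    // Forwarded:  an instruction defined earlier in this same block;
    //             a static alloca (rematerializable anywhere as a
    //             frame-index computation); an argument, while still
    //             in the entry block.
    // Rejected:   an Instruction from another block, or an argument used
    //             outside the entry block -- getRegForValue might have to
    //             materialize those, which bottom-up selection cannot
    //             place correctly from here.
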
@@ -854,8 +928,7 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
- : MBB(0),
- FuncInfo(funcInfo),
+ : FuncInfo(funcInfo),
MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
@@ -863,8 +936,7 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- TRI(*TM.getRegisterInfo()),
- IsBottomUp(false) {
+ TRI(*TM.getRegisterInfo()) {
}
FastISel::~FastISel() {}
@@ -993,7 +1065,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(MBB, DL, II, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
return ResultReg;
}
@@ -1004,11 +1076,14 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
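
[Annotation, not part of the patch] This hunk and the FastEmitInst_rr/_ri/_rf/_rri/_i hunks below apply the same mechanical change to every emitter helper; a hedged consolidation of the shared shape:

    unsigned ResultReg = createResultReg(RC);
    const TargetInstrDesc &II = TII.get(MachineInstOpcode);
    if (II.getNumDefs() >= 1) {
      // Explicit def: emit the instruction at InsertPt defining ResultReg.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
        .addReg(Op0, Op0IsKill * RegState::Kill) /* plus per-variant operands */;
    } else {
      // Implicit def only: emit, then copy the implicit def into ResultReg,
      // also at InsertPt rather than at the (possibly populated) block end.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Op0, Op0IsKill * RegState::Kill) /* plus per-variant operands */;
      if (!TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
                            ResultReg, II.ImplicitDefs[0], RC, RC, DL))
        ResultReg = 0; // copy failed; caller falls back to SelectionDAG
    }
    return ResultReg;
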
@@ -1024,15 +1099,16 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1047,15 +1123,16 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1070,15 +1147,16 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1094,17 +1172,18 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1118,11 +1197,12 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(MBB, DL, II).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
+ bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ ResultReg, II.ImplicitDefs[0],
+ RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1135,7 +1215,8 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
- BuildMI(MBB, DL, TII.get(TargetOpcode::COPY), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}