 include/llvm/CodeGen/CallingConvLower.h           |   3
 include/llvm/CodeGen/FastISel.h                   |  20
 include/llvm/CodeGen/FunctionLoweringInfo.h       |   7
 include/llvm/CodeGen/SelectionDAGISel.h           |  13
 include/llvm/Target/TargetLowering.h              |  13
 lib/CodeGen/CallingConvLower.cpp                  |   9
 lib/CodeGen/LLVMTargetMachine.cpp                 |  20
 lib/CodeGen/SelectionDAG/FastISel.cpp             | 172
 lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp |   7
 lib/CodeGen/SelectionDAG/InstrEmitter.cpp         |   7
 lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  | 113
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp     | 180
 lib/CodeGen/SelectionDAG/TargetLowering.cpp       |  60
 lib/Target/X86/X86FastISel.cpp                    | 246
 lib/Target/X86/X86ISelLowering.cpp                |   5
 lib/Target/X86/X86ISelLowering.h                  |   3
 lib/Target/XCore/XCoreISelLowering.cpp            |   5
 lib/Target/XCore/XCoreISelLowering.h              |   3
 test/CodeGen/X86/fast-isel-gep.ll                 |   3
 test/CodeGen/X86/fast-isel-loads.ll               |   2
 test/CodeGen/X86/fast-isel.ll                     |  14
 utils/TableGen/FastISelEmitter.cpp                |   4
 22 files changed, 368 insertions(+), 541 deletions(-)
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 7911907e89..5ce59b88dc 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -188,7 +188,8 @@ public:
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
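The restored CheckReturn takes the legalized return-value types and their argument flags as two parallel vectors rather than a single ISD::OutputArg list. A minimal caller sketch, assuming an X86-style setup; the CCState constructor arguments and RetCC_X86 follow the conventions of this LLVM revision but are illustrative here:

    // Sketch: test whether all return values fit in return registers.
    SmallVector<EVT, 4> OutTys;
    SmallVector<ISD::ArgFlagsTy, 4> ArgsFlags;
    // ... fill OutTys/ArgsFlags from the function's return type ...
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, TM, RVLocs, Context);
    // false means the return must be demoted to an sret parameter.
    bool FitsInRegs = CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
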
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 24499dd9ff..c5c457db0c 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/SmallSet.h"
#endif
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
@@ -45,6 +44,7 @@ class TargetRegisterInfo;
/// lowering, but runs quickly.
class FastISel {
protected:
+ MachineBasicBlock *MBB;
DenseMap<const Value *, unsigned> LocalValueMap;
FunctionLoweringInfo &FuncInfo;
MachineRegisterInfo &MRI;
@@ -56,17 +56,23 @@ protected:
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
- MachineBasicBlock::iterator LastLocalValue;
+ bool IsBottomUp;
public:
- /// getLastLocalValue - Return the position of the last instruction
- /// emitted for materializing constants for use in the current block.
- MachineBasicBlock::iterator getLastLocalValue() { return LastLocalValue; }
-
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
- void startNewBlock();
+ void startNewBlock(MachineBasicBlock *mbb) {
+ setCurrentBlock(mbb);
+ LocalValueMap.clear();
+ }
+
+ /// setCurrentBlock - Set the current block to which generated machine
+ /// instructions will be appended.
+ ///
+ void setCurrentBlock(MachineBasicBlock *mbb) {
+ MBB = mbb;
+ }
/// getCurDebugLoc() - Return current debug location information.
DebugLoc getCurDebugLoc() const { return DL; }
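With MBB stored on FastISel again, the driver sets up each block explicitly before selecting its instructions. A hedged sketch of that loop; the Begin/End bounds and the fallback policy are assumptions modeled on SelectionDAGISel:

    // Sketch: fast-isel one IR block, bailing to SelectionDAG on failure.
    FastIS->startNewBlock(MBB);       // set the block, clear LocalValueMap
    for (BasicBlock::const_iterator BI = Begin; BI != End; ++BI)
      if (!FastIS->SelectInstruction(&*BI))
        break;                        // leave the rest to the DAG path
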
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index da8a3ff7c6..011d42617d 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -25,7 +25,6 @@
#endif
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/CallSite.h"
#include <vector>
@@ -81,12 +80,6 @@ public:
/// function arguments that are inserted after scheduling is completed.
SmallVector<MachineInstr*, 8> ArgDbgValues;
- /// MBB - The current block.
- MachineBasicBlock *MBB;
-
- /// MBB - The current insert position inside the current block.
- MachineBasicBlock::iterator InsertPt;
-
#ifndef NDEBUG
SmallSet<const Instruction *, 8> CatchInfoLost;
SmallSet<const Instruction *, 8> CatchInfoFound;
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index 01d05ddac1..1615994741 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,14 +280,15 @@ private:
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
- void PrepareEHLandingPad();
+ void PrepareEHLandingPad(MachineBasicBlock *BB);
void SelectAllBasicBlocks(const Function &Fn);
- void FinishBasicBlock();
+ void FinishBasicBlock(MachineBasicBlock *BB);
- void SelectBasicBlock(BasicBlock::const_iterator Begin,
- BasicBlock::const_iterator End,
- bool &HadTailCall);
- void CodeGenAndEmitDAG();
+ MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
+ BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall);
+ MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
void LowerArguments(const BasicBlock *BB);
void ComputeLiveOutVRegInfo();
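SelectBasicBlock and CodeGenAndEmitDAG now return the last machine basic block they emitted into, because scheduling and custom inserters can create new blocks mid-stream. A sketch of the resulting call pattern, assuming FuncInfo->MBBMap provides the IR-to-machine block mapping:

    // Sketch: thread the current block through selection and finish-up.
    MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
    bool HadTailCall = false;
    BB = SelectBasicBlock(BB, LLVMBB->begin(), LLVMBB->end(), HadTailCall);
    FinishBasicBlock(BB);             // PHI fixups run on the final block
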
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 47aa6d1683..2d87d4dd55 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -24,7 +24,6 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
-#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
@@ -1160,7 +1159,8 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const
{
// Return true by default to get preexisting behavior.
@@ -1656,15 +1656,6 @@ protected:
/// optimization.
bool benefitFromCodePlacementOpt;
};
-
-/// GetReturnInfo - Given an LLVM IR type and return type attributes,
-/// compute the return value EVTs and flags, and optionally also
-/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(const Type* ReturnType, Attributes attr,
- SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets = 0);
-
} // end llvm namespace
#endif
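A target overriding the hook with the restored signature typically forwards straight to CCState::CheckReturn; the X86 and XCore changes further down take exactly this shape. A sketch with hypothetical names (MyTargetLowering, RetCC_MyTarget):

    bool MyTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<EVT> &OutTys,
                                          const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                                          LLVMContext &Context) const {
      SmallVector<CCValAssign, 16> RVLocs;
      CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, Context);
      return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_MyTarget);
    }
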
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 62ad8171a9..5e47038054 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -80,12 +80,13 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
/// CheckReturn - Analyze the return values of a function, returning true if
/// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
+ EVT VT = OutTys[i];
+ ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
return false;
}
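The Fn argument is a CCAssignFn, which by convention returns true when it fails to assign a value (hence CheckReturn's early false above). A toy assignment function illustrating the convention; RetCC_Toy and Toy::R0 are hypothetical:

    // Hypothetical CCAssignFn: one i32 return register, fail otherwise.
    static bool RetCC_Toy(unsigned ValNo, EVT ValVT, EVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
      if (ValVT != MVT::i32)
        return true;                             // cannot assign: demote
      if (unsigned Reg = State.AllocateReg(Toy::R0)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;                            // assigned to a register
      }
      return true;                               // register already in use
    }
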
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index bf3137e495..d437370031 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -329,15 +329,19 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
if (OptLevel != CodeGenOpt::None)
PM.add(createOptimizePHIsPass());
- if (OptLevel != CodeGenOpt::None) {
- // With optimization, dead code should already be eliminated. However
- // there is one known exception: lowered code for arguments that are only
- // used by tail calls, where the tail calls reuse the incoming stack
- // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass",
- /* allowDoubleDefs= */ true);
+ // Delete dead machine instructions regardless of optimization level.
+ //
+ // At -O0, fast-isel frequently creates dead instructions.
+ //
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ PM.add(createDeadMachineInstructionElimPass());
+ printAndVerify(PM, "After codegen DCE pass",
+ /* allowDoubleDefs= */ true);
+ if (OptLevel != CodeGenOpt::None) {
PM.add(createOptimizeExtsPass());
if (!DisableMachineLICM)
PM.add(createMachineLICMPass());
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 5b566f675f..a917cdddee 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,17 +57,6 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-/// startNewBlock - Set the current block to which generated machine
-/// instructions will be appended, and clear the local CSE map.
-///
-void FastISel::startNewBlock() {
- LocalValueMap.clear();
-
- // Start out as end(), meaning no local-value instructions have
- // been emitted.
- LastLocalValue = FuncInfo.MBB->end();
-}
-
bool FastISel::hasTrivialKill(const Value *V) const {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
@@ -120,11 +109,12 @@ unsigned FastISel::getRegForValue(const Value *V) {
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
- if (isa<Instruction>(V) &&
- (!isa<AllocaInst>(V) ||
- !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
+ if (IsBottomUp) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- FuncInfo.ValueMap[V] = Reg;
+ if (isa<Instruction>(V))
+ FuncInfo.ValueMap[V] = Reg;
+ else
+ LocalValueMap[V] = Reg;
return Reg;
}
@@ -179,8 +169,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+ BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
// If target-independent code couldn't handle the value, give target-specific
@@ -190,10 +179,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
- if (Reg != 0) {
+ if (Reg != 0)
LocalValueMap[V] = Reg;
- LastLocalValue = MRI.getVRegDef(Reg);
- }
return Reg;
}
@@ -222,20 +209,12 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
- // Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
- // We already have a register for this value. Replace uses of
- // the existing register with uses of the new one.
- MRI.replaceRegWith(AssignedReg, Reg);
- // Replace uses of the existing register in PHINodesToUpdate too.
- for (unsigned i = 0, e = FuncInfo.PHINodesToUpdate.size(); i != e; ++i)
- if (FuncInfo.PHINodesToUpdate[i].second == AssignedReg)
- FuncInfo.PHINodesToUpdate[i].second = Reg;
- // And update the ValueMap.
- AssignedReg = Reg;
+ const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+ TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+ Reg, RegClass, RegClass, DL);
}
-
return AssignedReg;
}
@@ -455,28 +434,23 @@ bool FastISel::SelectCall(const User *I) {
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(0U).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addImm(CI->getZExtValue()).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addFPImm(CF).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
// Insert an undef so we can see what we dropped.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(0U).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
}
return true;
}
@@ -485,13 +459,12 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
- assert(FuncInfo.MBB->isLandingPad() &&
- "Call to eh.exception not in landing pad!");
+ assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
unsigned Reg = TLI.getExceptionAddressRegister();
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Reg, RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Reg, RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
UpdateValueMap(I, ResultReg);
@@ -505,23 +478,23 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- if (FuncInfo.MBB->isLandingPad())
- AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+ if (MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
else {
#ifndef NDEBUG
FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+ if (Reg) MBB->addLiveIn(Reg);
}
unsigned Reg = TLI.getExceptionSelectorRegister();
EVT SrcVT = TLI.getPointerTy();
const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Reg, RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+ RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
@@ -640,9 +613,8 @@ bool FastISel::SelectBitCast(const User *I) {
TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
ResultReg = createResultReg(DstClass);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Op0,
- DstClass, SrcClass, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Op0, DstClass, SrcClass, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -690,14 +662,13 @@ FastISel::SelectInstruction(const Instruction *I) {
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
- if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+ if (MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
- SmallVector<MachineOperand, 0>(), DL);
+ TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
}
- FuncInfo.MBB->addSuccessor(MSucc);
+ MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
@@ -756,15 +727,11 @@ FastISel::SelectLoad(const User *I) {
BasicBlock::iterator ScanFrom = LI;
if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
LI->getParent(), ScanFrom)) {
- if (!isa<Instruction>(V) ||
- cast<Instruction>(V)->getParent() == LI->getParent() ||
- (isa<AllocaInst>(V) && FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
unsigned ResultReg = getRegForValue(V);
if (ResultReg != 0) {
UpdateValueMap(I, ResultReg);
return true;
}
- }
}
}
@@ -887,7 +854,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
- : FuncInfo(funcInfo),
+ : MBB(0),
+ FuncInfo(funcInfo),
MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
@@ -895,7 +863,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- TRI(*TM.getRegisterInfo()) {
+ TRI(*TM.getRegisterInfo()),
+ IsBottomUp(false) {
}
FastISel::~FastISel() {}
@@ -1024,7 +993,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+ BuildMI(MBB, DL, II, ResultReg);
return ResultReg;
}
@@ -1035,14 +1004,11 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Op0, Op0IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1058,16 +1024,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1082,16 +1047,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1106,16 +1070,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1131,18 +1094,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1156,12 +1118,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+ BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ BuildMI(MBB, DL, II).addImm(Imm);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1177,16 +1138,15 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Idx);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Idx);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
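The behavioral heart of the FastISel changes is UpdateValueMap: when a value already has an assigned vreg, a later definition is reconciled with a register copy rather than MRI.replaceRegWith, so earlier uses stay valid; copies that end up dead are swept by the now-unconditional DCE pass added in LLVMTargetMachine.cpp above. Reduced to its essentials:

    // Reduced form of the restored UpdateValueMap redefinition path.
    unsigned &AssignedReg = FuncInfo.ValueMap[I];
    if (AssignedReg == 0)
      AssignedReg = Reg;               // first definition claims the slot
    else if (Reg != AssignedReg) {
      // Copy into the existing vreg instead of rewriting all its uses.
      const TargetRegisterClass *RC = MRI.getRegClass(Reg);
      TII.copyRegToReg(*MBB, MBB->end(), AssignedReg, Reg, RC, RC, DL);
    }
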
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 55171fca09..8c4211ab29 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -78,13 +78,6 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
MF = &mf;
RegInfo = &MF->getRegInfo();
- // Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(Fn->getReturnType(),
- Fn->getAttributes().getRetAttributes(), Outs, TLI);
- CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
- Outs, Fn->getContext());
-
// Create a vreg for each argument register that is not dead and is used
// outside of the entry block for the function.
for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 5f93e2fcb2..f36620c3ed 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -732,11 +732,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
if (II.usesCustomInsertionHook()) {
// Insert this instruction into the basic block using a target
// specific inserter which may return a new basic block.
- MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
- if (NewMBB != MBB) {
- MBB = NewMBB;
- InsertPos = NewMBB->end();
- }
+ MBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+ InsertPos = MBB->end();
return;
}
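The hunk above relies on EmitInstrWithCustomInserter's contract: it returns whichever block should receive the instructions that follow, whether or not it created a new one, so re-seeding InsertPos unconditionally is always correct. A minimal hook honoring that contract, with MyTargetLowering as a hypothetical target:

    // A custom inserter that rewrites MI in place without splitting the
    // block, so the incoming block is still the right insertion target.
    MachineBasicBlock *
    MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                  MachineBasicBlock *BB) const {
      // A target that splits BB must instead return the block that ends
      // up holding the code that follows MI.
      return BB;
    }
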
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3b3ee3e343..0bb5e4b3cb 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -951,16 +951,79 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
- unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
- SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+ assert(Inst->isSafeToSpeculativelyExecute() &&
+ "Instruction with side effects deferred!");
+ visit(*Inst);
+ DenseMap<const Value *, SDValue>::iterator NIt = NodeMap.find(Inst);
+ if (NIt != NodeMap.end() && NIt->second.getNode())
+ return NIt->second;
}
llvm_unreachable("Can't get register for value!");
return SDValue();
}
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function. This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+static void getReturnInfo(const Type* ReturnType,
+ Attributes attr, SmallVectorImpl<EVT> &OutVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets = 0) {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, ReturnType, ValueVTs);
+ unsigned NumValues = ValueVTs.size();
+ if (NumValues == 0) return;
+ unsigned Offset = 0;
+
+ for (unsigned j = 0, f = NumValues; j != f; ++j) {
+ EVT VT = ValueVTs[j];
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+ if (attr & Attribute::SExt)
+ ExtendKind = ISD::SIGN_EXTEND;
+ else if (attr & Attribute::ZExt)
+ ExtendKind = ISD::ZERO_EXTEND;
+
+ // FIXME: C calling convention requires the return type to be promoted to
+ // at least 32-bit. But this is not necessary for non-C calling
+ // conventions. The frontend should mark functions whose return values
+ // require promoting with signext or zeroext attributes.
+ if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+ EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+ if (VT.bitsLT(MinVT))
+ VT = MinVT;
+ }
+
+ unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+ EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+ unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+ PartVT.getTypeForEVT(ReturnType->getContext()));
+
+ // 'inreg' on function refers to return value
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (attr & Attribute::InReg)
+ Flags.setInReg();
+
+ // Propagate extension type if any
+ if (attr & Attribute::SExt)
+ Flags.setSExt();
+ else if (attr & Attribute::ZExt)
+ Flags.setZExt();
+
+ for (unsigned i = 0; i < NumParts; ++i) {
+ OutVTs.push_back(PartVT);
+ OutFlags.push_back(Flags);
+ if (Offsets)
+ {
+ Offsets->push_back(Offset);
+ Offset += PartSize;
+ }
+ }
+ }
+}
+
void SelectionDAGBuilder
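The getReturnInfo helper above stands in for the removed GetReturnInfo for callers that need return-lowering facts before any DAG exists, pairing with the CanLowerReturn signature restored earlier. A usage sketch, assuming F is a const Function* and TLI the target lowering:

    // Sketch: precompute whether F's return needs sret-demotion.
    SmallVector<EVT, 4> OutVTs;
    SmallVector<ISD::ArgFlagsTy, 4> OutFlags;
    getReturnInfo(F->getReturnType(), F->getAttributes().getRetAttributes(),
                  OutVTs, OutFlags, TLI);
    bool CanLower = TLI.CanLowerReturn(F->getCallingConv(), F->isVarArg(),
                                       OutVTs, OutFlags, F->getContext());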