author     Bob Wilson <bob.wilson@apple.com>          2010-07-09 16:37:18 +0000
committer  Bob Wilson <bob.wilson@apple.com>          2010-07-09 16:37:18 +0000
commit     02266e29f9250d74c5ec720aff23add3410ae920 (patch)
tree       eab38ddb96c513afb1c30f74a30992a9ceaf3e50
parent     9085fcab8276a8aaba33dc78bec2cdb0845351ba (diff)
--- Reverse-merging r107947 into '.':
U utils/TableGen/FastISelEmitter.cpp
--- Reverse-merging r107943 into '.':
U test/CodeGen/X86/fast-isel.ll
U test/CodeGen/X86/fast-isel-loads.ll
U include/llvm/Target/TargetLowering.h
U include/llvm/Support/PassNameParser.h
U include/llvm/CodeGen/FunctionLoweringInfo.h
U include/llvm/CodeGen/CallingConvLower.h
U include/llvm/CodeGen/FastISel.h
U include/llvm/CodeGen/SelectionDAGISel.h
U lib/CodeGen/LLVMTargetMachine.cpp
U lib/CodeGen/CallingConvLower.cpp
U lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
U lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
U lib/CodeGen/SelectionDAG/FastISel.cpp
U lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
U lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
U lib/CodeGen/SelectionDAG/InstrEmitter.cpp
U lib/CodeGen/SelectionDAG/TargetLowering.cpp
U lib/Target/XCore/XCoreISelLowering.cpp
U lib/Target/XCore/XCoreISelLowering.h
U lib/Target/X86/X86ISelLowering.cpp
U lib/Target/X86/X86FastISel.cpp
U lib/Target/X86/X86ISelLowering.h
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107987 91177308-0d34-0410-b5e6-96231b3b80d8
23 files changed, 368 insertions, 648 deletions
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 7911907e89..5ce59b88dc 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -188,7 +188,8 @@ public:
   /// CheckReturn - Analyze the return values of a function, returning
   /// true if the return can be performed without sret-demotion, and
   /// false otherwise.
-  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+  bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+                   const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                    CCAssignFn Fn);
 
   /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 7f3a7c7769..c5c457db0c 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -19,7 +19,6 @@
 #include "llvm/ADT/SmallSet.h"
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
 
 namespace llvm {
 
@@ -45,6 +44,7 @@ class TargetRegisterInfo;
 /// lowering, but runs quickly.
 class FastISel {
 protected:
+  MachineBasicBlock *MBB;
   DenseMap<const Value *, unsigned> LocalValueMap;
   FunctionLoweringInfo &FuncInfo;
   MachineRegisterInfo &MRI;
@@ -56,21 +56,23 @@ protected:
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
   const TargetRegisterInfo &TRI;
-  MachineInstr *LastLocalValue;
+  bool IsBottomUp;
 
 public:
-  /// getLastLocalValue - Return the position of the last instruction
-  /// emitted for materializing constants for use in the current block.
-  MachineInstr *getLastLocalValue() { return LastLocalValue; }
-
-  /// setLastLocalValue - Update the position of the last instruction
-  /// emitted for materializing constants for use in the current block.
-  void setLastLocalValue(MachineInstr *I) { LastLocalValue = I; }
-
   /// startNewBlock - Set the current block to which generated machine
   /// instructions will be appended, and clear the local CSE map.
   ///
-  void startNewBlock();
+  void startNewBlock(MachineBasicBlock *mbb) {
+    setCurrentBlock(mbb);
+    LocalValueMap.clear();
+  }
+
+  /// setCurrentBlock - Set the current block to which generated machine
+  /// instructions will be appended.
+  ///
+  void setCurrentBlock(MachineBasicBlock *mbb) {
+    MBB = mbb;
+  }
 
   /// getCurDebugLoc() - Return current debug location information.
   DebugLoc getCurDebugLoc() const { return DL; }
@@ -102,17 +104,6 @@ public:
   /// index value.
   std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
 
-  /// recomputeInsertPt - Reset InsertPt to prepare for insterting instructions
-  /// into the current block.
-  void recomputeInsertPt();
-
-  /// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
-  /// into the local value area and return the old insert position.
-  MachineBasicBlock::iterator enterLocalValueArea();
-
-  /// leaveLocalValueArea - Reset InsertPt to the given old insert position
-  void leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt);
-
   virtual ~FastISel();
 
 protected:
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index c49d1edb20..011d42617d 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -25,7 +25,6 @@
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/Support/CallSite.h"
 #include <vector>
 
@@ -81,15 +80,6 @@ public:
   /// function arguments that are inserted after scheduling is completed.
   SmallVector<MachineInstr*, 8> ArgDbgValues;
 
-  /// RegFixups - Registers which need to be replaced after isel is done.
-  DenseMap<unsigned, unsigned> RegFixups;
-
-  /// MBB - The current block.
-  MachineBasicBlock *MBB;
-
-  /// MBB - The current insert position inside the current block.
-  MachineBasicBlock::iterator InsertPt;
-
 #ifndef NDEBUG
   SmallSet<const Instruction *, 8> CatchInfoLost;
   SmallSet<const Instruction *, 8> CatchInfoFound;
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index 01d05ddac1..1615994741 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,14 +280,15 @@ private:
   SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
                     const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
 
-  void PrepareEHLandingPad();
+  void PrepareEHLandingPad(MachineBasicBlock *BB);
   void SelectAllBasicBlocks(const Function &Fn);
-  void FinishBasicBlock();
+  void FinishBasicBlock(MachineBasicBlock *BB);
 
-  void SelectBasicBlock(BasicBlock::const_iterator Begin,
-                        BasicBlock::const_iterator End,
-                        bool &HadTailCall);
-  void CodeGenAndEmitDAG();
+  MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
+                                      BasicBlock::const_iterator Begin,
+                                      BasicBlock::const_iterator End,
+                                      bool &HadTailCall);
+  MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
   void LowerArguments(const BasicBlock *BB);
   void ComputeLiveOutVRegInfo();
diff --git a/include/llvm/Support/PassNameParser.h b/include/llvm/Support/PassNameParser.h
index 42639a6d7e..cdca978cfe 100644
--- a/include/llvm/Support/PassNameParser.h
+++ b/include/llvm/Support/PassNameParser.h
@@ -69,7 +69,6 @@ public:
   virtual void passRegistered(const PassInfo *P) {
     if (ignorablePass(P) || !Opt) return;
     if (findOption(P->getPassArgument()) != getNumOptions()) {
-      return;
       errs() << "Two passes with the same argument (-" << P->getPassArgument()
              << ") attempted to be registered!\n";
       llvm_unreachable(0);
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 47aa6d1683..2d87d4dd55 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -24,7 +24,6 @@
 
 #include "llvm/CallingConv.h"
 #include "llvm/InlineAsm.h"
-#include "llvm/Attributes.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/ADT/APFloat.h"
@@ -1160,7 +1159,8 @@ public:
   /// registers. If false is returned, an sret-demotion is performed.
   ///
   virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-               const SmallVectorImpl<ISD::OutputArg> &Outs,
+               const SmallVectorImpl<EVT> &OutTys,
+               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                LLVMContext &Context) const
   {
     // Return true by default to get preexisting behavior.
@@ -1656,15 +1656,6 @@ protected:
   /// optimization.
   bool benefitFromCodePlacementOpt;
 };
-
-/// GetReturnInfo - Given an LLVM IR type and return type attributes,
-/// compute the return value EVTs and flags, and optionally also
-/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(const Type* ReturnType, Attributes attr,
-                   SmallVectorImpl<ISD::OutputArg> &Outs,
-                   const TargetLowering &TLI,
-                   SmallVectorImpl<uint64_t> *Offsets = 0);
-
 } // end llvm namespace
 
 #endif
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 62ad8171a9..5e47038054 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -80,12 +80,13 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
 
 /// CheckReturn - Analyze the return values of a function, returning true if
 /// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+                          const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                           CCAssignFn Fn) {
   // Determine which register each value should be copied into.
-  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
-    EVT VT = Outs[i].VT;
-    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+  for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
+    EVT VT = OutTys[i];
+    ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
     if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
       return false;
   }
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index bf3137e495..d437370031 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -329,15 +329,19 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
   if (OptLevel != CodeGenOpt::None)
     PM.add(createOptimizePHIsPass());
 
-  if (OptLevel != CodeGenOpt::None) {
-    // With optimization, dead code should already be eliminated. However
-    // there is one known exception: lowered code for arguments that are only
-    // used by tail calls, where the tail calls reuse the incoming stack
-    // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
-    PM.add(createDeadMachineInstructionElimPass());
-    printAndVerify(PM, "After codegen DCE pass",
-                   /* allowDoubleDefs= */ true);
+  // Delete dead machine instructions regardless of optimization level.
+  //
+  // At -O0, fast-isel frequently creates dead instructions.
+  //
+  // With optimization, dead code should already be eliminated. However
+  // there is one known exception: lowered code for arguments that are only
+  // used by tail calls, where the tail calls reuse the incoming stack
+  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+  PM.add(createDeadMachineInstructionElimPass());
+  printAndVerify(PM, "After codegen DCE pass",
+                 /* allowDoubleDefs= */ true);
 
+  if (OptLevel != CodeGenOpt::None) {
     PM.add(createOptimizeExtsPass());
     if (!DisableMachineLICM)
       PM.add(createMachineLICMPass());
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 02d11bbc2a..230368f2fa 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,25 +57,6 @@
 #include "llvm/Support/ErrorHandling.h"
 using namespace llvm;
 
-/// startNewBlock - Set the current block to which generated machine
-/// instructions will be appended, and clear the local CSE map.
-///
-void FastISel::startNewBlock() {
-  LocalValueMap.clear();
-
-  // Start out as null, meaining no local-value instructions have
-  // been emitted.
-  LastLocalValue = 0;
-
-  // Advance the last local value past any EH_LABEL instructions.
-  MachineBasicBlock::iterator
-    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
-  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
-    LastLocalValue = I;
-    ++I;
-  }
-}
-
 bool FastISel::hasTrivialKill(const Value *V) const {
   // Don't consider constants or arguments to have trivial kills.
   const Instruction *I = dyn_cast<Instruction>(V);
@@ -120,30 +101,24 @@ unsigned FastISel::getRegForValue(const Value *V) {
   // only locally. This is because Instructions already have the SSA
   // def-dominates-use requirement enforced.
   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
-  if (I != FuncInfo.ValueMap.end()) {
-    unsigned Reg = I->second;
-    return Reg;
-  }
+  if (I != FuncInfo.ValueMap.end())
+    return I->second;
   unsigned Reg = LocalValueMap[V];
   if (Reg != 0)
    return Reg;
 
   // In bottom-up mode, just create the virtual register which will be used
   // to hold the value. It will be materialized later.
-  if (isa<Instruction>(V) &&
-      (!isa<AllocaInst>(V) ||
-       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
-    return FuncInfo.InitializeRegForValue(V);
-
-  MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
-
-  // Materialize the value in a register. Emit any instructions in the
-  // local value area.
-  Reg = materializeRegForValue(V, VT);
-
-  leaveLocalValueArea(SaveInsertPt);
+  if (IsBottomUp) {
+    Reg = createResultReg(TLI.getRegClassFor(VT));
+    if (isa<Instruction>(V))
+      FuncInfo.ValueMap[V] = Reg;
+    else
+      LocalValueMap[V] = Reg;
+    return Reg;
+  }
 
-  return Reg;
+  return materializeRegForValue(V, VT);
 }
 
 /// materializeRegForValue - Helper for getRegForVale. This function is
@@ -194,8 +169,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
       Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
   }
 
   // If target-independent code couldn't handle the value, give target-specific
@@ -205,10 +179,8 @@
   // Don't cache constant materializations in the general ValueMap.
   // To do so would require tracking what uses they dominate.
-  if (Reg != 0) {
+  if (Reg != 0)
     LocalValueMap[V] = Reg;
-    LastLocalValue = MRI.getVRegDef(Reg);
-  }
 
   return Reg;
 }
@@ -237,15 +209,12 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
   unsigned &AssignedReg = FuncInfo.ValueMap[I];
   if (AssignedReg == 0)
-    // Use the new register.
     AssignedReg = Reg;
   else if (Reg != AssignedReg) {
-    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
-    FuncInfo.RegFixups[AssignedReg] = Reg;
-
-    AssignedReg = Reg;
+    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+                     Reg, RegClass, RegClass, DL);
   }
-
   return AssignedReg;
 }
@@ -273,33 +242,6 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
   return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
 }
 
-void FastISel::recomputeInsertPt() {
-  if (getLastLocalValue()) {
-    FuncInfo.InsertPt = getLastLocalValue();
-    ++FuncInfo.InsertPt;
-  } else
-    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
-
-  // Now skip past any EH_LABELs, which must remain at the beginning.
-  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
-         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
-    ++FuncInfo.InsertPt;
-}
-
-MachineBasicBlock::iterator FastISel::enterLocalValueArea() {
-  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
-  recomputeInsertPt();
-  return OldInsertPt;
-}
-
-void FastISel::leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt) {
-  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
-    LastLocalValue = llvm::prior(FuncInfo.InsertPt);
-
-  // Restore the previous insert position.
-  FuncInfo.InsertPt = OldInsertPt;
-}
-
 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
 /// which has an opcode which directly corresponds to the given ISD opcode.
 ///
@@ -492,28 +434,23 @@ bool FastISel::SelectCall(const User *I) {
     if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+                          addMetadata(DI->getVariable());
     } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
+                          addMetadata(DI->getVariable());
     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addFPImm(CF).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
+                          addMetadata(DI->getVariable());
     } else if (unsigned Reg = lookUpRegForValue(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
+                          addMetadata(DI->getVariable());
     } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+                          addMetadata(DI->getVariable());
     }
     return true;
   }
@@ -522,13 +459,12 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
     default: break;
     case TargetLowering::Expand: {
-      assert(FuncInfo.MBB->isLandingPad() &&
-             "Call to eh.exception not in landing pad!");
+      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
       unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                           Reg, RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
       UpdateValueMap(I, ResultReg);
@@ -542,23 +478,23 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
     default: break;
     case TargetLowering::Expand: {
-      if (FuncInfo.MBB->isLandingPad())
-        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+      if (MBB->isLandingPad())
+        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
       else {
 #ifndef NDEBUG
         FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
 #endif
         // FIXME: Mark exception selector register as live in. Hack for PR1508.
         unsigned Reg = TLI.getExceptionSelectorRegister();
-        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+        if (Reg) MBB->addLiveIn(Reg);
       }
 
       unsigned Reg = TLI.getExceptionSelectorRegister();
       EVT SrcVT = TLI.getPointerTy();
       const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
       unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+                                           RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
@@ -677,9 +613,8 @@ bool FastISel::SelectBitCast(const User *I) {
     TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
     ResultReg = createResultReg(DstClass);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, Op0,
-                                         DstClass, SrcClass, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         Op0, DstClass, SrcClass, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -727,14 +662,13 @@ FastISel::SelectInstruction(const Instruction *I) {
 /// the CFG.
 void
 FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
-  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+  if (MBB->isLayoutSuccessor(MSucc)) {
     // The unconditional fall-through case, which needs no instructions.
   } else {
     // The unconditional branch case.
-    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
-                     SmallVector<MachineOperand, 0>(), DL);
+    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
   }
-  FuncInfo.MBB->addSuccessor(MSucc);
+  MBB->addSuccessor(MSucc);
 }
 
 /// SelectFNeg - Emit an FNeg operation.
@@ -793,19 +727,11 @@ FastISel::SelectLoad(const User *I) {
     BasicBlock::iterator ScanFrom = LI;
     if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
                                                   LI->getParent(), ScanFrom)) {
-      if (!V->use_empty() &&
-          (!isa<Instruction>(V) ||
-           cast<Instruction>(V)->getParent() == LI->getParent() ||
-           (isa<AllocaInst>(V) &&
-            FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) &&
-          (!isa<Argument>(V) ||
-           LI->getParent() == &LI->getParent()->getParent()->getEntryBlock())) {
       unsigned ResultReg = getRegForValue(V);
       if (ResultReg != 0) {
         UpdateValueMap(I, ResultReg);
         return true;
       }
-      }
     }
   }
@@ -928,7 +854,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
 }
 
 FastISel::FastISel(FunctionLoweringInfo &funcInfo)
-  : FuncInfo(funcInfo),
+  : MBB(0),
+    FuncInfo(funcInfo),
     MRI(FuncInfo.MF->getRegInfo()),
     MFI(*FuncInfo.MF->getFrameInfo()),
     MCP(*FuncInfo.MF->getConstantPool()),
@@ -936,7 +863,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
     TD(*TM.getTargetData()),
     TII(*TM.getInstrInfo()),
     TLI(*TM.getTargetLowering()),
-    TRI(*TM.getRegisterInfo()) {
+    TRI(*TM.getRegisterInfo()),
+    IsBottomUp(false) {
 }
 
 FastISel::~FastISel() {}
@@ -1065,7 +993,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
   unsigned ResultReg = createResultReg(RC);
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+  BuildMI(MBB, DL, II, ResultReg);
   return ResultReg;
 }
@@ -1076,14 +1004,11 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
+    BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1099,16 +1024,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1123,16 +1047,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1147,16 +1070,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1172,18 +1094,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1197,12 +1118,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addImm(Imm);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1215,8 +1135,7 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
   unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
   assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
          "Cannot yet extract from physregs");
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-          DL, TII.get(TargetOpcode::COPY), ResultReg)
+  BuildMI(MBB, DL, TII.get(TargetOpcode::COPY), ResultReg)
     .addReg(Op0, getKillRegState(Op0IsKill), Idx);
   return ResultReg;
 }
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 928e1ecd4c..6d9f1021ff 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -82,13 +82,6 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
   MF = &mf;
   RegInfo = &MF->getRegInfo();
 
-  // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(Fn->getReturnType(),
-                Fn->getAttributes().getRetAttributes(), Outs, TLI);
-  CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
-                                      Outs, Fn->getContext());
-
   // Create a vreg for each argument register that is not dead and is used
   // outside of the entry block for the function.
   for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
@@ -181,7 +174,6 @@ void FunctionLoweringInfo::clear() {
 #endif
   LiveOutRegInfo.clear();
   ArgDbgValues.clear();