author     Dan Gohman <gohman@apple.com>    2010-07-08 01:00:56 +0000
committer  Dan Gohman <gohman@apple.com>    2010-07-08 01:00:56 +0000
commit     f59514152511694d46ca8b8d2db466d256ab5759 (patch)
tree       25f123bce731ec2fdfd80e22df2f6b5dfe1c5239 /lib
parent     d9642faf7c66273eb3a8d99e5fa6b542da5374dd (diff)
Revert 107840 107839 107813 107804 107800 107797 107791.
Debug info intrinsics win for now.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107850 91177308-0d34-0410-b5e6-96231b3b80d8
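The "debug info intrinsics" in question are calls such as llvm.dbg.value, which FastISel lowers directly to DBG_VALUE machine instructions instead of deferring them. A condensed sketch of that lowering, adapted from the FastISel::SelectCall hunk below (an illustrative fragment, not the full function; the ConstantFP case and surrounding control flow are omitted):

    // Sketch adapted from FastISel::SelectCall below: lower llvm.dbg.value
    // to a DBG_VALUE machine instruction, choosing the operand form that
    // matches the described value V.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      // Constant integer: emit it as an immediate operand.
      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // The value already lives in a virtual register.
      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else {
      // Nothing usable yet; emit an undef (register 0) so the drop is visible.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    }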
Diffstat (limited to 'lib')
-rw-r--r--   lib/CodeGen/CallingConvLower.cpp                    |    9
-rw-r--r--   lib/CodeGen/LLVMTargetMachine.cpp                   |   20
-rw-r--r--   lib/CodeGen/SelectionDAG/FastISel.cpp               |  172
-rw-r--r--   lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp   |    7
-rw-r--r--   lib/CodeGen/SelectionDAG/InstrEmitter.cpp           |    7
-rw-r--r--   lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp    |  113
-rw-r--r--   lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp       |  180
-rw-r--r--   lib/CodeGen/SelectionDAG/TargetLowering.cpp         |   60
-rw-r--r--   lib/Target/X86/X86FastISel.cpp                      |  246
-rw-r--r--   lib/Target/X86/X86ISelLowering.cpp                  |    5
-rw-r--r--   lib/Target/X86/X86ISelLowering.h                    |    3
-rw-r--r--   lib/Target/XCore/XCoreISelLowering.cpp              |    5
-rw-r--r--   lib/Target/XCore/XCoreISelLowering.h                |    3
13 files changed, 331 insertions, 499 deletions
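The first file below shows the shape of the whole revert: CCState::CheckReturn goes back to taking the legalized return types and their flags as two parallel vectors rather than a single ISD::OutputArg list. A minimal sketch of a call site under the restored API, mirroring the LowerArguments hunk further down (names are taken from that hunk):

    // Check whether F's return values can be lowered without sret-demotion,
    // using the two-vector getReturnInfo/CanLowerReturn API this patch restores.
    SmallVector<EVT, 4> OutVTs;                // legalized return-value types
    SmallVector<ISD::ArgFlagsTy, 4> OutsFlags; // matching sext/zext/inreg flags
    getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  OutVTs, OutsFlags, TLI);
    bool CanLower = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
                                       OutVTs, OutsFlags, F.getContext());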
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 62ad8171a9..5e47038054 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -80,12 +80,13 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
 /// CheckReturn - Analyze the return values of a function, returning true if
 /// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+                          const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                           CCAssignFn Fn) {
   // Determine which register each value should be copied into.
-  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
-    EVT VT = Outs[i].VT;
-    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+  for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
+    EVT VT = OutTys[i];
+    ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
     if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
       return false;
   }
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index bf3137e495..d437370031 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -329,15 +329,19 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
   if (OptLevel != CodeGenOpt::None)
     PM.add(createOptimizePHIsPass());
-  if (OptLevel != CodeGenOpt::None) {
-    // With optimization, dead code should already be eliminated. However
-    // there is one known exception: lowered code for arguments that are only
-    // used by tail calls, where the tail calls reuse the incoming stack
-    // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
-    PM.add(createDeadMachineInstructionElimPass());
-    printAndVerify(PM, "After codegen DCE pass",
-                   /* allowDoubleDefs= */ true);
+  // Delete dead machine instructions regardless of optimization level.
+  //
+  // At -O0, fast-isel frequently creates dead instructions.
+  //
+  // With optimization, dead code should already be eliminated. However
+  // there is one known exception: lowered code for arguments that are only
+  // used by tail calls, where the tail calls reuse the incoming stack
+  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+  PM.add(createDeadMachineInstructionElimPass());
+  printAndVerify(PM, "After codegen DCE pass",
+                 /* allowDoubleDefs= */ true);
+  if (OptLevel != CodeGenOpt::None) {
     PM.add(createOptimizeExtsPass());
     if (!DisableMachineLICM)
       PM.add(createMachineLICMPass());
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 5b566f675f..a917cdddee 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,17 +57,6 @@
 #include "llvm/Support/ErrorHandling.h"
 using namespace llvm;
-/// startNewBlock - Set the current block to which generated machine
-/// instructions will be appended, and clear the local CSE map.
-///
-void FastISel::startNewBlock() {
-  LocalValueMap.clear();
-
-  // Start out as end(), meaining no local-value instructions have
-  // been emitted.
-  LastLocalValue = FuncInfo.MBB->end();
-}
-
 bool FastISel::hasTrivialKill(const Value *V) const {
   // Don't consider constants or arguments to have trivial kills.
   const Instruction *I = dyn_cast<Instruction>(V);
@@ -120,11 +109,12 @@ unsigned FastISel::getRegForValue(const Value *V) {
   // In bottom-up mode, just create the virtual register which will be used
   // to hold the value. It will be materialized later.
-  if (isa<Instruction>(V) &&
-      (!isa<AllocaInst>(V) ||
-       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
+  if (IsBottomUp) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    FuncInfo.ValueMap[V] = Reg;
+    if (isa<Instruction>(V))
+      FuncInfo.ValueMap[V] = Reg;
+    else
+      LocalValueMap[V] = Reg;
     return Reg;
   }
@@ -179,8 +169,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
     Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
   }
@@ -190,10 +179,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
   // Don't cache constant materializations in the general ValueMap.
   // To do so would require tracking what uses they dominate.
-  if (Reg != 0) {
+  if (Reg != 0)
     LocalValueMap[V] = Reg;
-    LastLocalValue = MRI.getVRegDef(Reg);
-  }
   return Reg;
 }
@@ -222,20 +209,12 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
   unsigned &AssignedReg = FuncInfo.ValueMap[I];
   if (AssignedReg == 0)
-    // Use the new register.
     AssignedReg = Reg;
   else if (Reg != AssignedReg) {
-    // We already have a register for this value. Replace uses of
-    // the existing register with uses of the new one.
-    MRI.replaceRegWith(AssignedReg, Reg);
-    // Replace uses of the existing register in PHINodesToUpdate too.
-    for (unsigned i = 0, e = FuncInfo.PHINodesToUpdate.size(); i != e; ++i)
-      if (FuncInfo.PHINodesToUpdate[i].second == AssignedReg)
-        FuncInfo.PHINodesToUpdate[i].second = Reg;
-    // And update the ValueMap.
-    AssignedReg = Reg;
+    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+                     Reg, RegClass, RegClass, DL);
   }
-
   return AssignedReg;
 }
@@ -455,28 +434,23 @@ bool FastISel::SelectCall(const User *I) {
     if (!V) {
       // Currently the optimizer can produce this; insert an undef to
       // help debugging.  Probably the optimizer should not do this.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+        addMetadata(DI->getVariable());
     } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
+        addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addFPImm(CF).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
+        addMetadata(DI->getVariable());
     } else if (unsigned Reg = lookUpRegForValue(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
+        addMetadata(DI->getVariable());
     } else {
       // We can't yet handle anything else here because it would require
       // generating code, thus altering codegen because of debug info.
       // Insert an undef so we can see what we dropped.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+        addMetadata(DI->getVariable());
     }
     return true;
   }
@@ -485,13 +459,12 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
     default: break;
    case TargetLowering::Expand: {
-      assert(FuncInfo.MBB->isLandingPad() &&
-             "Call to eh.exception not in landing pad!");
+      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
       unsigned Reg = TLI.getExceptionAddressRegister();
       const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
       unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                           Reg, RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
       UpdateValueMap(I, ResultReg);
@@ -505,23 +478,23 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
     default: break;
     case TargetLowering::Expand: {
-      if (FuncInfo.MBB->isLandingPad())
-        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+      if (MBB->isLandingPad())
+        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
       else {
 #ifndef NDEBUG
         FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
 #endif
         // FIXME: Mark exception selector register as live in.  Hack for PR1508.
         unsigned Reg = TLI.getExceptionSelectorRegister();
-        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+        if (Reg) MBB->addLiveIn(Reg);
       }
       unsigned Reg = TLI.getExceptionSelectorRegister();
       EVT SrcVT = TLI.getPointerTy();
       const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
       unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+                                           RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
@@ -640,9 +613,8 @@ bool FastISel::SelectBitCast(const User *I) {
     TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
     ResultReg = createResultReg(DstClass);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, Op0,
-                                         DstClass, SrcClass, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         Op0, DstClass, SrcClass, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -690,14 +662,13 @@ FastISel::SelectInstruction(const Instruction *I) {
 /// the CFG.
 void FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
-  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+  if (MBB->isLayoutSuccessor(MSucc)) {
     // The unconditional fall-through case, which needs no instructions.
   } else {
     // The unconditional branch case.
-    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
-                     SmallVector<MachineOperand, 0>(), DL);
+    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
   }
-  FuncInfo.MBB->addSuccessor(MSucc);
+  MBB->addSuccessor(MSucc);
 }
 /// SelectFNeg - Emit an FNeg operation.
@@ -756,15 +727,11 @@ FastISel::SelectLoad(const User *I) {
     BasicBlock::iterator ScanFrom = LI;
     if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
                                                   LI->getParent(), ScanFrom)) {
-      if (!isa<Instruction>(V) ||
-          cast<Instruction>(V)->getParent() == LI->getParent() ||
-          (isa<AllocaInst>(V) && FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
       unsigned ResultReg = getRegForValue(V);
       if (ResultReg != 0) {
         UpdateValueMap(I, ResultReg);
         return true;
       }
-      }
     }
   }
@@ -887,7 +854,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
 }
 FastISel::FastISel(FunctionLoweringInfo &funcInfo)
-  : FuncInfo(funcInfo),
+  : MBB(0),
+    FuncInfo(funcInfo),
     MRI(FuncInfo.MF->getRegInfo()),
     MFI(*FuncInfo.MF->getFrameInfo()),
     MCP(*FuncInfo.MF->getConstantPool()),
@@ -895,7 +863,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
     TD(*TM.getTargetData()),
     TII(*TM.getInstrInfo()),
     TLI(*TM.getTargetLowering()),
-    TRI(*TM.getRegisterInfo()) {
+    TRI(*TM.getRegisterInfo()),
+    IsBottomUp(false) {
 }
 FastISel::~FastISel() {}
@@ -1024,7 +993,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
   unsigned ResultReg = createResultReg(RC);
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+  BuildMI(MBB, DL, II, ResultReg);
   return ResultReg;
 }
@@ -1035,14 +1004,11 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
+    BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
  }
@@ -1058,16 +1024,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1082,16 +1047,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1106,16 +1070,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addFPImm(FPImm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addFPImm(FPImm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1131,18 +1094,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill)
       .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill)
       .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1156,12 +1118,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addImm(Imm);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1177,16 +1138,15 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
   const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addImm(Idx);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addImm(Idx);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 55171fca09..8c4211ab29 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -78,13 +78,6 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
   MF = &mf;
   RegInfo = &MF->getRegInfo();
-  // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(Fn->getReturnType(),
-                Fn->getAttributes().getRetAttributes(), Outs, TLI);
-  CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
-                                      Outs, Fn->getContext());
-
   // Create a vreg for each argument register that is not dead and is used
   // outside of the entry block for the function.
   for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 5f93e2fcb2..f36620c3ed 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -732,11 +732,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
   if (II.usesCustomInsertionHook()) {
     // Insert this instruction into the basic block using a target
     // specific inserter which may returns a new basic block.
-    MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
-    if (NewMBB != MBB) {
-      MBB = NewMBB;
-      InsertPos = NewMBB->end();
-    }
+    MBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+    InsertPos = MBB->end();
     return;
   }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3b3ee3e343..0bb5e4b3cb 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -951,16 +951,79 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   // If this is an instruction which fast-isel has deferred, select it now.
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
-    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
-    RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
-    SDValue Chain = DAG.getEntryNode();
-    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+    assert(Inst->isSafeToSpeculativelyExecute() &&
+           "Instruction with side effects deferred!");
+    visit(*Inst);
+    DenseMap<const Value *, SDValue>::iterator NIt = NodeMap.find(Inst);
+    if (NIt != NodeMap.end() && NIt->second.getNode())
+      return NIt->second;
   }
   llvm_unreachable("Can't get register for value!");
   return SDValue();
 }
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function. This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+static void getReturnInfo(const Type* ReturnType,
+                          Attributes attr, SmallVectorImpl<EVT> &OutVTs,
+                          SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
+                          const TargetLowering &TLI,
+                          SmallVectorImpl<uint64_t> *Offsets = 0) {
+  SmallVector<EVT, 4> ValueVTs;
+  ComputeValueVTs(TLI, ReturnType, ValueVTs);
+  unsigned NumValues = ValueVTs.size();
+  if (NumValues == 0) return;
+  unsigned Offset = 0;
+
+  for (unsigned j = 0, f = NumValues; j != f; ++j) {
+    EVT VT = ValueVTs[j];
+    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+    if (attr & Attribute::SExt)
+      ExtendKind = ISD::SIGN_EXTEND;
+    else if (attr & Attribute::ZExt)
+      ExtendKind = ISD::ZERO_EXTEND;
+
+    // FIXME: C calling convention requires the return type to be promoted to
+    // at least 32-bit. But this is not necessary for non-C calling
+    // conventions. The frontend should mark functions whose return values
+    // require promoting with signext or zeroext attributes.
+    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+      if (VT.bitsLT(MinVT))
+        VT = MinVT;
+    }
+
+    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+                        PartVT.getTypeForEVT(ReturnType->getContext()));
+
+    // 'inreg' on function refers to return value
+    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+    if (attr & Attribute::InReg)
+      Flags.setInReg();
+
+    // Propagate extension type if any
+    if (attr & Attribute::SExt)
+      Flags.setSExt();
+    else if (attr & Attribute::ZExt)
+      Flags.setZExt();
+
+    for (unsigned i = 0; i < NumParts; ++i) {
+      OutVTs.push_back(PartVT);
+      OutFlags.push_back(Flags);
+      if (Offsets)
+      {
+        Offsets->push_back(Offset);
+        Offset += PartSize;
+      }
+    }
+  }
+}
+
 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
   SDValue Chain = getControlRoot();
   SmallVector<ISD::OutputArg, 8> Outs;
@@ -1257,7 +1320,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
 }
 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
-  MachineBasicBlock *BrMBB = FuncInfo.MBB;
+  MachineBasicBlock *BrMBB = FuncInfo.MBBMap[I.getParent()];
   // Update machine-CFG edges.
   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -1583,7 +1646,7 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
 }
 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
-  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
+  MachineBasicBlock *InvokeMBB = FuncInfo.MBBMap[I.getParent()];
   // Retrieve successors.
   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -2111,7 +2174,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
 }
 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
-  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
+  MachineBasicBlock *SwitchMBB = FuncInfo.MBBMap[SI.getParent()];
   // Figure out which block is immediately after the current one.
   MachineBasicBlock *NextBlock = 0;
@@ -2177,7 +2240,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
 }
 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
-  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
+  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBBMap[I.getParent()];
   // Update machine-CFG edges with unique successors.
   SmallVector<BasicBlock*, 32> succs;
@@ -3837,7 +3900,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const DbgValueInst &DI,
   if (DV.isInlinedFnArgument(MF.getFunction()))
     return false;
-  MachineBasicBlock *MBB = FuncInfo.MBB;
+  MachineBasicBlock *MBB = FuncInfo.MBBMap[DI.getParent()];
   if (MBB != &MF.front())
     return false;
@@ -4100,7 +4163,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   }
   case Intrinsic::eh_exception: {
     // Insert the EXCEPTIONADDR instruction.
-    assert(FuncInfo.MBB->isLandingPad() &&
+    assert(FuncInfo.MBBMap[I.getParent()]->isLandingPad() &&
            "Call to eh.exception not in landing pad!");
     SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
     SDValue Ops[1];
@@ -4112,7 +4175,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   }
   case Intrinsic::eh_selector: {
-    MachineBasicBlock *CallMBB = FuncInfo.MBB;
+    MachineBasicBlock *CallMBB = FuncInfo.MBBMap[I.getParent()];
     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
     if (CallMBB->isLandingPad())
       AddCatchInfo(I, &MMI, CallMBB);
@@ -4122,7 +4185,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
 #endif
       // FIXME: Mark exception selector register as live in.  Hack for PR1508.
       unsigned Reg = TLI.getExceptionSelectorRegister();
-      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+      if (Reg) FuncInfo.MBBMap[I.getParent()]->addLiveIn(Reg);
     }
     // Insert the EHSELECTION instruction.
@@ -4496,13 +4559,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
   Args.reserve(CS.arg_size());
   // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
+  SmallVector<EVT, 4> OutVTs;
+  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
   SmallVector<uint64_t, 4> Offsets;
-  GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
-                Outs, TLI, &Offsets);
+  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
+                OutVTs, OutsFlags, TLI, &Offsets);
   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-                        FTy->isVarArg(), Outs, FTy->getContext());
+                        FTy->isVarArg(), OutVTs, OutsFlags, FTy->getContext());
   SDValue DemoteStackSlot;
@@ -4595,7 +4659,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
     ComputeValueVTs(TLI, PtrRetTy, PVTs);
     assert(PVTs.size() == 1 && "Pointers should fit in one register");
     EVT PtrVT = PVTs[0];
-    unsigned NumValues = Outs.size();
+    unsigned NumValues = OutVTs.size();
     SmallVector<SDValue, 4> Values(NumValues);
     SmallVector<SDValue, 4> Chains(NumValues);
@@ -4603,7 +4667,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
       SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
                                 DemoteStackSlot,
                                 DAG.getConstant(Offsets[i], PtrVT));
-      SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
+      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
                               Add, NULL, Offsets[i], false, false, 1);
       Values[i] = L;
       Chains[i] = L.getValue(1);
@@ -5895,10 +5959,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
   SmallVector<ISD::InputArg, 16> Ins;
   // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                Outs, TLI);
-
+  SmallVector<EVT, 4> OutVTs;
+  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
+  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+                OutVTs, OutsFlags, TLI);
+
+  FuncInfo->CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(),
+                                                F.isVarArg(),
+                                                OutVTs, OutsFlags,
+                                                F.getContext());
   if (!FuncInfo->CanLowerReturn) {
     // Put in an sret pointer parameter before all the other parameters.
     SmallVector<EVT, 1> ValueVTs;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f3a7f7d071..5c14bf319e 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -319,8 +319,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
   return true;
 }
-void
-SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
+MachineBasicBlock *
+SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB,
+                                   BasicBlock::const_iterator Begin,
                                    BasicBlock::const_iterator End,
                                    bool &HadTailCall) {
   // Lower all of the non-terminator instructions. If a call is emitted
@@ -335,7 +336,7 @@ SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
   SDB->clear();
   // Final step, emit the lowered DAG as machine code.
-  CodeGenAndEmitDAG();
+  return CodeGenAndEmitDAG(BB);
 }
 namespace {
@@ -424,7 +425,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
   } while (!Worklist.empty());
 }
-void SelectionDAGISel::CodeGenAndEmitDAG() {
+MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
   std::string GroupName;
   if (TimePassesIsEnabled)
     GroupName = "Instruction Selection and Scheduling";
@@ -433,7 +434,7 @@
       ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
       ViewSUnitDAGs)
     BlockName = MF->getFunction()->getNameStr() + ":" +
-                FuncInfo->MBB->getBasicBlock()->getNameStr();
+                BB->getBasicBlock()->getNameStr();
   DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
@@ -540,7 +541,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
   {
     NamedRegionTimer T("Instruction Scheduling", GroupName,
                        TimePassesIsEnabled);
-    Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
+    Scheduler->Run(CurDAG, BB, BB->end());
   }
   if (ViewSUnitDAGs) Scheduler->viewGraph();
@@ -549,8 +550,7 @@
   // inserted into.
   {
     NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
-    FuncInfo->MBB = Scheduler->EmitSchedule();
-    FuncInfo->InsertPt = Scheduler->InsertPos;
+    BB = Scheduler->EmitSchedule();
   }
   // Free the scheduler state.
@@ -562,6 +562,8 @@
   // Free the SelectionDAG state, now that we're finished with it.
   CurDAG->clear();
+
+  return BB;
 }
 void SelectionDAGISel::DoInstructionSelection() {
@@ -623,22 +625,21 @@
 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
 /// do other setup for EH landing-pad blocks.
-void SelectionDAGISel::PrepareEHLandingPad() {
+void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
   // Add a label to mark the beginning of the landing pad.  Deletion of the
   // landing pad can thus be detected via the MachineModuleInfo.
-  MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
+  MCSymbol *Label = MF->getMMI().addLandingPad(BB);
   const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
-  BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
-    .addSym(Label);
+  BuildMI(BB, SDB->getCurDebugLoc(), II).addSym(Label);
   // Mark exception register as live in.
   unsigned Reg = TLI.getExceptionAddressRegister();
-  if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+  if (Reg) BB->addLiveIn(Reg);
   // Mark exception selector register as live in.
   Reg = TLI.getExceptionSelectorRegister();
-  if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+  if (Reg) BB->addLiveIn(Reg);
   // FIXME: Hack around an exception handling flaw (PR1508): the personality
   // function and list of typeids logically belong to the invoke (or, if you
@@ -651,7 +652,7 @@
   // in exceptions not being caught because no typeids are associated with
   // the invoke.  This may not be the only way things can go wrong, but it
   // is the only way we try to work around for the moment.
-  const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock();
+  const BasicBlock *LLVMBB = BB->getBasicBlock();
   const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
   if (Br && Br->isUnconditional()) { // Critical edge?
@@ -675,73 +676,80 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
   // Iterate over all basic blocks in the function.
   for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
     const BasicBlock *LLVMBB = &*I;
-    FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
-    FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
+    MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
     BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
     BasicBlock::const_iterator const End = LLVMBB->end();
-    BasicBlock::const_iterator BI = End;
+    BasicBlock::const_iterator BI = Begin;
+    // Lower any arguments needed in this block if this is the entry block.
+    if (LLVMBB == &Fn.getEntryBlock())
+      LowerArguments(LLVMBB);
+
+    // Setup an EH landing-pad block.
+    if (BB->isLandingPad())
+      PrepareEHLandingPad(BB);
+
     // Before doing SelectionDAG ISel, see if FastISel has been requested.
     if (FastIS) {
-      FastIS->startNewBlock();
-
+      // Emit code for any incoming arguments. This must happen before
+      // beginning FastISel on the entry block.
+      if (LLVMBB == &Fn.getEntryBlock()) {
+        CurDAG->setRoot(SDB->getControlRoot());
+        SDB->clear();
+        BB = CodeGenAndEmitDAG(BB);
+      }
+      FastIS->startNewBlock(BB);
       // Do FastISel on as many instructions as possible.
-      for (; BI != Begin; --BI) {
-        const Instruction *Inst = llvm::prior(BI);
-
-        // If we no longer require this instruction, skip it.
-        if (!Inst->mayWriteToMemory() &&
-            !isa<TerminatorInst>(Inst) &&
-            !isa<DbgInfoIntrinsic>(Inst) &&
-            !FuncInfo->isExportedInst(Inst))
+      for (; BI != End; ++BI) {
+#if 0
+        // Defer instructions with no side effects; they'll be emitted
+        // on-demand later.
+        if (BI->isSafeToSpeculativelyExecute() &&
+            !FuncInfo->isExportedInst(BI))
          continue;
-
-        // Bottom-up: reset the insert pos at the top, after any local-value
-        // instructions.
-        MachineBasicBlock::iterator LVIP = FastIS->getLastLocalValue();
-        if (LVIP != FuncInfo->MBB->end())
-          FuncInfo->InsertPt = next(LVIP);
-        else
-          FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
+#endif
        // Try to select the instruction with FastISel.
-        if (FastIS->SelectInstruction(Inst))
+        if (FastIS->SelectInstruction(BI))
          continue;
        // Then handle certain instructions as single-LLVM-Instruction blocks.
-        if (isa<CallInst>(Inst)) {
+        if (isa<CallInst>(BI)) {
           ++NumFastIselFailures;
           if (EnableFastISelVerbose || EnableFastISelAbort) {
             dbgs() << "FastISel missed call: ";
-            Inst->dump();
+            BI->dump();
           }
-          if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
-            unsigned &R = FuncInfo->ValueMap[Inst];
+          if (!BI->getType()->isVoidTy() && !BI->use_empty()) {
+            unsigned &R = FuncInfo->ValueMap[BI];
             if (!R)
-              R = FuncInfo->CreateRegs(Inst->getType());
+              R = FuncInfo->CreateRegs(BI->getType());
           }
           bool HadTailCall = false;
-          SelectBasicBlock(Inst, BI, HadTailCall);
+          BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall);
           // If the call was emitted as a tail call, we're done with the block.
           if (HadTailCall) {
-            --BI;
+            BI = End;
             break;
           }
+          // If the instruction was codegen'd with multiple blocks,
+          // inform the FastISel object where to resume inserting.
+          FastIS->setCurrentBlock(BB);
           continue;
         }
         // Otherwise, give up on FastISel for the rest of the block.
         // For now, be a little lenient about non-branch terminators.
-        if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) {
+        if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
           ++NumFastIselFailures;
           if (EnableFastISelVerbose || EnableFastISelAbort) {
             dbgs() << "FastISel miss: ";
-            Inst->dump();
+            BI->dump();
           }
           if (EnableFastISelAbort)
             // The "fast" selector couldn't handle something and bailed.
@@ -752,23 +760,15 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
       }
     }
-    FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
-
-    // Setup an EH landing-pad block.
-    if (FuncInfo->MBB->isLandingPad())
-      PrepareEHLandingPad();
-
-    // Lower any arguments needed in this block if this is the entry block.
-    if (LLVMBB == &Fn.getEntryBlock())
-      LowerArguments(LLVMBB);
-
     // Run SelectionDAG instruction selection on the remainder of the block
     // not handled by FastISel. If FastISel is not run, this is the entire
     // block.
-    bool HadTailCall;
-    SelectBasicBlock(Begin, BI, HadTailCall);
+    if (BI != End) {
+      bool HadTailCall;
+      BB = SelectBasicBlock(BB, BI, End, HadTailCall);
+    }
-    FinishBasicBlock();
+    FinishBasicBlock(BB);
     FuncInfo->PHINodesToUpdate.clear();
   }
@@ -776,7 +776,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
 }
 void
-SelectionDAGISel::FinishBasicBlock() {
+SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
   DEBUG(dbgs() << "Total amount of phi nodes to update: "
                << FuncInfo->PHINodesToUpdate.size() << "\n";
@@ -794,11 +794,11 @@
       MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
       assert(PHI->isPHI() &&
             "This is not a machine PHI node that we are updating!");
-      if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
+      if (!BB->isSuccessor(PHI->getParent()))
         continue;
       PHI->addOperand(
         MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
-      PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+      PHI->addOperand(MachineOperand::CreateMBB(BB));
     }
     return;
   }
@@ -807,35 +807,33 @@
     // Lower header first, if it wasn't already lowered
     if (!SDB->BitTestCases[i].Emitted) {
       // Set the current basic block to the mbb we wish to insert the code into
-      FuncInfo->MBB = SDB->BitTestCases[i].Parent;
-      FuncInfo->InsertPt = FuncInfo->MBB->end();
+      BB = SDB->BitTestCases[i].Parent;
       // Emit the code
-      SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
+      SDB->visitBitTestHeader(SDB->BitTestCases[i], BB);
       CurDAG->setRoot(SDB->getRoot());
       SDB->clear();
-      CodeGenAndEmitDAG();
+      BB = CodeGenAndEmitDAG(BB);
     }
     for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
       // Set the current basic block to the mbb we wish to insert the code into
-      FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
-      FuncInfo->InsertPt = FuncInfo->MBB->end();
+      BB = SDB->BitTestCases[i].Cases[j].ThisBB;
       // Emit the code
       if (j+1 != ej)
         SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
                               SDB->BitTestCases[i].Reg,
                               SDB->BitTestCases[i].Cases[j],
-                              FuncInfo->MBB);
+                              BB);
       else
         SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
                               SDB->BitTestCases[i].Reg,
                               SDB->BitTestCases[i].Cases[j],
-                              FuncInfo->MBB);
+                              BB);
       CurDAG->setRoot(SDB->getRoot());
       SDB->clear();
-      CodeGenAndEmitDAG();
+      BB = CodeGenAndEmitDAG(BB);
     }
     // Update PHI Nodes
@@ -880,24 +878,22 @@
     // Lower header first, if it wasn't already lowered
     if (!SDB->JTCases[i].first.Emitted) {
       // Set the current basic block to the mbb we wish to insert the code into
-      FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
-      FuncInfo->InsertPt = FuncInfo->MBB->end();
+      BB = SDB->JTCases[i].first.HeaderBB;
       // Emit the code
       SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
-                                FuncInfo->MBB);
+                                BB);
       CurDAG->setRoot(SDB->getRoot());
       SDB->clear();
-      CodeGenAndEmitDAG();
+      BB = CodeGenAndEmitDAG(BB);
     }
     // Set the current basic block to the mbb we wish to insert the code into
-    FuncInfo->MBB = SDB->JTCases[i].second.MBB;
-    FuncInfo->InsertPt = FuncInfo->MBB->end();
+    BB = SDB->JTCases[i].second.MBB;
     // Emit the code
     SDB->visitJumpTable(SDB->JTCases[i].second);
     CurDAG->setRoot(SDB->getRoot());
     SDB->clear();
-    CodeGenAndEmitDAG();
+    BB = CodeGenAndEmitDAG(BB);
     // Update PHI Nodes
     for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
@@ -915,11 +911,11 @@
           (MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
       }
       // JT BB.  Just iterate over successors here
-      if (FuncInfo->MBB->isSuccessor(PHIBB)) {
+      if (BB->isSuccessor(PHIBB)) {
        PHI->addOperand
          (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
                                     false));
-        PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+        PHI->addOperand(MachineOperand::CreateMBB(BB));
      }
    }
  }
@@ -931,10 +927,10 @@
     MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
     assert(PHI->isPHI() &&
            "This is not a machine PHI node that we are updating!");
-    if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
+    if (BB->isSuccessor(PHI->getParent())) {
       PHI->addOperand(
         MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
-      PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+      PHI->addOperand(MachineOperand::CreateMBB(BB));
    }
  }
@@ -942,8 +938,7 @@
   // additional DAGs necessary.
   for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
     // Set the current basic block to the mbb we wish to insert the code into
-    MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
-    FuncInfo->InsertPt = FuncInfo->MBB->end();
+    MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB;
     // Determine the unique successors.
     SmallVector<MachineBasicBlock *, 2> Succs;
@@ -953,24 +948,21 @@
     // Emit the code. Note that this could result in ThisBB being split, so
     // we need to check for updates.
-    SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
+    SDB->visitSwitchCase(SDB->SwitchCases[i], BB);
     CurDAG->setRoot(SDB->getRoot());
     SDB->clear();
-    CodeGenAndEmitDAG();
-    ThisBB = FuncInfo->MBB;
+    ThisBB = CodeGenAndEmitDAG(BB);
     // Handle any PHI nodes in successors of this chunk, as if we were coming
     // from the original BB before switch expansion.  Note that PHI nodes can
     // occur multiple times in PHINodesToUpdate.  We have to be very careful to
     // handle them the right number of times.
     for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
-      FuncInfo->MBB = Succs[i];
-      FuncInfo->InsertPt = FuncInfo->MBB->end();
-      // FuncInfo->MBB may have been removed from the CFG if a branch was
-      // constant folded.
-      if (ThisBB->isSuccessor(FuncInfo->MBB)) {
-        for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
-             Phi != FuncInfo->MBB->end() && Phi->isPHI();
+      BB = Succs[i];
+      // BB may have been removed from the CFG if a branch was constant folded.
+      if (ThisBB->isSuccessor(BB)) {
+        for (MachineBasicBlock::iterator Phi = BB->begin();
+             Phi != BB->end() && Phi->isPHI();
             ++Phi) {
           // This value for this PHI node is recorded in PHINodesToUpdate.
           for (unsigned pn = 0; ; ++pn) {
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a9a7e5054b..d56a8921f1 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -20,7 +20,6 @@
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -839,65 +838,6 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
   return 1;
 }
-/// Get the EVTs and ArgFlags collections that represent the legalized return
-/// type of the given function. This does not require a DAG or a return value,
-/// and is suitable for use before any DAGs for the function are constructed.
-/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
-                         SmallVectorImpl<ISD::OutputArg> &Outs,
-                         const TargetLowering &TLI,
-                         SmallVectorImpl<uint64_t> *Offsets) {
-  SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, ReturnType, ValueVTs);
-  unsigned NumValues = ValueVTs.size();
-  if (NumValues == 0) return;
-  unsigned Offset = 0;
-
-  for (unsigned j = 0, f = NumValues; j != f; ++j) {
-    EVT VT = ValueVTs[j];
-    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
-    if (attr & Attribute::SExt)
-      ExtendKind = ISD::SIGN_EXTEND;
-    else if (attr & Attribute::ZExt)
-      ExtendKind = ISD::ZERO_EXTEND;
-
-    // FIXME: C calling convention requires the return type to be promoted to
-    // at least 32-bit. But this is not necessary for non-C calling
-    // conventions. The frontend should mark functions whose return values
-    // require promoting with signext or zeroext attributes.
-    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
-      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
-      if (VT.bitsLT(MinVT))
-        VT = MinVT;
-    }
-
-    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
-    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
-    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
-                        PartVT.getTypeForEVT(ReturnType->getContext()));
-
-    // 'inreg' on function refers to return value
-    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
-    if (attr & Attribute::InReg)
-      Flags.setInReg();
-
-    // Propagate extension type if any
-    if (attr & Attribute::SExt)
-      Flags.setSExt();
-    else if (attr & Attribute::ZExt)
-      Flags.setZExt();
-
-    for (unsigned i = 0; i < NumParts; ++i) {
-      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
-      if (Offsets) {
-        Offsets->push_back(Offset);
-        Offset += PartSize;
-      }
-    }
-  }
-}
-
 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area.  This is the actual
 /// alignment, not its logarithm.
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 8300715c97..f60cbcdf9b 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -23,7 +23,6 @@
 #include "llvm/GlobalVariable.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
@@ -85,8 +84,6 @@ private:
   bool X86SelectStore(const Instruction *I);
-  bool X86SelectRet(const Instruction *I);
-
   bool X86SelectCmp(const Instruction *I);
   bool X86SelectZExt(const Instruction *I);
@@ -108,7 +105,6 @@ private:
   bool X86SelectCall(const Instruction *I);
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
-  CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
   const X86InstrInfo *getInstrInfo() const {
     return getTargetMachine()->getInstrInfo();
@@ -182,20 +178,6 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
   return CC_X86_32_C;
 }
-/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
-                                          bool isTaillCall) {
-  if (Subtarget->is64Bit()) {
-    if (Subtarget->isTargetWin64())
-      return RetCC_X86_Win64_C;
-    else
-      return RetCC_X86_64_C;
-  }
-
-  return RetCC_X86_32_C;
-}
-
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
@@ -248,8 +230,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
   }
   ResultReg = createResultReg(RC);
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc), ResultReg), AM);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return true;
 }
@@ -268,7 +249,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
   case MVT::i1: {
     // Mask out all but lowest bit.
     unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL,
             TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
     Val = AndResult;
   }
@@ -285,8 +266,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
     break;
   }
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc)), AM).addReg(Val);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
   return true;
 }
@@ -314,8 +294,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
   }
   if (Opc) {
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                           DL, TII.get(Opc)), AM)
+    addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
       .addImm(Signed ? (uint64_t) CI->getSExtValue() :
                        CI->getZExtValue());
     return true;
@@ -354,7 +333,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
     // Don't walk into other basic blocks; it's possible we haven't
     // visited them yet, so the instructions may not yet be assigned
     // virtual registers.
-    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+    if (FuncInfo.MBBMap[I->getParent()] != MBB)
       return false;
     Opcode = I->getOpcode();
@@ -551,8 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
       }
       LoadReg = createResultReg(RC);
-      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                             DL, TII.get(Opc), LoadReg), StubAM);
+      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
       // Prevent loading GV stub multiple times in same MBB.
       LocalValueMap[V] = LoadReg;
@@ -678,72 +656,6 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
   return X86FastEmitStore(VT, I->getOperand(0), AM);
 }
-/// X86SelectRet - Select and emit code to implement ret instructions.
-bool X86FastISel::X86SelectRet(const Instruction *I) {
-  const ReturnInst *Ret = cast<ReturnInst>(I);
-  const Function &F = *I->getParent()->getParent();
-
-  if (!FuncInfo.CanLowerReturn)
-    return false;
-
-  CallingConv::ID CC = F.getCallingConv();
-  if (CC != CallingConv::C &&
-      CC != CallingConv::Fast &&
-      CC != CallingConv::X86_FastCall)
-    return false;
-
-  if (Subtarget->isTargetWin64())
-    return false;
-
-  // fastcc with -tailcallopt is intended to provide a guaranteed
-  // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
-    return false;
-
-  // Let SDISel handle vararg functions.
-  if (F.isVarArg())
-    return false;
-
-  if (Ret->getNumOperands() > 0) {
-    SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                  Outs, TLI);
-
-    // Analyze operands of the call, assigning locations to each operand.
-    SmallVector<CCValAssign, 16> ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
-    CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
-
-    const Value *RV = Ret->getOperand(0);
-    unsigned Reg = getRegForValue(RV);
-    if (Reg == 0)
-      return false;
-
-    // Copy the return value into registers.
-    for (unsigned i = 0, e = ValLocs.size(); i != e; ++i) {
-      CCValAssign &VA = ValLocs[i];
-
-      // Don't bother handling odd stuff for now.
-      if (VA.getLocInfo() != CCValAssign::Full)
-        return false;
-      if (!VA.isRegLoc())
-        return false;
-
-      TargetRegisterClass* RC = TLI.getRegClassFor(VA.getValVT());
-      bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      VA.getLocReg(), Reg + VA.getValNo(),
-                                      RC, RC, DL);
-      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
-
-      MRI.addLiveOut(VA.getLocReg());
-    }
-  }
-
-  // Now emit the RET.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
-  return true;
-}
-
 /// X86SelectLoad - Select and emit code to implement load instructions.
 ///
 bool X86FastISel::X86SelectLoad(const Instruction *I)  {
@@ -808,9 +720,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
   // CMPri, otherwise use CMPrr.
   if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
     if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
-        .addReg(Op0Reg)
-        .addImm(Op1C->getSExtValue());
+      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
+                                              .addImm(Op1C->getSExtValue());
       return true;
     }
  }
   unsigned Op1Reg = getRegForValue(Op1);
   if (Op1Reg == 0) return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
-    .addReg(Op0Reg)
-    .addReg(Op1Reg);
+  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
   return true;
 }
@@ -844,10 +753,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     unsigned EReg = createResultReg(&X86::GR8RegClass);
     unsigned NPReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNPr), NPReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
+    BuildMI(MBB, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -858,13 +766,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     unsigned NEReg = createResultReg(&X86::GR8RegClass);
     unsigned PReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNEr), NEReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETPr), PReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::OR8rr), ResultReg)
-      .addReg(PReg).addReg(NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
+    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
     UpdateValueMap(I, ResultReg);
     return true;
   }
@@ -903,7 +807,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
   if (!X86FastEmitCompare(Op0, Op1, VT))
     return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -939,7 +843,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       // Try to take advantage of fallthrough opportunities.
       CmpInst::Predicate Predicate = CI->getPredicate();
-      if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
         Predicate = CmpInst::getInversePredicate(Predicate);
       }
@@ -988,18 +892,16 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       if (!X86FastEmitCompare(Op0, Op1, VT))
         return false;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
-        .addMBB(TrueMBB);
+      BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
       if (Predicate == CmpInst::FCMP_UNE) {
         // X86 requires a second branch to handle UNE (and OEQ,
         // which is mapped to UNE above).
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
-          .addMBB(TrueMBB);
+        BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
       }
       FastEmitBranch(FalseMBB, DL);
-      FuncInfo.MBB->addSuccessor(TrueMBB);
+      MBB->addSuccessor(TrueMBB);
       return true;
     }
   } else if (ExtractValueInst *EI =
@@ -1025,8 +927,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       unsigned Reg = getRegForValue(EI);
       for (MachineBasicBlock::const_reverse_iterator
-             RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
-           RI != RE; ++RI) {
+             RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
         const MachineInstr &MI = *RI;
         if (MI.definesRegister(Reg)) {
@@ -1051,11 +952,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
         unsigned OpCode = SetMI->getOpcode();
         if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
-          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                  TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
+          BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
+                                   X86::JO_4 : X86::JB_4))
             .addMBB(TrueMBB);
           FastEmitBranch(FalseMBB, DL);
-          FuncInfo.MBB->addSuccessor(TrueMBB);
+          MBB->addSuccessor(TrueMBB);
           return true;
         }
       }
@@ -1067,12 +968,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
   unsigned OpReg = getRegForValue(BI->getCondition());
   if (OpReg == 0) return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-    .addReg(OpReg).addReg(OpReg);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
-    .addMBB(TrueMBB);
+  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
+  BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
   FastEmitBranch(FalseMBB, DL);
-  FuncInfo.MBB->addSuccessor(TrueMBB);
+  MBB->addSuccessor(TrueMBB);
   return true;
 }
@@ -1129,7 +1028,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   // Fold immediate in shl(x,3).
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
     unsigned ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+    BuildMI(MBB, DL, TII.get(OpImm),
             ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -1137,20 +1036,17 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   unsigned Op1Reg = getRegForValue(I->getOperand(1));
   if (Op1Reg == 0) return false;
-  TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                   CReg, Op1Reg, RC, RC, DL);
+  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
   // The shift instruction uses X86::CL. If we defined a super-register
   // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
   // we're doing here.
@@ -1129,7 +1028,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   // Fold immediate in shl(x,3).
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
     unsigned ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+    BuildMI(MBB, DL, TII.get(OpImm),
             ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -1137,20 +1036,17 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
 
   unsigned Op1Reg = getRegForValue(I->getOperand(1));
   if (Op1Reg == 0) return false;
-  TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                   CReg, Op1Reg, RC, RC, DL);
+  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
 
   // The shift instruction uses X86::CL. If we defined a super-register
   // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
   // we're doing here.
   if (CReg != X86::CL)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
+    BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
       .addReg(CReg).addImm(X86::sub_8bit);
 
   unsigned ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
-    .addReg(Op0Reg);
+  BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -1182,11 +1078,9 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
   unsigned Op2Reg = getRegForValue(I->getOperand(2));
   if (Op2Reg == 0) return false;
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-    .addReg(Op0Reg).addReg(Op0Reg);
+  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
   unsigned ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
-    .addReg(Op1Reg).addReg(Op2Reg);
+  BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -1200,9 +1094,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
         unsigned OpReg = getRegForValue(V);
         if (OpReg == 0) return false;
         unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                TII.get(X86::CVTSS2SDrr), ResultReg)
-          .addReg(OpReg);
+        BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
         UpdateValueMap(I, ResultReg);
         return true;
       }
@@ -1219,9 +1111,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
         unsigned OpReg = getRegForValue(V);
         if (OpReg == 0) return false;
         unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                TII.get(X86::CVTSD2SSrr), ResultReg)
-          .addReg(OpReg);
+        BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
         UpdateValueMap(I, ResultReg);
         return true;
       }
@@ -1256,8 +1146,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
   const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
     ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
   unsigned CopyReg = createResultReg(CopyRC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CopyOpc), CopyReg)
-    .addReg(InputReg);
+  BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
 
   // Then issue an extract_subreg.
   unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
@@ -1278,18 +1167,14 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
     switch (CI->getIntrinsicID()) {
     default: break;
     case Intrinsic::sadd_with_overflow:
-    case Intrinsic::uadd_with_overflow: {
+    case Intrinsic::uadd_with_overflow:
       // Cheat a little. We know that the registers for "add" and "seto" are
       // allocated sequentially. However, we only keep track of the register
       // for "add" in the value map. Use extractvalue's index to get the
       // correct register for "seto".
-      unsigned OpReg = getRegForValue(Agg);
-      if (OpReg == 0)
-        return false;
-      UpdateValueMap(I, OpReg + *EI->idx_begin());
+      UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
       return true;
     }
-    }
   }
 
   return false;
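The "cheat" in X86SelectExtractValue works because the lowering of the overflow intrinsics issues two createResultReg calls back to back, one for the ADD's result and one for the SETO/SETB's, so element i of the aggregate lives in BaseReg + i even though only BaseReg is recorded in the value map. For reference, what the intrinsic itself computes (standalone sketch, not LLVM code):

    #include <cstdint>

    // {sum, overflow} as produced by llvm.sadd.with.overflow.i32; the
    // lowering emits the ADD and then a SETO into the next virtual register.
    struct AddResult { int32_t sum; bool overflow; };

    AddResult sadd_with_overflow(int32_t a, int32_t b) {
      int64_t wide = int64_t(a) + int64_t(b);
      bool ovf = wide != int64_t(int32_t(wide));  // sum does not fit in i32
      return { int32_t(wide), ovf };
    }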
@@ -1333,7 +1218,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
       return false;
 
     unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+    BuildMI(MBB, DL, TII.get(OpC), ResultReg).
       addImm(CI->isZero() ? -1ULL : 0);
     UpdateValueMap(&I, ResultReg);
     return true;
@@ -1347,12 +1232,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
     const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
     // FIXME may need to add RegState::Debug to any registers produced,
     // although ESP/EBP should be the only ones at the moment.
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
-      addImm(0).addMetadata(DI->getVariable());
+    addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
+      addMetadata(DI->getVariable());
     return true;
   }
   case Intrinsic::trap: {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+    BuildMI(MBB, DL, TII.get(X86::TRAP));
     return true;
   }
   case Intrinsic::sadd_with_overflow:
@@ -1388,8 +1273,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
       return false;
 
     unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
-      .addReg(Reg1).addReg(Reg2);
+    BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
     unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
 
     // If the add with overflow is an intra-block value then we just want to
@@ -1407,7 +1291,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
     unsigned Opc = X86::SETBr;
     if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
       Opc = X86::SETOr;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+    BuildMI(MBB, DL, TII.get(Opc), ResultReg);
     return true;
   }
 }
@@ -1534,8 +1418,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
-    .addImm(NumBytes);
+  BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
 
   // Process argument: walk the register/memloc assignments, inserting
   // copies / loads.
@@ -1591,8 +1474,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
     if (VA.isRegLoc()) {
       TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
-      bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      VA.getLocReg(), Arg, RC, RC, DL);
+      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
+                                      Arg, RC, RC, DL);
       assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
       Emitted = true;
       RegArgs.push_back(VA.getLocReg());
@@ -1618,8 +1501,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (Subtarget->isPICStyleGOT()) {
     TargetRegisterClass *RC = X86::GR32RegisterClass;
     unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
-    bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                    X86::EBX, Base, RC, RC, DL);
+    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC,
+                                    DL);
     assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
     Emitted = true;
   }
@@ -1629,8 +1512,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (CalleeOp) {
     // Register-indirect call.
     unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-      .addReg(CalleeOp);
+    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
   } else {
     // Direct call.
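The sadd/uadd.with.overflow hunks above differ only in the flag read back after the ADD: signed overflow raises OF, which SETOr captures, while an unsigned carry raises CF, which SETBr captures. Both predicates in standalone form (illustrative sketch, not LLVM code):

    #include <cstdint>

    bool uadd_overflows(uint32_t a, uint32_t b) {  // what SETBr observes (CF)
      return uint32_t(a + b) < a;                  // carry out of bit 31
    }

    bool sadd_overflows(int32_t a, int32_t b) {    // what SETOr observes (OF)
      uint32_t ua = a, ub = b, us = ua + ub;
      // Overflow iff the operands share a sign that the sum does not keep.
      return ((~(ua ^ ub)) & (ua ^ us)) >> 31;
    }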
@@ -1659,8 +1541,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
     }
 
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-      .addGlobalAddress(GV, 0, OpFlags);
+    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
   }
 
   // Add an implicit use GOT pointer in EBX.
@@ -1673,8 +1554,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
-    .addImm(NumBytes).addImm(0);
+  BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
 
   // Now handle call return value (if any).
   SmallVector<unsigned, 4> UsedRegs;
@@ -1701,7 +1581,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
     }
 
     unsigned ResultReg = createResultReg(DstRC);
-    bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, ResultReg,
+    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                     RVLocs[0].getLocReg(), DstRC, SrcRC, DL);
     assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
     Emitted = true;
@@ -1715,21 +1595,18 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
       unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
       unsigned MemSize = ResVT.getSizeInBits()/8;
       int FI = MFI.CreateStackObject(MemSize, MemSize, false);
-      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                TII.get(Opc)), FI)
-        .addReg(ResultReg);
+      addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
       DstRC = ResVT == MVT::f32
         ? X86::FR32RegisterClass : X86::FR64RegisterClass;
       Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
       ResultReg = createResultReg(DstRC);
-      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                TII.get(Opc), ResultReg), FI);
+      addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
     }
 
     if (AndToI1) {
       // Mask out all but lowest bit for some call which produces an i1.
       unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+      BuildMI(MBB, DL,
               TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
       ResultReg = AndResult;
     }
@@ -1752,8 +1629,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
     return X86SelectLoad(I);
   case Instruction::Store:
     return X86SelectStore(I);
-  case Instruction::Ret:
-    return X86SelectRet(I);
   case Instruction::ICmp:
   case Instruction::FCmp:
     return X86SelectCmp(I);
@@ -1854,8 +1729,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
     else
       Opc = X86::LEA64r;
     unsigned ResultReg = createResultReg(RC);
-    addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                          TII.get(Opc), ResultReg), AM);
+    addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
     return ResultReg;
   }
   return 0;
@@ -1885,8 +1759,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
 
   // Create the load from the constant pool.
   unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
   unsigned ResultReg = createResultReg(RC);
-  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                   TII.get(Opc), ResultReg),
+  addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
                            MCPOffset, PICBase, OpFlag);
 
   return ResultReg;
 }
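The ST_Fp80m32/ST_Fp80m64 spill-and-reload pair in the return-value hunks above exists because x87 results come back in ST(0) at 80-bit precision while the caller wants them in an SSE register: the store to a stack slot rounds the value to the destination width, and the MOVSSrm/MOVSDrm reload brings it into an XMM register. A runnable demonstration of that rounding step (standalone C++, not LLVM code):

    #include <cstdio>

    int main() {
      long double st0 = 1.0L / 3.0L;  // stand-in for the 80-bit ST(0) result
      float slot;                     // the MemSize stack object
      slot = (float)st0;              // ST_Fp80m32: the store rounds to f32
      float xmm = slot;               // MOVSSrm: plain reload into an XMM reg
      printf("%.20Lf\n%.20f\n", st0, xmm);  // extra digits vanish after round
      return 0;
    }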
@@ -1909,8 +1782,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
   TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
-  addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                        TII.get(Opc), ResultReg), AM);
+  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return ResultReg;
 }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e35e96b565..0da99b86c4 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1218,12 +1218,13 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
 
 bool X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                        const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        const SmallVectorImpl<EVT> &OutTys,
+                        const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                         LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                  RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_X86);
+  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
 }
 
 SDValue
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 2d28e5cc2e..39bbdac684 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -740,7 +740,8 @@ namespace llvm {
     virtual bool
       CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                     const SmallVectorImpl<ISD::OutputArg> &Outs,
+                     const SmallVectorImpl<EVT> &OutTys,
+                     const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                      LLVMContext &Context) const;
 
     void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index abe7b2fd42..6a25e06335 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1135,12 +1135,13 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
 
 bool XCoreTargetLowering::
 CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-               const SmallVectorImpl<ISD::OutputArg> &Outs,
+               const SmallVectorImpl<EVT> &OutTys,
+               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                  RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_XCore);
+  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
 }
 
 SDValue
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index febc198f4f..46643014a0 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -193,7 +193,8 @@ namespace llvm {
     virtual bool
       CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                     const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+                     const SmallVectorImpl<EVT> &OutTys,
+                     const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                      LLVMContext &Context) const;
   };
 }
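The CanLowerReturn/CheckReturn changes in the last four files are the API side of the revert: the hook goes back to taking the return types and their flags as two parallel arrays instead of ISD::OutputArg records. The decision it feeds is whether every return value fits the return calling convention, or whether the function must instead return through a hidden sret pointer. A rough standalone model of that loop (illustrative, not the LLVM API):

    #include <vector>
    #include <functional>

    // Mirrors CCState::CheckReturn: a CCAssignFn-style callback returns
    // true when it *cannot* assign a value, and a single failure forces
    // sret demotion of the whole return.
    using AssignFn = std::function<bool(unsigned ValNo, int VT)>;

    bool checkReturn(const std::vector<int> &OutTys, const AssignFn &Fn) {
      for (unsigned i = 0, e = OutTys.size(); i != e; ++i)
        if (Fn(i, OutTys[i]))
          return false;  // demote: return via hidden sret argument
      return true;       // every value fits the return convention
    }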