author     Dale Johannesen <dalej@apple.com>    2009-02-03 19:33:06 +0000
committer  Dale Johannesen <dalej@apple.com>    2009-02-03 19:33:06 +0000
commit     ace1610df5fe22519d82cd7418a772e46ebd965b (patch)
tree       859b2dda545d25792766127d6d855160adc89b08
parent     9b872db775797dea4b391a9347cfbd2ca9c558e2 (diff)
DebugLoc propagation. 2/3 through file.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63650 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp   753
-rw-r--r--  lib/Target/X86/X86ISelLowering.h       2
2 files changed, 409 insertions, 346 deletions
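The hunks below apply one mechanical transformation over and over, so they are easier to read with the pattern in mind. The following is a minimal sketch rather than code from the commit (LowerFOO is a hypothetical lowering routine; getExampleMask merely restates the patch's getMOVLMask helper): a DebugLoc is read once from the node being lowered and then threaded through every DAG.getNode/getLoad/getStore call, and file-static helpers gain an explicit DebugLoc parameter because they have no SDNode of their own to query.

// A minimal sketch, not code from the patch.  LowerFOO is hypothetical and
// getExampleMask restates the diff's getMOVLMask helper.  It shows the two
// changes the commit makes throughout the file: (1) standalone helpers take
// an explicit DebugLoc parameter, and (2) DAG.getNode / getLoad / getStore
// receive that DebugLoc as their second argument.
static SDValue getExampleMask(unsigned NumElems, SelectionDAG &DAG,
                              DebugLoc dl) {            // (1) dl is passed in
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT BaseVT = MaskVT.getVectorElementType();
  SmallVector<SDValue, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); // constants are
  for (unsigned i = 1; i != NumElems; ++i)              // unchanged: no dl
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,     // (2) dl threaded
                     &MaskVec[0], MaskVec.size());      //     into getNode
}

SDValue X86TargetLowering::LowerFOO(SDValue Op, SelectionDAG &DAG) {
  // Fetch the source location once from the node being lowered ...
  DebugLoc dl = Op.getNode()->getDebugLoc();
  SDValue Arg = Op.getOperand(0);                       // assume an MMX value
  // ... and hand it to every node created during lowering, so the new nodes
  // keep the line information of the operation they replace.
  Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
  Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v2i64,
                     DAG.getNode(ISD::UNDEF, dl, MVT::v2i64), Arg,
                     getExampleMask(2, DAG, dl));
}

Routines that already operate on a call node, such as LowerCallResult and LowerCALL, take the location from that node instead (TheCall->getDebugLoc()); the threading is otherwise identical.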
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7a4d34d10a..d043e956e5 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -45,7 +45,7 @@ static cl::opt<bool> DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX")); // Forward declarations. -static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG); +static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG, DebugLoc dl); X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) : TargetLowering(TM) { @@ -914,6 +914,7 @@ SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, /// LowerRET - Lower an ISD::RET node. SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { + DebugLoc dl = Op.getNode()->getDebugLoc(); assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args"); SmallVector<CCValAssign, 16> RVLocs; @@ -955,7 +956,7 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) { Operands.push_back(Chain.getOperand(i)); } - return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0], + return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0], Operands.size()); } @@ -980,7 +981,7 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { // If this is a copy from an xmm register to ST(0), use an FPExtend to // change the value to the FP stack register class. if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) - ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy); + ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); RetOps.push_back(ValToCopy); // Don't emit a copytoreg. continue; @@ -1015,7 +1016,8 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { if (Flag.getNode()) RetOps.push_back(Flag); - return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size()); + return DAG.getNode(X86ISD::RET_FLAG, dl, + MVT::Other, &RetOps[0], RetOps.size()); } @@ -1027,7 +1029,8 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDNode *X86TargetLowering:: LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, unsigned CallingConv, SelectionDAG &DAG) { - + + DebugLoc dl = TheCall->getDebugLoc(); // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; bool isVarArg = TheCall->isVarArg(); @@ -1065,7 +1068,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, if (CopyVT != RVLocs[i].getValVT()) { // Round the F80 the right size, which also moves to the appropriate xmm // register. - Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val, + Val = DAG.getNode(ISD::FP_ROUND, dl, RVLocs[i].getValVT(), Val, // This truncation won't change the value. DAG.getIntPtrConstant(1)); } @@ -1075,8 +1078,8 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, // Merge everything together with a MERGE_VALUES node. 
ResultVals.push_back(Chain); - return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(), &ResultVals[0], - ResultVals.size()).getNode(); + return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(), + &ResultVals[0], ResultVals.size()).getNode(); } @@ -1222,7 +1225,7 @@ SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG, SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); if (Flags.isByVal()) return FIN; - return DAG.getLoad(VA.getValVT(), Root, FIN, + return DAG.getLoad(VA.getValVT(), Op.getNode()->getDebugLoc(), Root, FIN, PseudoSourceValue::getFixedStack(FI), 0); } @@ -1230,6 +1233,7 @@ SDValue X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); + DebugLoc dl = Op.getNode()->getDebugLoc(); const Function* Fn = MF.getFunction(); if (Fn->hasExternalLinkage() && @@ -1304,23 +1308,23 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { // bits. Insert an assert[sz]ext to capture this, then truncate to the // right size. if (VA.getLocInfo() == CCValAssign::SExt) - ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue, + ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, DAG.getValueType(VA.getValVT())); else if (VA.getLocInfo() == CCValAssign::ZExt) - ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue, + ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, DAG.getValueType(VA.getValVT())); if (VA.getLocInfo() != CCValAssign::Full) - ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue); + ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); // Handle MMX values passed in GPRs. if (Is64Bit && RegVT != VA.getLocVT()) { if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass) - ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue); + ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue); else if (RC == X86::VR128RegisterClass) { - ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue, - DAG.getConstant(0, MVT::i64)); - ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue); + ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, + ArgValue, DAG.getConstant(0, MVT::i64)); + ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue); } } @@ -1343,7 +1347,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { FuncInfo->setSRetReturnReg(Reg); } SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]); - Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root); + Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Root); } unsigned StackSize = CCInfo.getNextStackOffset(); @@ -1408,36 +1412,36 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { // Store the integer parameter registers. 
SmallVector<SDValue, 8> MemOps; SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); - SDValue FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, + SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, DAG.getIntPtrConstant(VarArgsGPOffset)); for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], X86::GR64RegisterClass); SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); SDValue Store = - DAG.getStore(Val.getValue(1), Val, FIN, + DAG.getStore(Val.getValue(1), dl, Val, FIN, PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); MemOps.push_back(Store); - FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, + FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); } // Now store the XMM (fp + vector) parameter registers. - FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, + FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, DAG.getIntPtrConstant(VarArgsFPOffset)); for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass); SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); SDValue Store = - DAG.getStore(Val.getValue(1), Val, FIN, + DAG.getStore(Val.getValue(1), dl, Val, FIN, PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); MemOps.push_back(Store); - FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, + FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, DAG.getIntPtrConstant(16)); } if (!MemOps.empty()) - Root = DAG.getNode(ISD::TokenFactor, MVT::Other, + Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0], MemOps.size()); } } @@ -1465,7 +1469,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); // Return the new list of results. - return DAG.getNode(ISD::MERGE_VALUES, Op.getNode()->getVTList(), + return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(), &ArgValues[0], ArgValues.size()).getValue(Op.getResNo()); } @@ -1475,13 +1479,14 @@ X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG, const CCValAssign &VA, SDValue Chain, SDValue Arg, ISD::ArgFlagsTy Flags) { + DebugLoc dl = TheCall->getDebugLoc(); unsigned LocMemOffset = VA.getLocMemOffset(); SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); - PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); + PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); if (Flags.isByVal()) { return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG); } - return DAG.getStore(Chain, Arg, PtrOff, + return DAG.getStore(Chain, dl, Arg, PtrOff, PseudoSourceValue::getStack(), LocMemOffset); } @@ -1493,7 +1498,8 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue Chain, bool IsTailCall, bool Is64Bit, - int FPDiff) { + int FPDiff, + DebugLoc dl) { if (!IsTailCall || FPDiff==0) return Chain; // Adjust the Return address stack slot. @@ -1501,7 +1507,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, OutRetAddr = getReturnAddressFrameIndex(DAG); // Load the "old" Return address. 
- OutRetAddr = DAG.getLoad(VT, Chain, OutRetAddr, NULL, 0); + OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, NULL, 0); return SDValue(OutRetAddr.getNode(), 1); } @@ -1510,7 +1516,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, - bool Is64Bit, int FPDiff) { + bool Is64Bit, int FPDiff, DebugLoc dl) { // Store the return address to the appropriate stack slot. if (!FPDiff) return Chain; // Calculate the new stack slot for the return address. @@ -1519,7 +1525,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); MVT VT = Is64Bit ? MVT::i64 : MVT::i32; SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); - Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, + Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0); return Chain; } @@ -1535,6 +1541,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue Callee = TheCall->getCallee(); bool Is64Bit = Subtarget->is64Bit(); bool IsStructRet = CallIsStructReturn(TheCall); + DebugLoc dl = TheCall->getDebugLoc(); assert(!(isVarArg && CC == CallingConv::Fast) && "Var args not supported with calling convention fastcc"); @@ -1567,7 +1574,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue RetAddrFrIdx; // Load return adress for tail calls. Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit, - FPDiff); + FPDiff, dl); SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<SDValue, 8> MemOpChains; @@ -1586,13 +1593,13 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { default: assert(0 && "Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::SExt: - Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); + Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::ZExt: - Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); + Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::AExt: - Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); + Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); break; } @@ -1606,17 +1613,17 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX: case X86::R8: { // Special case: passing MMX values in GPR registers. - Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg); + Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); break; } case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3: case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: { // Special case: passing MMX values in XMM registers. 
- Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg); - Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg); - Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, - DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg, - getMOVLMask(2, DAG)); + Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); + Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); + Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v2i64, + DAG.getNode(ISD::UNDEF, dl, MVT::v2i64), Arg, + getMOVLMask(2, DAG, dl)); break; } } @@ -1635,7 +1642,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { } if (!MemOpChains.empty()) - Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); // Build a sequence of copy-to-reg nodes chained together with token chain @@ -1723,21 +1730,21 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); if (StackPtr.getNode() == 0) StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); - Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source); + Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain, Flags, DAG)); } else { // Store relative to framepointer. MemOpChains2.push_back( - DAG.getStore(Chain, Arg, FIN, + DAG.getStore(Chain, dl, Arg, FIN, PseudoSourceValue::getFixedStack(FI), 0)); } } } if (!MemOpChains2.empty()) - Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains2[0], MemOpChains2.size()); // Copy arguments to their registers. @@ -1750,7 +1757,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { // Store the return address to the appropriate stack slot. Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, - FPDiff); + FPDiff, dl); } // If the callee is a GlobalAddress node (quite common, every direct call is) @@ -1821,13 +1828,13 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { if (IsTailCall) { assert(InFlag.getNode() && "Flag must be set. Depend on flag being set in LowerRET"); - Chain = DAG.getNode(X86ISD::TAILCALL, + Chain = DAG.getNode(X86ISD::TAILCALL, dl, TheCall->getVTList(), &Ops[0], Ops.size()); return SDValue(Chain.getNode(), Op.getResNo()); } - Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); + Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); InFlag = Chain.getValue(1); // Create the CALLSEQ_END node. 
@@ -2714,11 +2721,12 @@ static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1, MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); SmallVector<SDValue, 8> MaskVec; + DebugLoc dl = Op.getNode()->getDebugLoc(); for (unsigned i = 0; i != NumElems; ++i) { SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) { - MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); + MaskVec.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT)); continue; } assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); @@ -2730,14 +2738,14 @@ static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1, } std::swap(V1, V2); - Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); - return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems); + return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask); } /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming /// the two vector operands have swapped position. static -SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG) { +SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG, DebugLoc dl) { MVT MaskVT = Mask.getValueType(); MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); @@ -2745,7 +2753,7 @@ SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG) { for (unsigned i = 0; i != NumElems; ++i) { SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) { - MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); + MaskVec.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT)); continue; } assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); @@ -2755,7 +2763,7 @@ SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG) { else MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); } - return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems); } @@ -2897,7 +2905,8 @@ static bool isZeroShuffle(SDNode *N) { /// getZeroVector - Returns a vector of specified type with all zero elements. /// -static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { +static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG, + DebugLoc dl) { assert(VT.isVector() && "Expected a vector type"); // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest @@ -2905,20 +2914,20 @@ static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { SDValue Vec; if (VT.getSizeInBits() == 64) { // MMX SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); } else if (HasSSE2) { // SSE2 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); } else { // SSE1 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); - Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4f32, Cst, Cst, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); } - return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } /// getOnesVector - Returns a vector of specified type with all bits set. 
/// -static SDValue getOnesVector(MVT VT, SelectionDAG &DAG) { +static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) { assert(VT.isVector() && "Expected a vector type"); // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest @@ -2926,10 +2935,10 @@ static SDValue getOnesVector(MVT VT, SelectionDAG &DAG) { SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Vec; if (VT.getSizeInBits() == 64) // MMX - Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); else // SSE - Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); - return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } @@ -2954,14 +2963,15 @@ static SDValue NormalizeMask(SDValue Mask, SelectionDAG &DAG) { } if (Changed) - Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), + Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getNode()->getDebugLoc(), + Mask.getValueType(), &MaskVec[0], MaskVec.size()); return Mask; } /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd /// operation of specified width. -static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG, DebugLoc dl) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); @@ -2969,12 +2979,14 @@ static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) MaskVec.push_back(DAG.getConstant(i, BaseVT)); - return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation /// of specified width. -static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG, + DebugLoc dl) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); SmallVector<SDValue, 8> MaskVec; @@ -2982,12 +2994,14 @@ static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(i, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); } - return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation /// of specified width. 
-static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG, + DebugLoc dl) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); unsigned Half = NumElems/2; @@ -2996,14 +3010,15 @@ static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); } - return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps /// element #0 of a vector with the specified index, leaving the rest of the /// elements in place. static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, - SelectionDAG &DAG) { + SelectionDAG &DAG, DebugLoc dl) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); SmallVector<SDValue, 8> MaskVec; @@ -3011,7 +3026,8 @@ static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); - return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. @@ -3024,6 +3040,7 @@ static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) { SDValue Mask = Op.getOperand(2); unsigned MaskNumElems = Mask.getNumOperands(); unsigned NumElems = MaskNumElems; + DebugLoc dl = Op.getNode()->getDebugLoc(); // Special handling of v4f32 -> v4i32. if (VT != MVT::v4f32) { // Find which element we want to splat. 
@@ -3032,22 +3049,22 @@ static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) { // unpack elements to the correct location while (NumElems > 4) { if (EltNo < NumElems/2) { - Mask = getUnpacklMask(MaskNumElems, DAG); + Mask = getUnpacklMask(MaskNumElems, DAG, dl); } else { - Mask = getUnpackhMask(MaskNumElems, DAG); + Mask = getUnpackhMask(MaskNumElems, DAG, dl); EltNo -= NumElems/2; } - V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); + V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V1, Mask); NumElems >>= 1; } SDValue Cst = DAG.getConstant(EltNo, MVT::i32); - Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); } - V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1); - SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, + V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); + SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, PVT, V1, DAG.getNode(ISD::UNDEF, PVT), Mask); - return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuffle); } /// isVectorLoad - Returns true if the node is a vector load, a scalar @@ -3073,21 +3090,23 @@ static SDValue CanonicalizeMovddup(SDValue Op, SDValue V1, SDValue Mask, MVT VT = Op.getValueType(); if (VT == PVT) return Op; + DebugLoc dl = Op.getNode()->getDebugLoc(); unsigned NumElems = PVT.getVectorNumElements(); if (NumElems == 2) { SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); } else { assert(NumElems == 4); SDValue Cst0 = DAG.getTargetConstant(0, MVT::i32); SDValue Cst1 = DAG.getTargetConstant(1, MVT::i32); - Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst0, Cst1, Cst0, Cst1); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + Cst0, Cst1, Cst0, Cst1); } - V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1); - SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, - DAG.getNode(ISD::UNDEF, PVT), Mask); - return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); + V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); + SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, PVT, V1, + DAG.getNode(ISD::UNDEF, dl, PVT), Mask); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuffle); } /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified @@ -3097,9 +3116,10 @@ static SDValue CanonicalizeMovddup(SDValue Op, SDValue V1, SDValue Mask, static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, bool isZero, bool HasSSE2, SelectionDAG &DAG) { + DebugLoc dl = V2.getNode()->getDebugLoc(); MVT VT = V2.getValueType(); SDValue V1 = isZero - ? getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT); + ? 
getZeroVector(VT, HasSSE2, DAG, dl) : DAG.getNode(ISD::UNDEF, dl, VT); unsigned NumElems = V2.getValueType().getVectorNumElements(); MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT EVT = MaskVT.getVectorElementType(); @@ -3109,9 +3129,9 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, MaskVec.push_back(DAG.getConstant(NumElems, EVT)); else MaskVec.push_back(DAG.getConstant(i, EVT)); - SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], MaskVec.size()); - return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); + return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask); } /// getNumOfConsecutiveZeros - Return the number of elements in a result of @@ -3186,15 +3206,16 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, if (NumNonZero > 8) return SDValue(); + DebugLoc dl = Op.getNode()->getDebugLoc(); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 16; ++i) { bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; if (ThisIsNonZero && First) { if (NumZero) - V = getZeroVector(MVT::v8i16, true, DAG); + V = getZeroVector(MVT::v8i16, true, DAG, dl); else - V = DAG.getNode(ISD::UNDEF, MVT::v8i16); + V = DAG.getNode(ISD::UNDEF, dl, MVT::v8i16); First = false; } @@ -3202,24 +3223,25 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, SDValue ThisElt(0, 0), LastElt(0, 0); bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; if (LastIsNonZero) { - LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); + LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, + MVT::i16, Op.getOperand(i-1)); } if (ThisIsNonZero) { - ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); - ThisElt = DAG.getNode(ISD::SHL, MVT::i16, + ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); + ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, ThisElt, DAG.getConstant(8, MVT::i8)); if (LastIsNonZero) - ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); + ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); } else ThisElt = LastElt; if (ThisElt.getNode()) - V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, + V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, DAG.getIntPtrConstant(i/2)); } } - return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); + return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V); } /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 
@@ -3230,6 +3252,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, if (NumNonZero > 4) return SDValue(); + DebugLoc dl = Op.getNode()->getDebugLoc(); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 8; ++i) { @@ -3237,12 +3260,13 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, if (isNonZero) { if (First) { if (NumZero) - V = getZeroVector(MVT::v8i16, true, DAG); + V = getZeroVector(MVT::v8i16, true, DAG, dl); else - V = DAG.getNode(ISD::UNDEF, MVT::v8i16); + V = DAG.getNode(ISD::UNDEF, dl, MVT::v8i16); First = false; } - V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), + V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, + MVT::v8i16, V, Op.getOperand(i), DAG.getIntPtrConstant(i)); } } @@ -3254,18 +3278,19 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, /// static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp, unsigned NumBits, SelectionDAG &DAG, - const TargetLowering &TLI) { + const TargetLowering &TLI, DebugLoc dl) { bool isMMX = VT.getSizeInBits() == 64; MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64; unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; - SrcOp = DAG.getNode(ISD::BIT_CONVERT, ShVT, SrcOp); - return DAG.getNode(ISD::BIT_CONVERT, VT, - DAG.getNode(Opc, ShVT, SrcOp, + SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, + DAG.getNode(Opc, dl, ShVT, SrcOp, DAG.getConstant(NumBits, TLI.getShiftAmountTy()))); } SDValue X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { + DebugLoc dl = Op.getNode()->getDebugLoc(); // All zero's are handled with pxor, all one's are handled with pcmpeqd. if (ISD::isBuildVectorAllZeros(Op.getNode()) || ISD::isBuildVectorAllOnes(Op.getNode())) { @@ -3276,8 +3301,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { return Op; if (ISD::isBuildVectorAllOnes(Op.getNode())) - return getOnesVector(Op.getValueType(), DAG); - return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG); + return getOnesVector(Op.getValueType(), DAG, dl); + return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl); } MVT VT = Op.getValueType(); @@ -3308,7 +3333,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { if (NumNonZero == 0) { // All undef vector. Return an UNDEF. All zero vectors were handled above. - return DAG.getNode(ISD::UNDEF, VT); + return DAG.getNode(ISD::UNDEF, dl, VT); } // Special case for single non-zero, non-undef, element. @@ -3330,8 +3355,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // Truncate the value (which may itself be a constant) to i32, and // convert it to a vector with movd (S2V+shuffle to zero extend). - Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item); - Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item); + Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); + Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(), DAG); @@ -3339,12 +3364,12 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // a vector. If Idx != 0, swizzle it into place. 
if (Idx != 0) { SDValue Ops[] = { - Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), - getSwapEltZeroMask(VecElts, Idx, DAG) + Item, DAG.getNode(ISD::UNDEF, dl, Item.getValueType()), + getSwapEltZeroMask(VecElts, Idx, DAG, dl) }; - Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3); + Item = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VecVT, Ops, 3); } - return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item); + return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item); } } @@ -3356,7 +3381,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { if (Idx == 0 && // Don't do this for i64 values on x86-32. (EVT != MVT::i64 || Subtarget->is64Bit())) { - Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); + Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget->hasSSE2(), DAG); @@ -3368,7 +3393,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { unsigned NumBits = VT.getSizeInBits(); return getVShift(true, VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(1)), - NumBits/2, DAG, *this); + NumBits/2, DAG, *this, dl); } if (IsAllConstants) // Otherwise, it's better to do a constpool load. @@ -3380,7 +3405,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // movd/movss) to move this into the low element, then shuffle it into // place. if (EVTBits == 32) { - Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); + Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); // Turn it into a shuffle of zero and zero-extended scalar to vector. Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, @@ -3390,9 +3415,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i < NumElems; i++) MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); - SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, |