Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/CellSPU/SPUISelDAGToDAG.cpp |   6
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp |  86
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp | 125
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp     | 113
4 files changed, 225 insertions, 105 deletions
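Every hunk below applies the same mechanical rewrite: the SelectionDAG::getBUILD_VECTOR convenience call is replaced by the generic getNode entry point with an explicit ISD::BUILD_VECTOR opcode, and the result value type now follows the DebugLoc argument. A minimal sketch of the before/after call shape; the helper name buildSplatV4I32 is hypothetical and only gives the snippet context, with DAG, dl and T standing for the SelectionDAG, DebugLoc and splatted operand already in scope at each call site:

// Illustrative sketch only, not part of the patch.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue buildSplatV4I32(SelectionDAG &DAG, DebugLoc dl, SDValue T) {
  // Old form, removed by this change:
  //   return DAG.getBUILD_VECTOR(MVT::v4i32, dl, T, T, T, T);
  // New form: the generic node constructor with the BUILD_VECTOR opcode;
  // the result value type now comes after the debug location.
  return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T, T, T, T);
}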
diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index d9d0330bc2..23bb08c0c0 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -705,7 +705,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { /*NOTREACHED*/ break; case MVT::i32: - shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x00010203, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), @@ -713,7 +713,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { break; case MVT::i16: - shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x80800203, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), @@ -721,7 +721,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { break; case MVT::i8: - shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x80808003, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 9b65de16a6..3a8fb5dcab 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -920,7 +920,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) { uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble()); SDValue T = DAG.getConstant(dbits, MVT::i64); - SDValue Tvec = DAG.getBUILD_VECTOR(MVT::v2i64, dl, T, T); + SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T); return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec)); } @@ -1620,7 +1620,8 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // NOTE: pretend the constant is an integer. LLVM won't load FP constants SDValue T = DAG.getConstant(Value32, MVT::i32); return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, - DAG.getBUILD_VECTOR(MVT::v4i32, dl, T, T, T, T)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v4i32, T, T, T, T)); break; } case MVT::v2f64: { @@ -1630,7 +1631,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // NOTE: pretend the constant is an integer. 
LLVM won't load FP constants SDValue T = DAG.getConstant(f64val, MVT::i64); return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, - DAG.getBUILD_VECTOR(MVT::v2i64, dl, T, T)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T)); break; } case MVT::v16i8: { @@ -1640,7 +1641,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { for (int i = 0; i < 8; ++i) Ops[i] = DAG.getConstant(Value16, MVT::i16); return DAG.getNode(ISD::BIT_CONVERT, dl, VT, - DAG.getBUILD_VECTOR(MVT::v8i16, dl, Ops, 8)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, Ops, 8)); } case MVT::v8i16: { unsigned short Value16; @@ -1651,17 +1652,17 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { SDValue T = DAG.getConstant(Value16, VT.getVectorElementType()); SDValue Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = T; - return DAG.getBUILD_VECTOR(VT, dl, Ops, 8); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops, 8); } case MVT::v4i32: { unsigned int Value = SplatBits; SDValue T = DAG.getConstant(Value, VT.getVectorElementType()); - return DAG.getBUILD_VECTOR(VT, dl, T, T, T, T); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T); } case MVT::v2i32: { unsigned int Value = SplatBits; SDValue T = DAG.getConstant(Value, VT.getVectorElementType()); - return DAG.getBUILD_VECTOR(VT, dl, T, T); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T); } case MVT::v2i64: { return SPU::LowerSplat_v2i64(VT, DAG, SplatBits, dl); @@ -1681,8 +1682,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal, // Magic constant that can be matched by IL, ILA, et. al. SDValue Val = DAG.getTargetConstant(upper, MVT::i32); return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, - DAG.getBUILD_VECTOR(MVT::v4i32, dl, - Val, Val, Val, Val)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + Val, Val, Val, Val)); } else { SDValue LO32; SDValue HI32; @@ -1702,16 +1703,16 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal, if (!lower_special) { SDValue LO32C = DAG.getConstant(lower, MVT::i32); LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, - DAG.getBUILD_VECTOR(MVT::v4i32, dl, - LO32C, LO32C, LO32C, LO32C)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + LO32C, LO32C, LO32C, LO32C)); } // Create upper vector if not a special pattern if (!upper_special) { SDValue HI32C = DAG.getConstant(upper, MVT::i32); HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, - DAG.getBUILD_VECTOR(MVT::v4i32, dl, - HI32C, HI32C, HI32C, HI32C)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + HI32C, HI32C, HI32C, HI32C)); } // If either upper or lower are special, then the two input operands are @@ -1724,8 +1725,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal, // Unhappy situation... 
both upper and lower are special, so punt with // a target constant: SDValue Zero = DAG.getConstant(0, MVT::i32); - HI32 = LO32 = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Zero, Zero, - Zero, Zero); + HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Zero, Zero, + Zero, Zero); } for (int i = 0; i < 4; ++i) { @@ -1755,8 +1756,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal, } return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32, - DAG.getBUILD_VECTOR(MVT::v4i32, dl, - &ShufBytes[0], ShufBytes.size())); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + &ShufBytes[0], ShufBytes.size())); } } @@ -1885,8 +1886,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { } } - SDValue VPermMask = DAG.getBUILD_VECTOR(MVT::v16i8, dl, - &ResultMask[0], ResultMask.size()); + SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, + &ResultMask[0], ResultMask.size()); return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask); } } @@ -1920,8 +1921,8 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { for (size_t j = 0; j < n_copies; ++j) ConstVecValues.push_back(CValue); - return DAG.getBUILD_VECTOR(Op.getValueType(), dl, - &ConstVecValues[0], ConstVecValues.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(), + &ConstVecValues[0], ConstVecValues.size()); } else { // Otherwise, copy the value from one register to another: switch (Op0.getValueType().getSimpleVT()) { @@ -2021,9 +2022,9 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { ShufMask[i] = DAG.getConstant(bits, MVT::i32); } - SDValue ShufMaskVec = - DAG.getBUILD_VECTOR(MVT::v4i32, dl, - &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0])); + SDValue ShufMaskVec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + &ShufMask[0], + sizeof(ShufMask) / sizeof(ShufMask[0])); retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(), @@ -2066,29 +2067,29 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { /*NOTREACHED*/ case MVT::i8: { SDValue factor = DAG.getConstant(0x00000000, MVT::i32); - replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor, - factor, factor); + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, + factor, factor); break; } case MVT::i16: { SDValue factor = DAG.getConstant(0x00010001, MVT::i32); - replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor, - factor, factor); + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, + factor, factor); break; } case MVT::i32: case MVT::f32: { SDValue factor = DAG.getConstant(0x00010203, MVT::i32); - replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor, - factor, factor); + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, + factor, factor); break; } case MVT::i64: case MVT::f64: { SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32); SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32); - replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, - loFactor, hiFactor, loFactor, hiFactor); + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + loFactor, hiFactor, loFactor, hiFactor); break; } } @@ -2248,8 +2249,8 @@ SDValue SPU::getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) { ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32)); - return DAG.getBUILD_VECTOR(MVT::v4i32, dl, - &ShufBytes[0], ShufBytes.size()); + return 
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + &ShufBytes[0], ShufBytes.size()); } //! Generate the borrow-generate shuffle mask @@ -2263,8 +2264,8 @@ SDValue SPU::getBorrowGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) { ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32)); - return DAG.getBUILD_VECTOR(MVT::v4i32, dl, - &ShufBytes[0], ShufBytes.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + &ShufBytes[0], ShufBytes.size()); } //! Lower byte immediate operations for v16i8 vectors: @@ -2308,7 +2309,8 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) { tcVec[i] = tc; return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg, - DAG.getBUILD_VECTOR(VT, dl, tcVec, tcVecSize)); + DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + tcVec, tcVecSize)); } } @@ -2661,11 +2663,11 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) unsigned maskHigh = 0x08090a0b; unsigned maskLow = 0x0c0d0e0f; // Use a shuffle to perform the truncation - SDValue shufMask = DAG.getBUILD_VECTOR(MVT::v4i32, dl, - DAG.getConstant(maskHigh, MVT::i32), - DAG.getConstant(maskLow, MVT::i32), - DAG.getConstant(maskHigh, MVT::i32), - DAG.getConstant(maskLow, MVT::i32)); + SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + DAG.getConstant(maskHigh, MVT::i32), + DAG.getConstant(maskLow, MVT::i32), + DAG.getConstant(maskHigh, MVT::i32), + DAG.getConstant(maskLow, MVT::i32)); SDValue PromoteScalar = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 6af60da36c..2c97b998a0 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -3093,6 +3093,100 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) { // Vector related lowering. // +// If this is a vector of constants or undefs, get the bits. A bit in +// UndefBits is set if the corresponding element of the vector is an +// ISD::UNDEF value. For undefs, the corresponding VectorBits values are +// zero. Return true if this is not an array of constants, false if it is. +// +static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], + uint64_t UndefBits[2]) { + // Start with zero'd results. + VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; + + unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); + for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { + SDValue OpVal = BV->getOperand(i); + + unsigned PartNo = i >= e/2; // In the upper 128 bits? + unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. + + uint64_t EltBits = 0; + if (OpVal.getOpcode() == ISD::UNDEF) { + uint64_t EltUndefBits = ~0U >> (32-EltBitSize); + UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); + continue; + } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { + EltBits = CN->getZExtValue() & (~0U >> (32-EltBitSize)); + } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { + assert(CN->getValueType(0) == MVT::f32 && + "Only one legal FP vector type!"); + EltBits = FloatToBits(CN->getValueAPF().convertToFloat()); + } else { + // Nonconstant element. 
+ return true; + } + + VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); + } + + //printf("%llx %llx %llx %llx\n", + // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); + return false; +} + +// If this is a splat (repetition) of a value across the whole vector, return +// the smallest size that splats it. For example, "0x01010101010101..." is a +// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and +// SplatSize = 1 byte. +static bool isConstantSplat(const uint64_t Bits128[2], + const uint64_t Undef128[2], + unsigned &SplatBits, unsigned &SplatUndef, + unsigned &SplatSize) { + + // Don't let undefs prevent splats from matching. See if the top 64-bits are + // the same as the lower 64-bits, ignoring undefs. + if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) + return false; // Can't be a splat if two pieces don't match. + + uint64_t Bits64 = Bits128[0] | Bits128[1]; + uint64_t Undef64 = Undef128[0] & Undef128[1]; + + // Check that the top 32-bits are the same as the lower 32-bits, ignoring + // undefs. + if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) + return false; // Can't be a splat if two pieces don't match. + + uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); + uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); + + // If the top 16-bits are different than the lower 16-bits, ignoring + // undefs, we have an i32 splat. + if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { + SplatBits = Bits32; + SplatUndef = Undef32; + SplatSize = 4; + return true; + } + + uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); + uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); + + // If the top 8-bits are different than the lower 8-bits, ignoring + // undefs, we have an i16 splat. + if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { + SplatBits = Bits16; + SplatUndef = Undef16; + SplatSize = 2; + return true; + } + + // Otherwise, we have an 8-bit splat. + SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); + SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); + SplatSize = 1; + return true; +} + /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, @@ -3115,7 +3209,8 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); SmallVector<SDValue, 8> Ops; Ops.assign(CanonicalVT.getVectorNumElements(), Elt); - SDValue Res = DAG.getBUILD_VECTOR(CanonicalVT, dl, &Ops[0], Ops.size()); + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, + &Ops[0], Ops.size()); return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res); } @@ -3152,7 +3247,7 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, for (unsigned i = 0; i != 16; ++i) Ops[i] = DAG.getConstant(i+Amt, MVT::i8); SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops,16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16)); return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); } @@ -3167,20 +3262,20 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // UndefBits is set if the corresponding element of the vector is an // ISD::UNDEF value. For undefs, the corresponding VectorBits values are // zero. 
+ uint64_t VectorBits[2]; + uint64_t UndefBits[2]; DebugLoc dl = Op.getDebugLoc(); - BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); - assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); - - uint64_t SplatBits; - uint64_t SplatUndef; - unsigned SplatSize; - bool HasAnyUndefs; + if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits)) + return SDValue(); // Not a constant vector. // If this is a splat (repetition) of a value across the whole vector, return // the smallest size that splats it. For example, "0x01010101010101..." is a // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and // SplatSize = 1 byte. - if (BVN->isConstantSplat(HasAnyUndefs, SplatBits, SplatUndef, SplatSize)) { + unsigned SplatBits, SplatUndef, SplatSize; + if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ + bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; + // First, handle single instruction cases. // All zeros? @@ -3188,7 +3283,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // Canonicalize all zero vectors to be v4i32. if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { SDValue Z = DAG.getConstant(0, MVT::i32); - Z = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Z, Z, Z, Z); + Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z); } return Op; @@ -3401,7 +3496,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(), OpLHS, OpRHS, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16)); } /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this @@ -3524,8 +3619,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, MVT::i8)); } - SDValue VPermMask = DAG.getBUILD_VECTOR(MVT::v16i8, dl, - &ResultMask[0], ResultMask.size()); + SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, + &ResultMask[0], ResultMask.size()); return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); } @@ -3713,7 +3808,7 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) { Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); } return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16)); } else { assert(0 && "Unknown mul to lower!"); abort(); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c73d8982ff..d18c283c4f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2746,7 +2746,7 @@ static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1, } std::swap(V1, V2); - Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], NumElems); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask); } @@ -2771,7 +2771,7 @@ SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG, DebugLoc dl) { else MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); } - return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], NumElems); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems); } @@ -2922,13 +2922,13 @@ static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG, SDValue Vec; if (VT.getSizeInBits() == 64) { // MMX SDValue Cst = 
DAG.getTargetConstant(0, MVT::i32); - Vec = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); } else if (HasSSE2) { // SSE2 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Vec = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); } else { // SSE1 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); - Vec = DAG.getBUILD_VECTOR(MVT::v4f32, dl, Cst, Cst, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); } return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } @@ -2943,9 +2943,9 @@ static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) { SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Vec; if (VT.getSizeInBits() == 64) // MMX - Vec = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); else // SSE - Vec = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } @@ -2971,8 +2971,9 @@ static SDValue NormalizeMask(SDValue Mask, SelectionDAG &DAG) { } if (Changed) - Mask = DAG.getBUILD_VECTOR(Mask.getValueType(), Mask.getDebugLoc(), - &MaskVec[0], MaskVec.size()); + Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getDebugLoc(), + Mask.getValueType(), + &MaskVec[0], MaskVec.size()); return Mask; } @@ -2986,7 +2987,8 @@ static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG, DebugLoc dl) { MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) MaskVec.push_back(DAG.getConstant(i, BaseVT)); - return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation @@ -3000,7 +3002,8 @@ static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG, MaskVec.push_back(DAG.getConstant(i, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); } - return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation @@ -3015,7 +3018,8 @@ static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG, MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); } - return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps @@ -3030,7 +3034,8 @@ static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT)); - return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); } /// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. 
@@ -3061,7 +3066,7 @@ static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) { NumElems >>= 1; } SDValue Cst = DAG.getConstant(EltNo, MVT::i32); - Mask = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); } V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); @@ -3097,12 +3102,13 @@ static SDValue CanonicalizeMovddup(SDValue Op, SDValue V1, SDValue Mask, unsigned NumElems = PVT.getVectorNumElements(); if (NumElems == 2) { SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Mask = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); } else { assert(NumElems == 4); SDValue Cst0 = DAG.getTargetConstant(0, MVT::i32); SDValue Cst1 = DAG.getTargetConstant(1, MVT::i32); - Mask = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst0, Cst1, Cst0, Cst1); + Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + Cst0, Cst1, Cst0, Cst1); } V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); @@ -3131,7 +3137,8 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, MaskVec.push_back(DAG.getConstant(NumElems, EVT)); else MaskVec.push_back(DAG.getConstant(i, EVT)); - SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask); } @@ -3417,7 +3424,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i < NumElems; i++) MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); - SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, Item, DAG.getUNDEF(VT), Mask); } @@ -3506,8 +3514,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); else MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); - SDValue ShufMask = DAG.getBUILD_VECTOR(MaskVT, dl, - &MaskVec[0], MaskVec.size()); + SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V[0], V[1], ShufMask); } @@ -3612,7 +3620,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, SmallVector<SDValue,8> MaskV; MaskV.push_back(DAG.getConstant(BestLoQuad < 0 ? 0 : BestLoQuad, MVT::i64)); MaskV.push_back(DAG.getConstant(BestHiQuad < 0 ? 
1 : BestHiQuad, MVT::i64)); - SDValue Mask = DAG.getBUILD_VECTOR(MVT::v2i64, dl, &MaskV[0], 2); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, &MaskV[0], 2); NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v2i64, DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1), @@ -3658,7 +3666,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, MVT::i16)); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, NewV, DAG.getUNDEF(MVT::v8i16), - DAG.getBUILD_VECTOR(MVT::v8i16, dl, &MaskV[0], 8)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, + &MaskV[0], 8)); } } @@ -3685,7 +3694,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, } V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1); V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, &pshufbMask[0], 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v16i8, &pshufbMask[0], 16)); if (!TwoInputs) return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); @@ -3704,7 +3714,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, } V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2); V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, &pshufbMask[0], 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v16i8, &pshufbMask[0], 16)); V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); } @@ -3730,7 +3741,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, MaskV.push_back(DAG.getConstant(i, MVT::i16)); NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, NewV, DAG.getUNDEF(MVT::v8i16), - DAG.getBUILD_VECTOR(MVT::v8i16, dl, &MaskV[0], 8)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v8i16, &MaskV[0], 8)); } // If BestHi >= 0, generate a pshufhw to put the high elements in order, @@ -3753,7 +3765,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, } NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, NewV, DAG.getUNDEF(MVT::v8i16), - DAG.getBUILD_VECTOR(MVT::v8i16, dl, &MaskV[0], 8)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v8i16, &MaskV[0], 8)); } // In case BestHi & BestLo were both -1, which means each quadword has a word @@ -3839,7 +3852,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(SDValue V1, SDValue V2, if (V2Only) V1 = V2; V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, &pshufbMask[0], 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v16i8, &pshufbMask[0], 16)); if (!TwoInputs) return V1; @@ -3855,7 +3869,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(SDValue V1, SDValue V2, pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); } V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, - DAG.getBUILD_VECTOR(MVT::v16i8, dl, &pshufbMask[0], 16)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v16i8, &pshufbMask[0], 16)); return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); } @@ -3963,7 +3978,8 @@ SDValue RewriteAsNarrowerShuffle(SDValue V1, SDValue V2, V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1); V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, NewVT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size())); + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskVec[0], MaskVec.size())); } /// getVZextMovL - Return a zero-extending vector move low node. @@ -4040,7 +4056,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, // The second shuffle, which takes the first shuffle as both of its // vector operands, put the elements into the right order. 
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], Mask1.size())); + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &Mask1[0], Mask1.size())); SmallVector<SDValue, 8> Mask2(4, DAG.getUNDEF(MaskEVT)); for (unsigned i = 0; i != 4; ++i) { @@ -4054,8 +4071,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, } return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V1, - DAG.getBUILD_VECTOR(MaskVT, dl, - &Mask2[0], Mask2.size())); + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &Mask2[0], Mask2.size())); } else if (NumLo == 3 || NumHi == 3) { // Otherwise, we must have three elements from one vector, call it X, and // one element from the other, call it Y. First, use a shufps to build an @@ -4086,7 +4103,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, Mask1[2] = PermMask.getOperand(HiIndex^1); Mask1[3] = DAG.getUNDEF(MaskEVT); V2 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4)); + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &Mask1[0], 4)); if (HiIndex >= 2) { Mask1[0] = PermMask.getOperand(0); @@ -4094,7 +4111,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, Mask1[2] = DAG.getConstant(HiIndex & 1 ? 6 : 4, MaskEVT); Mask1[3] = DAG.getConstant(HiIndex & 1 ? 4 : 6, MaskEVT); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MaskVT, &Mask1[0], 4)); } else { Mask1[0] = DAG.getConstant(HiIndex & 1 ? 2 : 0, MaskEVT); Mask1[1] = DAG.getConstant(HiIndex & 1 ? 0 : 2, MaskEVT); @@ -4109,7 +4127,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, DAG.getConstant(cast<ConstantSDNode>(Mask1[3])->getZExtValue()+4, MaskEVT); return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V2, V1, - DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4)); + DAG.getNode(ISD::BUILD_VECTOR, dl, + MaskVT, &Mask1[0], 4)); } } @@ -4143,10 +4162,10 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, } SDValue LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &LoMask[0], LoMask.size())); SDValue HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, - DAG.getBUILD_VECTOR(MaskVT, dl, + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &HiMask[0], HiMask.size())); SmallVector<SDValue, 8> MaskOps; for (unsigned i = 0; i != 4; ++i) { @@ -4158,7 +4177,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, } } return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, LoShuffle, HiShuffle, - DAG.getBUILD_VECTOR(MaskVT, dl, &MaskOps[0], MaskOps.size())); + DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &MaskOps[0], MaskOps.size())); } SDValue @@ -4494,7 +4514,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { push_back(DAG.getUNDEF(MaskVT.getVectorElementType())); IdxVec. push_back(DAG.getUNDEF(MaskVT.getVectorElementType())); - SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &IdxVec[0], IdxVec.size()); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &IdxVec[0], IdxVec.size()); SDValue Vec = Op.getOperand(0); Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Vec.getValueType(), Vec, DAG.getUNDEF(Vec.getValueType()), Mask); @@ -4516,7 +4537,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType())); IdxVec. 
push_back(DAG.getUNDEF(MaskVT.getVectorElementType())); - SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &IdxVec[0], IdxVec.size()); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, + &IdxVec[0], IdxVec.size()); SDValue Vec = Op.getOperand(0); Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Vec.getValueType(), Vec, DAG.getUNDEF(Vec.getValueType()), @@ -5017,13 +5039,13 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(4, MVT::i32)); |
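For reference, a standalone sketch (not part of the patch) of the splat-narrowing idea behind the isConstantSplat helper added to PPCISelLowering.cpp above: starting from the 64-bit pattern, the element size is halved as long as the upper and lower halves agree. The function name is hypothetical, undef-lane handling is omitted for brevity, and a return value of 8 here corresponds to the case where the real helper reports that no splat was found.

#include <cassert>
#include <cstdint>

// Hypothetical illustration of the narrowing check; the real helper also
// folds in the undef bits so that undef lanes never block a splat match.
static unsigned smallestSplatSizeInBytes(uint64_t Bits) {
  if (uint32_t(Bits) != uint32_t(Bits >> 32)) return 8;  // 64-bit halves differ: no splat
  uint32_t B32 = uint32_t(Bits);
  if (uint16_t(B32) != uint16_t(B32 >> 16)) return 4;    // i32 splat
  uint16_t B16 = uint16_t(B32);
  if (uint8_t(B16) != uint8_t(B16 >> 8)) return 2;       // i16 splat
  return 1;                                              // i8 splat
}

int main() {
  assert(smallestSplatSizeInBytes(0x0101010101010101ULL) == 1);
  assert(smallestSplatSizeInBytes(0x0001000100010001ULL) == 2);
  assert(smallestSplatSizeInBytes(0x0001000200010002ULL) == 4);
  return 0;
}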