path: root/lib
author     Chris Lattner <sabre@nondot.org>   2012-01-26 21:37:55 +0000
committer  Chris Lattner <sabre@nondot.org>   2012-01-26 21:37:55 +0000
commit     6b0dc92043ab1f63d78b8796098575e1d777b701 (patch)
tree       2ff62b3e597697a2569a0f9940de2f12c32a7168 /lib
parent     5b676ce7932cf60d4fd6d101323d8d54b8395804 (diff)
progress making the world safe for ConstantDataVector. While we're at it, allow PatternMatch's "neg" pattern to match integer vector negations, and enhance ComputeNumSignBits to handle shl of vectors.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@149082 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp  84
-rw-r--r--  lib/Analysis/ValueTracking.cpp    31
-rw-r--r--  lib/CodeGen/MachineFunction.cpp    5
3 files changed, 64 insertions, 56 deletions
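
The recurring idiom in this patch is to stop assuming a constant vector is a ConstantVector (with Use operands) and to accept ConstantDataVector as well, reading elements through Constant::getAggregateElement(). Below is a minimal standalone sketch of that idiom, not part of the commit: the helper name is invented, the header path assumes the 2012-era layout (llvm/Constants.h), and only APIs that actually appear in the hunks are used.

#include "llvm/Constants.h"
using namespace llvm;

// Sketch: read element i of a constant vector in a way that works for both
// ConstantVector and ConstantDataVector.  A ConstantDataVector stores packed
// element data rather than Use operands, so getOperand(i) is unavailable;
// Constant::getAggregateElement(i) covers both representations.
static ConstantInt *getVectorEltAsConstantInt(Constant *C, unsigned i) {
  // Mirrors the FIXME'd checks below: plain ConstantVector support is kept
  // for now but slated for removal once everything is ConstantDataVector.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return 0;
  // Null if the element is a ConstantExpr or the index is out of range,
  // matching the "reject constantexpr elements" bail-outs in FoldBitCast.
  return dyn_cast_or_null<ConstantInt>(C->getAggregateElement(i));
}
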
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 6a49e6d98a..fe28926f1b 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -65,17 +65,17 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
// If this is a bitcast from constant vector -> vector, fold it.
- ConstantVector *CV = dyn_cast<ConstantVector>(C);
- if (CV == 0)
+ // FIXME: Remove ConstantVector support.
+ if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
- unsigned NumSrcElt = CV->getNumOperands();
+ unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
- Type *SrcEltTy = CV->getType()->getElementType();
+ Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
@@ -95,7 +95,6 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
- if (!C) return ConstantExpr::getBitCast(C, DestTy);
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
@@ -109,8 +108,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
- CV = dyn_cast<ConstantVector>(C);
- if (!CV) // If VMCore wasn't able to fold it, bail out.
+ // If VMCore wasn't able to fold it, bail out.
+ if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
+ !isa<ConstantDataVector>(C))
return C;
}
@@ -132,7 +132,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
+ Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -149,28 +149,29 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
Result.push_back(Elt);
}
- } else {
- // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
- unsigned Ratio = NumDstElt/NumSrcElt;
- unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+ return ConstantVector::get(Result);
+ }
+
+ // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
+ unsigned Ratio = NumDstElt/NumSrcElt;
+ unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+
+ // Loop over each source value, expanding into multiple results.
+ for (unsigned i = 0; i != NumSrcElt; ++i) {
+ Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
+ if (!Src) // Reject constantexpr elements.
+ return ConstantExpr::getBitCast(C, DestTy);
- // Loop over each source value, expanding into multiple results.
- for (unsigned i = 0; i != NumSrcElt; ++i) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
- if (!Src) // Reject constantexpr elements.
- return ConstantExpr::getBitCast(C, DestTy);
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
+ for (unsigned j = 0; j != Ratio; ++j) {
+ // Shift the piece of the value into the right place, depending on
+ // endianness.
+ Constant *Elt = ConstantExpr::getLShr(Src,
+ ConstantInt::get(Src->getType(), ShiftAmt));
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
- unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
- for (unsigned j = 0; j != Ratio; ++j) {
- // Shift the piece of the value into the right place, depending on
- // endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
- ConstantInt::get(Src->getType(), ShiftAmt));
- ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
- // Truncate and remember this piece.
- Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
- }
+ // Truncate and remember this piece.
+ Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
@@ -311,6 +312,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
// not reached.
}
+ // FIXME: Remove ConstantVector
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
isa<ConstantDataSequential>(C)) {
Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
@@ -1115,11 +1117,8 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
-static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
- Type *Ty) {
- assert(Op && "Called with NULL operand");
- APFloat Val(Op->getValueAPF());
-
+static Constant *ConstantFoldConvertToInt(const APFloat &Val,
+ bool roundTowardZero, Type *Ty) {
// All of these conversion intrinsics form an integer of at most 64bits.
unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
assert(ResultWidth <= 64 &&
@@ -1271,24 +1270,31 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
}
- if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
+ // Support ConstantVector in case we have an Undef in the top.
+ if (isa<ConstantVector>(Operands[0]) ||
+ isa<ConstantDataVector>(Operands[0])) {
+ Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/false, Ty);
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
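
For the restructured narrowing case in FoldBitCast above, e.g. bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>), the following standalone illustration shows the same shift-and-truncate arithmetic on plain integers. It is not LLVM code; only the Ratio/ShiftAmt scheme is taken from the hunk, and the little-endian example values are invented.

#include <stdint.h>
#include <vector>

// Each wide source element expands into Ratio narrow destination elements,
// low piece first on a little-endian target, exactly as the loop above does
// with ConstantExpr::getLShr followed by ConstantExpr::getTrunc.
static std::vector<uint32_t> splitI64ToI32(const std::vector<uint64_t> &Src,
                                           bool isLittleEndian) {
  const unsigned DstBitSize = 32, Ratio = 2;   // one i64 -> two i32 pieces
  std::vector<uint32_t> Result;
  for (size_t i = 0, e = Src.size(); i != e; ++i) {
    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece into the low bits, then truncate and remember it.
      Result.push_back(uint32_t(Src[i] >> ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
    }
  }
  return Result;
}
// e.g. splitI64ToI32({0x00000001FFFFFFFFull}, /*isLittleEndian=*/true)
//      yields {0xFFFFFFFF, 0x00000001}.
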
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 21008a1467..6403f03b01 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -89,6 +89,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
+ // FIXME: Remove.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
KnownZero.setAllBits(); KnownOne.setAllBits();
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
@@ -1005,30 +1006,28 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
- case Instruction::AShr:
+ case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- // ashr X, C -> adds C sign bits.
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
- Tmp += C->getZExtValue();
+ // ashr X, C -> adds C sign bits. Vectors too.
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
+ Tmp += ShAmt->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
- // vector ashr X, <C, C, C, C> -> adds C sign bits
- if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
- if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
- Tmp += CI->getZExtValue();
- if (Tmp > TyBits) Tmp = TyBits;
- }
- }
return Tmp;
- case Instruction::Shl:
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ }
+ case Instruction::Shl: {
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- if (C->getZExtValue() >= TyBits || // Bad shift.
- C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
- return Tmp - C->getZExtValue();
+ Tmp2 = ShAmt->getZExtValue();
+ if (Tmp2 >= TyBits || // Bad shift.
+ Tmp2 >= Tmp) break; // Shifted all sign bits out.
+ return Tmp - Tmp2;
}
break;
+ }
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: // NOT is handled here.
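
The ComputeNumSignBits change above folds the old scalar-ConstantInt path and the splat-ConstantVector path into a single m_APInt match. A hedged sketch of that idiom in isolation follows; the helper name is invented, and the header path assumes the 2012-era tree where PatternMatch lives under llvm/Support.

#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Sketch: add the sign bits contributed by "ashr X, C", where the shift
// amount may be a scalar ConstantInt or a splatted constant vector --
// m_APInt is intended to match both forms, as the hunk above relies on.
static unsigned addAShrSignBits(Value *ShAmtOp, unsigned TyBits,
                                unsigned KnownSignBits) {
  const APInt *ShAmt;
  if (match(ShAmtOp, m_APInt(ShAmt))) {
    KnownSignBits += ShAmt->getZExtValue();   // ashr X, C adds C sign bits
    if (KnownSignBits > TyBits)
      KnownSignBits = TyBits;                 // clamp to the type width
  }
  return KnownSignBits;
}
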
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 6bfd97055c..a7204670b1 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -655,9 +655,12 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
if (A->getType() == B->getType()) return false;
// For now, only support constants with the same size.
- if (TD->getTypeStoreSize(A->getType()) != TD->getTypeStoreSize(B->getType()))
+ uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
+ if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
+ StoreSize > 128)
return false;
+
// If a floating-point value and an integer value have the same encoding,
// they can share a constant-pool entry.
if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
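
Finally, the MachineFunction.cpp hunk above tightens CanShareConstantPoolEntry: two constants are only considered for sharing a constant-pool entry when their store sizes match, and constants whose store size (as returned by TargetData::getTypeStoreSize) exceeds 128 are skipped entirely. A trivial restatement of that guard follows, with invented names and the TargetData header path assumed from the 2012-era tree.

#include "llvm/Constants.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// Sketch of the tightened precondition: mismatched or very wide constants
// never share an entry; only when this passes is the element-by-element
// encoding comparison worth doing.
static bool sizesPermitSharing(const Constant *A, const Constant *B,
                               const TargetData *TD) {
  uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
  return StoreSize == TD->getTypeStoreSize(B->getType()) && StoreSize <= 128;
}
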