author     Chris Lattner <sabre@nondot.org>   2009-07-23 05:32:17 +0000
committer  Chris Lattner <sabre@nondot.org>   2009-07-23 05:32:17 +0000
commit     f98d253bc5c64b6e79b9e9d3942bae41c30eb656 (patch)
tree       78bc19332e765bb71e2d05e1d7c62dd464e602d0
parent     e5f6bfffe7d72e4682fb6e5823776f1c7afc8c13 (diff)
Make some existing optimizations that would only trigger on scalars
also apply to vectors.  This allows us to compile this:

#include <emmintrin.h>
__m128i a(__m128 a, __m128 b) { return a==a & b==b; }
__m128i b(__m128 a, __m128 b) { return a!=a | b!=b; }

to:

_a:
	cmpordps	%xmm1, %xmm0
	ret
_b:
	cmpunordps	%xmm1, %xmm0
	ret

with clang instead of to a ton of horrible code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76863 91177308-0d34-0410-b5e6-96231b3b80d8
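As a point of reference (not part of the patch), the folded form the message shows maps directly onto the SSE compare intrinsics; a minimal C++ sketch, with illustrative function names:

#include <xmmintrin.h>

// cmpordps: each lane is all-ones when neither input lane is NaN.
__m128 both_ordered(__m128 a, __m128 b) { return _mm_cmpord_ps(a, b); }

// cmpunordps: each lane is all-ones when either input lane is NaN.
__m128 either_nan(__m128 a, __m128 b) { return _mm_cmpunord_ps(a, b); }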
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  22
-rw-r--r--  test/Transforms/InstCombine/vector-casts.ll     33
2 files changed, 51 insertions, 4 deletions
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index c3055ae482..000243ae64 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -3948,6 +3948,13 @@ Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
return new FCmpInst(*Context, FCmpInst::FCMP_ORD,
LHS->getOperand(0), RHS->getOperand(0));
}
+
+ // Handle vector zeros. This occurs because the canonical form of
+ // "fcmp ord x,x" is "fcmp ord x, 0".
+ if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
+ isa<ConstantAggregateZero>(RHS->getOperand(1)))
+ return new FCmpInst(*Context, FCmpInst::FCMP_ORD,
+ LHS->getOperand(0), RHS->getOperand(0));
return 0;
}
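
A note on why the new match is sound (a standalone sketch, not LLVM code): since 0.0 is never NaN, "fcmp ord x, 0" reduces to "x is not NaN", so the AND of two such compares is exactly one ordered compare of the two values, lane by lane:

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const float qnan = std::numeric_limits<float>::quiet_NaN();
  for (float a : {1.0f, qnan})
    for (float b : {2.0f, qnan}) {
      bool ord_a0 = !std::isnan(a);                   // fcmp ord a, 0.0
      bool ord_b0 = !std::isnan(b);                   // fcmp ord b, 0.0
      bool ord_ab = !std::isnan(a) && !std::isnan(b); // fcmp ord a, b
      assert((ord_a0 && ord_b0) == ord_ab);           // the fold is exact
    }
  return 0;
}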
@@ -4242,7 +4249,8 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() &&
+ SrcTy->isIntOrIntVector() &&
// Only do this if the casts both really cause code to be generated.
ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
I.getType(), TD) &&
@@ -4901,7 +4909,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
!isa<ICmpInst>(Op1C->getOperand(0))) {
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() &&
+ SrcTy->isIntOrIntVector() &&
// Only do this if the casts both really cause code to be
// generated.
ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
@@ -4937,6 +4946,15 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return new FCmpInst(*Context, FCmpInst::FCMP_UNO,
LHS->getOperand(0), RHS->getOperand(0));
}
+
+ // Handle vector zeros. This occurs because the canonical form of
+ // "fcmp uno x,x" is "fcmp uno x, 0".
+ if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
+ isa<ConstantAggregateZero>(RHS->getOperand(1)))
+ return new FCmpInst(*Context, FCmpInst::FCMP_UNO,
+ LHS->getOperand(0), RHS->getOperand(0));
+
+
} else {
Value *Op0LHS, *Op0RHS, *Op1LHS, *Op1RHS;
FCmpInst::Predicate Op0CC, Op1CC;
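
The visitOr half of the patch above is the exact dual: "fcmp uno x, 0" reduces to "x is NaN", so the OR of two such compares equals one unordered compare. The same kind of standalone check (again a sketch, not LLVM code):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const float qnan = std::numeric_limits<float>::quiet_NaN();
  for (float a : {1.0f, qnan})
    for (float b : {2.0f, qnan}) {
      bool uno_a0 = std::isnan(a);                  // fcmp uno a, 0.0
      bool uno_b0 = std::isnan(b);                  // fcmp uno b, 0.0
      bool uno_ab = std::isnan(a) || std::isnan(b); // fcmp uno a, b
      assert((uno_a0 || uno_b0) == uno_ab);         // the fold is exact
    }
  return 0;
}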
diff --git a/test/Transforms/InstCombine/vector-casts.ll b/test/Transforms/InstCombine/vector-casts.ll
index c6d1eaa1ff..e617185206 100644
--- a/test/Transforms/InstCombine/vector-casts.ll
+++ b/test/Transforms/InstCombine/vector-casts.ll
@@ -5,7 +5,7 @@ define <2 x i1> @test1(<2 x i64> %a) {
%t = trunc <2 x i64> %a to <2 x i1>
ret <2 x i1> %t
-; CHECK: define <2 x i1> @test1
+; CHECK: @test1
; CHECK: and <2 x i64> %a, <i64 1, i64 1>
; CHECK: icmp ne <2 x i64> %tmp, zeroinitializer
}
@@ -16,7 +16,36 @@ define <2 x i64> @test2(<2 x i64> %a) {
%t = ashr <2 x i64> %b, <i64 1, i64 1>
ret <2 x i64> %t
-; CHECK: define <2 x i64> @test2
+; CHECK: @test2
; CHECK: and <2 x i64> %a, <i64 65535, i64 65535>
; CHECK: lshr <2 x i64> %b, <i64 1, i64 1>
}
+
+
+
+define <2 x i64> @test3(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp ord <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp ord <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %and = and <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %and to <2 x i64>
+ ret <2 x i64> %conv
+
+; CHECK: @test3
+; CHECK: fcmp ord <4 x float> %a, %b
+}
+
+define <2 x i64> @test4(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp uno <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp uno <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %or = or <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %or to <2 x i64>
+ ret <2 x i64> %conv
+; CHECK: @test4
+; CHECK: fcmp uno <4 x float> %a, %b
+}