author     Eli Friedman <eli.friedman@gmail.com>  2008-06-19 06:09:51 +0000
committer  Eli Friedman <eli.friedman@gmail.com>  2008-06-19 06:09:51 +0000
commit     2c8e64d3e4a3daca9ec74c617a518d03d33eee90 (patch)
tree       c3da22e58f81480e3ded450d2ed84c89d73ba1af
parent     29b039976fd682716c6b8ed1cb7084226b2ad84b (diff)
Fix a bug with <8 x i16> shuffle lowering on X86 where parts of the
shuffle could be skipped. The check is invalid because the loop index i doesn't correspond to the element actually inserted. The correct check is already done a few lines earlier, for whether the element is already in the right spot, so this shouldn't have any effect on the codegen for code that was already correct.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@52486 91177308-0d34-0410-b5e6-96231b3b80d8
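For reference, a minimal scalar model of the <8 x i16> shufflevector semantics involved (an illustration only, not LLVM code): mask entries 0-7 select from the first operand and 8-15 from the second, so with the mask <0, 8, 8, 8, 8, 8, 8, 8> used in the new test below, lane 0 must be taken from element 0 of %a, which is why correct codegen has to emit a pinsrw for it.

    #include <array>
    #include <cstdint>

    using V8I16 = std::array<uint16_t, 8>;

    // Scalar reference for shufflevector on <8 x i16>: mask values 0-7
    // pick from v1, 8-15 pick from v2.
    V8I16 shuffle_v8i16(const V8I16 &v1, const V8I16 &v2,
                        const std::array<int, 8> &mask) {
      V8I16 out{};
      for (int i = 0; i < 8; ++i)
        out[i] = (mask[i] < 8) ? v1[mask[i]] : v2[mask[i] - 8];
      return out;
    }

With the test's mask and a zero second operand, the result is { a[0], 0, 0, 0, 0, 0, 0, 0 }, i.e. only the bottom element of %a survives, and it must actually be inserted.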
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp         2
-rw-r--r--  test/CodeGen/X86/2008-06-18-BadShuffle.ll  10
2 files changed, 10 insertions, 2 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4211508225..79629240b3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -3445,8 +3445,6 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
continue;
SDOperand Elt = MaskElts[i];
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
- if (EltIdx == i)
- continue;
SDOperand ExtOp = (EltIdx < 8)
? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
DAG.getConstant(EltIdx, PtrVT))
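A hedged sketch of why the removed check is unsound, using a scalar stand-in for the insert-based lowering rather than the actual SelectionDAG code (the names work and alreadyInPlace are hypothetical): by the time this loop runs, the working vector may have been rewritten, so lane i no longer necessarily holds original element i, and a lane may only be skipped on the strength of the earlier "element already in the right spot" check, never just because mask[i] == i.

    #include <array>
    #include <cstdint>

    using V8I16 = std::array<uint16_t, 8>;

    // work: the vector built so far (it may no longer be the original V1).
    // alreadyInPlace[i]: result of the earlier, correct check against the
    // original operands.
    V8I16 lowerByInsert(const V8I16 &v1, const V8I16 &v2,
                        const std::array<int, 8> &mask,
                        V8I16 work,
                        const std::array<bool, 8> &alreadyInPlace) {
      for (int i = 0; i < 8; ++i) {
        if (alreadyInPlace[i])
          continue;                     // legitimate skip, decided earlier
        // A check like `if (mask[i] == i) continue;` here would be wrong:
        // lane i of `work` need not hold original element i any more.
        uint16_t elt = (mask[i] < 8) ? v1[mask[i]] : v2[mask[i] - 8];
        work[i] = elt;                  // corresponds to a pinsrw into lane i
      }
      return work;
    }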
diff --git a/test/CodeGen/X86/2008-06-18-BadShuffle.ll b/test/CodeGen/X86/2008-06-18-BadShuffle.ll
new file mode 100644
index 0000000000..ba0a1f90ab
--- /dev/null
+++ b/test/CodeGen/X86/2008-06-18-BadShuffle.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=i386 -mattr=+sse2 | grep pinsrw
+
+; Test to make sure we actually insert the bottom element of the vector
+define <8 x i16> @a(<8 x i16> %a) nounwind {
+entry:
+ shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> < i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8 >
+ %add = add <8 x i16> %0, %a
+ ret <8 x i16> %add
+}
+