Diffstat (limited to 'test/CodeGen/PowerPC/vec_spat.ll')
-rw-r--r-- | test/CodeGen/PowerPC/vec_spat.ll | 12
1 file changed, 11 insertions, 1 deletion
diff --git a/test/CodeGen/PowerPC/vec_spat.ll b/test/CodeGen/PowerPC/vec_spat.ll
index c8c6e4a5e6..f6587b010e 100644
--- a/test/CodeGen/PowerPC/vec_spat.ll
+++ b/test/CodeGen/PowerPC/vec_spat.ll
@@ -1,7 +1,7 @@
 ; Test that vectors are scalarized/lowered correctly.
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g3 | grep stfs | wc -l | grep 4 &&
-; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 2 &&
+; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3 &&
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplth | wc -l | grep 1
 
 %f4 = type <4 x float>
@@ -59,3 +59,13 @@ void %splat_h(short %tmp, <16 x ubyte>* %dst) {
 	ret void
 }
 
+void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
+	; Gets converted to 16 x ubyte
+	%tmp = load <16 x ubyte>* %B
+	%tmp = cast <16 x ubyte> %tmp to <16 x sbyte>
+	%tmp4 = sub <16 x sbyte> %tmp, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
+	%tmp4 = cast <16 x sbyte> %tmp4 to <16 x ubyte>
+	store <16 x ubyte> %tmp4, <16 x ubyte>* %A
+	ret void
+}
+
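Note (not part of the commit): the new %spltish test subtracts a splatted <8 x short> constant of 15, bitcast to bytes, from a loaded byte vector, and the RUN line's expected vsplti count is bumped from 2 to 3 so that the splatted constant is matched when it is materialized with a vector-splat-immediate instruction (vspltish) instead of a constant-pool load. The following is a minimal sketch of roughly the same source-level pattern written with AltiVec intrinsics in C; the function and variable names are illustrative only, and the codegen comments describe the expected lowering rather than output verified here.

/* Hypothetical C analogue of the new spltish test case. */
#include <altivec.h>

void spltish_example(vector unsigned char *A, vector unsigned char *B) {
    /* Splat of the halfword immediate 15; expected to lower to vspltish. */
    vector signed short fifteen = vec_splat_s16(15);

    /* Reinterpret both operands as signed bytes, mirroring the casts
     * in the IR, then do a byte-wise subtract (vsububm). */
    vector signed char b = (vector signed char)*B;
    *A = (vector unsigned char)vec_sub(b, (vector signed char)fifteen);
}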