author    | Chris Lattner <sabre@nondot.org> | 2006-04-08 07:13:46 +0000
committer | Chris Lattner <sabre@nondot.org> | 2006-04-08 07:13:46 +0000
commit    | a7cdc88799fa32d731e68ce14d42c3a27df8b3eb (patch)
tree      | 31182028358d9ffec87a67dd1e23860cc0d66913
parent    | 140a58f9dfda30dbb80edd3da1b5632c178f7efc (diff)
add new testcase
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27537 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | test/CodeGen/PowerPC/vec_shuffle.ll | 16 |
-rw-r--r-- | test/CodeGen/PowerPC/vec_spat.ll    | 12 |
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/PowerPC/vec_shuffle.ll b/test/CodeGen/PowerPC/vec_shuffle.ll
index efea8f21e4..44562fe169 100644
--- a/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -253,6 +253,21 @@ void %tw_h(<4 x int>* %A, <4 x int>* %B) {
 entry:
         %tmp = load <4 x int>* %A               ; <<4 x int>> [#uses=2]
         %tmp2 = load <4 x int>* %B              ; <<4 x int>> [#uses=2]
+        %tmp = extractelement <4 x int> %tmp2, uint 0           ; <int> [#uses=1]
+        %tmp3 = extractelement <4 x int> %tmp, uint 0           ; <int> [#uses=1]
+        %tmp4 = extractelement <4 x int> %tmp2, uint 1          ; <int> [#uses=1]
+        %tmp5 = extractelement <4 x int> %tmp, uint 1           ; <int> [#uses=1]
+        %tmp6 = insertelement <4 x int> undef, int %tmp, uint 0         ; <<4 x int>> [#uses=1]
+        %tmp7 = insertelement <4 x int> %tmp6, int %tmp3, uint 1        ; <<4 x int>> [#uses=1]
+        %tmp8 = insertelement <4 x int> %tmp7, int %tmp4, uint 2        ; <<4 x int>> [#uses=1]
+        %tmp9 = insertelement <4 x int> %tmp8, int %tmp5, uint 3        ; <<4 x int>> [#uses=1]
+        store <4 x int> %tmp9, <4 x int>* %A
+        ret void
+}
+
+void %tw_h_flop(<4 x int>* %A, <4 x int>* %B) {
+        %tmp = load <4 x int>* %A               ; <<4 x int>> [#uses=2]
+        %tmp2 = load <4 x int>* %B              ; <<4 x int>> [#uses=2]
         %tmp = extractelement <4 x int> %tmp, uint 0            ; <int> [#uses=1]
         %tmp3 = extractelement <4 x int> %tmp2, uint 0          ; <int> [#uses=1]
         %tmp4 = extractelement <4 x int> %tmp, uint 1           ; <int> [#uses=1]
@@ -265,6 +280,7 @@ entry:
         ret void
 }
 
+
 void %VMRG_UNARY_tb_l(<16 x sbyte>* %A, <16 x sbyte>* %B) {
 entry:
         %tmp = load <16 x sbyte>* %A            ; <<16 x sbyte>> [#uses=16]
diff --git a/test/CodeGen/PowerPC/vec_spat.ll b/test/CodeGen/PowerPC/vec_spat.ll
index c8c6e4a5e6..f6587b010e 100644
--- a/test/CodeGen/PowerPC/vec_spat.ll
+++ b/test/CodeGen/PowerPC/vec_spat.ll
@@ -1,7 +1,7 @@
 ; Test that vectors are scalarized/lowered correctly.
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g3 | grep stfs | wc -l | grep 4 &&
-; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 2 &&
+; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3 &&
 ; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplth | wc -l | grep 1
 
 %f4 = type <4 x float>
@@ -59,3 +59,13 @@ void %splat_h(short %tmp, <16 x ubyte>* %dst) {
         ret void
 }
 
+void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
+        ; Gets converted to 16 x ubyte
+        %tmp = load <16 x ubyte>* %B
+        %tmp = cast <16 x ubyte> %tmp to <16 x sbyte>
+        %tmp4 = sub <16 x sbyte> %tmp, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
+        %tmp4 = cast <16 x sbyte> %tmp4 to <16 x ubyte>
+        store <16 x ubyte> %tmp4, <16 x ubyte>* %A
+        ret void
+}
+
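As an aside for readers more used to current IR: the new tw_h_flop case spells out, through extractelement/insertelement, an interleave of the low halves of the two inputs (a0, b0, a1, b1), which is the lane order of a big-endian PowerPC vmrghw. A minimal sketch of the same pattern in present-day LLVM IR follows; the shufflevector form, opaque pointers, and i32 element type are conveniences of this sketch, not part of the committed test.

; sketch only: modern-syntax equivalent of the lane pattern in tw_h_flop
define void @tw_h_flop(ptr %A, ptr %B) {
entry:
        %a = load <4 x i32>, ptr %A
        %b = load <4 x i32>, ptr %B
        ; pick lanes a0, b0, a1, b1: the merge-high-word interleave
        %merged = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
        store <4 x i32> %merged, ptr %A
        ret void
}

The vec_spat.ll change is the matching half of the update: the new spltish body subtracts a constant that splats the halfword value 15, and the g5 RUN line's expected vsplti* count accordingly goes from 2 to 3.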