diff options
| author | Nadav Rotem <nadav.rotem@intel.com> | 2011-09-13 19:17:42 +0000 |
|---|---|---|
| committer | Nadav Rotem <nadav.rotem@intel.com> | 2011-09-13 19:17:42 +0000 |
| commit | aec5861bb6ace3734163c000cb75ca2e22e29caa (patch) | |
| tree | b554e33aa701259868dc31f14a20761fc497514c /test/CodeGen | |
| parent | 48ae99fac4010e6bbe5550fd914cc879091049fb (diff) | |
Add vselect target support for targets that do not support blend but do support
xor/and/or (for example, SSE2).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139623 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
| -rw-r--r-- | test/CodeGen/X86/sse2-blend.ll | 37 |
1 file changed, 37 insertions, 0 deletions
diff --git a/test/CodeGen/X86/sse2-blend.ll b/test/CodeGen/X86/sse2-blend.ll new file mode 100644 index 0000000000..20b732508a --- /dev/null +++ b/test/CodeGen/X86/sse2-blend.ll @@ -0,0 +1,37 @@ +; RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 + +define void@vsel_float(<4 x float>* %v1, <4 x float>* %v2) { + %A = load <4 x float>* %v1 + %B = load <4 x float>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %A, <4 x float> %B + store <4 x float > %vsel, <4 x float>* %v1 + ret void +} + +define void@vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) { + %A = load <4 x i32>* %v1 + %B = load <4 x i32>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B + store <4 x i32 > %vsel, <4 x i32>* %v1 + ret void +} + + +define void@vsel_i64(<4 x i64>* %v1, <4 x i64>* %v2) { + %A = load <4 x i64>* %v1 + %B = load <4 x i64>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %A, <4 x i64> %B + store <4 x i64 > %vsel, <4 x i64>* %v1 + ret void +} + + +define void@vsel_double(<4 x double>* %v1, <4 x double>* %v2) { + %A = load <4 x double>* %v1 + %B = load <4 x double>* %v2 + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %A, <4 x double> %B + store <4 x double > %vsel, <4 x double>* %v1 + ret void +} + + |
