author     Bob Wilson <bob.wilson@apple.com>   2010-08-20 04:54:02 +0000
committer  Bob Wilson <bob.wilson@apple.com>   2010-08-20 04:54:02 +0000
commit     b31a11b466281b7e01cfde007b2041eefa2341e4 (patch)
tree       97ed9d7695da68596c898a0d552ee737ba678da2 /test/CodeGen
parent     2df9504fec1ddf198321c7fe8c968154b4edbff3 (diff)
Replace the arm.neon.vmovls and vmovlu intrinsics with vector sign-extend and
zero-extend operations.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@111614 91177308-0d34-0410-b5e6-96231b3b80d8
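
In practice the migration in the tests below is mechanical: each call to an @llvm.arm.neon.vmovls.* or @llvm.arm.neon.vmovlu.* intrinsic becomes a plain sext or zext on the same vector types, and codegen still emits the expected vmovl instruction (as the unchanged CHECK lines confirm). A minimal LLVM IR sketch of the signed <8 x i8> case; the function name @widen_s8 is illustrative, not part of this commit:

    ; before this commit:
    ;   %w = call <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8> %v)
    ; after this commit, the generic sign-extend is used instead,
    ; which the ARM backend still lowers to vmovl.s8:
    define <8 x i16> @widen_s8(<8 x i8> %v) nounwind {
      %w = sext <8 x i8> %v to <8 x i16>
      ret <8 x i16> %w
    }

The unsigned variants follow the same pattern with zext and vmovl.u8/u16/u32.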
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/neon-ops.ll  |  7
-rw-r--r--  test/CodeGen/ARM/vmov.ll      | 20
2 files changed, 6 insertions, 21 deletions
diff --git a/test/CodeGen/ARM/neon-ops.ll b/test/CodeGen/ARM/neon-ops.ll
deleted file mode 100644
index d7e893f64f..0000000000
--- a/test/CodeGen/ARM/neon-ops.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc -march=arm -mattr=+neon -O2 -o /dev/null
-
-; This used to crash.
-define <4 x i32> @test1(<4 x i16> %a) {
- %A = zext <4 x i16> %a to <4 x i32>
- ret <4 x i32> %A
-}
diff --git a/test/CodeGen/ARM/vmov.ll b/test/CodeGen/ARM/vmov.ll
index 5e872ab6d0..f863bf8a09 100644
--- a/test/CodeGen/ARM/vmov.ll
+++ b/test/CodeGen/ARM/vmov.ll
@@ -192,7 +192,7 @@ define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
;CHECK: vmovls8:
;CHECK: vmovl.s8
%tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8> %tmp1)
+ %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
ret <8 x i16> %tmp2
}
@@ -200,7 +200,7 @@ define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
;CHECK: vmovls16:
;CHECK: vmovl.s16
%tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %tmp1)
+ %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
ret <4 x i32> %tmp2
}
@@ -208,7 +208,7 @@ define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
;CHECK: vmovls32:
;CHECK: vmovl.s32
%tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32> %tmp1)
+ %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
ret <2 x i64> %tmp2
}
@@ -216,7 +216,7 @@ define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
;CHECK: vmovlu8:
;CHECK: vmovl.u8
%tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8> %tmp1)
+ %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
ret <8 x i16> %tmp2
}
@@ -224,7 +224,7 @@ define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
;CHECK: vmovlu16:
;CHECK: vmovl.u16
%tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16> %tmp1)
+ %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
ret <4 x i32> %tmp2
}
@@ -232,18 +232,10 @@ define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
;CHECK: vmovlu32:
;CHECK: vmovl.u32
%tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32> %tmp1)
+ %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
ret <2 x i64> %tmp2
}
-declare <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32>) nounwind readnone
-
define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
;CHECK: vmovni16:
;CHECK: vmovn.i16