author     Bob Wilson <bob.wilson@apple.com>  2009-08-11 05:39:44 +0000
committer  Bob Wilson <bob.wilson@apple.com>  2009-08-11 05:39:44 +0000
commit     b0abb4dc4203b903d8d0b48a952ba0a6312eeeb7 (patch)
tree       da1622ea7dce5183b471d7df05bcc92522bfe737 /test/CodeGen/ARM/vld1.ll
parent     9b6a53a0cee762ca3e454eadceaa128f000e6474 (diff)
Use vAny type to get rid of Neon intrinsics that differed only in whether
the overloaded vector types allowed floating-point or integer vector elements.
Most of these operations actually depend on the element type, so bitcasting
was not an option. If you include the vpadd intrinsics that I updated earlier,
this gets rid of 20 intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78646 91177308-0d34-0410-b5e6-96231b3b80d8
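For readers unfamiliar with the mechanism: "vAny" is the overloaded any-vector type (llvm_anyvector_ty in TableGen), so a single intrinsic definition can be instantiated at both integer and floating-point vector types. A minimal sketch of such a definition follows; the operand and attribute lists are illustrative assumptions, not copied from this commit's IntrinsicsARM.td.

    // Sketch only: the overloaded (vAny) result type covers v8i8, v4i16,
    // v2f32, v4f32, etc., so the separate vld1i/vld1f pairs collapse into
    // a single vld1 intrinsic. Operand type and attributes are assumptions.
    def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],  // result: any vector type
                                      [llvm_anyptr_ty],     // address to load from
                                      [IntrReadMem]>;

In the diff below, this shows up as the same @llvm.arm.neon.vld1.* base name being called at both integer (v8i8, v4i16, v2i32, v1i64, ...) and floating-point (v2f32, v4f32) vector types.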
Diffstat (limited to 'test/CodeGen/ARM/vld1.ll')
-rw-r--r--  test/CodeGen/ARM/vld1.ll  40
1 file changed, 20 insertions, 20 deletions
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index d5191338c9..81f1bdec9e 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -3,81 +3,81 @@
define <8 x i8> @vld1i8(i8* %A) nounwind {
;CHECK: vld1i8:
;CHECK: vld1.8
- %tmp1 = call <8 x i8> @llvm.arm.neon.vld1i.v8i8(i8* %A)
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A)
ret <8 x i8> %tmp1
}
define <4 x i16> @vld1i16(i16* %A) nounwind {
;CHECK: vld1i16:
;CHECK: vld1.16
- %tmp1 = call <4 x i16> @llvm.arm.neon.vld1i.v4i16(i16* %A)
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i16* %A)
ret <4 x i16> %tmp1
}
define <2 x i32> @vld1i32(i32* %A) nounwind {
;CHECK: vld1i32:
;CHECK: vld1.32
- %tmp1 = call <2 x i32> @llvm.arm.neon.vld1i.v2i32(i32* %A)
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i32* %A)
ret <2 x i32> %tmp1
}
define <2 x float> @vld1f(float* %A) nounwind {
;CHECK: vld1f:
;CHECK: vld1.32
- %tmp1 = call <2 x float> @llvm.arm.neon.vld1f.v2f32(float* %A)
+ %tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(float* %A)
ret <2 x float> %tmp1
}
define <1 x i64> @vld1i64(i64* %A) nounwind {
;CHECK: vld1i64:
;CHECK: vld1.64
- %tmp1 = call <1 x i64> @llvm.arm.neon.vld1i.v1i64(i64* %A)
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i64* %A)
ret <1 x i64> %tmp1
}
define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;CHECK: vld1Qi8:
;CHECK: vld1.8
- %tmp1 = call <16 x i8> @llvm.arm.neon.vld1i.v16i8(i8* %A)
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A)
ret <16 x i8> %tmp1
}
define <8 x i16> @vld1Qi16(i16* %A) nounwind {
;CHECK: vld1Qi16:
;CHECK: vld1.16
- %tmp1 = call <8 x i16> @llvm.arm.neon.vld1i.v8i16(i16* %A)
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i16* %A)
ret <8 x i16> %tmp1
}
define <4 x i32> @vld1Qi32(i32* %A) nounwind {
;CHECK: vld1Qi32:
;CHECK: vld1.32
- %tmp1 = call <4 x i32> @llvm.arm.neon.vld1i.v4i32(i32* %A)
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i32* %A)
ret <4 x i32> %tmp1
}
define <4 x float> @vld1Qf(float* %A) nounwind {
;CHECK: vld1Qf:
;CHECK: vld1.32
- %tmp1 = call <4 x float> @llvm.arm.neon.vld1f.v4f32(float* %A)
+ %tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(float* %A)
ret <4 x float> %tmp1
}
define <2 x i64> @vld1Qi64(i64* %A) nounwind {
;CHECK: vld1Qi64:
;CHECK: vld1.64
- %tmp1 = call <2 x i64> @llvm.arm.neon.vld1i.v2i64(i64* %A)
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i64* %A)
ret <2 x i64> %tmp1
}
-declare <8 x i8> @llvm.arm.neon.vld1i.v8i8(i8*) nounwind readonly
-declare <4 x i16> @llvm.arm.neon.vld1i.v4i16(i16*) nounwind readonly
-declare <2 x i32> @llvm.arm.neon.vld1i.v2i32(i32*) nounwind readonly
-declare <2 x float> @llvm.arm.neon.vld1f.v2f32(float*) nounwind readonly
-declare <1 x i64> @llvm.arm.neon.vld1i.v1i64(i64*) nounwind readonly
+declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*) nounwind readonly
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i16*) nounwind readonly
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i32*) nounwind readonly
+declare <2 x float> @llvm.arm.neon.vld1.v2f32(float*) nounwind readonly
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i64*) nounwind readonly
-declare <16 x i8> @llvm.arm.neon.vld1i.v16i8(i8*) nounwind readonly
-declare <8 x i16> @llvm.arm.neon.vld1i.v8i16(i16*) nounwind readonly
-declare <4 x i32> @llvm.arm.neon.vld1i.v4i32(i32*) nounwind readonly
-declare <4 x float> @llvm.arm.neon.vld1f.v4f32(float*) nounwind readonly
-declare <2 x i64> @llvm.arm.neon.vld1i.v2i64(i64*) nounwind readonly
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*) nounwind readonly
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i16*) nounwind readonly
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i32*) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(float*) nounwind readonly
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i64*) nounwind readonly
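Note on the ;CHECK lines: this test is driven by FileCheck. The RUN line sits above the hunk shown here, and by the conventions of that era it would pipe the file through llc (roughly "llc -march=arm -mattr=+neon | FileCheck %s") and match the emitted vld1.8/vld1.16/vld1.32/vld1.64 instructions against the ;CHECK patterns. The exact RUN line is not visible in this diff, so treat that command as an assumption.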