From d5f323d70bd2d9bc8a63a68bfe439a69e0104bbf Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Tue, 12 Apr 2011 22:46:31 +0000
Subject: Remove the unaligned load intrinsics in favor of using native
 unaligned loads.

Now that we have a first-class way to represent unaligned loads, the
unaligned load intrinsics are superfluous.

First part of .

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129401 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/Assembler/AutoUpgradeIntrinsics.ll | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/test/Assembler/AutoUpgradeIntrinsics.ll b/test/Assembler/AutoUpgradeIntrinsics.ll
index 6752bd8281..e4e2d3a56e 100644
--- a/test/Assembler/AutoUpgradeIntrinsics.ll
+++ b/test/Assembler/AutoUpgradeIntrinsics.ll
@@ -7,6 +7,8 @@
 ; RUN: llvm-as < %s | llvm-dis | \
 ; RUN:   not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
 ; RUN: llvm-as < %s | llvm-dis | \
+; RUN:   not grep {llvm\\.x86\\.sse2\\.loadu}
+; RUN: llvm-as < %s | llvm-dis | \
 ; RUN:   grep {llvm\\.x86\\.mmx\\.ps} | grep {x86_mmx} | count 16
 
 declare i32 @llvm.ctpop.i28(i28 %val)
@@ -79,3 +81,13 @@ define void @sh64(<1 x i64> %A, <2 x i32> %B) {
   %r2 = call <1 x i64> @llvm.x86.mmx.psrl.q( <1 x i64> %A, <2 x i32> %B )	; <<1 x i64>> [#uses=0]
   ret void
 }
+
+declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone
+declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone
+declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone
+define void @test_loadu(i8* %a, double* %b) {
+  %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a)
+  %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a)
+  %v2 = call <2 x double> @llvm.x86.sse2.loadu.pd(double* %b)
+  ret void
+}
--
cgit v1.2.3-18-g5258
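
Note: the commit message says these intrinsic calls are now expressed as
native unaligned loads. A sketch of the expected auto-upgrade result, in
2011-era LLVM IR (the %p and %v0 names are illustrative, not taken from
this commit):

    ; before: the intrinsic form being removed
    %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a)

    ; after: a native load marked unaligned via "align 1"
    %p  = bitcast i8* %a to <4 x float>*
    %v0 = load <4 x float>* %p, align 1

Because the load carries its (lack of) alignment directly, no
target-specific intrinsic is needed to express the unaligned access.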