Diffstat (limited to 'test/CodeGen/NVPTX')
-rw-r--r-- | test/CodeGen/NVPTX/intrin-nocapture.ll | 21
-rw-r--r-- | test/CodeGen/NVPTX/vector-loads.ll     | 66
2 files changed, 87 insertions, 0 deletions
diff --git a/test/CodeGen/NVPTX/intrin-nocapture.ll b/test/CodeGen/NVPTX/intrin-nocapture.ll
new file mode 100644
index 0000000000..55781bb15a
--- /dev/null
+++ b/test/CodeGen/NVPTX/intrin-nocapture.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -O3 -S | FileCheck %s
+
+; Address space intrinsics were erroneously marked NoCapture, leading to bad
+; optimizations (such as the store below being eliminated as dead code). This
+; test makes sure we don't regress.
+
+declare void @foo(i32 addrspace(1)*)
+
+declare i32 addrspace(1)* @llvm.nvvm.ptr.gen.to.global.p1i32.p0i32(i32*)
+
+; CHECK: @bar
+define void @bar() {
+  %t1 = alloca i32
+; CHECK: call i32 addrspace(1)* @llvm.nvvm.ptr.gen.to.global.p1i32.p0i32(i32* %t1)
+; CHECK-NEXT: store i32 10, i32* %t1
+  %t2 = call i32 addrspace(1)* @llvm.nvvm.ptr.gen.to.global.p1i32.p0i32(i32* %t1)
+  store i32 10, i32* %t1
+  call void @foo(i32 addrspace(1)* %t2)
+  ret void
+}
+
diff --git a/test/CodeGen/NVPTX/vector-loads.ll b/test/CodeGen/NVPTX/vector-loads.ll
new file mode 100644
index 0000000000..f5a1795e3c
--- /dev/null
+++ b/test/CodeGen/NVPTX/vector-loads.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Even though general vector types are not supported in PTX, we can still
+; optimize loads/stores with pseudo-vector instructions of the form:
+;
+; ld.v2.f32 {%f0, %f1}, [%r0]
+;
+; which will load two floats at once into scalar registers.
+
+define void @foo(<2 x float>* %a) {
+; CHECK: .func foo
+; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <2 x float>* %a
+  %t2 = fmul <2 x float> %t1, %t1
+  store <2 x float> %t2, <2 x float>* %a
+  ret void
+}
+
+define void @foo2(<4 x float>* %a) {
+; CHECK: .func foo2
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <4 x float>* %a
+  %t2 = fmul <4 x float> %t1, %t1
+  store <4 x float> %t2, <4 x float>* %a
+  ret void
+}
+
+define void @foo3(<8 x float>* %a) {
+; CHECK: .func foo3
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+  %t1 = load <8 x float>* %a
+  %t2 = fmul <8 x float> %t1, %t1
+  store <8 x float> %t2, <8 x float>* %a
+  ret void
+}
+
+
+
+define void @foo4(<2 x i32>* %a) {
+; CHECK: .func foo4
+; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <2 x i32>* %a
+  %t2 = mul <2 x i32> %t1, %t1
+  store <2 x i32> %t2, <2 x i32>* %a
+  ret void
+}
+
+define void @foo5(<4 x i32>* %a) {
+; CHECK: .func foo5
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+  %t1 = load <4 x i32>* %a
+  %t2 = mul <4 x i32> %t1, %t1
+  store <4 x i32> %t2, <4 x i32>* %a
+  ret void
+}
+
+define void @foo6(<8 x i32>* %a) {
+; CHECK: .func foo6
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+  %t1 = load <8 x i32>* %a
+  %t2 = mul <8 x i32> %t1, %t1
+  store <8 x i32> %t2, <8 x i32>* %a
+  ret void
+}
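
A note on the first test: "nocapture" on a pointer argument promises that the callee does not retain any copy of the pointer, so if nothing can read the memory afterwards, a preceding store becomes dead and -O3 may delete it. A minimal sketch of that mechanism, in the same era's typed-pointer IR (function names hypothetical, not part of the patch):

    declare void @leak(i32*)              ; may capture its argument
    declare void @noleak(i32* nocapture)  ; promises not to capture

    define void @demo_keep() {
      %p = alloca i32
      call void @leak(i32* %p)  ; %p may now be visible elsewhere
      store i32 1, i32* %p      ; must stay: a captured copy could be read
      ret void
    }

    define void @demo_dse() {
      %q = alloca i32
      call void @noleak(i32* %q)
      store i32 1, i32* %q      ; dead: nothing can observe %q afterwards
      ret void
    }

The intrinsic in the test returns a pointer derived from its argument, so marking it nocapture puts it in the @noleak situation above, and the "store i32 10" is wrongly removed.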
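For the second test, the CHECK lines pin down how wide vectors are legalized: <2 x float> maps to a single ld.v2.f32, <4 x float> to ld.v4.f32, and <8 x float> to two consecutive ld.v4.f32 at offsets 0 and +16. The PTX for @foo would look roughly like this sketch (register numbers and instruction details illustrative, not taken from an actual llc run):

    // one 8-byte vector load fills two scalar float registers
    ld.v2.f32  {%f1, %f2}, [%r1];
    mul.f32    %f3, %f1, %f1;   // the fmul itself is scalarized
    mul.f32    %f4, %f2, %f2;
    // paired store writes both lanes back
    st.v2.f32  [%r1], {%f3, %f4};

The integer variants follow the same pattern, using ld.v2.u32/ld.v4.u32 into %r registers.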