author | Eli Bendersky <eliben@chromium.org> | 2013-07-18 18:00:27 -0700
committer | Eli Bendersky <eliben@chromium.org> | 2013-07-18 18:00:27 -0700
commit | 4412ea4b8e019d00dc7574fe1723eea0473a8ec1 (patch)
tree | 2badd5ce0727bfad02f10d0d82c8bcfa65677676 /test
parent | 4a9f2a703db400ccf760f34101bcdd57642f96e4 (diff)
parent | 5b548094edef39376e17445aea28ad2b37d701c4 (diff)
Merge remote-tracking branch 'origin/master'
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/X86/no-global-in-disp-x86-64.ll | 50
-rw-r--r-- | test/NaCl/PNaClABI/abi-alignment.ll | 27
-rw-r--r-- | test/NaCl/PNaClABI/abi-stripped-pointers.ll | 2
-rw-r--r-- | test/NaCl/PNaClABI/instructions.ll | 25
-rw-r--r-- | test/NaCl/PNaClABI/intrinsics.ll | 18
-rw-r--r-- | test/NaCl/PNaClLLC/lit.local.cfg | 1
-rw-r--r-- | test/NaCl/PNaClLLC/test-runs-verify.ll | 13
-rw-r--r-- | test/Transforms/NaCl/atomics.ll | 471
-rw-r--r-- | test/Transforms/NaCl/pnacl-abi-simplify-postopt.ll | 1
-rw-r--r-- | test/Transforms/NaCl/resolve-pnacl-intrinsics.ll | 152
10 files changed, 725 insertions, 35 deletions
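
For orientation before the diff body: the new atomics tests below model the legacy GCC `__sync_*` builtins named in their comments. A minimal C sketch of that correspondence, assuming a GCC-compatible compiler (the function names here are hypothetical and this C code is not part of the commit):

```c
#include <stdint.h>

/* Each __sync_* builtin lowers to a seq_cst atomic IR instruction
 * (atomicrmw, cmpxchg, fence), which the -nacl-rewrite-atomics pass
 * tested below then rewrites into an @llvm.nacl.atomic.* call. */
int32_t fetch_and_add(int32_t *ptr, int32_t value) {
  return __sync_fetch_and_add(ptr, value);        /* -> atomicrmw add ... seq_cst */
}

int32_t val_compare_and_swap(int32_t *ptr, int32_t oldval, int32_t newval) {
  return __sync_val_compare_and_swap(ptr, oldval, newval); /* -> cmpxchg ... seq_cst */
}

void synchronize(void) {
  __sync_synchronize();                           /* -> fence seq_cst */
}
```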
diff --git a/test/CodeGen/X86/no-global-in-disp-x86-64.ll b/test/CodeGen/X86/no-global-in-disp-x86-64.ll
new file mode 100644
index 0000000000..db911ccff9
--- /dev/null
+++ b/test/CodeGen/X86/no-global-in-disp-x86-64.ll
@@ -0,0 +1,50 @@
+; RUN: pnacl-llc -O2 -mtriple=x86_64-none-nacl < %s | \
+; RUN:   FileCheck %s --check-prefix=NACLON
+; RUN: pnacl-llc -O2 -mtriple=x86_64-linux < %s | \
+; RUN:   FileCheck %s --check-prefix=NACLOFF
+
+; This test is derived from the following C code:
+;
+; int myglobal[100];
+; void test(int arg)
+; {
+;   myglobal[arg] = arg;
+;   myglobal[arg+1] = arg;
+; }
+; int main(int argc, char **argv)
+; {
+;   test(argc);
+; }
+;
+; The goal is NOT to produce an instruction with "myglobal" as the
+; displacement value in any addressing mode, e.g. this (bad) instruction:
+;
+;   movl %eax, %nacl:myglobal(%r15,%rax,4)
+;
+; The NACLOFF test is a canary that tries to ensure that the NACLON test is
+; testing the right thing.  If the NACLOFF test starts failing, it's likely
+; that the LLVM -O2 optimizations are no longer generating the problematic
+; pattern that NACLON tests for.  In that case, the test should be modified.
+
+
+@myglobal = global [100 x i32] zeroinitializer, align 4
+
+define void @test(i32 %arg) #0 {
+entry:
+; NACLON: test:
+; NACLON-NOT: mov{{.*}}nacl:myglobal(
+; NACLOFF: test:
+; NACLOFF: mov{{.*}}myglobal(
+  %arg.addr = alloca i32, align 4
+  store i32 %arg, i32* %arg.addr, align 4
+  %0 = load i32* %arg.addr, align 4
+  %1 = load i32* %arg.addr, align 4
+  %arrayidx = getelementptr inbounds [100 x i32]* @myglobal, i32 0, i32 %1
+  store i32 %0, i32* %arrayidx, align 4
+  %2 = load i32* %arg.addr, align 4
+  %3 = load i32* %arg.addr, align 4
+  %add = add nsw i32 %3, 1
+  %arrayidx1 = getelementptr inbounds [100 x i32]* @myglobal, i32 0, i32 %add
+  store i32 %2, i32* %arrayidx1, align 4
+  ret void
+}
diff --git a/test/NaCl/PNaClABI/abi-alignment.ll b/test/NaCl/PNaClABI/abi-alignment.ll
index a31914e4be..743807f07d 100644
--- a/test/NaCl/PNaClABI/abi-alignment.ll
+++ b/test/NaCl/PNaClABI/abi-alignment.ll
@@ -1,8 +1,7 @@
 ; RUN: pnacl-abicheck < %s | FileCheck %s
 
 ; Test the "align" attributes that are allowed on load and store
-; instructions.  Note that "cmpxchg" and "atomicrmw" do not take
-; "align" attributes, so are not tested here.
+; instructions.
 
 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
 
@@ -27,14 +26,6 @@ define internal void @allowed_cases(i32 %ptr, float %f, double %d) {
   store double %d, double* %ptr.double, align 1
   store double %d, double* %ptr.double, align 8
 
-  ; Stricter alignments are required for atomics.
-  load atomic i32* %ptr.i32 seq_cst, align 4
-  store atomic i32 123, i32* %ptr.i32 seq_cst, align 4
-  load atomic float* %ptr.float seq_cst, align 4
-  store atomic float %f, float* %ptr.float seq_cst, align 4
-  load atomic double* %ptr.double seq_cst, align 8
-  store atomic double %d, double* %ptr.double seq_cst, align 8
-
   ; memcpy() et el take an alignment parameter, which is allowed to be 1.
   %ptr.p = inttoptr i32 %ptr to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
@@ -77,22 +68,6 @@ define internal void @rejected_cases(i32 %ptr, float %f, double %d, i32 %align)
 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 2
 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 4
 
-  ; Too-small alignments for atomics are rejected.
-  load atomic i32* %ptr.i32 seq_cst, align 2
-  load atomic float* %ptr.float seq_cst, align 2
-  load atomic double* %ptr.double seq_cst, align 4
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 2
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 2
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 4
-
-  ; Too-large alignments for atomics are also rejected.
-  load atomic i32* %ptr.i32 seq_cst, align 8
-  load atomic float* %ptr.float seq_cst, align 8
-  load atomic double* %ptr.double seq_cst, align 16
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 8
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 8
-; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 16
-
   ; Non-pessimistic alignments for memcpy() et al are rejected.
   %ptr.p = inttoptr i32 %ptr to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
diff --git a/test/NaCl/PNaClABI/abi-stripped-pointers.ll b/test/NaCl/PNaClABI/abi-stripped-pointers.ll
index 8f23cbce16..5fccf52300 100644
--- a/test/NaCl/PNaClABI/abi-stripped-pointers.ll
+++ b/test/NaCl/PNaClABI/abi-stripped-pointers.ll
@@ -41,8 +41,6 @@ define internal void @allowed_cases(i32 %arg) {
 ; These instructions may use a NormalizedPtr, which may be a global.
   load i32* @ptr, align 1
   store i32 123, i32* @ptr, align 1
-  cmpxchg i32* @ptr, i32 1, i32 2 seq_cst
-  atomicrmw add i32* @ptr, i32 3 seq_cst
 
 ; A NormalizedPtr may be a bitcast.
   %ptr_bitcast = bitcast [4 x i8]* @var to i32*
diff --git a/test/NaCl/PNaClABI/instructions.ll b/test/NaCl/PNaClABI/instructions.ll
index eb659cbffb..1f4dfd1838 100644
--- a/test/NaCl/PNaClABI/instructions.ll
+++ b/test/NaCl/PNaClABI/instructions.ll
@@ -75,12 +75,29 @@ define internal void @memory() {
   %ptr = inttoptr i32 0 to i32*
   %a2 = load i32* %ptr, align 1
   store i32 undef, i32* %ptr, align 1
-  fence acq_rel
-  %a3 = cmpxchg i32* %ptr, i32 undef, i32 undef acq_rel
-  %a4 = atomicrmw add i32* %ptr, i32 1 acquire
 ; CHECK-NOT: disallowed
 ; CHECK: disallowed: bad instruction opcode: {{.*}} getelementptr
-  %a5 = getelementptr { i32, i32}* undef
+  %a3 = getelementptr { i32, i32}* undef
+  ret void
+}
+
+define internal void @atomic() {
+  %a1 = alloca i8, i32 4
+  %ptr = inttoptr i32 0 to i32*
+; CHECK: disallowed: atomic load: {{.*}} load atomic
+  %a2 = load atomic i32* %ptr seq_cst, align 4
+; CHECK: disallowed: volatile load: {{.*}} load volatile
+  %a3 = load volatile i32* %ptr, align 4
+; CHECK: disallowed: atomic store: store atomic
+  store atomic i32 undef, i32* %ptr seq_cst, align 4
+; CHECK: disallowed: volatile store: store volatile
+  store volatile i32 undef, i32* %ptr, align 4
+; CHECK: disallowed: bad instruction opcode: fence
+  fence acq_rel
+; CHECK: disallowed: bad instruction opcode: {{.*}} cmpxchg
+  %a4 = cmpxchg i32* %ptr, i32 undef, i32 undef acq_rel
+; CHECK: disallowed: bad instruction opcode: {{.*}} atomicrmw
+  %a5 = atomicrmw add i32* %ptr, i32 1 acquire
   ret void
 }
diff --git a/test/NaCl/PNaClABI/intrinsics.ll b/test/NaCl/PNaClABI/intrinsics.ll
index 7c5e76e795..9e02511794 100644
--- a/test/NaCl/PNaClABI/intrinsics.ll
+++ b/test/NaCl/PNaClABI/intrinsics.ll
@@ -25,6 +25,24 @@ declare void @llvm.memset.p0i8.i32(i8* %dest, i8 %val,
 
 declare i8* @llvm.nacl.read.tp()
 
+declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
+declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
+declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
+declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
+declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
+declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
+declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
+declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
+declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
+declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
+declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
+declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
+declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
+declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
+declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
+declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
+declare void @llvm.nacl.atomic.fence(i32)
+
 declare i16 @llvm.bswap.i16(i16)
 declare i32 @llvm.bswap.i32(i32)
 declare i64 @llvm.bswap.i64(i64)
diff --git a/test/NaCl/PNaClLLC/lit.local.cfg b/test/NaCl/PNaClLLC/lit.local.cfg
new file mode 100644
index 0000000000..c6106e4746
--- /dev/null
+++ b/test/NaCl/PNaClLLC/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll']
diff --git a/test/NaCl/PNaClLLC/test-runs-verify.ll b/test/NaCl/PNaClLLC/test-runs-verify.ll
new file mode 100644
index 0000000000..9e7c9d32a5
--- /dev/null
+++ b/test/NaCl/PNaClLLC/test-runs-verify.ll
@@ -0,0 +1,13 @@
+; RUN: not pnacl-llc -mtriple=i386-unknown-nacl -filetype=asm %s -o - 2>&1 | FileCheck %s
+
+; Test that the Verifier pass is running in pnacl-llc.
+
+define i32 @f1(i32 %x) {
+  %y = add i32 %z, 1
+  %z = add i32 %x, 1
+  ret i32 %y
+; CHECK: Instruction does not dominate all uses!
+; CHECK-NEXT: %z = add i32 %x, 1
+; CHECK-NEXT: %y = add i32 %z, 1
+}
+
diff --git a/test/Transforms/NaCl/atomics.ll b/test/Transforms/NaCl/atomics.ll
new file mode 100644
index 0000000000..b124241062
--- /dev/null
+++ b/test/Transforms/NaCl/atomics.ll
@@ -0,0 +1,471 @@
+; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s
+
+; Each of these tests validates that the corresponding legacy GCC-style
+; builtins are properly rewritten to NaCl atomic builtins.  Only the
+; GCC-style builtins that have corresponding primitives in C11/C++11 and
+; which emit different code are tested.  These legacy GCC-builtins only
+; support sequential-consistency.
+;
+; test_* tests the corresponding __sync_* builtin.  See:
+; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html
+;
+; There are also tests which validate that volatile loads/stores get
+; rewritten into NaCl atomic builtins.  The memory ordering for volatile
+; loads/stores is not validated: it could technically be constrained to
+; sequential consistency, or left as relaxed.
+;
+; Alignment is also expected to be at least natural alignment.
+
+target datalayout = "p:32:32:32"
+
+; CHECK: @test_fetch_and_add_i8
+define zeroext i8 @test_fetch_and_add_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw add i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_fetch_and_add_i16
+define zeroext i16 @test_fetch_and_add_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw add i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_fetch_and_add_i32
+define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw add i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_fetch_and_add_i64
+define i64 @test_fetch_and_add_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw add i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_fetch_and_sub_i8
+define zeroext i8 @test_fetch_and_sub_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw sub i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_fetch_and_sub_i16
+define zeroext i16 @test_fetch_and_sub_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw sub i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_fetch_and_sub_i32
+define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw sub i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_fetch_and_sub_i64
+define i64 @test_fetch_and_sub_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw sub i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_fetch_and_or_i8
+define zeroext i8 @test_fetch_and_or_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw or i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_fetch_and_or_i16
+define zeroext i16 @test_fetch_and_or_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw or i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_fetch_and_or_i32
+define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw or i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_fetch_and_or_i64
+define i64 @test_fetch_and_or_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw or i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_fetch_and_and_i8
+define zeroext i8 @test_fetch_and_and_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw and i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_fetch_and_and_i16
+define zeroext i16 @test_fetch_and_and_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw and i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_fetch_and_and_i32
+define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw and i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_fetch_and_and_i64
+define i64 @test_fetch_and_and_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw and i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_fetch_and_xor_i8
+define zeroext i8 @test_fetch_and_xor_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw xor i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_fetch_and_xor_i16
+define zeroext i16 @test_fetch_and_xor_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw xor i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_fetch_and_xor_i32
+define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw xor i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_fetch_and_xor_i64
+define i64 @test_fetch_and_xor_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw xor i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_val_compare_and_swap_i8
+define zeroext i8 @test_val_compare_and_swap_i8(i8* %ptr, i8 zeroext %oldval, i8 zeroext %newval) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %oldval, i8 %newval, i32 6, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = cmpxchg i8* %ptr, i8 %oldval, i8 %newval seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_val_compare_and_swap_i16
+define zeroext i16 @test_val_compare_and_swap_i16(i16* %ptr, i16 zeroext %oldval, i16 zeroext %newval) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %oldval, i16 %newval, i32 6, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = cmpxchg i16* %ptr, i16 %oldval, i16 %newval seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_val_compare_and_swap_i32
+define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_val_compare_and_swap_i64
+define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %oldval, i64 %newval, i32 6, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_synchronize
+define void @test_synchronize() {
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
+  ; CHECK-NEXT: ret void
+  fence seq_cst
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i8
+define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = atomicrmw xchg i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+
+; CHECK: @test_lock_release_i8
+define void @test_lock_release_i8(i8* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 0, i8* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store atomic i8 0, i8* %ptr release, align 1
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i16
+define zeroext i16 @test_lock_test_and_set_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %value, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = atomicrmw xchg i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+
+; CHECK: @test_lock_release_i16
+define void @test_lock_release_i16(i16* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 0, i16* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store atomic i16 0, i16* %ptr release, align 2
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i32
+define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %value, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = atomicrmw xchg i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+
+; CHECK: @test_lock_release_i32
+define void @test_lock_release_i32(i32* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 0, i32* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store atomic i32 0, i32* %ptr release, align 4
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i64
+define i64 @test_lock_test_and_set_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %value, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = atomicrmw xchg i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+
+; CHECK: @test_lock_release_i64
+define void @test_lock_release_i64(i64* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 0, i64* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store atomic i64 0, i64* %ptr release, align 8
+  ret void
+}
+
+; CHECK: @test_volatile_load_i8
+define zeroext i8 @test_volatile_load_i8(i8* %ptr) {
+  ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
+  ; CHECK-NEXT: ret i8 %res
+  %res = load volatile i8* %ptr, align 1
+  ret i8 %res
+}
+
+; CHECK: @test_volatile_store_i8
+define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile i8 %value, i8* %ptr, align 1
+  ret void
+}
+
+; CHECK: @test_volatile_load_i16
+define zeroext i16 @test_volatile_load_i16(i16* %ptr) {
+  ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
+  ; CHECK-NEXT: ret i16 %res
+  %res = load volatile i16* %ptr, align 2
+  ret i16 %res
+}
+
+; CHECK: @test_volatile_store_i16
+define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile i16 %value, i16* %ptr, align 2
+  ret void
+}
+
+; CHECK: @test_volatile_load_i32
+define i32 @test_volatile_load_i32(i32* %ptr) {
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
+  ; CHECK-NEXT: ret i32 %res
+  %res = load volatile i32* %ptr, align 4
+  ret i32 %res
+}
+
+; CHECK: @test_volatile_store_i32
+define void @test_volatile_store_i32(i32* %ptr, i32 %value) {
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile i32 %value, i32* %ptr, align 4
+  ret void
+}
+
+; CHECK: @test_volatile_load_i64
+define i64 @test_volatile_load_i64(i64* %ptr) {
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
+  ; CHECK-NEXT: ret i64 %res
+  %res = load volatile i64* %ptr, align 8
+  ret i64 %res
+}
+
+; CHECK: @test_volatile_store_i64
+define void @test_volatile_store_i64(i64* %ptr, i64 %value) {
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile i64 %value, i64* %ptr, align 8
+  ret void
+}
+
+; CHECK: @test_volatile_load_float
+define float @test_volatile_load_float(float* %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32*
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = bitcast i32 %res to float
+  ; CHECK-NEXT: ret float %res.cast
+  %res = load volatile float* %ptr, align 4
+  ret float %res
+}
+
+; CHECK: @test_volatile_store_float
+define void @test_volatile_store_float(float* %ptr, float %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32*
+  ; CHECK-NEXT: %value.cast = bitcast float %value to i32
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile float %value, float* %ptr, align 4
+  ret void
+}
+
+; CHECK: @test_volatile_load_double
+define double @test_volatile_load_double(double* %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64*
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = bitcast i64 %res to double
+  ; CHECK-NEXT: ret double %res.cast
+  %res = load volatile double* %ptr, align 8
+  ret double %res
+}
+
+; CHECK: @test_volatile_store_double
+define void @test_volatile_store_double(double* %ptr, double %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64*
+  ; CHECK-NEXT: %value.cast = bitcast double %value to i64
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile double %value, double* %ptr, align 8
+  ret void
+}
+
+; CHECK: @test_volatile_load_i32_pointer
+define i32* @test_volatile_load_i32_pointer(i32** %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32*
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = inttoptr i32 %res to i32*
+  ; CHECK-NEXT: ret i32* %res.cast
+  %res = load volatile i32** %ptr, align 4
+  ret i32* %res
+}
+
+; CHECK: @test_volatile_store_i32_pointer
+define void @test_volatile_store_i32_pointer(i32** %ptr, i32* %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32*
+  ; CHECK-NEXT: %value.cast = ptrtoint i32* %value to i32
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile i32* %value, i32** %ptr, align 4
+  ret void
+}
+
+; CHECK: @test_volatile_load_double_pointer
+define double* @test_volatile_load_double_pointer(double** %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32*
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = inttoptr i32 %res to double*
+  ; CHECK-NEXT: ret double* %res.cast
+  %res = load volatile double** %ptr, align 4
+  ret double* %res
+}
+
+; CHECK: @test_volatile_store_double_pointer
+define void @test_volatile_store_double_pointer(double** %ptr, double* %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32*
+  ; CHECK-NEXT: %value.cast = ptrtoint double* %value to i32
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile double* %value, double** %ptr, align 4
+  ret void
+}
+
+; CHECK: @test_volatile_load_v4i8
+define <4 x i8> @test_volatile_load_v4i8(<4 x i8>* %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32*
+  ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = bitcast i32 %res to <4 x i8>
+  ; CHECK-NEXT: ret <4 x i8> %res.cast
+  %res = load volatile <4 x i8>* %ptr, align 8
+  ret <4 x i8> %res
+}
+
+; CHECK: @test_volatile_store_v4i8
+define void @test_volatile_store_v4i8(<4 x i8>* %ptr, <4 x i8> %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32*
+  ; CHECK-NEXT: %value.cast = bitcast <4 x i8> %value to i32
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile <4 x i8> %value, <4 x i8>* %ptr, align 8
+  ret void
+}
+
+; CHECK: @test_volatile_load_v4i16
+define <4 x i16> @test_volatile_load_v4i16(<4 x i16>* %ptr) {
+  ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64*
+  ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6)
+  ; CHECK-NEXT: %res.cast = bitcast i64 %res to <4 x i16>
+  ; CHECK-NEXT: ret <4 x i16> %res.cast
+  %res = load volatile <4 x i16>* %ptr, align 8
+  ret <4 x i16> %res
+}
+
+; CHECK: @test_volatile_store_v4i16
+define void @test_volatile_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) {
+  ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64*
+  ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64
+  ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6)
+  ; CHECK-NEXT: ret void
+  store volatile <4 x i16> %value, <4 x i16>* %ptr, align 8
+  ret void
+}
diff --git a/test/Transforms/NaCl/pnacl-abi-simplify-postopt.ll b/test/Transforms/NaCl/pnacl-abi-simplify-postopt.ll
index 87a4f48dd5..74b7f8cf9b 100644
--- a/test/Transforms/NaCl/pnacl-abi-simplify-postopt.ll
+++ b/test/Transforms/NaCl/pnacl-abi-simplify-postopt.ll
@@ -6,6 +6,7 @@
 ; thoroughly in other *.ll files.  This file is a smoke test to check
 ; that the passes work together OK.
 
+target datalayout = "p:32:32:32"
 
 @var = global i32 256
 ; CHECK: @var = global [4 x i8]
diff --git a/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll b/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll
index 3aa263fa9a..af9a38df0f 100644
--- a/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll
+++ b/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll
@@ -1,17 +1,41 @@
+; RUN: opt < %s -resolve-pnacl-intrinsics -S | FileCheck %s -check-prefix=CLEANED
 ; RUN: opt < %s -resolve-pnacl-intrinsics -S | FileCheck %s
 
+; CLEANED-NOT: call i32 @llvm.nacl.setjmp
+; CLEANED-NOT: call void @llvm.nacl.longjmp
+; CLEANED-NOT: call {{.*}} @llvm.nacl.atomic
+
 declare i32 @llvm.nacl.setjmp(i8*)
 declare void @llvm.nacl.longjmp(i8*, i32)
 
+; Intrinsic name mangling is based on overloaded parameters only,
+; including return type.  Note that all pointers parameters are
+; overloaded on type-pointed-to in Intrinsics.td, and are therefore
+; mangled on the type-pointed-to only.
+declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
+declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
+declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
+declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
+declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
+declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
+declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
+declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
+declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
+declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
+declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
+declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
+declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
+declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
+declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
+declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
+declare void @llvm.nacl.atomic.fence(i32)
+
 ; These declarations must be here because the function pass expects
 ; to find them.  In real life they're inserted by the translator
 ; before the function pass runs.
 declare i32 @setjmp(i8*)
 declare void @longjmp(i8*, i32)
 
-; CHECK-NOT: call i32 @llvm.nacl.setjmp
-; CHECK-NOT: call void @llvm.nacl.longjmp
-
 define i32 @call_setjmp(i8* %arg) {
   %val = call i32 @llvm.nacl.setjmp(i8* %arg)
 ; CHECK: %val = call i32 @setjmp(i8* %arg)
@@ -23,3 +47,125 @@ define void @call_longjmp(i8* %arg, i32 %num) {
 ; CHECK: call void @longjmp(i8* %arg, i32 %num)
   ret void
 }
+
+; atomics.
+
+; CHECK: @test_fetch_and_add_i32
+define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw add i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_sub_i32
+define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_or_i32
+define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw or i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_and_i32
+define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw and i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_xor_i32
+define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_val_compare_and_swap_i32
+define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
+  ; CHECK: %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_synchronize
+define void @test_synchronize() {
+  ; CHECK: fence seq_cst
+  call void @llvm.nacl.atomic.fence(i32 6)
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i32
+define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %value, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_lock_release_i32
+define void @test_lock_release_i32(i32* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK: store atomic i32 0, i32* %ptr seq_cst, align 4
+  call void @llvm.nacl.atomic.store.i32(i32 0, i32* %ptr, i32 6)
+  ret void
+}
+
+; CHECK: @test_atomic_load_i8
+define zeroext i8 @test_atomic_load_i8(i8* %ptr) {
+  ; CHECK: %1 = load atomic i8* %ptr seq_cst, align 1
+  %1 = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
+  ret i8 %1
+}
+
+; CHECK: @test_atomic_store_i8
+define void @test_atomic_store_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: store atomic i8 %value, i8* %ptr seq_cst, align 1
+  call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6)
+  ret void
+}
+
+; CHECK: @test_atomic_load_i16
+define zeroext i16 @test_atomic_load_i16(i16* %ptr) {
+  ; CHECK: %1 = load atomic i16* %ptr seq_cst, align 2
+  %1 = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
+  ret i16 %1
+}
+
+; CHECK: @test_atomic_store_i16
+define void @test_atomic_store_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: store atomic i16 %value, i16* %ptr seq_cst, align 2
+  call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6)
+  ret void
+}
+
+; CHECK: @test_atomic_load_i32
+define i32 @test_atomic_load_i32(i32* %ptr) {
+  ; CHECK: %1 = load atomic i32* %ptr seq_cst, align 4
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
+  ret i32 %1
+}
+
+; CHECK: @test_atomic_store_i32
+define void @test_atomic_store_i32(i32* %ptr, i32 %value) {
+  ; CHECK: store atomic i32 %value, i32* %ptr seq_cst, align 4
+  call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6)
+  ret void
+}
+
+; CHECK: @test_atomic_load_i64
+define i64 @test_atomic_load_i64(i64* %ptr) {
+  ; CHECK: %1 = load atomic i64* %ptr seq_cst, align 8
+  %1 = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
+  ret i64 %1
+}
+
+; CHECK: @test_atomic_store_i64
+define void @test_atomic_store_i64(i64* %ptr, i64 %value) {
+  ; CHECK: store atomic i64 %value, i64* %ptr seq_cst, align 8
+  call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6)
+  ret void
+}
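
For reference, the numeric operands used by the `@llvm.nacl.atomic.*` intrinsics can be read off the CHECK lines in atomics.ll and resolve-pnacl-intrinsics.ll above. A summary sketch; the enum and constant names below are hypothetical, and only the numeric values come from the tests:

```c
/* Operation code for @llvm.nacl.atomic.rmw.* (first i32 operand),
 * as exercised by the atomicrmw round-trip tests above. */
enum nacl_atomic_rmw_op {
  NACL_ATOMIC_ADD      = 1,  /* atomicrmw add  */
  NACL_ATOMIC_SUB      = 2,  /* atomicrmw sub  */
  NACL_ATOMIC_OR       = 3,  /* atomicrmw or   */
  NACL_ATOMIC_AND      = 4,  /* atomicrmw and  */
  NACL_ATOMIC_XOR      = 5,  /* atomicrmw xor  */
  NACL_ATOMIC_EXCHANGE = 6   /* atomicrmw xchg */
};

/* Memory-order code (trailing i32 operand of load/store/rmw/cmpxchg/fence).
 * Only sequential consistency appears in these tests; release stores are
 * promoted to seq_cst, as the lock_release tests note. */
enum nacl_memory_order {
  NACL_MEMORY_ORDER_SEQ_CST = 6
};
```

Note that 6 serves as both the xchg operation code and the seq_cst ordering code; the two occupy different operand positions (e.g. `rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6)`), so there is no ambiguity.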