author     Eli Bendersky <eliben@chromium.org>  2012-11-14 08:48:11 -0800
committer  Eli Bendersky <eliben@chromium.org>  2012-11-14 08:48:11 -0800
commit     948e79be6c511b696c0a2ed496e6ab7843298b7b
tree       37ebae79d5126ff45be6b1d43f82342eed805082 /test/NaCl
parent     5bf33de8c8e5cb8dff4f29919f434f3b453007af

Implement sandboxing of NEON store instructions
BUG=http://code.google.com/p/nativeclient/issues/detail?id=3124
Review URL: https://codereview.chromium.org/11361249
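
Every test below checks the same two-instruction pattern: the store's address register is masked with bic against 3221225472 (0xC0000000), clearing the top two address bits so the store cannot escape the NaCl sandbox, and the masked store must follow immediately. A minimal sketch of the pattern, mirroring the RUN lines of the new tests (illustrative only, not part of this commit; the function name and the scalar str output are assumptions):

; RUN: llc -mtriple=armv7-unknown-nacl -sfi-store -filetype=obj %s -o - \
; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s

; The pointer arrives in r0 and the value in r1 (AAPCS); the bic must
; immediately precede the store for the sandboxing to be effective.
define void @sandboxed_scalar_store(i32* %p, i32 %v) nounwind {
  store i32 %v, i32* %p, align 4
; CHECK: bic r0, r0, #3221225472
; CHECK-NEXT: str r1, [r0]
  ret void
}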
Diffstat (limited to 'test/NaCl')
-rw-r--r--  test/NaCl/ARM/neon-vst1-sandboxing.ll     | 116
-rw-r--r--  test/NaCl/ARM/neon-vst2-sandboxing.ll     |  95
-rw-r--r--  test/NaCl/ARM/neon-vst3-sandboxing.ll     |  48
-rw-r--r--  test/NaCl/ARM/neon-vst4-sandboxing.ll     |  53
-rw-r--r--  test/NaCl/ARM/neon-vstlane-sandboxing.ll  | 196
-rw-r--r--  test/NaCl/ARM/vstr-sandboxing1.ll         |   6
6 files changed, 511 insertions, 3 deletions
diff --git a/test/NaCl/ARM/neon-vst1-sandboxing.ll b/test/NaCl/ARM/neon-vst1-sandboxing.ll
new file mode 100644
index 0000000000..8fd580bb49
--- /dev/null
+++ b/test/NaCl/ARM/neon-vst1-sandboxing.ll
@@ -0,0 +1,116 @@
+; RUN: llc -mtriple=armv7-unknown-nacl -mattr=+neon -sfi-store -filetype=obj %s -o - \
+; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s
+
+define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.8 {{{d[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst1i16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst1.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.16 {{{d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1i32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst1.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.32 {{{d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1f(float* %A, <2 x float>* %B) nounwind {
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <2 x float>* %B
+  call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.32 {{{d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
+  %tmp0 = bitcast i64* %A to i8*
+  %tmp1 = load <1 x i64>* %B
+  call void @llvm.arm.neon.vst1.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.64 {{{d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
+  %tmp1 = load <16 x i8>* %B
+  call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <8 x i16>* %B
+  call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 32)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [r0, :128]
+  ret void
+}
+
+define void @vst1Qi32(i32* %A, <4 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <4 x i32>* %B
+  call void @llvm.arm.neon.vst1.v4i32(i8* %tmp0, <4 x i32> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1Qf(float* %A, <4 x float>* %B) nounwind {
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <4 x float>* %B
+  call void @llvm.arm.neon.vst1.v4f32(i8* %tmp0, <4 x float> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst1Qi64(i64* %A, <2 x i64>* %B) nounwind {
+  %tmp0 = bitcast i64* %A to i8*
+  %tmp1 = load <2 x i64>* %B
+  call void @llvm.arm.neon.vst1.v2i64(i8* %tmp0, <2 x i64> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+;Check for a post-increment updating store.
+define void @vst1f_update(float** %ptr, <2 x float>* %B) nounwind {
+  %A = load float** %ptr
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <2 x float>* %B
+  call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
+; CHECK: bic r1, r1, #3221225472
+; CHECK-NEXT: vst1.32 {{{d[0-9]+}}}, [r1]!
+  %tmp2 = getelementptr float* %A, i32 2
+  store float* %tmp2, float** %ptr
+  ret void
+}
+
+declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>, i32) nounwind
+
+declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32) nounwind
diff --git a/test/NaCl/ARM/neon-vst2-sandboxing.ll b/test/NaCl/ARM/neon-vst2-sandboxing.ll
new file mode 100644
index 0000000000..e87373c174
--- /dev/null
+++ b/test/NaCl/ARM/neon-vst2-sandboxing.ll
@@ -0,0 +1,95 @@
+; RUN: llc -mtriple=armv7-unknown-nacl -mattr=+neon -sfi-store -filetype=obj %s -o - \
+; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s
+
+define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.8 {{{d[0-9]+, d[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst2i16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.16 {{{d[0-9]+, d[0-9]+}}}, [r0, :128]
+  ret void
+}
+
+define void @vst2i32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {{{d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst2f(float* %A, <2 x float>* %B) nounwind {
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <2 x float>* %B
+  call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {{{d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst2Qi8(i8* %A, <16 x i8>* %B) nounwind {
+  %tmp1 = load <16 x i8>* %B
+  call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.8 {{{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst2Qi16(i16* %A, <8 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <8 x i16>* %B
+  call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.16 {{{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}}, [r0, :128]
+  ret void
+}
+
+define void @vst2Qi32(i32* %A, <4 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <4 x i32>* %B
+  call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 64)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {{{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}}, [r0, :256]
+  ret void
+}
+
+define void @vst2Qf(float* %A, <4 x float>* %B) nounwind {
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <4 x float>* %B
+  call void @llvm.arm.neon.vst2.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {{{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}}, [r0]
+  ret void
+}
+
+;Check for a post-increment updating store with register increment.
+define void @vst2i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
+  %A = load i8** %ptr
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
+; CHECK: bic r1, r1, #3221225472
+; CHECK-NEXT: vst2.8 {{{d[0-9]+, d[0-9]+}}}, [r1], r2
+  %tmp2 = getelementptr i8* %A, i32 %inc
+  store i8* %tmp2, i8** %ptr
+  ret void
+}
+
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32) nounwind
+
+declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
diff --git a/test/NaCl/ARM/neon-vst3-sandboxing.ll b/test/NaCl/ARM/neon-vst3-sandboxing.ll
new file mode 100644
index 0000000000..b496c0c592
--- /dev/null
+++ b/test/NaCl/ARM/neon-vst3-sandboxing.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=armv7-unknown-nacl -mattr=+neon -sfi-store -filetype=obj %s -o - \
+; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s
+
+define void @vst3i8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.8 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst3i16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.16 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
+  ret void
+}
+
+define void @vst3i32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
+  ret void
+}
+
+;Check for a post-increment updating store.
+define void @vst3Qi16_update(i16** %ptr, <8 x i16>* %B) nounwind {
+  %A = load i16** %ptr
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <8 x i16>* %B
+  call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+; CHECK: bic r1, r1, #3221225472
+; CHECK-NEXT: vst3.16 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+  %tmp2 = getelementptr i16* %A, i32 24
+  store i16* %tmp2, i16** %ptr
+  ret void
+}
+
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+
+declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
diff --git a/test/NaCl/ARM/neon-vst4-sandboxing.ll b/test/NaCl/ARM/neon-vst4-sandboxing.ll
new file mode 100644
index 0000000000..032f194231
--- /dev/null
+++ b/test/NaCl/ARM/neon-vst4-sandboxing.ll
@@ -0,0 +1,53 @@
+; RUN: llc -mtriple=armv7-unknown-nacl -mattr=+neon -sfi-store -filetype=obj %s -o - \
+; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s
+
+define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.8 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0, :64]
+  ret void
+}
+
+define void @vst4i16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.16 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0, :128]
+  ret void
+}
+
+define void @vst4i32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 32)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r0, :256]
+  ret void
+}
+
+;Check for a post-increment updating store.
+define void @vst4Qf_update(float** %ptr, <4 x float>* %B) nounwind {
+  %A = load float** %ptr
+  %tmp0 = bitcast float* %A to i8*
+  %tmp1 = load <4 x float>* %B
+  call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+; CHECK: bic r1, r1, #3221225472
+; CHECK-NEXT: vst4.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+  %tmp2 = getelementptr float* %A, i32 16
+  store float* %tmp2, float** %ptr
+  ret void
+}
+
+declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32) nounwind
+
+declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
diff --git a/test/NaCl/ARM/neon-vstlane-sandboxing.ll b/test/NaCl/ARM/neon-vstlane-sandboxing.ll
new file mode 100644
index 0000000000..5b4dc63a14
--- /dev/null
+++ b/test/NaCl/ARM/neon-vstlane-sandboxing.ll
@@ -0,0 +1,196 @@
+; RUN: llc -mtriple=armv7-unknown-nacl -mattr=+neon -sfi-store -filetype=obj %s -o - \
+; RUN:   | llvm-objdump -disassemble -triple armv7 - | FileCheck %s
+
+define void @vst1lanei8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  %tmp2 = extractelement <8 x i8> %tmp1, i32 3
+  store i8 %tmp2, i8* %A, align 8
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.8 {d{{[0-9]+}}[3]}, [r0]
+  ret void
+}
+
+define void @vst1lanei16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp1 = load <4 x i16>* %B
+  %tmp2 = extractelement <4 x i16> %tmp1, i32 2
+  store i16 %tmp2, i16* %A, align 8
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.16 {d{{[0-9]+}}[2]}, [r0, :16]
+  ret void
+}
+
+define void @vst1lanei32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp1 = load <2 x i32>* %B
+  %tmp2 = extractelement <2 x i32> %tmp1, i32 1
+  store i32 %tmp2, i32* %A, align 8
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.32 {d{{[0-9]+}}[1]}, [r0, :32]
+  ret void
+}
+
+define void @vst1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
+  %tmp1 = load <16 x i8>* %B
+  %tmp2 = extractelement <16 x i8> %tmp1, i32 9
+  store i8 %tmp2, i8* %A, align 8
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.8 {d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+  %tmp1 = load <8 x i16>* %B
+  %tmp2 = extractelement <8 x i16> %tmp1, i32 5
+  store i16 %tmp2, i16* %A, align 8
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst1.16 {d{{[0-9]+}}[1]}, [r0, :16]
+  ret void
+}
+
+define void @vst2lanei8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.8 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0, :16]
+  ret void
+}
+
+define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0, :32]
+  ret void
+}
+
+define void @vst2lanei32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <8 x i16>* %B
+  call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <4 x i32>* %B
+  call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst2.32 {d{{[0-9]+}}[0], d{{[0-9]+}}[0]}, [r0, :64]
+  ret void
+}
+
+define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.8 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst3lanei16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst3lanei32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst3.32 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i8>* %B
+  call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.8 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0, :32]
+  ret void
+}
+
+define void @vst4lanei16(i16* %A, <4 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
+  ret void
+}
+
+define void @vst4lanei32(i32* %A, <2 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <2 x i32>* %B
+  call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.32 {d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0, :128]
+  ret void
+}
+
+define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <8 x i16>* %B
+  call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7, i32 16)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.16 {d{{[0-9]+}}[3], d{{[0-9]+}}[3], d{{[0-9]+}}[3], d{{[0-9]+}}[3]}, [r0, :64]
+  ret void
+}
+
+define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+  %tmp0 = bitcast i32* %A to i8*
+  %tmp1 = load <4 x i32>* %B
+  call void @llvm.arm.neon.vst4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
+; CHECK: bic r0, r0, #3221225472
+; CHECK-NEXT: vst4.32 {d{{[0-9]+}}[0], d{{[0-9]+}}[0], d{{[0-9]+}}[0], d{{[0-9]+}}[0]}, [r0]
+  ret void
+}
+
+declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind
+
+;Check for a post-increment updating store with register increment.
+define void @vst2lanei16_update(i16** %ptr, <4 x i16>* %B, i32 %inc) nounwind {
+  %A = load i16** %ptr
+  %tmp0 = bitcast i16* %A to i8*
+  %tmp1 = load <4 x i16>* %B
+  call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 2)
+; CHECK: bic r1, r1, #3221225472
+; CHECK-NEXT: vst2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r1], r2
+  %tmp2 = getelementptr i16* %A, i32 %inc
+  store i16* %tmp2, i16** %ptr
+  ret void
+}
diff --git a/test/NaCl/ARM/vstr-sandboxing1.ll b/test/NaCl/ARM/vstr-sandboxing1.ll
index 06aeff1037..6646cbc717 100644
--- a/test/NaCl/ARM/vstr-sandboxing1.ll
+++ b/test/NaCl/ARM/vstr-sandboxing1.ll
@@ -5,9 +5,9 @@
 define void @test_vstr_sandbox(<8 x i8>* %ptr) nounwind {
   %1 = insertelement <8 x i8> undef, i8 -128, i32 0
   %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
   store <8 x i8> %2, <8 x i8>* %ptr, align 8
-  ret void
-}
-
 ; CHECK: bic r0, r0, #3221225472
 ; CHECK-NEXT: vstr {{[0-9a-z]+}}, [r0]
+  ret void
+}
+