author     Michael Liao <michael.liao@intel.com>  2013-03-25 23:50:10 +0000
committer  Michael Liao <michael.liao@intel.com>  2013-03-25 23:50:10 +0000
commit     d4584c9e5658887ec50c43760c988d04eaa13e34
tree       1f64d66547a6aed3e1b1ee10531534229ad01f18  /test/CodeGen/X86/avx-load-store.ll
parent     b4f98ea1213c866f39aa5b341ec0116f9c2335d7
Revise alignment checking/calculation on 256-bit unaligned memory access
- It's still considered aligned when the specified alignment is larger than the natural alignment;
- The new alignment for the high 128-bit vector should be min(16, alignment), as the pointer is advanced by 16, a power-of-2 offset.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177947 91177308-0d34-0410-b5e6-96231b3b80d8
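To make the rule concrete, here is a minimal C++ sketch of the alignment calculation described above. It is hypothetical, not the actual code this commit changes in the backend; the struct and function names are invented for illustration.

#include <algorithm>

// Hypothetical sketch of the alignment rule from the commit message.
// A 32-byte (256-bit) access is "aligned enough" whenever the stated
// alignment meets or exceeds the natural 32-byte alignment. When the
// access must instead be split into two 16-byte halves, the low half
// keeps the original alignment, while the high half's pointer is
// advanced by 16 bytes (a power of 2), so it can only guarantee
// min(16, alignment).
struct HalfAlign {
  unsigned Lo; // alignment of the low 128-bit half, in bytes
  unsigned Hi; // alignment of the high 128-bit half, in bytes
};

inline bool isNaturallyAligned256(unsigned Alignment) {
  return Alignment >= 32; // e.g. align 64 still counts as aligned
}

inline HalfAlign splitAlign256(unsigned Alignment) {
  return {Alignment, std::min(16u, Alignment)};
}

This matches the two new tests below: with align 64 the access is naturally aligned, so add4i64a64 expects a single 256-bit vmovaps through %ymm; with align 16 the access is split, and both 128-bit halves still have 16-byte alignment, so add4i64a16 expects two aligned vmovaps pairs through %xmm.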
Diffstat (limited to 'test/CodeGen/X86/avx-load-store.ll')
-rw-r--r--  test/CodeGen/X86/avx-load-store.ll | 24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll
index 0afaff830d..a6775aba09 100644
--- a/test/CodeGen/X86/avx-load-store.ll
+++ b/test/CodeGen/X86/avx-load-store.ll
@@ -81,7 +81,7 @@ define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK: _double_save
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
-; CHECK: vmovups %xmm
+; CHECK: vmovaps %xmm
; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
@@ -127,3 +127,25 @@ define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
store <8 x i32> %x, <8 x i32>* %ret, align 1
ret void
}
+
+; CHECK: add4i64a64
+; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
+; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
+define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+ %b = load <4 x i64>* %bp, align 64
+ %x = add <4 x i64> zeroinitializer, %b
+ store <4 x i64> %x, <4 x i64>* %ret, align 64
+ ret void
+}
+
+; CHECK: add4i64a16
+; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
+; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
+; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
+; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
+define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+ %b = load <4 x i64>* %bp, align 16
+ %x = add <4 x i64> zeroinitializer, %b
+ store <4 x i64> %x, <4 x i64>* %ret, align 16
+ ret void
+}