author    | Hal Finkel <hfinkel@anl.gov> | 2013-02-08 21:35:47 +0000
committer | Hal Finkel <hfinkel@anl.gov> | 2013-02-08 21:35:47 +0000
commit    | 089a5f8a8c5e24f996dd41419de2c7bc7b42ea29 (patch)
tree      | 91f63db4375a3607aadf2b0bd4efdbd1b2fdca4c /test
parent    | 0cf5d396c14c71dd4fa1d102c2b3d178b1191436 (diff)
DAGCombiner: Constant folding around pre-increment loads/stores
Previously, even when a pre-increment load or store was generated,
we often needed to keep a copy of the original base register for use
with other offsets. If all of those offsets are constants (including
the one that was folded into the addressing mode), then keeping the
copy is clearly unnecessary. This change rewrites the other offsets
relative to the new, incremented address.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174746 91177308-0d34-0410-b5e6-96231b3b80d8
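
To illustrate the effect on the generated code (a hypothetical sketch, not
output from this commit; the register assignments are invented for the
example), consider a loop body that stores through a base register at two
large constant offsets, each materialized in a register:

    # Before: the original base must outlive the updating store, so a
    # copy is kept around (r3 = base, r4 = value, r6/r7 = constant offsets).
            mr      5, 3        # preserve the original base in r5
            stdux   4, 3, 6     # store to r3+r6 and update: r3 <- r3+r6
            stdx    4, 5, 7     # second store still offsets from the old base

    # After: the second offset is adjusted by the increment (r8 = r7 - r6)
    # and taken relative to the updated base, so the copy is dead.
            stdux   4, 3, 6     # store to r3+r6 and update: r3 <- r3+r6
            stdx    4, 3, 8     # second store reuses the incremented base

The adjustment is pure constant folding: both r7 and r6 hold known
constants, so r8 can be materialized directly and the mr disappears.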
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/PowerPC/stdux-constuse.ll | 47
1 file changed, 47 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/stdux-constuse.ll b/test/CodeGen/PowerPC/stdux-constuse.ll
new file mode 100644
index 0000000000..e62d438014
--- /dev/null
+++ b/test/CodeGen/PowerPC/stdux-constuse.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mcpu=a2 -disable-lsr < %s | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test1(i64 %add, i64* %ptr) nounwind {
+entry:
+  %p1 = getelementptr i64* %ptr, i64 144115188075855
+  br label %for.cond2.preheader
+
+for.cond2.preheader:
+  %nl.018 = phi i32 [ 0, %entry ], [ %inc9, %for.end ]
+  br label %for.body4
+
+for.body4:
+  %lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
+  %i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
+  %i6 = getelementptr i64* %i0, i64 400000
+  %i7 = getelementptr i64* %i6, i64 300000
+  %i8 = getelementptr i64* %i6, i64 200000
+  %i9 = getelementptr i64* %i6, i64 100000
+  store i64 %add, i64* %i6, align 32
+  store i64 %add, i64* %i7, align 32
+  store i64 %add, i64* %i8, align 32
+  store i64 %add, i64* %i9, align 32
+  %lsr.iv.next = add i32 %lsr.iv, -16
+  %exitcond.15 = icmp eq i32 %lsr.iv.next, 0
+  br i1 %exitcond.15, label %for.end, label %for.body4
+
+; Make sure that we generate the most compact form of this loop with no
+; unnecessary moves
+; CHECK: @test1
+; CHECK: mtctr
+; CHECK: stdux
+; CHECK-NEXT: stdx
+; CHECK-NEXT: stdx
+; CHECK-NEXT: stdx
+; CHECK-NEXT: bdnz
+
+for.end:
+  %inc9 = add nsw i32 %nl.018, 1
+  %exitcond = icmp eq i32 %inc9, 400000
+  br i1 %exitcond, label %for.end10, label %for.cond2.preheader
+
+for.end10:
+  ret i32 0
+}
+
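
A note on the magnitudes in this test (the displacement-range figure below
is standard PowerPC encoding, not something stated in the commit): the
getelementptr offsets count i64 elements, so even the smallest one is far
outside the immediate displacement that a DS-form std can encode:

    100000 elements * 8 bytes =   800,000 bytes   (smallest extra offset)
    400000 elements * 8 bytes = 3,200,000 bytes   (per-iteration increment)
    DS-form displacement range:  +/- 32,768 bytes

Every offset must therefore be materialized in a register and the stores
emitted in indexed X-form, which is why the CHECK lines expect exactly one
updating stdux followed by three stdx instructions that reuse the updated
base, with no intervening register copies.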