authorChandler Carruth <chandlerc@gmail.com>2012-04-16 01:12:56 +0000
committerChandler Carruth <chandlerc@gmail.com>2012-04-16 01:12:56 +0000
commit70daea90afc167a010a1851defda01d7e0eb45eb (patch)
tree43f1a66e3e9b5a7a1ab5d54eeefb87768d954183 /test/CodeGen/X86
parent8325c11d473a340217fa0de648ba8f733f2d626e (diff)
Rewrite how machine block placement handles loop rotation.
This is a complex change that resulted from a great deal of experimentation with several different benchmarks. The one which proved the most useful is included as a test case, but I don't know that it captures all of the relevant changes, as I didn't have specific regression tests for each; they were more the result of reasoning about what the old algorithm would possibly do wrong. I'm also failing at the moment to craft more targeted regression tests for these changes; if anyone has ideas, it would be welcome.

The first big thing broken with the old algorithm is the idea that we can take a basic block which has a loop-exiting successor and a looping successor and use the looping successor as the layout top in order to get that particular block to be the bottom of the loop after layout. This happens to work in many cases, but not in all.

The second big thing broken was that we didn't try to select the exit which fell into the nearest enclosing loop (to which we exit at all). As a consequence, even if the rotation worked perfectly, it would result in one of two bad layouts. Either the bottom of the loop would get fallthrough, skipping across a nearer enclosing loop and thereby making it discontiguous, or it would be forced to take an explicit jump over the nearest enclosing loop to reach its successor. The point of the rotation is to get fallthrough, so we need it to fall through to the nearest loop it can.

The fix to the first issue is to actually lay out the loop from the loop header, and then rotate the loop such that the correct exiting edge can be a fallthrough edge. This is actually much easier than I anticipated because we can handle all the hard parts of finding a viable rotation before we do the layout. We just store that, and then rotate after layout is finished. No inner loops get split across the post-rotation backedge because we check for them when selecting the rotation.

That fix exposed a latent problem with our exiting block selection -- we should allow the backedge to point into the middle of some inner-loop chain, as there is no real penalty to it; the whole point is that it *won't* be a fallthrough edge. This may have blocked the rotation altogether in some cases. I have no idea and no test case, as I've never seen it in practice; it was just noticed by inspection.

Finally, all of these fixes, and studying the loops they produce, highlighted another problem: in rotating loops like this, we sometimes fail to align the destination of these backwards jumping edges. Fix this by actually walking the backwards edges rather than relying on LoopInfo.

This fixes regressions on heapsort if block placement is enabled, as well as lots of other cases where the previous logic would introduce an abundance of unnecessary branches into the execution.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@154783 91177308-0d34-0410-b5e6-96231b3b80d8
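To make the strategy described above concrete, here is a minimal standalone sketch. It does not use LLVM's actual MachineBlockPlacement types; Block, Loop, and the chain-as-vector below are hypothetical simplifications introduced only for illustration. The idea: among the loop's exiting blocks, prefer the one whose exit edge lands in the nearest (deepest) enclosing loop, lay the loop out from its header, and then rotate the finished chain so that block sits at the bottom and its exit edge becomes the fallthrough.

// Hypothetical, simplified stand-ins for machine basic blocks and loops;
// not LLVM's MachineBasicBlock/MachineLoop.
#include <algorithm>
#include <vector>

struct Block;

struct Loop {
  Loop *Parent = nullptr;          // Nearest enclosing loop, or null.
  std::vector<Block *> Blocks;     // Blocks of this loop and its subloops.
};

struct Block {
  Loop *L = nullptr;               // Innermost loop containing this block, or null.
  std::vector<Block *> Succs;      // CFG successors.
};

// Loop nesting depth; deeper means nearer to the loop being laid out.
static unsigned loopDepth(const Loop *L) {
  unsigned D = 0;
  for (; L; L = L->Parent)
    ++D;
  return D;
}

static bool loopContains(const Loop *Outer, const Loop *Inner) {
  for (; Inner; Inner = Inner->Parent)
    if (Inner == Outer)
      return true;
  return false;
}

// Select the exiting block whose exit edge lands in the deepest (i.e. nearest)
// enclosing loop. The real pass additionally checks that rotating to this
// block will not split any inner-loop chain; that check is omitted here.
static Block *findRotationExit(const Loop &L) {
  Block *Best = nullptr;
  unsigned BestDepth = 0;
  for (Block *B : L.Blocks)
    for (Block *S : B->Succs) {
      if (loopContains(&L, S->L))
        continue;                  // Successor is still inside L: not an exit edge.
      unsigned D = loopDepth(S->L);
      if (!Best || D > BestDepth) {
        Best = B;
        BestDepth = D;
      }
    }
  return Best;
}

// After the loop has been laid out starting at its header, rotate the chain so
// the chosen exiting block is last; its exit edge then becomes the fallthrough,
// and the backedge becomes an explicit jump.
static void rotateChain(std::vector<Block *> &Chain, Block *ExitingBlock) {
  auto It = std::find(Chain.begin(), Chain.end(), ExitingBlock);
  if (It == Chain.end() || It + 1 == Chain.end())
    return;                        // Not in the chain, or already at the bottom.
  std::rotate(Chain.begin(), It + 1, Chain.end());
}

As in the description above, the viable rotation is found before layout (findRotationExit) and only applied afterwards (rotateChain); in the real pass, the selection step is also where splitting of inner-loop chains is ruled out.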
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--   test/CodeGen/X86/block-placement.ll   129
1 file changed, 126 insertions, 3 deletions
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index 167d522d47..f3c9727605 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -76,11 +76,11 @@ define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
; CHECK: test_loop_cold_blocks:
; CHECK: %entry
+; CHECK: %unlikely1
+; CHECK: %unlikely2
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
-; CHECK: %unlikely1
-; CHECK: %unlikely2
; CHECK: %exit
entry:
@@ -348,7 +348,6 @@ define void @unnatural_cfg2() {
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
-; CHECK: %loop.header
; CHECK: %loop.body3
; CHECK: %loop.inner1.begin
; The end block is folded with %loop.body3...
@@ -356,6 +355,7 @@ define void @unnatural_cfg2() {
; CHECK: %loop.body4
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded
+; CHECK: %loop.header
; CHECK: %bail
entry:
@@ -928,3 +928,126 @@ entry:
exit:
ret void
}
+
+define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
+; This test case comes from the heapsort benchmark, and exemplifies several
+; important aspects of block placement in the presence of loops:
+; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
+; a fallthrough.
+; 2) The exiting edge from the loop which is rotated to be laid out at the
+; bottom of the loop needs to be exiting into the nearest enclosing loop (to
+; which there is an exit). Otherwise, we force that enclosing loop into
+; strange layouts that are significantly less efficient, often times making
+; it discontiguous.
+;
+; CHECK: @benchmark_heapsort
+; CHECK: %entry
+; First rotated loop top.
+; CHECK: .align
+; CHECK: %if.end10
+; Second rotated loop top
+; CHECK: .align
+; CHECK: %while.cond.outer
+; Third rotated loop top
+; CHECK: .align
+; CHECK: %while.cond
+; CHECK: %while.body
+; CHECK: %land.lhs.true
+; CHECK: %if.then19
+; CHECK: %if.then24
+; CHECK: %while.end
+; CHECK: %for.cond
+; CHECK: %if.then
+; CHECK: %if.else
+; CHECK: %if.then8
+; CHECK: ret
+
+entry:
+ %shr = ashr i32 %n, 1
+ %add = add nsw i32 %shr, 1
+ %arrayidx3 = getelementptr inbounds double* %ra, i64 1
+ br label %for.cond
+
+for.cond:
+ %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
+ %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
+ %cmp = icmp sgt i32 %l.0, 1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %dec = add nsw i32 %l.0, -1
+ %idxprom = sext i32 %dec to i64
+ %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ br label %if.end10
+
+if.else:
+ %idxprom1 = sext i32 %ir.0 to i64
+ %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
+ %1 = load double* %arrayidx2, align 8
+ %2 = load double* %arrayidx3, align 8
+ store double %2, double* %arrayidx2, align 8
+ %dec6 = add nsw i32 %ir.0, -1
+ %cmp7 = icmp eq i32 %dec6, 1
+ br i1 %cmp7, label %if.then8, label %if.end10
+
+if.then8:
+ store double %1, double* %arrayidx3, align 8
+ ret void
+
+if.end10:
+ %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
+ %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
+ %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
+ %add31 = add nsw i32 %ir.1, 1
+ br label %while.cond.outer
+
+while.cond.outer:
+ %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
+ %j.0.ph = shl i32 %j.0.ph.in, 1
+ br label %while.cond
+
+while.cond:
+ %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
+ %cmp11 = icmp sgt i32 %j.0, %ir.1
+ br i1 %cmp11, label %while.end, label %while.body
+
+while.body:
+ %cmp12 = icmp slt i32 %j.0, %ir.1
+ br i1 %cmp12, label %land.lhs.true, label %if.end20
+
+land.lhs.true:
+ %idxprom13 = sext i32 %j.0 to i64
+ %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
+ %3 = load double* %arrayidx14, align 8
+ %add15 = add nsw i32 %j.0, 1
+ %idxprom16 = sext i32 %add15 to i64
+ %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
+ %4 = load double* %arrayidx17, align 8
+ %cmp18 = fcmp olt double %3, %4
+ br i1 %cmp18, label %if.then19, label %if.end20
+
+if.then19:
+ br label %if.end20
+
+if.end20:
+ %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
+ %idxprom21 = sext i32 %j.1 to i64
+ %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
+ %5 = load double* %arrayidx22, align 8
+ %cmp23 = fcmp olt double %rra.0, %5
+ br i1 %cmp23, label %if.then24, label %while.cond
+
+if.then24:
+ %idxprom27 = sext i32 %j.0.ph.in to i64
+ %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
+ store double %5, double* %arrayidx28, align 8
+ br label %while.cond.outer
+
+while.end:
+ %idxprom33 = sext i32 %j.0.ph.in to i64
+ %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
+ store double %rra.0, double* %arrayidx34, align 8
+ br label %for.cond
+}
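For orientation, the routine added above is the inner loop of the heapsort benchmark named in the commit message. The following is a rough C++ rendering of the same control flow, reconstructed purely for illustration (it is not part of the commit); variable names loosely mirror the IR, and the single sift-down while loop here is what lowers to the nested %while.cond.outer / %while.cond loops in the test.

// Illustrative reconstruction of the control flow in @benchmark_heapsort above.
// Indexing follows the IR, which accesses ra[1]..ra[n].
void benchmark_heapsort(int n, double *ra) {
  int l = (n >> 1) + 1;                   // %add
  int ir = n;                             // %ir.0
  for (;;) {                              // %for.cond
    double rra;                           // %rra.0
    if (l > 1) {                          // %if.then: still building the heap
      rra = ra[--l];
    } else {                              // %if.else: heap built, retire the max
      rra = ra[ir];
      ra[ir] = ra[1];
      if (--ir == 1) {                    // %if.then8: last element placed, done
        ra[1] = rra;
        return;
      }
    }
    int i = l;                            // %j.0.ph.in
    int j = l << 1;                       // %j.0.ph
    while (j <= ir) {                     // %while.cond / %while.body
      if (j < ir && ra[j] < ra[j + 1])    // %land.lhs.true / %if.then19
        ++j;
      if (rra < ra[j]) {                  // %if.end20 -> %if.then24
        ra[i] = ra[j];
        i = j;
        j <<= 1;
      } else {
        j = ir + 1;                       // %add31: terminates the sift-down
      }
    }
    ra[i] = rra;                          // %while.end
  }
}

The three .align checks in the test correspond to the tops of these three loops (the outer for.cond loop and the two nested sift-down loops) after rotation, matching the "First/Second/Third rotated loop top" comments above.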