author     Chris Lattner <sabre@nondot.org>  2011-08-11 06:26:54 +0000
committer  Chris Lattner <sabre@nondot.org>  2011-08-11 06:26:54 +0000
commit     f4ea68fa5a85d3e883cf35075133e64de4dfc046 (patch)
tree       1c5a5891952433f234bff3416684ba8535659651
parent     b02c0ace207333fb5c66ea6826531ed2f7cee532 (diff)
fix PR10605 / rdar://9930964 by adding a pretty scary missed check.
It's somewhat surprising anything works without this. Before we would
compile the testcase into:

test:                                   # @test
	movl	$4, 8(%rdi)
	movl	8(%rdi), %eax
	orl	%esi, %eax
	cmpl	$32, %edx
	movl	%eax, -4(%rsp)          # 4-byte Spill
	je	.LBB0_2

now we produce:

test:                                   # @test
	movl	8(%rdi), %eax
	movl	$4, 8(%rdi)
	orl	%esi, %eax
	cmpl	$32, %edx
	movl	%eax, -4(%rsp)          # 4-byte Spill
	je	.LBB0_2

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137303 91177308-0d34-0410-b5e6-96231b3b80d8
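To make the failure concrete, here is a minimal, self-contained C++ sketch of the single-use walk in TryToFoldFastISelLoad and the guard this commit adds. The types and names (Instr, Users, canFoldLoad) are simplified stand-ins for illustration, not the real LLVM API, and the scan bound is likewise illustrative:

#include <vector>

// Simplified stand-in for an LLVM instruction; illustrative only.
struct Instr {
  std::vector<Instr *> Users; // instructions that consume this value
};

// Walk the single-use chain from the load toward the instruction we want
// to fold it into, mirroring the shape of the fixed check.
bool canFoldLoad(const Instr *Load, const Instr *FoldInst) {
  if (Load->Users.size() != 1)
    return false;
  const Instr *TheUser = Load->Users[0];
  unsigned MaxUsers = 6; // bounded scan; exact bound is illustrative
  while (TheUser != FoldInst && --MaxUsers) {
    if (TheUser->Users.size() != 1)
      return false;              // multiple or no uses: bail out
    TheUser = TheUser->Users[0]; // follow the single-use chain
  }
  // The guard added by this commit: refuse unless the walk actually
  // reached FoldInst; previously the fold proceeded regardless.
  return TheUser == FoldInst;
}

If the bounded scan stops before reaching FoldInst (here because the bound is hit; in the real code also because the chain leaves the basic block), the fold must be abandoned. Before this patch it proceeded anyway, which is how the store slipped ahead of the load in the bad output above.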
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  |  5
-rw-r--r--  test/CodeGen/X86/fast-isel-gep.ll              | 27
2 files changed, 32 insertions(+), 0 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 87bb296b8c..ec8f014f65 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -754,6 +754,11 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
     TheUser = TheUser->use_back();
   }
 
+  // If we didn't find the fold instruction, then we failed to collapse the
+  // sequence.
+  if (TheUser != FoldInst)
+    return false;
+
   // Don't try to fold volatile loads. Target has to deal with alignment
   // constraints.
   if (LI->isVolatile()) return false;
diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll
index 1a2e34ec7f..d141487c25 100644
--- a/test/CodeGen/X86/fast-isel-gep.ll
+++ b/test/CodeGen/X86/fast-isel-gep.ll
@@ -107,3 +107,30 @@ lpad: ; preds = %if.end19, %if.then1
   unreachable
 }
 declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind
+
+
+; PR10605 / rdar://9930964 - Don't fold loads incorrectly. The load should
+; happen before the store.
+define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
+; X64: test7:
+; X64: movl 8({{%rdi|%rcx}}), %eax
+; X64: movl $4, 8({{%rdi|%rcx}})
+
+
+ %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ %tmp30 = load i32* %tmp29, align 4
+
+ %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ store i32 4, i32* %p2
+
+ %tmp72 = or i32 %tmp71, %tmp30
+ %tmp73 = icmp ne i32 %tmp63, 32
+ br i1 %tmp73, label %T, label %F
+
+T:
+ ret i32 %tmp72
+
+F:
+ ret i32 4
+}
+
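For readers who want the miscompile's effect at the source level, here is a hypothetical C++ analog of @test7 (the struct and driver are invented for illustration); the FileCheck lines above pin down the same property, namely that the field is loaded before it is overwritten:

#include <cstdio>

struct S { int x, y, z; };

// Analog of @test7: load the third field, store 4 over it, then use the
// pre-store value. Folding the load below the store -- the bug this
// commit fixes -- would make 'old' observe 4 instead of the original value.
int test7(S *p, int a, int b) {
  int old = p->z; // must happen before the store below
  p->z = 4;
  int r = a | old;
  return b != 32 ? r : 4;
}

int main() {
  S s = {0, 0, 7};
  // Correct ordering yields 8|7 = 15; the miscompiled ordering would
  // have produced 8|4 = 12.
  std::printf("%d\n", test7(&s, 8, 0));
  return 0;
}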