author | Chris Lattner <sabre@nondot.org> | 2009-11-01 19:50:13 +0000 |
---|---|---|
committer | Chris Lattner <sabre@nondot.org> | 2009-11-01 19:50:13 +0000 |
commit | e3c6281a98655cee3b8d870109c81fa4338fdf9d | |
tree | e5ae8edbbd3d20c205a7e5cb3686e8eae9ac989a | |
parent | 3e54f8850a262fe0c8c7253b4e0ac700566c7365 | |
fix a bug noticed by inspection: when instcombine sinks loads through
phis, it didn't preserve the alignment of the load. This is a missed
optimization if the alignment is high and a miscompilation when the
alignment is low.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@85736 91177308-0d34-0410-b5e6-96231b3b80d8
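For illustration, here is a minimal standalone C++ sketch of the alignment-merging rule this commit introduces, not code from the patch itself: the helper name `mergeLoadAlignments`, the use of `std::optional`, and the plain `unsigned` alignments are assumptions for the sketch. The rule it mirrors: if some of the incoming loads specify an alignment and others don't, the fold is skipped; otherwise the largest specified alignment is kept and propagated onto the sunk load, matching the `std::max` merge added to `FoldPHIArgOpIntoPHI` below.

```cpp
#include <algorithm>
#include <optional>
#include <vector>

// Sketch of the alignment-merging rule (an assumption-labeled illustration).
// An alignment of 0 means "not specified", as LoadInst::getAlignment() reported
// at the time of this commit. Returns std::nullopt when some loads specify an
// alignment and others don't -- the mixed case the patch refuses to transform.
std::optional<unsigned> mergeLoadAlignments(const std::vector<unsigned> &Aligns) {
  if (Aligns.empty())
    return 0u;
  unsigned Merged = Aligns.front();
  for (unsigned A : Aligns) {
    // Mixing specified (non-zero) and unspecified (zero) alignments: bail out.
    if ((Merged != 0) != (A != 0))
      return std::nullopt;
    // Otherwise keep the largest specified alignment; the patch passes this
    // value to the LoadInst it creates in place of the phi.
    Merged = std::max(Merged, A);
  }
  return Merged;
}
```

In the commit's test case both incoming loads are `align 1`, so the merged alignment is 1 and the sunk load keeps `align 1` instead of implicitly claiming the type's natural alignment.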
-rw-r--r-- | lib/Transforms/Scalar/InstructionCombining.cpp | 28 |
-rw-r--r-- | test/Transforms/InstCombine/phi.ll | 21 |
2 files changed, 45 insertions, 4 deletions
```diff
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index d1561d68c6..59c772d313 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -10744,7 +10744,15 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
   // code size and simplifying code.
   Constant *ConstantOp = 0;
   const Type *CastSrcTy = 0;
+
+  // When processing loads, we need to propagate two bits of information to the
+  // sunk load: whether it is volatile, and what its alignment is.  We currently
+  // don't sink loads when some have their alignment specified and some don't.
+  // visitLoadInst will propagate an alignment onto the load when TD is around,
+  // and if TD isn't around, we can't handle the mixed case.
   bool isVolatile = false;
+  unsigned LoadAlignment = 0;
+
   if (isa<CastInst>(FirstInst)) {
     CastSrcTy = FirstInst->getOperand(0)->getType();
   } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
@@ -10754,7 +10762,10 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
     if (ConstantOp == 0)
       return FoldPHIArgBinOpIntoPHI(PN);
   } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
+
     isVolatile = LI->isVolatile();
+    LoadAlignment = LI->getAlignment();
+
     // We can't sink the load if the loaded value could be modified between the
     // load and the PHI.
     if (LI->getParent() != PN.getIncomingBlock(0) ||
@@ -10791,6 +10802,13 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
           !isSafeAndProfitableToSinkLoad(LI))
         return 0;
+      // If some of the loads have an alignment specified but not all of them,
+      // we can't do the transformation.
+      if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
+        return 0;
+
+      LoadAlignment = std::max(LoadAlignment, LI->getAlignment());
+
       // If the PHI is of volatile loads and the load block has multiple
       // successors, sinking it would remove a load of the volatile value from
       // the path through the other successor.
@@ -10832,10 +10850,12 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
   }

   // Insert and return the new operation.
-  if (CastInst* FirstCI = dyn_cast<CastInst>(FirstInst))
+  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
     return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
+
   if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
     return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
+
   if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
     return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                            PhiVal, ConstantOp);
@@ -10847,8 +10867,8 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
   if (isVolatile)
     for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
       cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
-
-  return new LoadInst(PhiVal, "", isVolatile);
+
+  return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
 }

 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
@@ -11304,7 +11324,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
 }

 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
-  // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
+  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
   if (AI.isArrayAllocation()) {  // Check C != 1
     if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
       const Type *NewTy =
diff --git a/test/Transforms/InstCombine/phi.ll b/test/Transforms/InstCombine/phi.ll
index a5a535c6ac..08d28b4de7 100644
--- a/test/Transforms/InstCombine/phi.ll
+++ b/test/Transforms/InstCombine/phi.ll
@@ -141,4 +141,25 @@ BB2:
 ; CHECK-NEXT: ret i32* %B
 }

+define i32 @test9(i32* %A, i32* %B) {
+entry:
+  %c = icmp eq i32* %A, null
+  br i1 %c, label %bb1, label %bb
+
+bb:
+  %C = load i32* %B, align 1
+  br label %bb2
+
+bb1:
+  %D = load i32* %A, align 1
+  br label %bb2
+
+bb2:
+  %E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
+  ret i32 %E
+; CHECK: bb2:
+; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
+; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 1
+; CHECK-NEXT: ret i32 %E
+}
```