 lib/CodeGen/CGBuiltin.cpp     |  2
 lib/CodeGen/CGExpr.cpp        | 59
 lib/CodeGen/CGExprAgg.cpp     |  2
 lib/CodeGen/CGExprCXX.cpp     |  5
 lib/CodeGen/CGExprScalar.cpp  | 95
 lib/CodeGen/CodeGenFunction.h | 33
 test/CodeGen/trapv.c          |  9
 7 files changed, 84 insertions(+), 121 deletions(-)
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 1c95b57a7a..942fc5f602 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -404,7 +404,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   }
   case Builtin::BI__builtin_unreachable: {
     if (CatchUndefined)
-      EmitBranch(getTrapBB());
+      EmitCheck(Builder.getFalse());
     else
       Builder.CreateUnreachable();
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index e17494aac8..4fb81eb43c 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -493,7 +493,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
     //   storage of suitable size and alignment to contain an object of the
     //   reference's type, the behavior is undefined.
     QualType Ty = E->getType();
-    EmitCheck(CT_ReferenceBinding, Value, Ty);
+    EmitTypeCheck(TCK_ReferenceBinding, Value, Ty);
   }
   if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
     return RValue::get(Value);
@@ -558,14 +558,14 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
     ->getZExtValue();
 }
 
-void CodeGenFunction::EmitCheck(CheckType CT, llvm::Value *Address, QualType Ty,
-                                CharUnits Alignment) {
+void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, llvm::Value *Address,
+                                    QualType Ty, CharUnits Alignment) {
   if (!CatchUndefined)
     return;
 
   llvm::Value *Cond = 0;
 
-  if (CT != CT_Load && CT != CT_Store) {
+  if (TCK != TCK_Load && TCK != TCK_Store) {
     // The glvalue must not be an empty glvalue. Don't bother checking this for
     // loads and stores, because we will get a segfault anyway (if the operation
     // isn't optimized out).
@@ -600,11 +600,8 @@ void CodeGenFunction::EmitCheck(CheckType CT, llvm::Value *Address, QualType Ty,
                Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)));
   }
 
-  if (Cond) {
-    llvm::BasicBlock *Cont = createBasicBlock();
-    Builder.CreateCondBr(Cond, Cont, getTrapBB());
-    EmitBlock(Cont);
-  }
+  if (Cond)
+    EmitCheck(Cond);
 }
 
 
@@ -681,10 +678,10 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
 }
 
-LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, CheckType CT) {
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
   LValue LV = EmitLValue(E);
   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
-    EmitCheck(CT, LV.getAddress(), E->getType(), LV.getAlignment());
+    EmitTypeCheck(TCK, LV.getAddress(), E->getType(), LV.getAlignment());
   return LV;
 }
 
@@ -1927,33 +1924,33 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
   }
 }
 
-llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+void CodeGenFunction::EmitCheck(llvm::Value *Checked) {
   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
 
+  llvm::BasicBlock *Cont = createBasicBlock("cont");
+
   // If we are not optimzing, don't collapse all calls to trap in the function
   // to the same call, that way, in the debugger they can see which operation
   // did in fact fail. If we are optimizing, we collapse all calls to trap down
   // to just one per function to save on codesize.
-  if (GCO.OptimizationLevel && TrapBB)
-    return TrapBB;
+  bool NeedNewTrapBB = !GCO.OptimizationLevel || !TrapBB;
 
-  llvm::BasicBlock *Cont = 0;
-  if (HaveInsertPoint()) {
-    Cont = createBasicBlock("cont");
-    EmitBranch(Cont);
-  }
-  TrapBB = createBasicBlock("trap");
-  EmitBlock(TrapBB);
+  if (NeedNewTrapBB)
+    TrapBB = createBasicBlock("trap");
 
-  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
-  llvm::CallInst *TrapCall = Builder.CreateCall(F);
-  TrapCall->setDoesNotReturn();
-  TrapCall->setDoesNotThrow();
-  Builder.CreateUnreachable();
+  Builder.CreateCondBr(Checked, Cont, TrapBB);
+
+  if (NeedNewTrapBB) {
+    EmitBlock(TrapBB);
+
+    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
+    llvm::CallInst *TrapCall = Builder.CreateCall(F);
+    TrapCall->setDoesNotReturn();
+    TrapCall->setDoesNotThrow();
+    Builder.CreateUnreachable();
+  }
 
-  if (Cont)
-    EmitBlock(Cont);
-  return TrapBB;
+  EmitBlock(Cont);
 }
 
 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
@@ -2156,10 +2153,10 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
   if (E->isArrow()) {
     llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
     QualType PtrTy = BaseExpr->getType()->getPointeeType();
-    EmitCheck(CT_MemberAccess, Ptr, PtrTy);
+    EmitTypeCheck(TCK_MemberAccess, Ptr, PtrTy);
     BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
   } else
-    BaseLV = EmitCheckedLValue(BaseExpr, CT_MemberAccess);
+    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
 
   NamedDecl *ND = E->getMemberDecl();
   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index a2101fb442..48b21d2f85 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -552,7 +552,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     // FIXME: Can this actually happen? We have no test coverage for it.
     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
-                                      CodeGenFunction::CT_Load);
+                                      CodeGenFunction::TCK_Load);
     // FIXME: Do we also need to handle property references here?
     if (LV.isSimple())
       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 17e2bc1874..b4cd5c1c16 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -36,7 +36,8 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
   // C++11 [class.mfct.non-static]p2:
   //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
-  EmitCheck(CT_MemberCall, This, getContext().getRecordType(MD->getParent()));
+  EmitTypeCheck(TCK_MemberCall, This,
+                getContext().getRecordType(MD->getParent()));
 
   CallArgList Args;
 
@@ -342,7 +343,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
   else
     This = EmitLValue(BaseExpr).getAddress();
 
-  EmitCheck(CT_MemberCall, This, QualType(MPT->getClass(), 0));
+  EmitTypeCheck(TCK_MemberCall, This, QualType(MPT->getClass(), 0));
 
   // Ask the ABI to load the callee. Note that This is modified.
   llvm::Value *Callee =
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index d5ed233a97..ad373095de 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -80,8 +80,8 @@ public:
   llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
   LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
-  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::CheckType CT) {
-    return CGF.EmitCheckedLValue(E, CT);
+  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
+    return CGF.EmitCheckedLValue(E, TCK);
   }
 
   Value *EmitLoadOfLValue(LValue LV) {
@@ -92,7 +92,7 @@ public:
   /// value l-value, this method emits the address of the l-value, then loads
   /// and returns the result.
   Value *EmitLoadOfLValue(const Expr *E) {
-    return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::CT_Load));
+    return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load));
   }
 
   /// EmitConversionToBool - Convert the specified expression value to a
@@ -416,13 +416,7 @@ public:
   /// Create a binary op that checks for overflow.
   /// Currently only supports +, - and *.
   Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
-  // Emit the overflow BB when -ftrapv option is activated.
-  void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
-    Builder.SetInsertPoint(overflowBB);
-    llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
-    Builder.CreateCall(Trap);
-    Builder.CreateUnreachable();
-  }
+
   // Check for undefined division and modulus behaviors.
   void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                   llvm::Value *Zero,bool isDiv);
@@ -1688,7 +1682,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   OpInfo.Opcode = E->getOpcode();
   OpInfo.E = E;
   // Load/convert the LHS.
-  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::CT_Store);
+  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
   OpInfo.LHS = EmitLoadOfLValue(LHSLV);
 
   llvm::PHINode *atomicPHI = 0;
@@ -1760,14 +1754,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
 }
 
 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
-                                     const BinOpInfo &Ops,
-                                     llvm::Value *Zero, bool isDiv) {
-  llvm::Function::iterator insertPt = Builder.GetInsertBlock();
-  llvm::BasicBlock *contBB =
-    CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
-                         llvm::next(insertPt));
-  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
-
+    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
   llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
 
   if (Ops.Ty->hasSignedIntegerRepresentation()) {
@@ -1775,37 +1762,24 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
       Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
     llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
 
-    llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
-    llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
-    llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
-    llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
-    Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
-                         overflowBB, contBB);
+    llvm::Value *Cond1 = Builder.CreateICmpNE(Ops.RHS, Zero);
+    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
+    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
+    llvm::Value *Cond2 = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+    CGF.EmitCheck(Builder.CreateAnd(Cond1, Cond2, "and"));
   } else {
-    CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
-                             overflowBB, contBB);
+    CGF.EmitCheck(Builder.CreateICmpNE(Ops.RHS, Zero));
  }
-  EmitOverflowBB(overflowBB);
-  Builder.SetInsertPoint(contBB);
 }
 
 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
-  if (isTrapvOverflowBehavior()) { 
+  if (isTrapvOverflowBehavior()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    if (Ops.Ty->isIntegerType())
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
-    else if (Ops.Ty->isRealFloatingType()) {
-      llvm::Function::iterator insertPt = Builder.GetInsertBlock();
-      llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
-                                                       llvm::next(insertPt));
-      llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
-                                                          CGF.CurFn);
-      CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
-                               overflowBB, DivCont);
-      EmitOverflowBB(overflowBB);
-      Builder.SetInsertPoint(DivCont);
-    }
+    else if (Ops.Ty->isRealFloatingType())
+      CGF.EmitCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero));
   }
   if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
     llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
@@ -1874,6 +1848,14 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
   Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
   Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
 
+  // Handle overflow with llvm.trap if no custom handler has been specified.
+  const std::string *handlerName =
+    &CGF.getContext().getLangOpts().OverflowHandler;
+  if (handlerName->empty()) {
+    CGF.EmitCheck(Builder.CreateNot(overflow));
+    return result;
+  }
+
   // Branch in case of overflow.
   llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
   llvm::Function::iterator insertPt = initialBB;
@@ -1883,15 +1865,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
 
   Builder.CreateCondBr(overflow, overflowBB, continueBB);
 
-  // Handle overflow with llvm.trap.
-  const std::string *handlerName =
-    &CGF.getContext().getLangOpts().OverflowHandler;
-  if (handlerName->empty()) {
-    EmitOverflowBB(overflowBB);
-    Builder.SetInsertPoint(continueBB);
-    return result;
-  }
-
   // If an overflow handler is set, then we want to call it and then use its
   // result, if it returns.
   Builder.SetInsertPoint(overflowBB);
@@ -2122,18 +2095,13 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
   if (CGF.CatchUndefined && isa<llvm::IntegerType>(Ops.LHS->getType())) {
     unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
-    llvm::BasicBlock *Cont = CGF.createBasicBlock("shl.cont");
-    llvm::BasicBlock *Trap = CGF.getTrapBB();
     llvm::Value *WidthMinusOne =
       llvm::ConstantInt::get(RHS->getType(), Width - 1);
-    CGF.Builder.CreateCondBr(Builder.CreateICmpULE(RHS, WidthMinusOne),
-                             Cont, Trap);
-    CGF.EmitBlock(Cont);
+    CGF.EmitCheck(Builder.CreateICmpULE(RHS, WidthMinusOne));
 
     if (Ops.Ty->hasSignedIntegerRepresentation()) {
       // Check whether we are shifting any non-zero bits off the top of the
       // integer.
-      Cont = CGF.createBasicBlock("shl.ok");
       llvm::Value *BitsShiftedOff =
         Builder.CreateLShr(Ops.LHS,
                            Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
@@ -2148,9 +2116,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
         BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
       }
       llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
-      Builder.CreateCondBr(Builder.CreateICmpEQ(BitsShiftedOff, Zero),
-                           Cont, Trap);
-      CGF.EmitBlock(Cont);
+      CGF.EmitCheck(Builder.CreateICmpEQ(BitsShiftedOff, Zero));
     }
   }
 
@@ -2166,11 +2132,8 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
   if (CGF.CatchUndefined && isa<llvm::IntegerType>(Ops.LHS->getType())) {
     unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
-    llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
-    CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
-                                 llvm::ConstantInt::get(RHS->getType(), Width)),
-                             Cont, CGF.getTrapBB());
-    CGF.EmitBlock(Cont);
+    llvm::Value *WidthVal = llvm::ConstantInt::get(RHS->getType(), Width);
+    CGF.EmitCheck(Builder.CreateICmpULT(RHS, WidthVal));
   }
 
   if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -2361,7 +2324,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
 
   case Qualifiers::OCL_Weak:
     RHS = Visit(E->getRHS());
-    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::CT_Store);
+    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
     RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
     break;
 
@@ -2371,7 +2334,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
     // __block variables need to have the rhs evaluated first, plus
     // this should improve codegen just a little.
     RHS = Visit(E->getRHS());
-    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::CT_Store);
+    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
 
     // Store the value into the LHS. Bit-fields are handled specially
     // because the result is altered by the store, i.e., [C99 6.5.16p1]
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 0e4e5e63c4..c9c4a6dbe4 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -1835,27 +1835,27 @@ public:
 
   /// \brief Situations in which we might emit a check for the suitability of a
   /// pointer or glvalue.
-  enum CheckType {
+  enum TypeCheckKind {
     /// Checking the operand of a load. Must be suitably sized and aligned.
-    CT_Load,
+    TCK_Load,
     /// Checking the destination of a store. Must be suitably sized and aligned.
-    CT_Store,
+    TCK_Store,
     /// Checking the bound value in a reference binding. Must be suitably sized
     /// and aligned, but is not required to refer to an object (until the
     /// reference is used), per core issue 453.
-    CT_ReferenceBinding,
+    TCK_ReferenceBinding,
     /// Checking the object expression in a non-static data member access. Must
     /// be an object within its lifetime.
-    CT_MemberAccess,
+    TCK_MemberAccess,
     /// Checking the 'this' pointer for a call to a non-static member function.
     /// Must be an object within its lifetime.
-    CT_MemberCall
+    TCK_MemberCall
   };
 
-  /// EmitCheck - Emit a check that \p V is the address of storage of the
+  /// \brief Emit a check that \p V is the address of storage of the
   /// appropriate size and alignment for an object of type \p Type.
-  void EmitCheck(CheckType CT, llvm::Value *V,
-                 QualType Type, CharUnits Alignment = CharUnits::Zero());
+  void EmitTypeCheck(TypeCheckKind TCK, llvm::Value *V,
+                     QualType Type, CharUnits Alignment = CharUnits::Zero());
 
   llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                        bool isInc, bool isPre);
@@ -2053,11 +2053,10 @@ public:
   ///
   LValue EmitLValue(const Expr *E);
 
-  /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
-  /// checking code to guard against undefined behavior. This is only
-  /// suitable when we know that the address will be used to access the
-  /// object.
-  LValue EmitCheckedLValue(const Expr *E, CheckType CT);
+  /// \brief Same as EmitLValue but additionally we generate checking code to
+  /// guard against undefined behavior. This is only suitable when we know
+  /// that the address will be used to access the object.
+  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
 
   /// EmitToMemory - Change a scalar value from its value
   /// representation to its in-memory representation.
@@ -2536,9 +2535,9 @@ public:
   void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                             llvm::BasicBlock *FalseBlock);
 
-  /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
-  /// generate a branch around the created basic block as necessary.
-  llvm::BasicBlock *getTrapBB();
+  /// \brief Create a basic block that will call the trap intrinsic, and emit a
+  /// conditional branch to it.
+  void EmitCheck(llvm::Value *Checked);
 
   /// EmitCallArg - Emit a single call argument.
   void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
diff --git a/test/CodeGen/trapv.c b/test/CodeGen/trapv.c
index abad5850b4..dd50d5cb0a 100644
--- a/test/CodeGen/trapv.c
+++ b/test/CodeGen/trapv.c
@@ -18,7 +18,8 @@ void test0() {
   // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]])
   // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0
   // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1
-  // CHECK-NEXT: br i1 [[T5]]
+  // CHECK-NEXT: [[T6:%.*]] = xor i1 [[T5]], true
+  // CHECK-NEXT: br i1 [[T6]]
   // CHECK: call void @llvm.trap()
   i = j + k;
 }
@@ -32,7 +33,8 @@ void test1() {
   // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1)
   // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0
   // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1
-  // CHECK-NEXT: br i1 [[T4]]
+  // CHECK-NEXT: [[T5:%.*]] = xor i1 [[T4]], true
+  // CHECK-NEXT: br i1 [[T5]]
   // CHECK: call void @llvm.trap()
 }
 
@@ -45,6 +47,7 @@ void test2() {
   // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1)
   // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0
   // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1
-  // CHECK-NEXT: br i1 [[T4]]
+  // CHECK-NEXT: [[T5:%.*]] = xor i1 [[T4]], true
+  // CHECK-NEXT: br i1 [[T5]]
   // CHECK: call void @llvm.trap()
 }
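Note (not part of the patch): the net effect of the change is that every -fcatch-undefined-behavior and -ftrapv check now funnels through the single EmitCheck(Checked) helper, which branches on a "this operation is OK" condition and sends the failure edge to a (possibly shared per-function) block that calls llvm.trap. The following standalone sketch reproduces that control-flow shape with the plain LLVM C++ API so it can be built and run on its own; it targets the header layout and Intrinsic::getDeclaration helper of LLVM releases of roughly this vintage, which may need adjusting for newer versions.

// emitcheck_sketch.cpp -- minimal, self-contained illustration of the
// branch-to-trap pattern that CodeGenFunction::EmitCheck emits.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("emitcheck-sketch", Ctx);
  llvm::IRBuilder<> B(Ctx);

  // int f(int x): trap if x is 0, otherwise return x.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()}, false);
  llvm::Function *F =
      llvm::Function::Create(FTy, llvm::Function::ExternalLinkage, "f", &M);
  llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Ctx, "entry", F);
  llvm::BasicBlock *Cont = llvm::BasicBlock::Create(Ctx, "cont", F);
  llvm::BasicBlock *TrapBB = llvm::BasicBlock::Create(Ctx, "trap", F);

  // "Checked" is true when the operation is fine, exactly as in EmitCheck:
  // branch to the continuation block on success, to the trap block on failure.
  B.SetInsertPoint(Entry);
  llvm::Value *X = &*F->arg_begin();
  llvm::Value *Checked = B.CreateICmpNE(X, B.getInt32(0), "checked");
  B.CreateCondBr(Checked, Cont, TrapBB);

  // The trap block: call @llvm.trap() and mark the point unreachable,
  // mirroring what EmitCheck does when it has to create a fresh TrapBB.
  B.SetInsertPoint(TrapBB);
  llvm::Function *Trap =
      llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::trap);
  B.CreateCall(Trap);
  B.CreateUnreachable();

  // Normal continuation.
  B.SetInsertPoint(Cont);
  B.CreateRet(X);

  llvm::verifyFunction(*F, &llvm::errs());
  M.print(llvm::outs(), nullptr);
  return 0;
}

Printing the module shows the same shape the updated trapv.c test expects from -ftrapv codegen: a br on the success condition (the inverted overflow flag in the test), with the failure edge landing in a block that contains call void @llvm.trap() followed by unreachable.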