Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGDecl.cpp         | 16
-rw-r--r--  lib/CodeGen/CGExpr.cpp         | 37
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp      |  2
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp  |  4
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp |  2
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp   | 52
-rw-r--r--  lib/CodeGen/CodeGenFunction.h  |  9
7 files changed, 104 insertions, 18 deletions
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index dd7cdb69a8..7c7501ba94 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -494,7 +494,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
     llvm::Value *value = EmitScalarExpr(init);
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
-    EmitStoreThroughLValue(RValue::get(value), lvalue);
+    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
     return;
   }
 
@@ -535,7 +535,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
     // Otherwise just do a simple store.
     else
-      EmitStoreOfScalar(zero, tempLV);
+      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
   }
 
   // Emit the initializer.
@@ -581,19 +581,19 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
   // both __weak and __strong, but __weak got filtered out above.
   if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
     llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
-    EmitStoreOfScalar(value, lvalue);
+    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
     EmitARCRelease(oldValue, /*precise*/ false);
     return;
   }
 
-  EmitStoreOfScalar(value, lvalue);
+  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
 }
 
 /// EmitScalarInit - Initialize the given lvalue with the given object.
 void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
   Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
   if (!lifetime)
-    return EmitStoreThroughLValue(RValue::get(init), lvalue);
+    return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
 
   switch (lifetime) {
   case Qualifiers::OCL_None:
@@ -617,7 +617,7 @@ void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
     break;
   }
 
-  EmitStoreOfScalar(init, lvalue);
+  EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
 }
 
 /// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
@@ -1045,7 +1045,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
     RValue rvalue = EmitReferenceBindingToExpr(init, D);
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
-    EmitStoreThroughLValue(rvalue, lvalue);
+    EmitStoreThroughLValue(rvalue, lvalue, true);
   } else if (!hasAggregateLLVMType(type)) {
     EmitScalarInit(init, D, lvalue, capturedByInit);
   } else if (type->isAnyComplexType()) {
@@ -1505,7 +1505,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
     if (doStore) {
       LValue lv = MakeAddrLValue(DeclPtr, Ty, getContext().getDeclAlign(&D));
-      EmitStoreOfScalar(Arg, lv);
+      EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
     }
   }
 
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index c6ba65c03c..43ab116dae 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -764,6 +764,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
     Load->setAlignment(Alignment);
   if (TBAAInfo)
     CGM.DecorateInstruction(Load, TBAAInfo);
+  // If this is an atomic type, all normal reads must be atomic
+  if (Ty->isAtomicType())
+    Load->setAtomic(llvm::SequentiallyConsistent);
 
   return EmitFromMemory(Load, Ty);
 }
@@ -800,7 +803,8 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                         bool Volatile, unsigned Alignment,
                                         QualType Ty,
-                                        llvm::MDNode *TBAAInfo) {
+                                        llvm::MDNode *TBAAInfo,
+                                        bool isInit) {
   Value = EmitToMemory(Value, Ty);
 
   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
@@ -808,12 +812,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
     Store->setAlignment(Alignment);
   if (TBAAInfo)
     CGM.DecorateInstruction(Store, TBAAInfo);
+  if (!isInit && Ty->isAtomicType())
+    Store->setAtomic(llvm::SequentiallyConsistent);
 }
 
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+                                        bool isInit) {
   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
-                    lvalue.getTBAAInfo());
+                    lvalue.getTBAAInfo(), isInit);
 }
 
 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -961,7 +968,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
 /// lvalue, where both are guaranteed to the have the same type, and that type
 /// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
   if (!Dst.isSimple()) {
     if (Dst.isVectorElt()) {
       // Read/modify/write the vector, inserting the new element.
@@ -1041,7 +1048,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
   }
 
   assert(Src.isScalar() && "Can't emit an agg store with this method");
-  EmitStoreOfScalar(Src.getScalarVal(), Dst);
+  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
 }
 
 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -2052,6 +2059,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
   case CK_Dependent:
     llvm_unreachable("dependent cast kind in IR gen!");
+
+  // These two casts are currently treated as no-ops, although they could
+  // potentially be real operations depending on the target's ABI.
+  case CK_NonAtomicToAtomic:
+  case CK_AtomicToNonAtomic:
 
   case CK_NoOp:
   case CK_LValueToRValue:
@@ -2541,6 +2553,7 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
   case AtomicExpr::CmpXchgWeak:
   case AtomicExpr::CmpXchgStrong:
   case AtomicExpr::Store:
+  case AtomicExpr::Init:
   case AtomicExpr::Load:  assert(0 && "Already handled!");
   case AtomicExpr::Add:   Op = llvm::AtomicRMWInst::Add;  break;
   case AtomicExpr::Sub:   Op = llvm::AtomicRMWInst::Sub;  break;
@@ -2588,8 +2601,20 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
       getContext().getTargetInfo().getMaxAtomicInlineWidth();
   bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+
   llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
   Ptr = EmitScalarExpr(E->getPtr());
+
+  if (E->getOp() == AtomicExpr::Init) {
+    assert(!Dest && "Init does not return a value");
+    Val1 = EmitScalarExpr(E->getVal1());
+    llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr);
+    Store->setAlignment(Size);
+    Store->setVolatile(E->isVolatile());
+    return RValue::get(0);
+  }
+
   Order = EmitScalarExpr(E->getOrder());
   if (E->isCmpXChg()) {
     Val1 = EmitScalarExpr(E->getVal1());
@@ -2703,7 +2728,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     // enforce that in general.
    break;
  }
-  if (E->getOp() == AtomicExpr::Store)
+  if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init)
     return RValue::get(0);
   return ConvertTempToRValue(*this, E->getType(), OrigDest);
 }
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 47984af7f4..fbcdcc0d3d 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -337,6 +337,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_LValueToRValue: // hope for downstream optimization
   case CK_NoOp:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_UserDefinedConversion:
   case CK_ConstructorConversion:
     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index d58db67917..b4b1b1d5f9 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -358,6 +358,10 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
   switch (CK) {
   case CK_Dependent:
     llvm_unreachable("dependent cast kind in IR gen!");
+  // Atomic to non-atomic casts may be more than a no-op for some platforms and
+  // for some types.
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
   case CK_LValueToRValue:
   case CK_UserDefinedConversion:
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index f27586da04..789d863bbd 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -624,6 +624,8 @@ public:
       return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
 
     case CK_LValueToRValue:
+    case CK_AtomicToNonAtomic:
+    case CK_NonAtomicToAtomic:
     case CK_NoOp:
       return C;
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c2aec36ee8..8087c3b802 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1064,6 +1064,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     Value *Src = Visit(const_cast<Expr*>(E));
     return Builder.CreateBitCast(Src, ConvertType(DestTy));
   }
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
   case CK_UserDefinedConversion:
     return Visit(const_cast<Expr*>(E));
@@ -1293,9 +1295,21 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
   QualType type = E->getSubExpr()->getType();
   llvm::Value *value = EmitLoadOfLValue(LV);
   llvm::Value *input = value;
+  llvm::PHINode *atomicPHI = 0;
   int amount = (isInc ? 1 : -1);
 
+  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+    Builder.CreateBr(opBB);
+    Builder.SetInsertPoint(opBB);
+    atomicPHI = Builder.CreatePHI(value->getType(), 2);
+    atomicPHI->addIncoming(value, startBB);
+    type = atomicTy->getValueType();
+    value = atomicPHI;
+  }
+
   // Special case of integer increment that we have to check first: bool++.
   // Due to promotion rules, we get:
   //   bool++ -> bool = bool + 1
@@ -1415,6 +1429,18 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
       value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
     value = Builder.CreateBitCast(value, input->getType());
   }
+
+  if (atomicPHI) {
+    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+        value, llvm::SequentiallyConsistent);
+    atomicPHI->addIncoming(old, opBB);
+    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+    Builder.CreateCondBr(success, contBB, opBB);
+    Builder.SetInsertPoint(contBB);
+    return isPre ? value : input;
+  }
 
   // Store the updated result through the lvalue.
   if (LV.isBitField())
@@ -1670,12 +1696,38 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   OpInfo.LHS = EmitLoadOfLValue(LHSLV);
   OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                     E->getComputationLHSType());
+
+  llvm::PHINode *atomicPHI = 0;
+  if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+    // FIXME: For floating point types, we should be saving and restoring the
+    // floating point environment in the loop.
+    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+    Builder.CreateBr(opBB);
+    Builder.SetInsertPoint(opBB);
+    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+    atomicPHI->addIncoming(OpInfo.LHS, startBB);
+    OpInfo.Ty = atomicTy->getValueType();
+    OpInfo.LHS = atomicPHI;
+  }
 
   // Expand the binary operator.
   Result = (this->*Func)(OpInfo);
 
   // Convert the result back to the LHS type.
   Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+  if (atomicPHI) {
+    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+        Result, llvm::SequentiallyConsistent);
+    atomicPHI->addIncoming(old, opBB);
+    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+    Builder.CreateCondBr(success, contBB, opBB);
+    Builder.SetInsertPoint(contBB);
+    return LHSLV;
+  }
 
   // Store the result value into the LHS lvalue.  Bit-fields are handled
   // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index e55732a840..0c13951aa5 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -2029,13 +2029,14 @@ public:
   /// the LLVM value representation.
   void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                          bool Volatile, unsigned Alignment, QualType Ty,
-                         llvm::MDNode *TBAAInfo = 0);
+                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);
 
   /// EmitStoreOfScalar - Store a scalar value to an address, taking
   /// care to appropriately convert from the memory representation to
   /// the LLVM value representation.  The l-value must be a simple
-  /// l-value.
-  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+  /// l-value.  The isInit flag indicates whether this is an initialization.
+  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
 
   /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
   /// this method emits the address of the lvalue, then loads the result as an
@@ -2047,7 +2048,7 @@ public:
   /// EmitStoreThroughLValue - Store the specified rvalue into the specified
   /// lvalue, where both are guaranteed to the have the same type, and that type
   /// is 'Ty'.
-  void EmitStoreThroughLValue(RValue Src, LValue Dst);
+  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
   void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
 
   /// EmitStoreThroughLValue - Store Src into Dst with same constraints as
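
For context on what the changes above do at the source level: after this patch, ordinary reads and writes of _Atomic-qualified scalars come out of EmitLoadOfScalar/EmitStoreOfScalar as sequentially consistent atomic instructions, initialization (the new isInit flag and AtomicExpr::Init) stays a plain store, and CK_AtomicToNonAtomic/CK_NonAtomicToAtomic are currently emitted as no-op casts. A minimal, purely illustrative C11 sketch of the constructs these paths handle (not part of the patch; identifiers are hypothetical):

/* Hedged illustration only -- shows the C11-level constructs whose IR
 * generation the hunks above implement. */
#include <stdatomic.h>

_Atomic int counter;

int example(void) {
  atomic_init(&counter, 0);  /* AtomicExpr::Init / isInit: plain, non-atomic store */
  counter = 42;              /* EmitStoreOfScalar: store atomic seq_cst */
  int snapshot = counter;    /* EmitLoadOfScalar: load atomic seq_cst; the
                                CK_AtomicToNonAtomic cast around it is a no-op */
  ++counter;                 /* EmitScalarPrePostIncDec: cmpxchg retry loop */
  counter += snapshot;       /* EmitCompoundAssignLValue: cmpxchg retry loop */
  return snapshot;
}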
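The atomicPHI blocks added to EmitScalarPrePostIncDec and EmitCompoundAssignLValue lower ++/-- and compound assignment on _Atomic operands to a load followed by a compare-exchange retry loop (the atomic_op/atomic_cont basic blocks) wrapped around the usual scalar computation, rather than a single atomicrmw, so the ordinary conversion and operator paths can be reused inside the loop. A rough C-level model of that loop, written against <stdatomic.h> under the assumption of an int operand (again, not code from this patch):

/* Hedged model of the atomic_op/atomic_cont retry loop built with CreatePHI +
 * CreateAtomicCmpXchg above.  Pre-increment and compound assignment yield the
 * updated value; post-increment would return 'old' instead. */
#include <stdatomic.h>

int add_and_fetch_via_cas(_Atomic int *obj, int amount) {
  int old = atomic_load(obj);           /* initial load feeding the PHI (startBB) */
  for (;;) {                            /* atomic_op block */
    int desired = old + amount;         /* the ordinary, non-atomic computation */
    /* seq_cst compare-exchange; on failure 'old' is refreshed with the value
     * actually observed, matching the PHI edge that feeds the cmpxchg result
     * back into the loop. */
    if (atomic_compare_exchange_strong(obj, &old, desired))
      return desired;                   /* success: fall through to atomic_cont */
  }
}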