author     John McCall <rjmccall@apple.com>    2010-11-16 10:08:07 +0000
committer  John McCall <rjmccall@apple.com>    2010-11-16 10:08:07 +0000
commit     b418d74c11498b7e1044103131e2e3be4d63512e (patch)
tree       36330e02a9b184864d87e3dff7fcbe0ffe68961d
parent     ac21a21a49131544777f3bd56d2351a4072954ca (diff)
Simplify some complex emission and implement correct semantics for
assignment to volatiles in C.  This in effect reverts some of mjs's work
in and around r72572.  Basically, the C++ standard is quite clear, except
that it lies about volatile behavior approximating C's, whereas the C
standard is almost actively misleading.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@119344 91177308-0d34-0410-b5e6-96231b3b80d8
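To make the semantic change concrete, here is a minimal C sketch (not part of the
commit): f1 mirrors the updated expectations in test/CodeGen/assign.c, while f2 is a
hypothetical extra case patterned on the i=i=i test in test/CodeGen/volatile-1.c. In C
the result of an assignment expression is the assigned r-value, so codegen no longer
re-reads a volatile l-value after storing to it; in C++, volatile assignments still
reload the stored value.

    /* A minimal sketch of the new behavior, assuming a C translation unit. */
    volatile int x, y;

    void f1(void) {
      /* Old IR: store 1 to x, volatile-reload x, store the reloaded value to y.
         New IR (C): store 1 to x, then store 1 to y -- the assignment's result
         is the assigned r-value, so x is not re-read. */
      y = (x = 1);
    }

    void f2(void) {
      /* Chained assignment now emits one volatile load of x and two volatile
         stores, with no reload between the stores. */
      x = y = x;
    }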
-rw-r--r--  lib/AST/ExprClassification.cpp    18
-rw-r--r--  lib/CodeGen/CGExpr.cpp             3
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp    127
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp      46
-rw-r--r--  lib/CodeGen/CodeGenFunction.h      7
-rw-r--r--  test/CodeGen/assign.c              6
-rw-r--r--  test/CodeGen/volatile-1.c        204
7 files changed, 304 insertions, 107 deletions
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 1daa475b9e..60fbfd298f 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -171,11 +171,23 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_LValue;
// GNU extensions, simply look through them.
- case UO_Real:
- case UO_Imag:
case UO_Extension:
return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+ // Treat _Real and _Imag basically as if they were member
+ // expressions: l-value only if the operand is a true l-value.
+ case UO_Real:
+ case UO_Imag: {
+ const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ Cl::Kinds K = ClassifyInternal(Ctx, Op);
+ if (K != Cl::CL_LValue) return K;
+
+ if (isa<ObjCPropertyRefExpr>(Op) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(Op))
+ return Cl::CL_SubObjCPropertySetting;
+ return Cl::CL_LValue;
+ }
+
// C++ [expr.pre.incr]p1: The result is the updated operand; it is an
// lvalue, [...]
// Not so in C.
@@ -343,7 +355,7 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
if (E->isArrow())
return Cl::CL_LValue;
// ObjC property accesses are not lvalues, but get special treatment.
- Expr *Base = E->getBase();
+ Expr *Base = E->getBase()->IgnoreParens();
if (isa<ObjCPropertyRefExpr>(Base) ||
isa<ObjCImplicitSetterGetterRefExpr>(Base))
return Cl::CL_SubObjCPropertySetting;
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 2ef22b2401..0c24bf5d1a 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -87,8 +87,7 @@ RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
if (!hasAggregateLLVMType(E->getType()))
return RValue::get(EmitScalarExpr(E, IgnoreResult));
else if (E->getType()->isAnyComplexType())
- return RValue::getComplex(EmitComplexExpr(E, false, false,
- IgnoreResult, IgnoreResult));
+ return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
EmitAggExpr(E, AggSlot, IgnoreResult);
return AggSlot.asRValue();
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 5697a8b1bf..df65d5a3ac 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -35,14 +35,9 @@ class ComplexExprEmitter
// True is we should ignore the value of a
bool IgnoreReal;
bool IgnoreImag;
- // True if we should ignore the value of a=b
- bool IgnoreRealAssign;
- bool IgnoreImagAssign;
public:
- ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
- bool irn=false, bool iin=false)
- : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
- IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
}
@@ -60,16 +55,6 @@ public:
IgnoreImag = false;
return I;
}
- bool TestAndClearIgnoreRealAssign() {
- bool I = IgnoreRealAssign;
- IgnoreRealAssign = false;
- return I;
- }
- bool TestAndClearIgnoreImagAssign() {
- bool I = IgnoreImagAssign;
- IgnoreImagAssign = false;
- return I;
- }
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
@@ -174,8 +159,6 @@ public:
ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
return Visit(E->getSubExpr());
}
ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
@@ -211,6 +194,10 @@ public:
};
BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ ComplexPairTy &Val);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &));
@@ -220,15 +207,15 @@ public:
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
- ComplexPairTy VisitBinMul(const BinaryOperator *E) {
- return EmitBinMul(EmitBinOps(E));
- }
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
ComplexPairTy VisitBinSub(const BinaryOperator *E) {
return EmitBinSub(EmitBinOps(E));
}
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
return EmitBinDiv(EmitBinOps(E));
}
@@ -251,6 +238,9 @@ public:
// Logical and/or always return int, never complex.
// No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
ComplexPairTy VisitBinAssign (const BinaryOperator *E);
ComplexPairTy VisitBinComma (const BinaryOperator *E);
@@ -383,8 +373,6 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResR, *ResI;
@@ -401,8 +389,6 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
// ~(a+ib) = a + i*-b
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResI;
@@ -516,8 +502,6 @@ ComplexExprEmitter::BinOpInfo
ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
BinOpInfo Ops;
Ops.LHS = Visit(E->getLHS());
Ops.RHS = Visit(E->getRHS());
@@ -526,14 +510,12 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
}
-// Compound assignments.
-ComplexPairTy ComplexExprEmitter::
-EmitCompoundAssign(const CompoundAssignOperator *E,
- ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ ComplexPairTy &Val) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
BinOpInfo OpInfo;
@@ -569,6 +551,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// Truncate the result back to the LHS type.
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ Val = Result;
// Store the result value into the LHS lvalue.
if (LHS.isPropertyRef())
@@ -579,30 +562,41 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
else
EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
-
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ ComplexPairTy Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
- return Result;
+ if (LV.isPropertyRef() || LV.isKVCRef())
+ return Val;
- // Otherwise, reload the value.
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
-ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
- TestAndClearIgnoreReal();
- TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType()) &&
"Invalid assignment");
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
// Emit the RHS.
- ComplexPairTy Val = Visit(E->getRHS());
+ Val = Visit(E->getRHS());
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -615,13 +609,26 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
else
EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
+ return LHS;
+}
- return Val;
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LV.isPropertyRef() || LV.isKVCRef())
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
@@ -632,11 +639,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
ComplexPairTy ComplexExprEmitter::
VisitConditionalOperator(const ConditionalOperator *E) {
-
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
@@ -724,12 +728,11 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, ignoring the result.
ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
- bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ bool IgnoreImag) {
assert(E && E->getType()->isAnyComplexType() &&
"Invalid complex expression to emit");
- return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
- IgnoreImagAssign)
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
.Visit(const_cast<Expr*>(E));
}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index a64debd6c1..0e09642931 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1176,12 +1176,11 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
case CK_FloatingComplexToReal:
case CK_IntegralComplexToReal:
- return CGF.EmitComplexExpr(E, false, true, false, true).first;
+ return CGF.EmitComplexExpr(E, false, true).first;
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToBoolean: {
- CodeGenFunction::ComplexPairTy V
- = CGF.EmitComplexExpr(E, false, false, false, false);
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
// TODO: kill this function off, inline appropriate case here
return EmitComplexToScalarConversion(V, E->getType(), DestTy);
@@ -1471,21 +1470,38 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
return Visit(Op);
}
+
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (Op->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
- if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
- CGF.EmitLValue(Op);
- else
- CGF.EmitScalarExpr(Op, true);
+ CGF.EmitScalarExpr(Op, true);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
@@ -1562,6 +1578,10 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
if (LHS.isPropertyRef() || LHS.isKVCRef())
return RHS;
@@ -2050,6 +2070,10 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
if (LHS.isPropertyRef() || LHS.isKVCRef())
return RHS;
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 2e9feefe0b..86a46deddd 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -1593,10 +1593,9 @@ public:
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
- ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
- bool IgnoreImag = false,
- bool IgnoreRealAssign = false,
- bool IgnoreImagAssign = false);
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
/// of complex type, storing into the specified Value*.
diff --git a/test/CodeGen/assign.c b/test/CodeGen/assign.c
index eab3d35769..05141bb0bb 100644
--- a/test/CodeGen/assign.c
+++ b/test/CodeGen/assign.c
@@ -15,15 +15,15 @@ void f0() {
y = (x = 1);
}
-// Check that we do generate reloads for volatile access.
+// This used to test that we generate reloads for volatile access,
+// but that does not appear to be correct behavior for C.
//
// CHECK: define void @f1()
// CHECK: [[x_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[y_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: volatile store i32 1, i32* [[x_1]]
// CHECK-NEXT: volatile store i32 1, i32* [[x_1]]
-// CHECK-NEXT: [[tmp_1:%.*]] = volatile load i32* [[x_1]]
-// CHECK-NEXT: volatile store i32 [[tmp_1]], i32* [[y_1]]
+// CHECK-NEXT: volatile store i32 1, i32* [[y_1]]
// CHECK: }
void f1() {
volatile int x, y;
diff --git a/test/CodeGen/volatile-1.c b/test/CodeGen/volatile-1.c
index e0c672b41e..e4c6c679bd 100644
--- a/test/CodeGen/volatile-1.c
+++ b/test/CodeGen/volatile-1.c
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -Wno-unused-value -emit-llvm < %s -o %t
-// RUN: grep volatile %t | count 145
-// RUN: grep memcpy %t | count 4
+// RUN: %clang_cc1 -Wno-unused-value -emit-llvm %s -o - | FileCheck %s
+// CHECK: @i = common global [[INT:i[0-9]+]] 0
volatile int i, j, k;
volatile int ar[5];
volatile char c;
+// CHECK: @ci = common global [[CINT:%.*]] zeroinitializer
volatile _Complex int ci;
volatile struct S {
#ifdef __cplusplus
@@ -16,67 +16,190 @@ volatile struct S {
//void operator =(volatile struct S&o1, volatile struct S&o2) volatile;
int printf(const char *, ...);
-int main() {
- // A use.
+
+// Note that these test results are very much specific to C!
+// Assignments in C++ yield l-values, not r-values, and the situations
+// that do implicit lvalue-to-rvalue conversion are substantially
+// reduced.
+
+// CHECK: define void @test()
+void test() {
+ // CHECK: volatile load [[INT]]* @i
i;
- // A use of the real part
+ // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: sitofp [[INT]]
(float)(ci);
- // A use.
+ // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
(void)ci;
- // A use.
+ // CHECK-NEXT: bitcast
+ // CHECK-NEXT: memcpy
(void)a;
- // Not a use.
+ // CHECK-NEXT: [[R:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
(void)(ci=ci);
- // Not a use.
+ // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* @j
+ // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* @i
(void)(i=j);
+ // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // Not sure why they're ordered this way.
+ // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
+ // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
+ // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
ci+=ci;
+
+ // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
+ // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
+ // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
+ // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // These additions can be elided
+ // CHECK-NEXT: add [[INT]] [[R]], [[R2]]
+ // CHECK-NEXT: add [[INT]] [[I]], [[I2]]
(ci += ci) + ci;
+ // CHECK-NEXT: call void asm
asm("nop");
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add nsw [[INT]]
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add nsw [[INT]]
(i += j) + k;
+ // CHECK-NEXT: call void asm
asm("nop");
- // A use
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add nsw [[INT]]
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: add nsw [[INT]]
(i += j) + 1;
+ // CHECK-NEXT: call void asm
asm("nop");
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add [[INT]]
+ // CHECK-NEXT: add [[INT]]
ci+ci;
- // A use.
+
+ // CHECK-NEXT: volatile load
__real i;
- // A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ci;
+ // CHECK-NEXT: call void asm
asm("nop");
- // Not a use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
(void)(i=i);
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: sitofp
(float)(i=i);
- // A use.
+ // CHECK-NEXT: volatile load
(void)i;
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
i=i;
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
i=i=i;
#ifndef __cplusplus
- // Not a use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
(void)__builtin_choose_expr(0, i=i, j=j);
#endif
- // A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: icmp
+ // CHECK-NEXT: br i1
+ // CHECK: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: br label
+ // CHECK: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: br label
k ? (i=i) : (j=j);
+ // CHECK: phi
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
(void)(i,(i=i));
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile load
i=i,i;
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
(i=j,k=j);
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile load
(i=j,k);
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
(i,j);
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: trunc
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: sext
+ // CHECK-NEXT: volatile store
i=c=k;
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add nsw [[INT]]
+ // CHECK-NEXT: volatile store
i+=k;
- // A use of both.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
ci;
#ifndef __cplusplus
- // A use of _real.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
(int)ci;
- // A use of both.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: icmp ne
+ // CHECK-NEXT: icmp ne
+ // CHECK-NEXT: or i1
(_Bool)ci;
#endif
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
ci=ci;
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
ci=ci=ci;
+ // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+ // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
__imag ci = __imag ci = __imag ci;
- // Not a use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
__real (i = j);
- // Not a use.
+ // CHECK-NEXT: volatile load
__imag i;
// ============================================================
@@ -91,6 +214,9 @@ int main() {
// gcc.
// Not a use. gcc forgets to do the assignment.
+ // CHECK-NEXT: call void @llvm.memcpy{{.*}}, i1 true
+ // CHECK-NEXT: bitcast
+ // CHECK-NEXT: call void @llvm.memcpy{{.*}}, i1 true
((a=a),a);
// Not a use. gcc gets this wrong, it doesn't emit the copy!
@@ -98,38 +224,72 @@ int main() {
// Not a use. gcc got this wrong in 4.2 and omitted the side effects
// entirely, but it is fixed in 4.4.0.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
__imag (i = j);
#ifndef __cplusplus
// A use of the real part
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: sitofp
(float)(ci=ci);
// Not a use, bug? gcc treats this as not a use, that's probably a bug due to
// tree folding ignoring volatile.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
(int)(ci=ci);
#endif
// A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: sitofp
(float)(i=i);
// A use. gcc treats this as not a use, that's probably a bug due to tree
// folding ignoring volatile.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
(int)(i=i);
// A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: sub
-(i=j);
// A use. gcc treats this a not a use, that's probably a bug due to tree
// folding ignoring volatile.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+(i=k);
// A use. gcc treats this a not a use, that's probably a bug due to tree
// folding ignoring volatile.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile store
__real (ci=ci);
// A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add
i + 0;
// A use.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: add
(i=j) + i;
// A use. gcc treats this as not a use, that's probably a bug due to tree
// folding ignoring volatile.
+ // CHECK-NEXT: volatile load
+ // CHECK-NEXT: volatile store
+ // CHECK-NEXT: add
(i=j) + 0;
#ifdef __cplusplus