path: root/lib/CodeGen/CGValue.h
Age  Commit message  Author
2013-04-04  Initial support for struct-path aware TBAA.  Manman Ren
Added TBAABaseType and TBAAOffset in LValue. These two fields are initialized to the actual type and 0, and are updated in EmitLValueForField. Path-aware TBAA tags are enabled for EmitLoadOfScalar and EmitStoreOfScalar. Added command line option -struct-path-tbaa. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@178797 91177308-0d34-0410-b5e6-96231b3b80d8
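A minimal sketch of what the new tags distinguish, modeled on the g9 case in the test added with this change (its RUN line uses -cc1 -O1 -struct-path-tbaa -disable-llvm-optzns -emit-llvm); the metadata in the comments is abbreviated from the PATH check lines quoted further down in this log:
  #include <stdint.h>

  typedef struct { uint16_t f16; uint32_t f32; } StructS;
  typedef struct { uint16_t f16; uint32_t f32; } StructS2;

  uint32_t set_both(StructS *s, StructS2 *s2) {
    // Plain scalar TBAA tags both stores with the generic "int" node, so they
    // may alias. Struct-path TBAA tags each store with (base type, access type,
    // offset) instead:
    s->f32 = 1;    // !tbaa { _ZTS7StructS,  int, offset 4 }
    s2->f32 = 4;   // !tbaa { _ZTS8StructS2, int, offset 4 }
    return s->f32; // distinct base types let the optimizer forward the value 1
  }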
2013-04-04  revert r178784 since it does not have a commit message  Manman Ren
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@178796 91177308-0d34-0410-b5e6-96231b3b80d8
2013-04-04  Index: include/clang/Driver/CC1Options.td  Manman Ren
=================================================================== --- include/clang/Driver/CC1Options.td (revision 178718) +++ include/clang/Driver/CC1Options.td (working copy) @@ -161,6 +161,8 @@ HelpText<"Use register sized accesses to bit-fields, when possible.">; def relaxed_aliasing : Flag<["-"], "relaxed-aliasing">, HelpText<"Turn off Type Based Alias Analysis">; +def struct_path_tbaa : Flag<["-"], "struct-path-tbaa">, + HelpText<"Turn on struct-path aware Type Based Alias Analysis">; def masm_verbose : Flag<["-"], "masm-verbose">, HelpText<"Generate verbose assembly output">; def mcode_model : Separate<["-"], "mcode-model">, Index: include/clang/Driver/Options.td =================================================================== --- include/clang/Driver/Options.td (revision 178718) +++ include/clang/Driver/Options.td (working copy) @@ -587,6 +587,7 @@ Flags<[CC1Option]>, HelpText<"Disable spell-checking">; def fno_stack_protector : Flag<["-"], "fno-stack-protector">, Group<f_Group>; def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>; +def fstruct_path_tbaa : Flag<["-"], "fstruct-path-tbaa">, Group<f_Group>; def fno_strict_enums : Flag<["-"], "fno-strict-enums">, Group<f_Group>; def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>; def fno_threadsafe_statics : Flag<["-"], "fno-threadsafe-statics">, Group<f_Group>, Index: include/clang/Frontend/CodeGenOptions.def =================================================================== --- include/clang/Frontend/CodeGenOptions.def (revision 178718) +++ include/clang/Frontend/CodeGenOptions.def (working copy) @@ -85,6 +85,7 @@ VALUE_CODEGENOPT(OptimizeSize, 2, 0) ///< If -Os (==1) or -Oz (==2) is specified. CODEGENOPT(RelaxAll , 1, 0) ///< Relax all machine code instructions. CODEGENOPT(RelaxedAliasing , 1, 0) ///< Set when -fno-strict-aliasing is enabled. +CODEGENOPT(StructPathTBAA , 1, 0) ///< Whether or not to use struct-path TBAA. CODEGENOPT(SaveTempLabels , 1, 0) ///< Save temporary labels. CODEGENOPT(SanitizeAddressZeroBaseShadow , 1, 0) ///< Map shadow memory at zero ///< offset in AddressSanitizer. Index: lib/CodeGen/CGExpr.cpp =================================================================== --- lib/CodeGen/CGExpr.cpp (revision 178718) +++ lib/CodeGen/CGExpr.cpp (working copy) @@ -1044,7 +1044,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) { return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getAlignment().getQuantity(), - lvalue.getType(), lvalue.getTBAAInfo()); + lvalue.getType(), lvalue.getTBAAInfo(), + lvalue.getTBAABaseType(), lvalue.getTBAAOffset()); } static bool hasBooleanRepresentation(QualType Ty) { @@ -1106,7 +1107,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, - llvm::MDNode *TBAAInfo) { + llvm::MDNode *TBAAInfo, + QualType TBAABaseType, + uint64_t TBAAOffset) { // For better performance, handle vector loads differently. 
if (Ty->isVectorType()) { llvm::Value *V; @@ -1158,8 +1161,11 @@ Load->setVolatile(true); if (Alignment) Load->setAlignment(Alignment); - if (TBAAInfo) - CGM.DecorateInstruction(Load, TBAAInfo); + if (TBAAInfo) { + llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo, + TBAAOffset); + CGM.DecorateInstruction(Load, TBAAPath); + } if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) || (SanOpts->Enum && Ty->getAs<EnumType>())) { @@ -1217,7 +1223,8 @@ bool Volatile, unsigned Alignment, QualType Ty, llvm::MDNode *TBAAInfo, - bool isInit) { + bool isInit, QualType TBAABaseType, + uint64_t TBAAOffset) { // Handle vectors differently to get better performance. if (Ty->isVectorType()) { @@ -1268,15 +1275,19 @@ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); if (Alignment) Store->setAlignment(Alignment); - if (TBAAInfo) - CGM.DecorateInstruction(Store, TBAAInfo); + if (TBAAInfo) { + llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo, + TBAAOffset); + CGM.DecorateInstruction(Store, TBAAPath); + } } void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit) { EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), lvalue.getAlignment().getQuantity(), lvalue.getType(), - lvalue.getTBAAInfo(), isInit); + lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(), + lvalue.getTBAAOffset()); } /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this @@ -2494,9 +2505,12 @@ llvm::Value *addr = base.getAddress(); unsigned cvr = base.getVRQualifiers(); + bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA; if (rec->isUnion()) { // For unions, there is no pointer adjustment. assert(!type->isReferenceType() && "union has reference member"); + // TODO: handle path-aware TBAA for union. + TBAAPath = false; } else { // For structs, we GEP to the field that the record layout suggests. unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); @@ -2508,6 +2522,8 @@ if (cvr & Qualifiers::Volatile) load->setVolatile(true); load->setAlignment(alignment.getQuantity()); + // Loading the reference will disable path-aware TBAA. + TBAAPath = false; if (CGM.shouldUseTBAA()) { llvm::MDNode *tbaa; if (mayAlias) @@ -2541,6 +2557,16 @@ LValue LV = MakeAddrLValue(addr, type, alignment); LV.getQuals().addCVRQualifiers(cvr); + if (TBAAPath) { + const ASTRecordLayout &Layout = + getContext().getASTRecordLayout(field->getParent()); + // Set the base type to be the base type of the base LValue and + // update offset to be relative to the base type. + LV.setTBAABaseType(base.getTBAABaseType()); + LV.setTBAAOffset(base.getTBAAOffset() + + Layout.getFieldOffset(field->getFieldIndex()) / + getContext().getCharWidth()); + } // __weak attribute on a field is ignored. if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) Index: lib/CodeGen/CGValue.h =================================================================== --- lib/CodeGen/CGValue.h (revision 178718) +++ lib/CodeGen/CGValue.h (working copy) @@ -157,6 +157,11 @@ Expr *BaseIvarExp; + /// Used by struct-path-aware TBAA. + QualType TBAABaseType; + /// Offset relative to the base type. + uint64_t TBAAOffset; + /// TBAAInfo - TBAA information to attach to dereferences of this LValue. llvm::MDNode *TBAAInfo; @@ -175,6 +180,10 @@ this->ImpreciseLifetime = false; this->ThreadLocalRef = false; this->BaseIvarExp = 0; + + // Initialize fields for TBAA. 
+ this->TBAABaseType = Type; + this->TBAAOffset = 0; this->TBAAInfo = TBAAInfo; } @@ -232,6 +241,12 @@ Expr *getBaseIvarExp() const { return BaseIvarExp; } void setBaseIvarExp(Expr *V) { BaseIvarExp = V; } + QualType getTBAABaseType() const { return TBAABaseType; } + void setTBAABaseType(QualType T) { TBAABaseType = T; } + + uint64_t getTBAAOffset() const { return TBAAOffset; } + void setTBAAOffset(uint64_t O) { TBAAOffset = O; } + llvm::MDNode *getTBAAInfo() const { return TBAAInfo; } void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; } Index: lib/CodeGen/CodeGenFunction.h =================================================================== --- lib/CodeGen/CodeGenFunction.h (revision 178718) +++ lib/CodeGen/CodeGenFunction.h (working copy) @@ -2211,7 +2211,9 @@ /// the LLVM value representation. llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, - llvm::MDNode *TBAAInfo = 0); + llvm::MDNode *TBAAInfo = 0, + QualType TBAABaseTy = QualType(), + uint64_t TBAAOffset = 0); /// EmitLoadOfScalar - Load a scalar value from an address, taking /// care to appropriately convert from the memory representation to @@ -2224,7 +2226,9 @@ /// the LLVM value representation. void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, - llvm::MDNode *TBAAInfo = 0, bool isInit=false); + llvm::MDNode *TBAAInfo = 0, bool isInit = false, + QualType TBAABaseTy = QualType(), + uint64_t TBAAOffset = 0); /// EmitStoreOfScalar - Store a scalar value to an address, taking /// care to appropriately convert from the memory representation to Index: lib/CodeGen/CodeGenModule.cpp =================================================================== --- lib/CodeGen/CodeGenModule.cpp (revision 178718) +++ lib/CodeGen/CodeGenModule.cpp (working copy) @@ -227,6 +227,20 @@ return TBAA->getTBAAStructInfo(QTy); } +llvm::MDNode *CodeGenModule::getTBAAStructTypeInfo(QualType QTy) { + if (!TBAA) + return 0; + return TBAA->getTBAAStructTypeInfo(QTy); +} + +llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy, + llvm::MDNode *AccessN, + uint64_t O) { + if (!TBAA) + return 0; + return TBAA->getTBAAStructTagInfo(BaseTy, AccessN, O); +} + void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst, llvm::MDNode *TBAAInfo) { Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo); Index: lib/CodeGen/CodeGenModule.h =================================================================== --- lib/CodeGen/CodeGenModule.h (revision 178718) +++ lib/CodeGen/CodeGenModule.h (working copy) @@ -501,6 +501,11 @@ llvm::MDNode *getTBAAInfo(QualType QTy); llvm::MDNode *getTBAAInfoForVTablePtr(); llvm::MDNode *getTBAAStructInfo(QualType QTy); + /// Return the MDNode in the type DAG for the given struct type. + llvm::MDNode *getTBAAStructTypeInfo(QualType QTy); + /// Return the path-aware tag for given base type, access node and offset. 
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN, + uint64_t O); bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor); Index: lib/CodeGen/CodeGenTBAA.cpp =================================================================== --- lib/CodeGen/CodeGenTBAA.cpp (revision 178718) +++ lib/CodeGen/CodeGenTBAA.cpp (working copy) @@ -21,6 +21,7 @@ #include "clang/AST/Mangle.h" #include "clang/AST/RecordLayout.h" #include "clang/Frontend/CodeGenOptions.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/IR/Constants.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" @@ -225,3 +226,87 @@ // For now, handle any other kind of type conservatively. return StructMetadataCache[Ty] = NULL; } + +/// Check if the given type can be handled by path-aware TBAA. +static bool isTBAAPathStruct(QualType QTy) { + if (const RecordType *TTy = QTy->getAs<RecordType>()) { + const RecordDecl *RD = TTy->getDecl()->getDefinition(); + // RD can be struct, union, class, interface or enum. + // For now, we only handle struct. + if (RD->isStruct() && !RD->hasFlexibleArrayMember()) + return true; + } + return false; +} + +llvm::MDNode * +CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) { + const Type *Ty = Context.getCanonicalType(QTy).getTypePtr(); + assert(isTBAAPathStruct(QTy)); + + if (llvm::MDNode *N = StructTypeMetadataCache[Ty]) + return N; + + if (const RecordType *TTy = QTy->getAs<RecordType>()) { + const RecordDecl *RD = TTy->getDecl()->getDefinition(); + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + SmallVector <std::pair<uint64_t, llvm::MDNode*>, 4> Fields; + // To reduce the size of MDNode for a given struct type, we only output + // once for all the fields with the same scalar types. + // Offsets for scalar fields in the type DAG are not used. + llvm::SmallSet <llvm::MDNode*, 4> ScalarFieldTypes; + unsigned idx = 0; + for (RecordDecl::field_iterator i = RD->field_begin(), + e = RD->field_end(); i != e; ++i, ++idx) { + QualType FieldQTy = i->getType(); + llvm::MDNode *FieldNode; + if (isTBAAPathStruct(FieldQTy)) + FieldNode = getTBAAStructTypeInfo(FieldQTy); + else { + FieldNode = getTBAAInfo(FieldQTy); + // Ignore this field if the type already exists. + if (ScalarFieldTypes.count(FieldNode)) + continue; + ScalarFieldTypes.insert(FieldNode); + } + if (!FieldNode) + return StructTypeMetadataCache[Ty] = NULL; + Fields.push_back(std::make_pair( + Layout.getFieldOffset(idx) / Context.getCharWidth(), FieldNode)); + } + + // TODO: This is using the RTTI name. Is there a better way to get + // a unique string for a type? + SmallString<256> OutName; + llvm::raw_svector_ostream Out(OutName); + MContext.mangleCXXRTTIName(QualType(Ty, 0), Out); + Out.flush(); + // Create the struct type node with a vector of pairs (offset, type). 
+ return StructTypeMetadataCache[Ty] = + MDHelper.createTBAAStructTypeNode(OutName, Fields); + } + + return StructMetadataCache[Ty] = NULL; +} + +llvm::MDNode * +CodeGenTBAA::getTBAAStructTagInfo(QualType BaseQTy, llvm::MDNode *AccessNode, + uint64_t Offset) { + if (!CodeGenOpts.StructPathTBAA) + return AccessNode; + + const Type *BTy = Context.getCanonicalType(BaseQTy).getTypePtr(); + TBAAPathTag PathTag = TBAAPathTag(BTy, AccessNode, Offset); + if (llvm::MDNode *N = StructTagMetadataCache[PathTag]) + return N; + + llvm::MDNode *BNode = 0; + if (isTBAAPathStruct(BaseQTy)) + BNode = getTBAAStructTypeInfo(BaseQTy); + if (!BNode) + return StructTagMetadataCache[PathTag] = AccessNode; + + return StructTagMetadataCache[PathTag] = + MDHelper.createTBAAStructTagNode(BNode, AccessNode, Offset); +} Index: lib/CodeGen/CodeGenTBAA.h =================================================================== --- lib/CodeGen/CodeGenTBAA.h (revision 178718) +++ lib/CodeGen/CodeGenTBAA.h (working copy) @@ -35,6 +35,14 @@ namespace CodeGen { class CGRecordLayout; + struct TBAAPathTag { + TBAAPathTag(const Type *B, const llvm::MDNode *A, uint64_t O) + : BaseT(B), AccessN(A), Offset(O) {} + const Type *BaseT; + const llvm::MDNode *AccessN; + uint64_t Offset; + }; + /// CodeGenTBAA - This class organizes the cross-module state that is used /// while lowering AST types to LLVM types. class CodeGenTBAA { @@ -46,8 +54,13 @@ // MDHelper - Helper for creating metadata. llvm::MDBuilder MDHelper; - /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them. + /// MetadataCache - This maps clang::Types to scalar llvm::MDNodes describing + /// them. llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache; + /// This maps clang::Types to a struct node in the type DAG. + llvm::DenseMap<const Type *, llvm::MDNode *> StructTypeMetadataCache; + /// This maps TBAAPathTags to a tag node. + llvm::DenseMap<TBAAPathTag, llvm::MDNode *> StructTagMetadataCache; /// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing /// them for struct assignments. @@ -89,9 +102,49 @@ /// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of /// the given type. llvm::MDNode *getTBAAStructInfo(QualType QTy); + + /// Get the MDNode in the type DAG for given struct type QType. + llvm::MDNode *getTBAAStructTypeInfo(QualType QType); + /// Get the tag MDNode for a given base type, the actual sclar access MDNode + /// and offset into the base type. 
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseQType, + llvm::MDNode *AccessNode, uint64_t Offset); }; } // end namespace CodeGen } // end namespace clang +namespace llvm { + +template<> struct DenseMapInfo<clang::CodeGen::TBAAPathTag> { + static clang::CodeGen::TBAAPathTag getEmptyKey() { + return clang::CodeGen::TBAAPathTag( + DenseMapInfo<const clang::Type *>::getEmptyKey(), + DenseMapInfo<const MDNode *>::getEmptyKey(), + DenseMapInfo<uint64_t>::getEmptyKey()); + } + + static clang::CodeGen::TBAAPathTag getTombstoneKey() { + return clang::CodeGen::TBAAPathTag( + DenseMapInfo<const clang::Type *>::getTombstoneKey(), + DenseMapInfo<const MDNode *>::getTombstoneKey(), + DenseMapInfo<uint64_t>::getTombstoneKey()); + } + + static unsigned getHashValue(const clang::CodeGen::TBAAPathTag &Val) { + return DenseMapInfo<const clang::Type *>::getHashValue(Val.BaseT) ^ + DenseMapInfo<const MDNode *>::getHashValue(Val.AccessN) ^ + DenseMapInfo<uint64_t>::getHashValue(Val.Offset); + } + + static bool isEqual(const clang::CodeGen::TBAAPathTag &LHS, + const clang::CodeGen::TBAAPathTag &RHS) { + return LHS.BaseT == RHS.BaseT && + LHS.AccessN == RHS.AccessN && + LHS.Offset == RHS.Offset; + } +}; + +} // end namespace llvm + #endif Index: lib/Driver/Tools.cpp =================================================================== --- lib/Driver/Tools.cpp (revision 178718) +++ lib/Driver/Tools.cpp (working copy) @@ -2105,6 +2105,8 @@ options::OPT_fno_strict_aliasing, getToolChain().IsStrictAliasingDefault())) CmdArgs.push_back("-relaxed-aliasing"); + if (Args.hasArg(options::OPT_fstruct_path_tbaa)) + CmdArgs.push_back("-struct-path-tbaa"); if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums, false)) CmdArgs.push_back("-fstrict-enums"); Index: lib/Frontend/CompilerInvocation.cpp =================================================================== --- lib/Frontend/CompilerInvocation.cpp (revision 178718) +++ lib/Frontend/CompilerInvocation.cpp (working copy) @@ -324,6 +324,7 @@ Opts.UseRegisterSizedBitfieldAccess = Args.hasArg( OPT_fuse_register_sized_bitfield_access); Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing); + Opts.StructPathTBAA = Args.hasArg(OPT_struct_path_tbaa); Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags); Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants); Opts.NoCommon = Args.hasArg(OPT_fno_common); Index: test/CodeGen/tbaa.cpp =================================================================== --- test/CodeGen/tbaa.cpp (revision 0) +++ test/CodeGen/tbaa.cpp (working copy) @@ -0,0 +1,217 @@ +// RUN: %clang_cc1 -O1 -disable-llvm-optzns %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -O1 -struct-path-tbaa -disable-llvm-optzns %s -emit-llvm -o - | FileCheck %s -check-prefix=PATH +// Test TBAA metadata generated by front-end. 
+ +#include <stdint.h> +typedef struct +{ + uint16_t f16; + uint32_t f32; + uint16_t f16_2; + uint32_t f32_2; +} StructA; +typedef struct +{ + uint16_t f16; + StructA a; + uint32_t f32; +} StructB; +typedef struct +{ + uint16_t f16; + StructB b; + uint32_t f32; +} StructC; +typedef struct +{ + uint16_t f16; + StructB b; + uint32_t f32; + uint8_t f8; +} StructD; + +typedef struct +{ + uint16_t f16; + uint32_t f32; +} StructS; +typedef struct +{ + uint16_t f16; + uint32_t f32; +} StructS2; + +uint32_t g(uint32_t *s, StructA *A, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !5 + *s = 1; + A->f32 = 4; + return *s; +} + +uint32_t g2(uint32_t *s, StructA *A, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa !5 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa !8 + *s = 1; + A->f16 = 4; + return *s; +} + +uint32_t g3(StructA *A, StructB *B, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !9 + A->f32 = 1; + B->a.f32 = 4; + return A->f32; +} + +uint32_t g4(StructA *A, StructB *B, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa !5 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa !11 + A->f32 = 1; + B->a.f16 = 4; + return A->f32; +} + +uint32_t g5(StructA *A, StructB *B, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !12 + A->f32 = 1; + B->f32 = 4; + return A->f32; +} + +uint32_t g6(StructA *A, StructB *B, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !13 + A->f32 = 1; + B->a.f32_2 = 4; + return A->f32; +} + +uint32_t g7(StructA *A, StructS *S, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !14 + A->f32 = 1; + S->f32 = 4; + return A->f32; +} + +uint32_t g8(StructA *A, StructS *S, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa !5 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !5 +// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa !16 + 
A->f32 = 1; + S->f16 = 4; + return A->f32; +} + +uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !14 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !17 + S->f32 = 1; + S2->f32 = 4; + return S->f32; +} + +uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa !5 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !14 +// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa !19 + S->f32 = 1; + S2->f16 = 4; + return S->f32; +} + +uint32_t g11(StructC *C, StructD *D, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !20 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !22 + C->b.a.f32 = 1; + D->b.a.f32 = 4; + return C->b.a.f32; +} + +uint32_t g12(StructC *C, StructD *D, uint64_t count) { +// CHECK: define i32 @{{.*}}( +// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa !4 +// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa !4 +// TODO: differentiate the two accesses. +// PATH: define i32 @{{.*}}( +// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa !9 +// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa !9 + StructB *b1 = &(C->b); + StructB *b2 = &(D->b); + // b1, b2 have different context. + b1->a.f32 = 1; + b2->a.f32 = 4; + return b1->a.f32; +} + +// CHECK: !1 = metadata !{metadata !"omnipotent char", metadata !2} +// CHECK: !2 = metadata !{metadata !"Simple C/C++ TBAA"} +// CHECK: !4 = metadata !{metadata !"int", metadata !1} +// CHECK: !5 = metadata !{metadata !"short", metadata !1} + +// PATH: !1 = metadata !{metadata !"omnipotent char", metadata !2} +// PATH: !4 = metadata !{metadata !"int", metadata !1} +// PATH: !5 = metadata !{metadata !6, metadata !4, i64 4} +// PATH: !6 = metadata !{metadata !"_ZTS7StructA", i64 0, metadata !7, i64 4, metadata !4} +// PATH: !7 = metadata !{metadata !"short", metadata !1} +// PATH: !8 = metadata !{metadata !6, metadata !7, i64 0} +// PATH: !9 = metadata !{metadata !10, metadata !4, i64 8} +// PATH: !10 = metadata !{metadata !"_ZTS7StructB", i64 0, metadata !7, i64 4, metadata !6, i64 20, metadata !4} +// PATH: !11 = metadata !{metadata !10, metadata !7, i64 4} +// PATH: !12 = metadata !{metadata !10, metadata !4, i64 20} +// PATH: !13 = metadata !{metadata !10, metadata !4, i64 16} +// PATH: !14 = metadata !{metadata !15, metadata !4, i64 4} +// PATH: !15 = metadata !{metadata !"_ZTS7StructS", i64 0, metadata !7, i64 4, metadata !4} +// PATH: !16 = metadata !{metadata !15, metadata !7, i64 0} +// PATH: !17 = metadata !{metadata !18, metadata !4, i64 4} +// PATH: !18 = metadata !{metadata !"_ZTS8StructS2", i64 0, metadata !7, i64 4, metadata !4} +// PATH: !19 = metadata !{metadata !18, metadata !7, i64 0} +// PATH: !20 = metadata !{metadata !21, metadata !4, i64 12} +// PATH: !21 = metadata !{metadata !"_ZTS7StructC", i64 0, metadata !7, i64 4, metadata !10, i64 28, metadata !4} +// PATH: !22 = metadata !{metadata !23, metadata !4, i64 12} +// PATH: !23 = metadata !{metadata !"_ZTS7StructD", i64 0, metadata !7, i64 4, metadata !10, i64 28, metadata !4, i64 32, 
metadata !1} git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@178784 91177308-0d34-0410-b5e6-96231b3b80d8
2013-03-13  Remove trailing comma in enum list.  John McCall
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@176926 91177308-0d34-0410-b5e6-96231b3b80d8
2013-03-13  Tighten up the rules for precise lifetime and document  John McCall
the requirements on the ARC optimizer. rdar://13407451 git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@176924 91177308-0d34-0410-b5e6-96231b3b80d8
2013-03-07  Promote atomic type sizes up to a power of two, capped by  John McCall
MaxAtomicPromoteWidth. Fix a ton of terrible bugs with _Atomic types and (non-intrinsic-mediated) loads and stores thereto. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@176658 91177308-0d34-0410-b5e6-96231b3b80d8
2013-03-07  Change hasAggregateLLVMType, which conflates complex and  John McCall
aggregate types in a profoundly wrong way that has to be worked around in every call site, to getEvaluationKind, which classifies and distinguishes between all of these cases. Also, normalize the API for loading and storing complexes. I'm working on a larger patch and wanted to pull these changes out, but it would have been annoying to detangle them from each other. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@176656 91177308-0d34-0410-b5e6-96231b3b80d8
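A standalone sketch of the three-way classification this describes replacing the old hasAggregateLLVMType() boolean; the enum and helper names below are invented for illustration, not the real clang declarations:
  #include <cassert>

  enum class EvaluationKind { Scalar, Complex, Aggregate };

  struct FakeType { bool isComplex; bool isRecordOrArray; };

  EvaluationKind getEvaluationKind(const FakeType &T) {
    if (T.isComplex)       return EvaluationKind::Complex;    // _Complex float, ...
    if (T.isRecordOrArray) return EvaluationKind::Aggregate;  // structs, arrays
    return EvaluationKind::Scalar;                            // ints, pointers, reals
  }

  // Call sites switch over the kind instead of special-casing complex values
  // behind an "is aggregate?" test.
  void emitAnyValue(const FakeType &T) {
    switch (getEvaluationKind(T)) {
    case EvaluationKind::Scalar:    /* scalar emission path    */ break;
    case EvaluationKind::Complex:   /* complex emission path   */ break;
    case EvaluationKind::Aggregate: /* aggregate emission path */ break;
    }
  }

  int main() {
    assert(getEvaluationKind({false, false}) == EvaluationKind::Scalar);
    assert(getEvaluationKind({true, false}) == EvaluationKind::Complex);
    return 0;
  }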
2013-01-25  Patch for PR9027 and rdar://11861085  Fariborz Jahanian
Title: [PR9027] volatile struct bug: member is not loaded at -O; This is caused by the last flag passed to @llvm.memcpy being false, not honoring that the aggregate has at least one 'volatile' data member (even though the aggregate itself has not been qualified as 'volatile'). As a result, the optimizer optimizes away the memcpy altogether. Patch reviewed by John McCall (I still need to fix up a test, though). git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@173535 91177308-0d34-0410-b5e6-96231b3b80d8
2013-01-02  Rewrite #includes for llvm/Foo.h to llvm/IR/Foo.h as appropriate to  Chandler Carruth
reflect the migration in r171366. Re-sort the #include lines to reflect the new paths. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@171369 91177308-0d34-0410-b5e6-96231b3b80d8
2012-12-24  CGValue.h: Update one \param to Addr in MakeBitfield(). [-Wdocumentation]  NAKAMURA Takumi
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@171011 91177308-0d34-0410-b5e6-96231b3b80d8
2012-12-06  Rework the bitfield access IR generation to address PR13619 and  Chandler Carruth
generally support the C++11 memory model requirements for bitfield accesses by relying more heavily on LLVM's memory model. The primary change this introduces is to move from a manually aligned and strided access pattern across the bits of the bitfield to a much simpler lump access of all bits in the bitfield followed by math to extract the bits relevant for the particular field. This simplifies the code significantly, but relies on LLVM to intelligently lower these integers. I have tested LLVM's lowering both synthetically and in benchmarks. The lowering appears to be functional, and there are no really significant performance regressions. Different code patterns accessing bitfields will vary in how this impacts them. The only real regressions I'm seeing are a few patterns where the LLVM code generation for loads that feed directly into a mask operation doesn't take advantage of the x86 ability to do a smaller load and a cheap zero-extension. This doesn't regress any benchmark in the nightly test suite on my box past the noise threshold, but my box is quite noisy. I'll be watching the LNT numbers, and will look into further improvements to the LLVM lowering as needed. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@169489 91177308-0d34-0410-b5e6-96231b3b80d8
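A rough illustration of the lump-access pattern described above: the storage unit holding the bitfields is loaded as one integer and the field is recovered with shifts and masks. The offsets and widths here assume the usual little-endian Itanium layout and are only an example, not what clang is guaranteed to emit:
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  struct Flags {
    unsigned a : 3;
    unsigned b : 5;
    unsigned c : 8;
  };

  int main() {
    static_assert(sizeof(Flags) <= sizeof(uint32_t), "example assumes one 32-bit storage unit");
    Flags f{};
    f.b = 21;

    // The reworked IR generation loads the whole storage unit once...
    uint32_t storage = 0;
    std::memcpy(&storage, &f, sizeof(f));
    // ...then extracts the field with shift + mask (a in bits 0-2, b in bits 3-7
    // under the assumed layout).
    uint32_t b = (storage >> 3) & 0x1Fu;
    std::printf("b = %u\n", (unsigned)b);  // 21 on such targets
    return 0;
  }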
2012-12-04  Sort all of Clang's files under 'lib', and fix up the broken headers  Chandler Carruth
uncovered. This required manually correcting all of the incorrect main-module headers I could find, and running the new llvm/utils/sort_includes.py script over the files. I also manually added quite a few missing headers that were uncovered by shuffling the order or moving headers up to be main-module-headers. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@169237 91177308-0d34-0410-b5e6-96231b3b80d8
2012-08-15  Fix for PR#13606: http://llvm.org/bugs/show_bug.cgi?id=13606  John Criswell
Changed the alignment of an LValue to be 64 bits so that we can handle alignment values up to half of a 64-bit address space. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@161971 91177308-0d34-0410-b5e6-96231b3b80d8
2012-07-02  Significantly simplify CGExprAgg's logic about ignored results:  John McCall
if we want to ignore a result, the Dest will be null. Otherwise, we must copy into it. This means we need to ensure a slot when loading from a volatile l-value. With all that in place, fix a bug with chained assignments into __block variables of aggregate type where we were losing insight into the actual source of the value during the second assignment. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@159630 91177308-0d34-0410-b5e6-96231b3b80d8
2012-06-27  Propagate lvalue alignment into bitfields. Per report on cfe-dev.  Eli Friedman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@159295 91177308-0d34-0410-b5e6-96231b3b80d8
2012-03-29  Revert r153613 as it's causing large compile-time regressions on the nightly testers.  Chad Rosier
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@153660 91177308-0d34-0410-b5e6-96231b3b80d8
2012-03-28  When we can't prove that the target of an aggregate copy is  John McCall
a complete object, the memcpy needs to use the data size of the structure instead of its sizeof() value. Fixes PR12204. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@153613 91177308-0d34-0410-b5e6-96231b3b80d8
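A small example of why the data size matters: under the common Itanium/x86-64 layout (an assumption of this sketch), a derived class may put its own members into a non-POD base's tail padding, so copying sizeof(Base) bytes through a reference that might denote a base subobject would clobber the derived class's data, while copying only the base's data size does not:
  #include <cstdio>

  struct Base {
    Base() : l(0), c(0) {}  // user-provided ctor makes Base non-POD for layout,
                            // allowing its tail padding to be reused
    long l;                 // 8 bytes
    char c;                 // 1 byte; sizeof(Base) is typically 16, data size 9
  };

  struct Derived : Base {
    char extra;             // typically placed at offset 9, inside Base's tail padding
  };

  int main() {
    // A 16-byte memcpy into the Base subobject of a Derived would also rewrite
    // the byte holding Derived::extra; copying 9 bytes would not.
    std::printf("sizeof(Base)=%zu sizeof(Derived)=%zu\n", sizeof(Base), sizeof(Derived));
    return 0;
  }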
2012-03-22  Make sure we correctly set the alignment for vector loads and stores associated with vector element lvalues.  Eli Friedman
Patch by Kevin Schoedel (with some minor modifications by me). git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@153285 91177308-0d34-0410-b5e6-96231b3b80d8
2011-12-11  Reuse forAddr to create ignored AggValueSlots.  Benjamin Kramer
Silences valgrind warnings about uninitialized alignment values. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@146342 91177308-0d34-0410-b5e6-96231b3b80d8
2011-12-03  Switch LValue so that it exposes alignment in CharUnits. (No functional change.)  Eli Friedman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@145753 91177308-0d34-0410-b5e6-96231b3b80d8
2011-12-03  Add a utility to get an RValue for a given LValue for an aggregate; switch a few places over to it.  Eli Friedman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@145747 91177308-0d34-0410-b5e6-96231b3b80d8
2011-12-03  Switch the Alignment argument on AggValueSlot over to CharUnits, per John's review comment.  Eli Friedman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@145741 91177308-0d34-0410-b5e6-96231b3b80d8
2011-12-03  Track alignment in AggValueSlot. No functional change in this patch, but I'll be introducing uses of the specified alignment soon.  Eli Friedman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@145736 91177308-0d34-0410-b5e6-96231b3b80d8
2011-11-07  Rip the ObjCPropertyRef l-value kind out of IR-generation.  John McCall
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@143908 91177308-0d34-0410-b5e6-96231b3b80d8
2011-11-06  Change the AST representation of operations on Objective-C  John McCall
property references to use a new PseudoObjectExpr expression which pairs a syntactic form of the expression with a set of semantic expressions implementing it. This should significantly reduce the complexity required elsewhere in the compiler to deal with these kinds of expressions (e.g. IR generation's special l-value kind, the static analyzer's Message abstraction), at the lower cost of specifically dealing with the odd AST structure of these expressions. It should also greatly simplify efforts to implement similar language features in the future, most notably Managed C++'s properties and indexed properties. Most of the effort here is in dealing with the various clients of the AST. I've gone ahead and simplified the ObjC rewriter's use of properties; other clients, like IR-gen and the static analyzer, have all the old complexity *and* all the new complexity, at least temporarily. Many thanks to Ted for writing and advising on the necessary changes to the static analyzer. I've xfailed a small diagnostics regression in the static analyzer at Ted's request. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@143867 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-26  What say we document some of these AggValueSlot flags a bit  John McCall
better. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@138628 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-26  Since the 'is aliased' bit is critical for correctness in C++, it  John McCall
really shouldn't be optional. Fix the remaining place where a temporary was being passed as potentially-aliased memory. Fixes PR10756. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@138627 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-25  Track whether an AggValueSlot is potentially aliased, and do not  John McCall
emit call results into potentially aliased slots. This allows us to properly mark indirect return slots as noalias, at the cost of requiring an extra memcpy when assigning an aggregate call result into an l-value. It also brings us into compliance with the x86-64 ABI. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@138599 91177308-0d34-0410-b5e6-96231b3b80d8
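A sketch of the hazard being avoided (the function and type names are made up): if the caller handed the assignment's destination directly to the callee as its indirect return slot, the callee would be writing into memory it also reads through its argument whenever the two alias, so the slot could not honestly be marked noalias. Building the result in a separate slot and copying it afterwards keeps the noalias guarantee at the cost of that extra memcpy:
  #include <cstdio>

  struct Big { int v[8]; };   // large enough to be returned indirectly (sret) on x86-64

  Big combine(const Big &src) {
    Big r{};
    for (int i = 0; i < 8; ++i)
      r.v[i] = src.v[i] + 1;  // reads src while the result is being built
    return r;
  }

  int main() {
    Big b{{1, 2, 3, 4, 5, 6, 7, 8}};
    b = combine(b);           // destination aliases the argument: the result is
                              // materialized in a temporary slot, then copied into b
    std::printf("%d %d\n", b.v[0], b.v[7]);  // 2 9
    return 0;
  }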
2011-08-25  Use stronger typing for the flags on AggValueSlot and require  John McCall
creators to tell us whether something needs GC barriers. No functionality change. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@138581 91177308-0d34-0410-b5e6-96231b3b80d8
2011-06-16  Restore correct use of GC barriers.  John McCall
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@133144 91177308-0d34-0410-b5e6-96231b3b80d8
2011-06-15  Automatic Reference Counting.  John McCall
Language-design credit goes to a lot of people, but I particularly want to single out Blaine Garst and Patrick Beard for their contributions. Compiler implementation credit goes to Argyrios, Doug, Fariborz, and myself, in no particular order. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@133103 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-03  More capturing of 'this': implicit member expressions. Getting that  John McCall
right for anonymous struct/union members led to me discovering some seemingly broken code in that area of Sema, which I fixed, partly by changing the representation of member pointer constants so that IndirectFieldDecls aren't expanded. This led to assorted cleanups with member pointers in CodeGen, and while I was doing that I saw some random other things to clean up. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@124785 91177308-0d34-0410-b5e6-96231b3b80d8
2010-12-04  Kill the KVC l-value kind and calculate the base expression when emitting  John McCall
the l-value. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@120884 91177308-0d34-0410-b5e6-96231b3b80d8
2010-12-02  Improve codegen for initializer lists to use memset more aggressively  Chris Lattner
when an initializer is variable (I handled the constant case in a previous patch). This has three pieces:
1. Enhance AggValueSlot to have an 'isZeroed' bit to tell CGExprAgg that the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine whether it is profitable to emit a memset + individual stores vs. stores for everything.
The heuristic used is that a global has to be more than 16 bytes and has to be 3/4 zero to be a candidate for this xform. The two testcases are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10. This xform should substantially speed up -O0 builds when it kicks in, as well as reducing code size and optimizer heartburn on insane cases. This resolves PR279. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@120692 91177308-0d34-0410-b5e6-96231b3b80d8
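Roughly the kind of initializer the new heuristic turns into a memset plus a handful of stores; the actual test9/test10 sources are not shown in this log, so the function below is a guess shaped to match the test9 IR quoted above:
  #include <cstdio>

  int test9_like(int X) {
    int Arr[100] = { X };  // 400 bytes, 396 of them zero: larger than 16 bytes and
                           // well over 3/4 zero, so IR generation emits one
                           // llvm.memset of 400 bytes plus a single store of X
                           // into Arr[0] instead of 100 individual stores
    int sum = 0;
    for (int i = 0; i < 100; ++i)
      sum += Arr[i];
    return sum;            // == X
  }

  int main() {
    std::printf("%d\n", test9_like(7));  // 7
    return 0;
  }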
2010-12-02  Simplify the ASTs by consolidating ObjCImplicitGetterSetterExpr and ObjCPropertyRefExpr into the latter.  John McCall
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@120643 91177308-0d34-0410-b5e6-96231b3b80d8
2010-11-17  Reset the lifetime-managed flag between emission of the agg conditional  John McCall
branches. Fixes PR8623. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@119408 91177308-0d34-0410-b5e6-96231b3b80d8
2010-10-22  After discussion with Doug and John, I am reverting  Fariborz Jahanian
the patch. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@117159 91177308-0d34-0410-b5e6-96231b3b80d8
2010-10-22  Patch fixes miscompile with non-trivial copy constructors and  Fariborz Jahanian
statement expressions. rdar://8540501 git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@117146 91177308-0d34-0410-b5e6-96231b3b80d8
2010-10-14  Experimental TBAA support.  Dan Gohman
This enables metadata generation by default; however, the TBAA pass in the optimizer is still disabled for now. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@116536 91177308-0d34-0410-b5e6-96231b3b80d8
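The scalar TBAA metadata this turns on is a small tree of type names; every load and store gets a !tbaa tag pointing at its access type, and two accesses may alias only if one tag's type is an ancestor of the other's. The node shapes in the comments below are taken from the CHECK lines of the TBAA test quoted earlier in this log:
  #include <stdint.h>

  void write_both(uint32_t *i, uint16_t *s) {
    *i = 1;  // !tbaa "int"   -> "omnipotent char" -> "Simple C/C++ TBAA"
    *s = 2;  // !tbaa "short" -> "omnipotent char" -> "Simple C/C++ TBAA"
             // "int" and "short" are distinct leaves, so the optimizer may assume
             // the two stores do not alias; an access tagged "omnipotent char"
             // (e.g. through a char*) would still be treated as aliasing both.
  }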
2010-09-16  Removes unused setter.  Fariborz Jahanian
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@114085 91177308-0d34-0410-b5e6-96231b3b80d8
2010-09-16  Further beautification: this pointer can now be typed.  John McCall
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@114061 91177308-0d34-0410-b5e6-96231b3b80d8
2010-09-16  Initialize AggValueSlot's flags along all paths, plus minor beautification.  John McCall
Prospective fix for broken commit in r114045. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@114060 91177308-0d34-0410-b5e6-96231b3b80d8
2010-09-16  Patch to move RequiresGCollection bit to  Fariborz Jahanian
AggValueSlot slot. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@114045 91177308-0d34-0410-b5e6-96231b3b80d8
2010-09-15  one piece of code is responsible for the lifetime of every aggregate  John McCall
slot. The easiest way to do that was to bundle up the information we care about for aggregate slots into a new structure which demands that its creators at least consider the question. I could probably be convinced that the ObjC 'needs GC' bit should be rolled into this structure. Implement generalized copy elision. The main obstacle here is that IR-generation must be much more careful about making sure that exactly git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@113962 91177308-0d34-0410-b5e6-96231b3b80d8
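A simplified sketch of the kind of information such a slot structure bundles so that every creator has to answer these questions up front; the field names are illustrative, drawn from flags mentioned elsewhere in this log (zeroing, aliasing, GC barriers, alignment), not the actual clang declaration:
  #include <cstdint>

  struct AggSlotSketch {
    void    *Addr = nullptr;              // where the aggregate lives; null means "result ignored"
    uint64_t AlignmentInChars = 0;        // alignment of that memory
    bool     IsVolatile = false;          // destination involves volatile members
    bool     NeedsGCBarriers = false;     // ObjC GC write barriers required
    bool     IsPotentiallyAliased = true; // unsafe to use directly as a noalias return slot
    bool     IsZeroed = false;            // memory already memset to zero

    static AggSlotSketch ignored() { return {}; }
  };

  int main() {
    AggSlotSketch slot = AggSlotSketch::ignored();
    return slot.Addr == nullptr ? 0 : 1;
  }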
2010-08-26  CGValue: Increase width of allowed alignment. We could switch to log2, but we  Daniel Dunbar
don't currently need the bits anyway. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@112173 91177308-0d34-0410-b5e6-96231b3b80d8
2010-08-21  IRgen/CGValue: Give MakeAddrLValue() an alignment argument, and eliminate old form of MakeAddr().  Daniel Dunbar
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@111723 91177308-0d34-0410-b5e6-96231b3b80d8
2010-08-21  IRgen: Move remaining LValue::Set... methods to LValue::set... (non-static) methods.  Daniel Dunbar
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@111722 91177308-0d34-0410-b5e6-96231b3b80d8
2010-08-21  IRgen/CGValue: Add getQuals(), so qualifiers can be modified on an LValue.  Daniel Dunbar
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@111719 91177308-0d34-0410-b5e6-96231b3b80d8
2010-08-21  IRgen/LValue: Add LValue::setNonGC instead of SetObjCNonGC, for consistency with isNonGC().  Daniel Dunbar
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@111718 91177308-0d34-0410-b5e6-96231b3b80d8
2010-08-21  IRgen: Add an LValue::MakeAddr variant which takes a type and uses that to build  Daniel Dunbar
the qualifiers. Also, add CodeGenFunction::MakeAddrLValue() helper function which passes in the ASTContext. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@111714 91177308-0d34-0410-b5e6-96231b3b80d8