author     Owen Anderson <resistor@mac.com>    2009-07-29 22:16:19 +0000
committer  Owen Anderson <resistor@mac.com>    2009-07-29 22:16:19 +0000
commit     96e0fc726c6fe7538522c60743705d5e696b40af (patch)
tree       ece7063f7ecc38f4d96b803d5457c7f762756bc1 /lib/CodeGen
parent     6217b80b7a1379b74cced1c076338262c3c980b3 (diff)
Update for LLVM API change.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@77514 91177308-0d34-0410-b5e6-96231b3b80d8
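The change is mechanical throughout: type construction goes back to the static factory methods on the LLVM type classes instead of the member functions that had briefly lived on LLVMContext (the VMContext references in the hunks below). A minimal sketch of the before/after pattern follows, for orientation only; it is not part of the commit, the helper name is made up, and the include paths assume the 2009-era LLVM headers.

    // Hypothetical example of the API migration this patch performs.
    // "Old" calls are the LLVMContext member functions being removed;
    // "new" calls are the static factories the diff switches to.
    #include "llvm/DerivedTypes.h"   // PointerType, StructType, FunctionType (assumed path)
    #include "llvm/LLVMContext.h"    // assumed path
    #include <vector>

    // Build the struct type { i8*, i32 (i8*)* } with the new-style calls.
    static const llvm::StructType *buildExamplePair(llvm::LLVMContext &VMContext) {
      (void)VMContext; // only the removed API needed the context explicitly

      // Old: VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty)
      const llvm::PointerType *I8Ptr =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);

      std::vector<const llvm::Type *> ArgTys(1, I8Ptr);
      // Old: VMContext.getFunctionType(llvm::Type::Int32Ty, ArgTys, false)
      const llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::Int32Ty, ArgTys, /*isVarArg=*/false);

      std::vector<const llvm::Type *> Fields;
      Fields.push_back(I8Ptr);
      // Old: VMContext.getPointerType(FTy, 0)
      Fields.push_back(llvm::PointerType::get(FTy, 0));
      // Old: VMContext.getStructType(Fields, false)
      return llvm::StructType::get(Fields, /*isPacked=*/false);
    }

The same renaming covers llvm::ArrayType::get, llvm::VectorType::get, and llvm::IntegerType::get in the hunks below; calls that genuinely need the context, such as VMContext.getUndef and VMContext.getNullValue, remain member calls in this patch.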
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--   lib/CodeGen/CGBlocks.cpp         |  54
-rw-r--r--   lib/CodeGen/CGBuiltin.cpp        |  18
-rw-r--r--   lib/CodeGen/CGCXX.cpp            |   6
-rw-r--r--   lib/CodeGen/CGDecl.cpp           |  12
-rw-r--r--   lib/CodeGen/CGExpr.cpp           |  16
-rw-r--r--   lib/CodeGen/CGExprAgg.cpp        |   4
-rw-r--r--   lib/CodeGen/CGExprConstant.cpp   |  16
-rw-r--r--   lib/CodeGen/CGExprScalar.cpp     |  27
-rw-r--r--   lib/CodeGen/CGObjCGNU.cpp        | 128
-rw-r--r--   lib/CodeGen/CGObjCMac.cpp        | 252
-rw-r--r--   lib/CodeGen/CodeGenFunction.cpp  |   4
-rw-r--r--   lib/CodeGen/CodeGenModule.cpp    |  32
-rw-r--r--   lib/CodeGen/TargetABIInfo.cpp    |  64
13 files changed, 314 insertions, 319 deletions
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 8e7a7922ad..c7d900cf48 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -184,12 +184,12 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
       QualType Ty = E->getType();
       if (BDRE && BDRE->isByRef()) {
         uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl());
-        Types[i+5] = VMContext.getPointerType(BuildByRefType(Ty, Align), 0);
+        Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0);
       } else
         Types[i+5] = ConvertType(Ty);
     }
-    llvm::StructType *Ty = VMContext.getStructType(Types, true);
+    llvm::StructType *Ty = llvm::StructType::get(Types, true);
     llvm::AllocaInst *A = CreateTempAlloca(Ty);
     A->setAlignment(subBlockAlign);
@@ -268,7 +268,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
                             llvm::ConstantInt::get(llvm::Type::Int64Ty, offset),
                             "block.literal");
-      Ty = VMContext.getPointerType(Ty, 0);
+      Ty = llvm::PointerType::get(Ty, 0);
       Loc = Builder.CreateBitCast(Loc, Ty);
       Loc = Builder.CreateLoad(Loc, false);
       // Loc = Builder.CreateBitCast(Loc, Ty);
@@ -310,7 +310,7 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
   //   unsigned long reserved;
   //   unsigned long block_size;
   // };
-  BlockDescriptorType = VMContext.getStructType(UnsignedLongTy,
+  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy,
                                                 UnsignedLongTy,
                                                 NULL);
@@ -325,7 +325,7 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
     return GenericBlockLiteralType;
   const llvm::Type *BlockDescPtrTy =
-    VMContext.getPointerTypeUnqual(getBlockDescriptorType());
+    llvm::PointerType::getUnqual(getBlockDescriptorType());
   const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
     getTypes().ConvertType(getContext().IntTy));
@@ -337,7 +337,7 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
   //   void (*__invoke)(void *);
   //   struct __block_descriptor *__descriptor;
   // };
-  GenericBlockLiteralType = VMContext.getStructType(PtrToInt8Ty,
+  GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
                                                     IntTy,
                                                     IntTy,
                                                     PtrToInt8Ty,
@@ -355,7 +355,7 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
     return GenericExtendedBlockLiteralType;
   const llvm::Type *BlockDescPtrTy =
-    VMContext.getPointerTypeUnqual(getBlockDescriptorType());
+    llvm::PointerType::getUnqual(getBlockDescriptorType());
   const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
     getTypes().ConvertType(getContext().IntTy));
@@ -369,7 +369,7 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
   //   void *__copy_func_helper_decl;
   //   void *__destroy_func_decl;
   // };
-  GenericExtendedBlockLiteralType = VMContext.getStructType(PtrToInt8Ty,
+  GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
                                                             IntTy,
                                                             IntTy,
                                                             PtrToInt8Ty,
@@ -392,7 +392,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
   // Get a pointer to the generic block literal.
   const llvm::Type *BlockLiteralTy =
-    VMContext.getPointerTypeUnqual(CGM.getGenericBlockLiteralType());
+    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
   // Bitcast the callee to a block literal.
   llvm::Value *BlockLiteral =
@@ -403,7 +403,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
   BlockLiteral =
     Builder.CreateBitCast(BlockLiteral,
-                          VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty),
+                          llvm::PointerType::getUnqual(llvm::Type::Int8Ty),
                           "tmp");
   // Add the block literal.
@@ -429,7 +429,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
   const llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo, false);
-  const llvm::Type *BlockFTyPtr = VMContext.getPointerTypeUnqual(BlockFTy);
+  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
   Func = Builder.CreateBitCast(Func, BlockFTyPtr);
   // And call the block.
@@ -460,11 +460,11 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
     bool needsCopyDispose = BlockRequiresCopying(E->getType());
     uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl());
     const llvm::Type *PtrStructTy
-      = VMContext.getPointerType(BuildByRefType(E->getType(), Align), 0);
+      = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0);
     // The block literal will need a copy/destroy helper.
     BlockHasCopyDispose = true;
     Ty = PtrStructTy;
-    Ty = VMContext.getPointerType(Ty, 0);
+    Ty = llvm::PointerType::get(Ty, 0);
     V = Builder.CreateBitCast(V, Ty);
     V = Builder.CreateLoad(V, false);
     V = Builder.CreateStructGEP(V, 1, "forwarding");
@@ -472,7 +472,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
     V = Builder.CreateBitCast(V, PtrStructTy);
     V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
   } else {
-    Ty = VMContext.getPointerType(Ty, 0);
+    Ty = llvm::PointerType::get(Ty, 0);
     V = Builder.CreateBitCast(V, Ty);
   }
   return V;
@@ -686,7 +686,7 @@ uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
   uint64_t Pad = BlockOffset - OldOffset;
   if (Pad) {
-    VMContext.getArrayType(llvm::Type::Int8Ty, Pad);
+    llvm::ArrayType::get(llvm::Type::Int8Ty, Pad);
     QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                        llvm::APInt(32, Pad),
                                                        ArrayType::Normal, 0);
@@ -750,13 +750,13 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
   if (NoteForHelperp) {
     std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
-    PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
+    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
     SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
     SrcObj = Builder.CreateLoad(SrcObj);
     llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
     llvm::Type *PtrPtrT;
-    PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
+    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
     DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
     DstObj = Builder.CreateLoad(DstObj);
@@ -769,7 +769,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
       llvm::Value *Srcv = SrcObj;
       Srcv = Builder.CreateStructGEP(Srcv, index);
       Srcv = Builder.CreateBitCast(Srcv,
-                                   VMContext.getPointerType(PtrToInt8Ty, 0));
+                                   llvm::PointerType::get(PtrToInt8Ty, 0));
       Srcv = Builder.CreateLoad(Srcv);
       llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
@@ -830,7 +830,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
   llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
   llvm::Type *PtrPtrT;
-  PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
+  PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
   SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
   SrcObj = Builder.CreateLoad(SrcObj);
@@ -843,7 +843,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
       llvm::Value *Srcv = SrcObj;
       Srcv = Builder.CreateStructGEP(Srcv, index);
       Srcv = Builder.CreateBitCast(Srcv,
-                                   VMContext.getPointerType(PtrToInt8Ty, 0));
+                                   llvm::PointerType::get(PtrToInt8Ty, 0));
       Srcv = Builder.CreateLoad(Srcv);
      BuildBlockRelease(Srcv, flag);
@@ -911,7 +911,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
   // dst->x
   llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
-  V = Builder.CreateBitCast(V, VMContext.getPointerType(T, 0));
+  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
   V = Builder.CreateLoad(V);
   V = Builder.CreateStructGEP(V, 6, "x");
   llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
@@ -921,7 +921,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
   V = Builder.CreateLoad(V);
   V = Builder.CreateBitCast(V, T);
   V = Builder.CreateStructGEP(V, 6, "x");
-  V = Builder.CreateBitCast(V, VMContext.getPointerType(PtrToInt8Ty, 0));
+  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
   llvm::Value *SrcObj = Builder.CreateLoad(V);
   flag |= BLOCK_BYREF_CALLER;
@@ -973,10 +973,10 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
   CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
   llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
-  V = Builder.CreateBitCast(V, VMContext.getPointerType(T, 0));
+  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
   V = Builder.CreateLoad(V);
   V = Builder.CreateStructGEP(V, 6, "x");
-  V = Builder.CreateBitCast(V, VMContext.getPointerType(PtrToInt8Ty, 0));
+  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
   V = Builder.CreateLoad(V);
   flag |= BLOCK_BYREF_CALLER;
@@ -1026,7 +1026,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
     const llvm::Type *ResultType = llvm::Type::VoidTy;
     ArgTys.push_back(PtrToInt8Ty);
     ArgTys.push_back(llvm::Type::Int32Ty);
-    FTy = VMContext.getFunctionType(ResultType, ArgTys, false);
+    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
     CGM.BlockObjectDispose
       = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
   }
@@ -1041,7 +1041,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
     ArgTys.push_back(PtrToInt8Ty);
     ArgTys.push_back(PtrToInt8Ty);
     ArgTys.push_back(llvm::Type::Int32Ty);
-    FTy = VMContext.getFunctionType(ResultType, ArgTys, false);
+    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
     CGM.BlockObjectAssign
       = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
   }
@@ -1061,7 +1061,7 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
 BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                              CGBuilderTy &B)
   : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
-  PtrToInt8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+  PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
   BlockHasCopyDispose = false;
 }
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 86650a1e7e..05f27d57a6 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -78,7 +78,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__builtin_va_end: {
     Value *ArgValue = EmitVAListRef(E->getArg(0));
     const llvm::Type *DestType =
-      VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     if (ArgValue->getType() != DestType)
       ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                        ArgValue->getName().data());
@@ -92,7 +92,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Value *SrcPtr = EmitVAListRef(E->getArg(1));
     const llvm::Type *Type =
-      VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     DstPtr = Builder.CreateBitCast(DstPtr, Type);
     SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -482,7 +482,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   {
     const llvm::Type *ResType[2];
     ResType[0]= ConvertType(E->getArg(1)->getType());
-    ResType[1] = VMContext.getPointerTypeUnqual(ResType[0]);
+    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
     Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
     Value *OldVal = EmitScalarExpr(E->getArg(1));
     Value *PrevVal = Builder.CreateCall3(AtomF,
@@ -637,7 +637,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_psrlqi128:
   case X86::BI__builtin_ia32_psrlwi128: {
     Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
-    const llvm::Type *Ty = VMContext.getVectorType(llvm::Type::Int64Ty, 2);
+    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
     llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
     Ops[1] = Builder.CreateInsertElement(VMContext.getUndef(Ty),
                                          Ops[1], Zero, "insert");
@@ -692,7 +692,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_psrlqi:
   case X86::BI__builtin_ia32_psrlwi: {
     Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
-    const llvm::Type *Ty = VMContext.getVectorType(llvm::Type::Int64Ty, 1);
+    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
     const char *name = 0;
     Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -744,7 +744,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
   }
   case X86::BI__builtin_ia32_ldmxcsr: {
-    llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
     Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
     Builder.CreateStore(Ops[0], Tmp);
@@ -752,7 +752,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                               Builder.CreateBitCast(Tmp, PtrTy));
   }
   case X86::BI__builtin_ia32_stmxcsr: {
-    llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
     Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
     One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -770,8 +770,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_storehps:
   case X86::BI__builtin_ia32_storelps: {
     const llvm::Type *EltTy = llvm::Type::Int64Ty;
-    llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(EltTy);
-    llvm::Type *VecTy = VMContext.getVectorType(EltTy, 2);
+    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
     // cast val v2i64
     Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index d6173a0ec1..f80956b93f 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -45,7 +45,7 @@ CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
                                            GuardVName.c_str());
   // Load the first byte of the guard variable.
-  const llvm::Type *PtrTy = VMContext.getPointerType(llvm::Type::Int8Ty, 0);
+  const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0);
   llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
                                       "tmp");
@@ -166,7 +166,7 @@ llvm::Value *CodeGenFunction::AddressCXXOfBaseClass(llvm::Value *BaseValue,
   // FIXME. Once type layout is complete, this will probably change.
   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
-  llvm::Type *I8Ptr = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+  llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
   uint64_t Offset = Layout.getBaseClassOffset(BaseClassDecl) / 8;
   llvm::Value *OffsetVal = llvm::ConstantInt::get(
@@ -177,7 +177,7 @@ llvm::Value *CodeGenFunction::AddressCXXOfBaseClass(llvm::Value *BaseValue,
     getContext().getCanonicalType(
       getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(BaseClassDecl)));
   const llvm::Type *BasePtr = ConvertType(BTy);
-  BasePtr = VMContext.getPointerTypeUnqual(BasePtr);
+  BasePtr = llvm::PointerType::getUnqual(BasePtr);
   BaseValue = Builder.CreateBitCast(BaseValue, BasePtr);
   return BaseValue;
 }
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 554c4f0b0e..3c3814c70d 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -194,7 +194,7 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
   // RAUW's the GV uses of this constant will be invalid.
   const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
   const llvm::Type *LPtrTy =
-    VMContext.getPointerType(LTy, D.getType().getAddressSpace());
+    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
   DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
   // Emit global variable debug descriptor for static vars.
@@ -225,7 +225,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
   bool needsCopyDispose = BlockRequiresCopying(Ty);
   std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
   const llvm::PointerType *PtrToInt8Ty
-    = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
   Types[0] = PtrToInt8Ty;
   Types[1] = PtrToInt8Ty;
   Types[2] = llvm::Type::Int32Ty;
@@ -238,7 +238,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
   assert((Align <= unsigned(Target.getPointerAlign(0))/8)
          && "Can't align more than pointer yet");
   Types[needsCopyDispose*2 + 4] = LTy;
-  return VMContext.getStructType(Types, false);
+  return llvm::StructType::get(Types, false);
 }
 /// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
@@ -283,7 +283,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
     if (!DidCallStackSave) {
       // Save the stack.
       const llvm::Type *LTy =
-        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+        llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
       llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
       llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
@@ -306,7 +306,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
     // Get the element type.
     const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
     const llvm::Type *LElemPtrTy =
-      VMContext.getPointerType(LElemTy, D.getType().getAddressSpace());
+      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
     llvm::Value *VLASize = EmitVLASize(Ty);
@@ -376,7 +376,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
   if (isByRef) {
     const llvm::PointerType *PtrToInt8Ty
-      = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+      = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     EnsureInsertPoint();
     llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 8c77f386ed..ea14242a00 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -124,7 +124,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
     llvm::Value *U = VMContext.getUndef(EltTy);
     return RValue::getComplex(std::make_pair(U, U));
   } else if (hasAggregateLLVMType(Ty)) {
-    const llvm::Type *LTy = VMContext.getPointerTypeUnqual(ConvertType(Ty));
+    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(VMContext.getUndef(LTy));
   } else {
     return RValue::get(VMContext.getUndef(ConvertType(Ty)));
@@ -140,7 +140,7 @@ RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                               const char *Name) {
   ErrorUnsupported(E, Name);
-  llvm::Type *Ty = VMContext.getPointerTypeUnqual(ConvertType(E->getType()));
+  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
   return LValue::MakeAddr(VMContext.getUndef(Ty),
                           E->getType().getCVRQualifiers(),
                           getContext().getObjCGCAttrKind(E->getType()),
@@ -254,7 +254,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
   const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
   if (DstPtr->getElementType() != SrcTy) {
     const llvm::Type *MemTy =
-      VMContext.getPointerType(SrcTy, DstPtr->getAddressSpace());
+      llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
     Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
   }
 }
@@ -692,7 +692,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
     bool needsCopyDispose = BlockRequiresCopying(VD->getType());
     const llvm::Type *PtrStructTy = V->getType();
     const llvm::Type *Ty = PtrStructTy;
-    Ty = VMContext.getPointerType(Ty, 0);
+    Ty = llvm::PointerType::get(Ty, 0);
     V = Builder.CreateStructGEP(V, 1, "forwarding");
     V = Builder.CreateBitCast(V, Ty);
     V = Builder.CreateLoad(V, false);
@@ -875,7 +875,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
   // Extend or truncate the index type to 32 or 64-bits.
   unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
   if (IdxBitwidth != LLVMPointerWidth)
-    Idx = Builder.CreateIntCast(Idx, VMContext.getIntegerType(LLVMPointerWidth),
+    Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
                                 IdxSigned, "idxprom");
   // We know that the pointer points to a type of the correct size,
@@ -902,7 +902,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
     Idx = Builder.CreateMul(Idx, InterfaceSize);
-    llvm::Type *i8PTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                 Idx, "arrayidx");
     Address = Builder.CreateBitCast(Address, Base->getType());
@@ -1037,7 +1037,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
     cast<llvm::PointerType>(BaseValue->getType());
   unsigned AS = BaseTy->getAddressSpace();
   BaseValue = Builder.CreateBitCast(BaseValue,
-                                    VMContext.getPointerType(FieldTy, AS),
+                                    llvm::PointerType::get(FieldTy, AS),
                                     "tmp");
   llvm::Value *Idx =
@@ -1068,7 +1068,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
       cast<llvm::PointerType>(BaseValue->getType());
     unsigned AS = BaseTy->getAddressSpace();
     V = Builder.CreateBitCast(V,
-                              VMContext.getPointerType(FieldTy, AS),
+                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
   }
   if (Field->getType()->isReferenceType())
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 7dbf2df6dd..a711f49ef7 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -543,7 +543,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   // equal, but other compilers do this optimization, and almost every memcpy
   // implementation handles this case safely.  If there is a libc that does not
   // safely handle this, we can add a target hook.
-  const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
   if (DestPtr->getType() != BP)
     DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
   if (SrcPtr->getType() != BP)
@@ -553,7 +553,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
   // FIXME: Handle variable sized types.
-  const llvm::Type *IntPtr = VMContext.getIntegerType(LLVMPointerWidth);
+  const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
   // FIXME: If we have a volatile struct, the optimizer can remove what might
   // appear to be `extra' memory ops:
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 8e45997aee..e5d1b06bb8 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -83,7 +83,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder {
     const llvm::Type *Ty = llvm::Type::Int8Ty;
     if (NumBytes > 1)
-      Ty = CGM.getLLVMContext().getArrayType(Ty, NumBytes);
+      Ty = llvm::ArrayType::get(Ty, NumBytes);
     llvm::Constant *Padding = CGM.getLLVMContext().getNullValue(Ty);
     PackedElements.push_back(Padding);
@@ -251,7 +251,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder {
     const llvm::Type *Ty = llvm::Type::Int8Ty;
     if (NumBytes > 1)
-      Ty = CGM.getLLVMContext().getArrayType(Ty, NumBytes);
+      Ty = llvm::ArrayType::get(Ty, NumBytes);
     llvm::Constant *C = CGM.getLLVMContext().getNullValue(Ty);
     Elements.push_back(C);
@@ -434,7 +434,7 @@ public:
     std::vector<const llvm::Type*> Types;
     for (unsigned i = 0; i < Elts.size(); ++i)
       Types.push_back(Elts[i]->getType());
-    const llvm::StructType *SType = VMContext.getStructType(Types, true);
+    const llvm::StructType *SType = llvm::StructType::get(Types, true);
     return llvm::ConstantStruct::get(SType, Elts);
   }
@@ -549,7 +549,7 @@ public:
       std::vector<const llvm::Type*> Types;
       for (unsigned i = 0; i < Elts.size(); ++i)
         Types.push_back(Elts[i]->getType());
-      SType = VMContext.getStructType(Types, true);
+      SType = llvm::StructType::get(Types, true);
     }
     return llvm::ConstantStruct::get(SType, Elts);
@@ -572,13 +572,13 @@ public:
     if (unsigned NumPadBytes = TotalSize - CurSize) {
       const llvm::Type *Ty = llvm::Type::Int8Ty;
       if (NumPadBytes > 1)
-        Ty = VMContext.getArrayType(Ty, NumPadBytes);
+        Ty = llvm::ArrayType::get(Ty, NumPadBytes);
       Elts.push_back(VMContext.getNullValue(Ty));
       Types.push_back(Ty);
     }
-    llvm::StructType* STy = VMContext.getStructType(Types, false);
+    llvm::StructType* STy = llvm::StructType::get(Types, false);
     return llvm::ConstantStruct::get(STy, Elts);
   }
@@ -609,7 +609,7 @@ public:
       InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
     const llvm::ArrayType *RetTy =
-      VMContext.getArrayType(NV->getType(), NumElts);
+      llvm::ArrayType::get(NV->getType(), NumElts);
     return llvm::ConstantArray::get(RetTy, Elts);
   }
@@ -831,7 +831,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
     // Apply offset if necessary.
     if (!Offset->isNullValue()) {
       const llvm::Type *Type =
-        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+        llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
       llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
       Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
       C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c9a1276738..64261dde6f 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -452,7 +452,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
   assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
   // First, convert to the correct width so that we control the kind of
   // extension.
-  const llvm::Type *MiddleTy = VMContext.getIntegerType(CGF.LLVMPointerWidth);
+  const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
   bool InputSigned = SrcType->isSignedIntegerType();
   llvm::Value* IntResult =
     Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -707,11 +707,11 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
       size = -size;
     Inc = llvm::ConstantInt::get(Inc->getType(), size);
     const llvm::Type *i8Ty =
-      VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     InVal = Builder.CreateBitCast(InVal, i8Ty);
     NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
     llvm::Value *lhs = LV.getAddress();
-    lhs = Builder.CreateBitCast(lhs, VMContext.getPointerTypeUnqual(i8Ty));
+    lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
     LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(),
                           CGF.getContext().getObjCGCAttrKind(ValTy));
   }
@@ -719,7 +719,7 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
     NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
   } else {
     const llvm::Type *i8Ty =
-      VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
     NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
     NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
@@ -988,11 +988,11 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
   handerArgTypes.push_back(llvm::Type::Int64Ty);
   handerArgTypes.push_back(llvm::Type::Int8Ty);
   handerArgTypes.push_back(llvm::Type::Int8Ty);
-  llvm::FunctionType *handlerTy = VMContext.getFunctionType(llvm::Type::Int64Ty,
+  llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
                                                             handerArgTypes, false);
   llvm::Value *handlerFunction =
     CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
-                                          VMContext.getPointerTypeUnqual(handlerTy));
+                                          llvm::PointerType::getUnqual(handlerTy));
   handlerFunction = Builder.CreateLoad(handlerFunction);
   llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
@@ -1056,7 +1056,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
   if (Width < CGF.LLVMPointerWidth) {
     // Zero or sign extend the pointer value based on whether the index is
     // signed or not.
-    const llvm::Type *IdxType = VMContext.getIntegerType(CGF.LLVMPointerWidth);
+    const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
     if (IdxExp->getType()->isSignedIntegerType())
       Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
     else
@@ -1070,7 +1070,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
       llvm::ConstantInt::get(Idx->getType(),
                              CGF.getContext().getTypeSize(OIT) / 8);
     Idx = Builder.CreateMul(Idx, InterfaceSize);
-    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
     Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
     return Builder.CreateBitCast(Res, Ptr->getType());
@@ -1080,7 +1080,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
   // extensions. The GNU void* casts amount to no-ops since our void*
   // type is i8*, but this is future proof.
   if (ElementType->isVoidType() || ElementType->isFunctionType()) {
-    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
     Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
     Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
     return Builder.CreateBitCast(Res, Ptr->getType());
@@ -1118,8 +1118,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
     if (Width < CGF.LLVMPointerWidth) {
       // Zero or sign extend the pointer value based on whether the index is
       // signed or not.
-      const llvm::Type *IdxType =
-        VMContext.getIntegerType(CGF.LLVMPointerWidth);
+      const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
       if (Ops.E->getRHS()->getType()->isSignedIntegerType())
         Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
       else
@@ -1136,7 +1135,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
                                CGF.getContext().getTypeSize(OIT) / 8);
       Idx = Builder.CreateMul(Idx, InterfaceSize);
       const llvm::Type *i8Ty =
-        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+        llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
       Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
       Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
       return Builder.CreateBitCast(Res, Ops.LHS->getType());
@@ -1147,7 +1146,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
     // void* type is i8*, but this is future proof.
     if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
       const llvm::Type *i8Ty =
-        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+        llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
       Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
       Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
       return Builder.CreateBitCast(Res, Ops.LHS->getType());
@@ -1614,7 +1613,7 @@ Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
 llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
                                          unsigned NumVals, bool isSplat) {
   llvm::Value *Vec
-    = VMContext.getUndef(VMContext.getVectorType(Vals[0]->getType(), NumVals));
+    = VMContext.getUndef(llvm::VectorType::get(Vals[0]->getType(), NumVals));
   for (unsigned i = 0, e = NumVals; i != e; ++i) {
     llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 450b4cdac4..85a5141951 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -213,15 +213,15 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
   Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
   Zeros[1] = Zeros[0];
   NULLPtr = VMContext.getConstantPointerNull(
-    VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty));
+    llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
   // C string type.  Used in lots of places.
   PtrToInt8Ty =
-    VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
+    llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
   // Get the selector Type.
   SelectorTy = cast<llvm::Po