author    Chris Lattner <sabre@nondot.org>  2011-07-18 04:24:23 +0000
committer Chris Lattner <sabre@nondot.org>  2011-07-18 04:24:23 +0000
commit    2acc6e3feda5e4f7d9009bdcf8b1cd777fecfe2d (patch)
tree      ff3b5c99b29349c6c5806be7168b466cf4f8d31b /lib/CodeGen
parent    b5f65475d25b67f87e368daa1583b762af7e2e45 (diff)
de-constify llvm::Type, patch by David Blaikie!
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135370 91177308-0d34-0410-b5e6-96231b3b80d8
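The change is mechanical: throughout lib/CodeGen, pointers and references to llvm::Type and its subclasses (llvm::FunctionType, llvm::StructType, llvm::PointerType, llvm::IntegerType, llvm::VectorType) lose their const qualifier, with no change to the calls themselves or to behavior; it appears to track the contemporaneous removal of const Type pointers from the LLVM-side APIs. A minimal before/after sketch of the pattern, excerpted from the first CGBlocks.cpp hunk below (CGM and C are in scope in that function; the old form is shown as a comment):

    // Before: const-qualified type pointers
    //   const llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
    //   const llvm::Type *i8p   = CGM.getTypes().ConvertType(C.VoidPtrTy);
    // After: same calls, same semantics, const dropped
    llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
    llvm::Type *i8p   = CGM.getTypes().ConvertType(C.VoidPtrTy);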
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              | 50
-rw-r--r--  lib/CodeGen/CGBlocks.h                |  2
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             | 72
-rw-r--r--  lib/CodeGen/CGCXX.cpp                 | 16
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |  4
-rw-r--r--  lib/CodeGen/CGCall.cpp                | 58
-rw-r--r--  lib/CodeGen/CGClass.cpp               | 26
-rw-r--r--  lib/CodeGen/CGCleanup.cpp             |  2
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           |  4
-rw-r--r--  lib/CodeGen/CGDecl.cpp                | 14
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             | 12
-rw-r--r--  lib/CodeGen/CGException.cpp           | 38
-rw-r--r--  lib/CodeGen/CGExpr.cpp                | 26
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             | 22
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             | 34
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |  6
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        | 48
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          | 56
-rw-r--r--  lib/CodeGen/CGObjC.cpp                | 40
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             | 40
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             | 52
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |  6
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                | 16
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 12
-rw-r--r--  lib/CodeGen/CGStmt.cpp                | 12
-rw-r--r--  lib/CodeGen/CGVTT.cpp                 | 10
-rw-r--r--  lib/CodeGen/CGVTables.cpp             | 22
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |  8
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         | 20
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         | 58
-rw-r--r--  lib/CodeGen/CodeGenModule.h           | 16
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |  4
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |  2
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         | 36
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            | 60
35 files changed, 452 insertions, 452 deletions
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 9815d1d4ef..9544cb4eb7 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -59,8 +59,8 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
- const llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
- const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
llvm::SmallVector<llvm::Constant*, 6> elements;
@@ -507,7 +507,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- const llvm::Type *intTy = ConvertType(getContext().IntTy);
+ llvm::Type *intTy = ConvertType(getContext().IntTy);
llvm::AllocaInst *blockAddr =
CreateTempAlloca(blockInfo.StructureType, "block");
@@ -723,7 +723,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
// Get a pointer to the generic block literal.
- const llvm::Type *BlockLiteralTy =
+ llvm::Type *BlockLiteralTy =
llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
// Bitcast the callee to a block literal.
@@ -756,10 +756,10 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
FuncTy->getExtInfo());
// Cast the function pointer to the right type.
- const llvm::Type *BlockFTy =
+ llvm::Type *BlockFTy =
CGM.getTypes().GetFunctionType(FnInfo, false);
- const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
// And call the block.
@@ -783,7 +783,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
// to byref*.
addr = Builder.CreateLoad(addr);
- const llvm::PointerType *byrefPointerType
+ llvm::PointerType *byrefPointerType
= llvm::PointerType::get(BuildByRefType(variable), 0);
addr = Builder.CreateBitCast(addr, byrefPointerType,
"byref.addr");
@@ -863,7 +863,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
literal->setAlignment(blockInfo.BlockAlign.getQuantity());
// Return a constant of the appropriately-casted type.
- const llvm::Type *requiredType =
+ llvm::Type *requiredType =
CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
@@ -918,7 +918,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
if (CGM.ReturnTypeUsesSRet(fnInfo))
blockInfo.UsesStret = true;
- const llvm::FunctionType *fnLLVMType =
+ llvm::FunctionType *fnLLVMType =
CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
MangleBuffer name;
@@ -1065,7 +1065,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
- const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1088,7 +1088,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
true);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
- const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
@@ -1180,7 +1180,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
- const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1201,7 +1201,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
false, true);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
- const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
@@ -1399,7 +1399,7 @@ public:
static llvm::Constant *
generateByrefCopyHelper(CodeGenFunction &CGF,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &byrefInfo) {
ASTContext &Context = CGF.getContext();
@@ -1416,7 +1416,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGF.CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1438,7 +1438,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
- const llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+ llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
// dst->x
llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
@@ -1462,7 +1462,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
/// Build the copy helper for a __block variable.
static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &info) {
CodeGenFunction CGF(CGM);
return generateByrefCopyHelper(CGF, byrefType, info);
@@ -1471,7 +1471,7 @@ static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
/// Generate code for a __block variable's dispose helper.
static llvm::Constant *
generateByrefDisposeHelper(CodeGenFunction &CGF,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &byrefInfo) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1484,7 +1484,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGF.CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1521,7 +1521,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
/// Build the dispose helper for a __block variable.
static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &info) {
CodeGenFunction CGF(CGM);
return generateByrefDisposeHelper(CGF, byrefType, info);
@@ -1529,7 +1529,7 @@ static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
///
template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
- const llvm::StructType &byrefTy,
+ llvm::StructType &byrefTy,
T &byrefInfo) {
// Increase the field's alignment to be at least pointer alignment,
// since the layout of the byref struct will guarantee at least that.
@@ -1553,7 +1553,7 @@ template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
}
CodeGenModule::ByrefHelpers *
-CodeGenFunction::buildByrefHelpers(const llvm::StructType &byrefType,
+CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
QualType type = var.getType();
@@ -1658,8 +1658,8 @@ llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
/// T x;
/// } x
///
-const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
- std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
+ std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
if (Info.first)
return Info.first;
@@ -1742,7 +1742,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
llvm::Value *addr = emission.Address;
// That's an alloca of the byref structure type.
- const llvm::StructType *byrefType = cast<llvm::StructType>(
+ llvm::StructType *byrefType = cast<llvm::StructType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
// Build the byref helpers if necessary. This is null if we don't need any.
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 4d8dead2be..6e71c1fdc0 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -176,7 +176,7 @@ public:
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
- const llvm::StructType *StructureType;
+ llvm::StructType *StructureType;
const BlockExpr *Block;
CharUnits BlockSize;
CharUnits BlockAlign;
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 1566bd9e66..1f0f380984 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -44,7 +44,7 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::IntegerType *IntType) {
+ QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
@@ -55,7 +55,7 @@ static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::Type *ResultType) {
+ QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
@@ -105,7 +105,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
@@ -139,7 +139,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -195,7 +195,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType = Int8PtrTy;
+ llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
@@ -208,7 +208,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type = Int8PtrTy;
+ llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -236,7 +236,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
@@ -251,7 +251,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
@@ -267,7 +267,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
llvm::ConstantInt::get(ArgType, 1), "tmp");
Value *Zero = llvm::Constant::getNullValue(ArgType);
@@ -287,7 +287,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
"tmp");
@@ -304,7 +304,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
@@ -471,7 +471,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_fpclassify: {
Value *V = EmitScalarExpr(E->getArg(5));
- const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
@@ -661,7 +661,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
- const llvm::IntegerType *Ty
+ llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
@@ -680,7 +680,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
@@ -874,7 +874,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
@@ -934,7 +934,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ElTy =
+ llvm::Type *ElTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
llvm::StoreInst *Store =
Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
@@ -1007,11 +1007,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
LLVMContext &C = CGM.getLLVMContext();
Value *Arg = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgTy = Arg->getType();
+ llvm::Type *ArgTy = Arg->getType();
if (ArgTy->isPPC_FP128Ty())
break; // FIXME: I'm not sure what the right implementation is here.
int ArgWidth = ArgTy->getPrimitiveSizeInBits();
- const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
@@ -1045,7 +1045,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
- const llvm::FunctionType *FTy = F->getFunctionType();
+ llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
@@ -1064,7 +1064,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
- const llvm::Type *PTy = FTy->getParamType(i);
+ llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
@@ -1077,7 +1077,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
- const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
+ llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -1154,12 +1154,12 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
return Builder.CreateCall(F, Ops, name);
}
-Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
ConstantInt *CI = cast<ConstantInt>(V);
int SV = CI->getSExtValue();
- const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
return llvm::ConstantVector::get(CV);
@@ -1196,8 +1196,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::SmallVector<Value*, 2> Ops;
for (unsigned i = 0; i < E->getNumArgs(); i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
- const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
- const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
llvm::StringRef Name = FD->getName();
return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
@@ -1530,7 +1530,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
default: assert(0 && "unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
@@ -1562,14 +1562,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case ARM::BI__builtin_neon_vmovl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case ARM::BI__builtin_neon_vmovn_v: {
- const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
@@ -1587,7 +1587,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
@@ -1602,7 +1602,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
@@ -1983,7 +1983,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
@@ -2046,7 +2046,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -2098,7 +2098,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
@@ -2106,7 +2106,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -2156,7 +2156,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
@@ -2186,7 +2186,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index f6fc202eaa..f841b25009 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -138,7 +138,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return true;
// Derive the type for the alias.
- const llvm::PointerType *AliasType
+ llvm::PointerType *AliasType
= getTypes().GetFunctionType(AliasDecl)->getPointerTo();
// Find the referrent. Some aliases might require a bitcast, in
@@ -221,7 +221,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(ctor, ctorType);
const FunctionProtoType *proto = ctor->getType()->castAs<FunctionProtoType>();
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
getTypes().GetFunctionType(*fnInfo, proto->isVariadic());
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
@@ -288,7 +288,7 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(dtor, dtorType);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
getTypes().GetFunctionType(*fnInfo, false);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
@@ -296,7 +296,7 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
}
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
- llvm::Value *This, const llvm::Type *Ty) {
+ llvm::Value *This, llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo();
llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
@@ -307,7 +307,7 @@ static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
MD = MD->getCanonicalDecl();
uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
@@ -320,7 +320,7 @@ CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
llvm::Value *
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
llvm::Value *VTable = 0;
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
@@ -366,7 +366,7 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
&CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
Dtor_Complete);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty
+ llvm::Type *Ty
= CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
@@ -387,7 +387,7 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *This, const llvm::Type *Ty) {
+ llvm::Value *This, llvm::Type *Ty) {
DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
uint64_t VTableIndex =
CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index dcc28b45cf..ff7213df1f 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -49,7 +49,7 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
FPT->isVariadic());
return llvm::Constant::getNullValue(FTy->getPointerTo());
@@ -60,7 +60,7 @@ llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
return llvm::Constant::getNullValue(Ty);
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index f8783ad08d..d2ad5dc887 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -367,12 +367,12 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
- const llvm::StructType *SrcSTy,
+ llvm::StructType *SrcSTy,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
if (SrcSTy->getNumElements() == 0) return SrcPtr;
- const llvm::Type *FirstElt = SrcSTy->getElementType(0);
+ llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
@@ -386,9 +386,9 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
// If the first element is a struct, recurse.
- const llvm::Type *SrcTy =
+ llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
- if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
return SrcPtr;
@@ -398,7 +398,7 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
CodeGenFunction &CGF) {
if (Val->getType() == Ty)
return Val;
@@ -412,7 +412,7 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
}
- const llvm::Type *DestIntTy = Ty;
+ llvm::Type *DestIntTy = Ty;
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
@@ -433,9 +433,9 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy =
+ llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
// If SrcTy and Ty are the same, just do a load.
@@ -444,7 +444,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
- if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
@@ -495,7 +495,7 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
llvm::Value *DestPtr, bool DestIsVolatile,
bool LowAlignment) {
// Prefer scalar stores to first-class aggregate stores.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
@@ -519,8 +519,8 @@ static void CreateCoercedStore(llvm::Value *Src,
llvm::Value *DstPtr,
bool DstIsVolatile,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy = Src->getType();
- const llvm::Type *DstTy =
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
if (SrcTy == DstTy) {
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
@@ -529,7 +529,7 @@ static void CreateCoercedStore(llvm::Value *Src,
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
- if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
}
@@ -615,7 +615,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
assert(Inserted && "Recursively being processed?");
llvm::SmallVector<llvm::Type*, 8> argTypes;
- const llvm::Type *resultType = 0;
+ llvm::Type *resultType = 0;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
@@ -632,7 +632,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
resultType = llvm::Type::getVoidTy(getLLVMContext());
QualType ret = FI.getReturnType();
- const llvm::Type *ty = ConvertType(ret);
+ llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = Context.getTargetAddressSpace(ret);
argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
break;
@@ -653,7 +653,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
argTypes.push_back(LTy->getPointerTo());
break;
}
@@ -664,7 +664,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
llvm::Type *argType = argAI.getCoerceToType();
- if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
argTypes.push_back(st->getElementType(i));
} else {
@@ -685,7 +685,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
-const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
@@ -803,7 +803,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
// FIXME: handle sseregparm someday...
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
break;
@@ -847,7 +847,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
const VarDecl *var,
llvm::Value *value) {
- const llvm::Type *varType = CGF.ConvertType(var->getType());
+ llvm::Type *varType = CGF.ConvertType(var->getType());
// This can happen with promotions that actually don't change the
// underlying type, like the enum promotions.
@@ -872,7 +872,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getResultType().getUnqualifiedType();
- const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
Builder.CreateStore(Zero, ReturnValue);
}
@@ -918,7 +918,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
//
// FIXME: We should have a common utility for generating an aggregate
// copy.
- const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
@@ -985,7 +985,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
@@ -1054,7 +1054,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
if (BB->empty()) return 0;
if (&BB->back() != result) return 0;
- const llvm::Type *resultType = result->getType();
+ llvm::Type *resultType = result->getType();
// result is in a BasicBlock and is therefore an Instruction.
llvm::Instruction *generator = cast<llvm::Instruction>(result);
@@ -1324,7 +1324,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
- const llvm::PointerType *destType =
+ llvm::PointerType *destType =
cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
// If the address is a constant null, just pass the appropriate null.
@@ -1630,7 +1630,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(STy));
@@ -1668,10 +1668,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// with unprototyped functions.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
- const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
- const llvm::FunctionType *CurFT =
+ llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+ llvm::FunctionType *CurFT =
cast<llvm::FunctionType>(CurPT->getElementType());
- const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+ llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 7dbaaf8529..d2b104eec0 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -62,7 +62,7 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
if (Offset.isZero())
return 0;
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
Types.ConvertType(getContext().getPointerDiffType());
return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
@@ -95,7 +95,7 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
if (Offset.isPositive()) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
@@ -107,7 +107,7 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
CharUnits NonVirtual, llvm::Value *Virtual) {
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Value *NonVirtualOffset = 0;
@@ -125,7 +125,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
BaseOffset = NonVirtualOffset;
// Apply the base offset.
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
@@ -155,7 +155,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
Start, PathEnd);
// Get the base pointer type.
- const llvm::Type *BasePtrTy =
+ llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
if (NonVirtualOffset.isZero() && !VBase) {
@@ -225,7 +225,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
QualType DerivedTy =
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
- const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+ llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
@@ -567,13 +567,13 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
= CGF.getContext().getAsConstantArrayType(FieldType);
if (Array && Constructor->isImplicit() &&
Constructor->isCopyConstructor()) {
- const llvm::Type *SizeTy
+ llvm::Type *SizeTy
= CGF.ConvertType(CGF.getContext().getSizeType());
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
BasePtr);
@@ -1236,7 +1236,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
// Push the src ptr.
QualType QT = *(FPT->arg_type_begin());
- const llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ llvm::Type *t = CGM.getTypes().ConvertType(QT);
Src = Builder.CreateBitCast(Src, t);
Args.add(RValue::get(Src), QT);
@@ -1399,7 +1399,7 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
llvm::Value *VBaseOffsetPtr =
Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
"vbase.offset.ptr");
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
ConvertType(getContext().getPointerDiffType());
VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
@@ -1465,7 +1465,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
VirtualOffset);
// Finally, store the address point.
- const llvm::Type *AddressPointPtrTy =
+ llvm::Type *AddressPointPtrTy =
VTableAddressPoint->getType()->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
Builder.CreateStore(VTableAddressPoint, VTableField);
@@ -1549,7 +1549,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
}
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
return Builder.CreateLoad(VTablePtrSrc, "vtable");
}
@@ -1677,7 +1677,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
llvm::Value *This) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 9c5dd1f237..564b662638 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -48,7 +48,7 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
- const llvm::Type *ComplexTy =
+ llvm::Type *ComplexTy =
llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) 0);
llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 4c12445917..c1687b38fe 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -1958,7 +1958,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
llvm::SmallVector<llvm::Value *, 9> addr;
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
@@ -2066,7 +2066,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
llvm::SmallVector<llvm::Value *, 9> addr;
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
if (isByRef) {
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 62c3a9791d..8093dd0042 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -175,7 +175,7 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
std::string Name = GetStaticDeclName(*this, D, Separator);
- const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
@@ -290,8 +290,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
- const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- const llvm::Type *LPtrTy =
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ llvm::Type *LPtrTy =
LTy->getPointerTo(CGM.getContext().getTargetAddressSpace(D.getType()));
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
@@ -519,7 +519,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
getByRefValueLLVMField(cast<VarDecl>(D))));
}
- const llvm::PointerType *ty
+ llvm::PointerType *ty
= cast<llvm::PointerType>(tempLV.getAddress()->getType());
ty = cast<llvm::PointerType>(ty->getElementType());
@@ -762,7 +762,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// A normal fixed sized variable becomes an alloca in the entry block,
// unless it's an NRVO variable.
- const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ llvm::Type *LTy = ConvertTypeForMem(Ty);
if (NRVO) {
// The named return value optimization: allocate this variable in the
@@ -829,7 +829,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
QualType elementType;
llvm::tie(elementCount, elementType) = getVLASize(Ty);
- const llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+ llvm::Type *llvmTy = ConvertTypeForMem(elementType);
// Allocate memory for the array.
llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
@@ -953,7 +953,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
- const llvm::Type *BP = Int8PtrTy;
+ llvm::Type *BP = Int8PtrTy;
if (Loc->getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP, "tmp");
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 0ae6a3d2ee..1df8768d53 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -136,7 +136,7 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
- const llvm::FunctionType *AtExitFnTy =
+ llvm::FunctionType *AtExitFnTy =
llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
@@ -167,7 +167,7 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- const llvm::FunctionType *FTy,
+ llvm::FunctionType *FTy,
llvm::StringRef Name) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
@@ -188,7 +188,7 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
llvm::GlobalVariable *Addr) {
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -225,7 +225,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -259,7 +259,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -351,7 +351,7 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
FunctionType::ExtInfo());
- const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 418bea6ee4..5ce9c35581 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -30,7 +30,7 @@ static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
llvm::Type *ArgTys[] = { CGF.SizeTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
@@ -40,7 +40,7 @@ static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
// void __cxa_free_exception(void *thrown_exception);
llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
@@ -51,7 +51,7 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
// void (*dest) (void *));
llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
@@ -60,7 +60,7 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
// void __cxa_rethrow();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
@@ -70,7 +70,7 @@ static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
// void *__cxa_get_exception_ptr(void*);
llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
@@ -80,7 +80,7 @@ static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
// void *__cxa_begin_catch(void*);
llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
@@ -89,7 +89,7 @@ static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
// void __cxa_end_catch();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
@@ -99,7 +99,7 @@ static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
// void __cxa_call_unexepcted(void *thrown_exception);
llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
@@ -107,7 +107,7 @@ static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
llvm::Type *ArgTys[] = { Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
@@ -117,7 +117,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
llvm::Type *ArgTys[] = { Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
@@ -128,7 +128,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
// void __terminate();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
llvm::StringRef name;
@@ -147,7 +147,7 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
llvm::StringRef Name) {
llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
@@ -346,7 +346,7 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- const llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
@@ -397,7 +397,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
QualType ThrowType = E->getSubExpr()->getType();
// Now allocate the exception object.
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
@@ -958,7 +958,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
CanQualType CatchType =
CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
- const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+ llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
// If we're catching by reference, we can just cast the object
// pointer to the appropriate pointer.
@@ -1001,7 +1001,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- const llvm::Type *PtrTy =
+ llvm::Type *PtrTy =
cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
// Create the temporary and write the adjusted pointer into it.
@@ -1037,7 +1037,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Otherwise, it returns a pointer into the exception object.
- const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
if (IsComplex) {
@@ -1055,7 +1055,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
assert(isa<RecordType>(CatchType) && "unexpected catch type!");
- const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -1315,7 +1315,7 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
- const llvm::FunctionType *rethrowFnTy =
+ llvm::FunctionType *rethrowFnTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
SavedExnVar = 0;
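The CGException.cpp hunks above all make the same change: the llvm::FunctionType used to declare a C++ EH runtime hook is no longer const-qualified. As a minimal sketch of that pattern (not part of the commit, and simply restating what getBeginCatchFn does), assuming the usual CGException.cpp context where CGF is the CodeGenFunction and Int8PtrTy is its cached i8* type:

    // Hypothetical standalone helper: declare void *__cxa_begin_catch(void *exn).
    // After this patch, FTy can be a plain llvm::FunctionType*.
    static llvm::Constant *declareBeginCatch(CodeGenFunction &CGF) {
      llvm::Type *ArgTys[] = { CGF.Int8PtrTy };        // the exception object
      llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
      return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
    }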
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index a7e8003eaa..a4992a4ea0 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -34,7 +34,7 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
unsigned addressSpace =
cast<llvm::PointerType>(value->getType())->getAddressSpace();
- const llvm::PointerType *destType = Int8PtrTy;
+ llvm::PointerType *destType = Int8PtrTy;
if (addressSpace)
destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
@@ -44,7 +44,7 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
-llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const llvm::Twine &Name) {
if (!Builder.isNamePreserving())
return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
@@ -183,7 +183,7 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
Out.flush();
- const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+ llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
// Create the reference temporary.
llvm::GlobalValue *RefTemp =
@@ -578,7 +578,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
return RValue::get(0);
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Type *EltTy = ConvertType(CTy->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
}
@@ -833,7 +833,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertType(LV.getType());
+ llvm::Type *ResLTy = ConvertType(LV.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
@@ -857,7 +857,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
AI.AccessWidth,
CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -1002,7 +1002,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
- const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Type *ResultType = ConvertType(getContext().LongTy);
llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
llvm::Value *dst = RHS;
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
@@ -1029,7 +1029,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
@@ -1045,7 +1045,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Return the new value of the bit-field, if requested.
if (Result) {
// Cast back to the proper type for result.
- const llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Type *SrcTy = Src.getScalarVal()->getType();
llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
"bf.reload.val");
@@ -1082,10 +1082,10 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
// Cast to the access type.
- const llvm::Type *AccessLTy =
+ llvm::Type *AccessLTy =
llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
- const llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
+ llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Extract the piece of the bit-field value to write in this access, limited
@@ -1684,7 +1684,7 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
llvm::SmallVector<unsigned, 4> &Elts) {
llvm::SmallVector<llvm::Constant*, 4> CElts;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
@@ -1904,7 +1904,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
// for both unions and structs. A union needs a bitcast, a struct element
// will need a bitcast if the LLVM type laid out doesn't match the desired
// type.
- const llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ llvm::Type *llvmType = ConvertTypeForMem(FieldType);
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
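CreateTempAlloca, changed above to take a plain llvm::Type*, just builds an alloca in the function's entry block. A rough standalone sketch of the same idea against the LLVM 3.0-era IRBuilder (illustrative only; header paths and the AllocaInst constructor have changed in later LLVM releases):

    // Sketch: create an entry-block temporary of an arbitrary, non-const type.
    llvm::AllocaInst *createEntryAlloca(llvm::Function *F, llvm::Type *Ty,
                                        const llvm::Twine &Name) {
      llvm::BasicBlock &Entry = F->getEntryBlock();
      llvm::IRBuilder<> B(&Entry, Entry.begin());   // insert at the top of entry
      return B.CreateAlloca(Ty, /*ArraySize=*/0, Name);
    }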
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 915ffd6034..a217337248 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -184,7 +184,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
if (Dest.requiresGCollection()) {
CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
Src.getAggregateAddr(),
@@ -215,7 +215,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
if (Dest.requiresGCollection()) {
CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
Dest.getAddr(),
@@ -647,9 +647,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- const llvm::PointerType *APType =
+ llvm::PointerType *APType =
cast<llvm::PointerType>(DestPtr->getType());
- const llvm::ArrayType *AType =
+ llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
uint64_t NumInitElements = E->getNumInits();
@@ -999,7 +999,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
Loc = CGF.Builder.CreateBitCast(Loc, BP);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
@@ -1088,13 +1088,13 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- const llvm::Type *DBP =
+ llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ llvm::Type *DBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
- const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- const llvm::Type *SBP =
+ llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::Type *SBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
@@ -1105,7 +1105,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -1116,7 +1116,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal =
llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
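The EmitAggregateCopy hunk above casts both pointers to i8* in their own address spaces before issuing the copy. Roughly, and with Ctx, B, Dest, Src and SizeVal assumed to be in scope (a sketch of the pattern, not the function itself):

    // Cast each operand to i8* in its address space, then memcpy.
    llvm::PointerType *DPT = llvm::cast<llvm::PointerType>(Dest->getType());
    Dest = B.CreateBitCast(Dest,
             llvm::Type::getInt8PtrTy(Ctx, DPT->getAddressSpace()));
    llvm::PointerType *SPT = llvm::cast<llvm::PointerType>(Src->getType());
    Src = B.CreateBitCast(Src,
             llvm::Type::getInt8PtrTy(Ctx, SPT->getAddressSpace()));
    B.CreateMemCpy(Dest, Src, SizeVal, /*Align=*/1, /*isVolatile=*/false);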
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 4396f567f2..ee3dc72fc8 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -236,7 +236,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
FInfo = &CGM.getTypes().getFunctionInfo(MD);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty
+ llvm::Type *Ty
= CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
// C++ [class.virtual]p12:
@@ -483,7 +483,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// the cookie size would bring the total size >= 0.
bool isSigned
= e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
- const llvm::IntegerType *numElementsType
+ llvm::IntegerType *numElementsType
= cast<llvm::IntegerType>(numElements->getType());
unsigned numElementsWidth = numElementsType->getBitWidth();
@@ -716,7 +716,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
if (E->getNumConstructorArgs() == 0)
return;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
// Create a temporary for the loop index and initialize it with 0.
llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
@@ -1105,7 +1105,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
operatorDeleteCleanup = EHStack.stable_begin();
}
- const llvm::Type *elementPtrTy
+ llvm::Type *elementPtrTy
= ConvertTypeForMem(allocType)->getPointerTo(AS);
llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
@@ -1115,7 +1115,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
- const llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ llvm::Type *resultType = ConvertTypeForMem(E->getType());
if (result->getType() != resultType)
result = Builder.CreateBitCast(result, resultType);
} else {
@@ -1218,7 +1218,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
ElementType);
}
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
Dtor_Complete),
/*isVariadic=*/false);
@@ -1307,7 +1307,7 @@ namespace {
// Pass the original requested size as the second argument.
if (DeleteFTy->getNumArgs() == 2) {
QualType size_t = DeleteFTy->getArgType(1);
- const llvm::IntegerType *SizeTy
+ llvm::IntegerType *SizeTy
= cast<llvm::IntegerType>(CGF.ConvertType(size_t));
CharUnits ElementTypeSize =
@@ -1439,8 +1439,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy =
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
@@ -1454,7 +1454,7 @@ static void EmitBadTypeidCall(CodeGenFunction &CGF) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
const Expr *E,
- const llvm::Type *StdTypeInfoPtrTy) {
+ llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
@@ -1487,7 +1487,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- const llvm::Type *StdTypeInfoPtrTy =
+ llvm::Type *StdTypeInfoPtrTy =
ConvertType(E->getType())->getPointerTo();
if (E->isTypeOperand()) {
@@ -1528,7 +1528,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(Int8PtrTy, Args, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
@@ -1537,8 +1537,8 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy =
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
@@ -1554,9 +1554,9 @@ static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
QualType SrcTy, QualType DestTy,
llvm::BasicBlock *CastEnd) {
- const llvm::Type *PtrDiffLTy =
+ llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
if (PTy->getPointeeType()->isVoidType()) {
@@ -1626,7 +1626,7 @@ EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
QualType DestTy) {
- const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
if (DestTy->isPointerType())
return llvm::Constant::getNullValue(DestLTy);
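getDynamicCastFn, touched above, declares the Itanium ABI entry point void *__dynamic_cast(void *sub, void *src_rtti, void *dst_rtti, ptrdiff_t src2dst_offset). A hypothetical standalone version against a bare llvm::Module, where PtrDiffTy stands in for the target's ptrdiff_t (e.g. i64 on x86-64):

    llvm::Constant *declareDynamicCast(llvm::Module &M, llvm::Type *PtrDiffTy) {
      llvm::LLVMContext &Ctx = M.getContext();
      llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
      llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
      llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
      return M.getOrInsertFunction("__dynamic_cast", FTy);
    }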
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 35cff1d727..672721ef7f 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -312,7 +312,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "complex expression");
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
@@ -740,7 +740,7 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Empty init list initializes to null
QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
- const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Type* LTy = CGF.ConvertType(Ty);
llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
return ComplexPairTy(zeroConstant, zeroConstant);
}
@@ -751,7 +751,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
if (!ArgPtr) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
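For reference, the empty-init-list case above lowers a _Complex value to a (real, imag) pair of zero constants of the element type. A tiny sketch, with a double element type assumed:

    llvm::Type  *EltTy = llvm::Type::getDoubleTy(Ctx);        // element type
    llvm::Value *Zero  = llvm::Constant::getNullValue(EltTy); // +0.0
    // ComplexPairTy is just a std::pair of the two halves.
    std::pair<llvm::Value*, llvm::Value*> Result(Zero, Zero);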
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 45e44dda0f..fc0da99f4f 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -213,7 +213,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// padding and then a hole for our i8 to get plopped into.
assert(isa<llvm::ArrayType>(LastElt->getType()) &&
"Expected array padding of undefs");
- const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
assert(AT->getElementType()->isIntegerTy(CharWidth) &&
AT->getNumElements() != 0 &&
"Expected non-empty array padding of undefs");
@@ -281,7 +281,7 @@ void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
if (PadSize.isZero())
return;
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
if (PadSize > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
@@ -317,7 +317,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
CharUnits NumChars =
AlignedElementOffsetInChars - ElementOffsetInChars;
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
if (NumChars > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
@@ -435,11 +435,11 @@ llvm::Constant *ConstStructBuilder::
// Pick the type to use. If the type is layout identical to the ConvertType
// type then use it, otherwise use whatever the builder produced for us.
- const llvm::StructType *STy =
+ llvm::StructType *STy =
llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
Builder.Elements,Builder.Packed);
- const llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
- if (const llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
+ llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
+ if (llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
if (ILESTy->isLayoutIdentical(STy))
STy = ILESTy;
}
@@ -513,7 +513,7 @@ public:
llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
E->getRHS()->getType(), CGF);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
@@ -527,7 +527,7 @@ public:
llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
if (!C) return 0;
- const llvm::Type *destType = ConvertType(E->getType());
+ llvm::Type *destType = ConvertType(E->getType());
switch (E->getCastKind()) {
case CK_ToUnion: {
@@ -680,9 +680,9 @@ public:
return Visit(ILE->getInit(0));
std::vector<llvm::Constant*> Elts;
- const llvm::ArrayType *AType =
+ llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
- const llvm::Type *ElemTy = AType->getElementType();
+ llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
// Initialising an array requires us to automatically
@@ -719,7 +719,7 @@ public:
std::vector<llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
- const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
Types, true);
return llvm::ConstantStruct::get(SType, Elts);
}
@@ -831,7 +831,7 @@ public:
}
// Utility methods
- const llvm::Type *ConvertType(QualType T) {
+ llvm::Type *ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
@@ -951,7 +951,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
assert(0 && "Constant expressions should be initialized.");
return 0;
case APValue::LValue: {
- const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
llvm::Constant *Offset =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
Result.Val.getLValueOffset().getQuantity());
@@ -962,7 +962,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
// Apply offset if necessary.
if (!Offset->isNullValue()) {
- const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
@@ -994,7 +994,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getInt());
if (C->getType()->isIntegerTy(1)) {
- const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
@@ -1064,7 +1064,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
if (C && C->getType()->isIntegerTy(1)) {
- const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
@@ -1181,14 +1181,14 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
}
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
- const llvm::Type *baseType,
+ llvm::Type *baseType,
const CXXRecordDecl *base);
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
const CXXRecordDecl *record,
bool asCompleteObject) {
const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
- const llvm::StructType *structure =
+ llvm::StructType *structure =
(asCompleteObject ? layout.getLLVMType()
: layout.getBaseSubobjectLLVMType());
@@ -1212,7 +1212,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
continue;
unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
- const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
}
@@ -1245,7 +1245,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
// We might have already laid this field out.
if (elements[fieldIndex]) continue;
- const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
}
}
@@ -1261,7 +1261,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
- const llvm::Type *baseType,
+ llvm::Type *baseType,
const CXXRecordDecl *base) {
const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
@@ -1277,7 +1277,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
// Otherwise, some bases are represented as arrays of i8 if the size
// of the base is smaller than its corresponding LLVM type. Figure
// out how many elements this base array has.
- const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
unsigned numBaseElements = baseArrayType->getNumElements();
// Fill in null data member pointers.
@@ -1287,7 +1287,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
// Now go through all other elements and zero them out.
if (numBaseElements) {
- const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
for (unsigned i = 0; i != numBaseElements; ++i) {
if (!baseElements[i])
@@ -1312,7 +1312,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
for (unsigned i = 0; i != NumElements; ++i)
Array[i] = Element;
- const llvm::ArrayType *ATy =
+ llvm::ArrayType *ATy =
cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
return llvm::ConstantArray::get(ATy, Array);
}
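The ConstStructBuilder hunk above now works with a non-const llvm::StructType when deciding which type the emitted constant should use. The selection logic, as a standalone sketch with Ctx, Elements, Packed and ConvertedTy assumed to be in scope:

    // Build the literal struct type implied by the constant elements, then
    // prefer the converted record type when the two are layout-identical.
    llvm::StructType *STy =
        llvm::ConstantStruct::getTypeForElements(Ctx, Elements, Packed);
    if (llvm::StructType *RecTy = llvm::dyn_cast<llvm::StructType>(ConvertedTy))
      if (RecTy->isLayoutIdentical(STy))
        STy = RecTy;                     // dyn_cast now yields a mutable type
    llvm::Constant *Init = llvm::ConstantStruct::get(STy, Elements);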
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index a73e667e78..37d424a2b3 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -78,7 +78,7 @@ public:
return I;
}
- const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
@@ -552,7 +552,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
- const llvm::Type *DstTy = ConvertType(DstType);
+ llvm::Type *DstTy = ConvertType(DstType);
// Ignore conversions like int -> uint.
if (Src->getType() == DstTy)
@@ -569,7 +569,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -686,7 +686,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
Value *Mask;
- const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
unsigned LHSElts = LTy->getNumElements();
if (E->getNumSubExprs() == 3) {
@@ -706,7 +706,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Mask = RHS;
}
- const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
llvm::Constant* EltMask;
// Treat vec3 like vec4.
@@ -734,7 +734,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// n = extract mask i
// x = extract val n
// newv = insert newv, x, i
- const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
@@ -760,7 +760,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
// Handle vec3 special since the index will be off by one for the RHS.
- const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
llvm::SmallVector<llvm::Constant*, 32> indices;
for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
@@ -815,7 +815,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
}
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
- unsigned Off, const llvm::Type *I32Ty) {
+ unsigned Off, llvm::Type *I32Ty) {
int MV = SVI->getMaskValue(Idx);
if (MV == -1)
return llvm::UndefValue::get(I32Ty);
@@ -831,7 +831,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- const llvm::VectorType *VType =
+ llvm::VectorType *VType =
dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
// We have a scalar in braces. Just use the first element.
@@ -853,7 +853,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Value *Init = Visit(IE);
llvm::SmallVector<llvm::Constant*, 16> Args;
- const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+ llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
// Handle scalar elements. If the scalar initializer is actually one
// element of a different vector of the same width, use shuffle instead of
@@ -911,7 +911,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (isa<ExtVectorElementExpr>(IE)) {
llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
Value *SVOp = SVI->getOperand(0);
- const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+ llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
if (OpTy->getNumElements() == ResElts) {
for (unsigned j = 0; j != CurIdx; ++j) {
@@ -968,7 +968,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: evaluate codegen vs. shuffling against constant null vector.
// Emit remaining default initializers.
- const llvm::Type *EltTy = VType->getElementType();
+ llvm::Type *EltTy = VType->getElementType();
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
@@ -1147,7 +1147,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -1163,7 +1163,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return 0;
}
case CK_VectorSplat: {
- const llvm::Type *DstTy = ConvertType(DestTy);
+ llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
// Insert the element in element zero of an undef vector
@@ -1432,7 +1432,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
- const llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Type* ResultType = ConvertType(E->getType());
llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
QualType CurrentType = E->getTypeSourceInfo()->getType();
for (unsigned i = 0; i != n; ++i) {
@@ -1686,7 +1686,7 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
- const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
@@ -2294,7 +2294,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
- const llvm::Type *ResTy = ConvertType(E->getType());
+ llvm::Type *ResTy = ConvertType(E->getType());
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
// If we have 1 && X, just emit X without inserting the control flow.
@@ -2349,7 +2349,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
- const llvm::Type *ResTy = ConvertType(E->getType());
+ llvm::Type *ResTy = ConvertType(E->getType());
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
// If we have 0 || X, just emit X without inserting the control flow.
@@ -2471,11 +2471,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
- const llvm::Type *condType = ConvertType(condExpr->getType());
- const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+ llvm::Type *condType = ConvertType(condExpr->getType());
+ llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
unsigned numElem = vecTy->getNumElements();
- const llvm::Type *elemType = vecTy->getElementType();
+ llvm::Type *elemType = vecTy->getElementType();
std::vector<llvm::Constant*> Zvals;
for (unsigned i = 0; i < numElem; ++i)
@@ -2493,7 +2493,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *RHSTmp = RHS;
llvm::Value *LHSTmp = LHS;
bool wasCast = false;
- const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
if (rhsVTy->getElementType()->isFloatTy()) {
RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
@@ -2578,11 +2578,11 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
- const llvm::Type *DstTy = ConvertType(E->getType());
+ llvm::Type *DstTy = ConvertType(E->getType());
// Going from vec4->vec3 or vec3->vec4 is a special case and requires
// a shuffle vector instead of a bitcast.
- const llvm::Type *SrcTy = Src->getType();
+ llvm::Type *SrcTy = Src->getType();
if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
@@ -2592,15 +2592,15 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// In the case of going from int4->float3, a bitcast is needed before
// doing a shuffle.
- const llvm::Type *srcElemTy =
+ llvm::Type *srcElemTy =
cast<llvm::VectorType>(SrcTy)->getElementType();
- const llvm::Type *dstElemTy =
+ llvm::Type *dstElemTy =
cast<llvm::VectorType>(DstTy)->getElementType();
if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
|| (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
// Create a float type of the same size as the source or destination.
- const llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
numElementsSrc);
Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
@@ -2678,7 +2678,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
// object->isa or (*object).isa
// Generate code as for: *(Class*)object
// build Class* type
- const llvm::Type *ClassPtrTy = ConvertType(E->getType());
+ llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
if (BaseExpr->isRValue()) {
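Several of the CGExprScalar.cpp hunks sit on the integer-to-pointer path, which first widens or truncates to the pointer-sized integer so the kind of extension is explicit. A small sketch of that conversion, where IntPtrTy would come from the target (e.g. TargetData::getIntPtrType):

    llvm::Value *emitIntToPtr(llvm::IRBuilder<> &B, llvm::Value *Src,
                              bool InputSigned, llvm::Type *IntPtrTy,
                              llvm::Type *DestPtrTy) {
      // Control sign- vs. zero-extension by going through IntPtrTy first.
      llvm::Value *I = B.CreateIntCast(Src, IntPtrTy, InputSigned, "conv");
      return B.CreateIntToPtr(I, DestPtrTy);
    }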
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 426cca0014..9fc2add875 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -33,7 +33,7 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(llvm::Value *addr) {
- const llvm::Type *type =
+ llvm::Type *type =
cast<llvm::PointerType>(addr->getType())->getElementType();
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}
@@ -206,7 +206,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// The delegate return type isn't necessarily a matching type; in
// fact, it's quite likely to be 'id'.
- const llvm::Type *selfTy =
+ llvm::Type *selfTy =
cast<llvm::PointerType>(selfAddr->getType())->getElementType();
newSelf = Builder.CreateBitCast(newSelf, selfTy);
@@ -896,7 +896,7 @@ void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
if (Src.isScalar()) {
llvm::Value *SrcVal = Src.getScalarVal();
QualType DstType = getContext().getCanonicalType(ArgType);
- const llvm::Type *DstTy = ConvertType(DstType);
+ llvm::Type *DstTy = ConvertType(DstType);
if (SrcVal->getType() != DstTy)
Src =
RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
@@ -985,7 +985,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
- const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
Args.add(RValue::get(Count), getContext().UnsignedLongTy);
@@ -1089,7 +1089,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementType = cast<Expr>(S.getElement())->getType();
elementIsVariable = false;
}
- const llvm::Type *convertedElementType = ConvertType(elementType);
+ llvm::Type *convertedElementType = ConvertType(elementType);
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
@@ -1276,7 +1276,7 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
- const llvm::FunctionType *type,
+ llvm::FunctionType *type,
llvm::StringRef fnName) {
llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
@@ -1300,13 +1300,13 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
if (!fn) {
std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id'.
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
@@ -1325,13 +1325,13 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
llvm::StringRef fnName) {
if (!fn) {
std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id*'.
- const llvm::Type *origType = addr->getType();
+ llvm::Type *origType = addr->getType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
// Call the function.
@@ -1363,12 +1363,12 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
argTypes[0] = CGF.Int8PtrPtrTy;
argTypes[1] = CGF.Int8PtrTy;
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
@@ -1392,7 +1392,7 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
if (!fn) {
std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
@@ -1490,7 +1490,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
}
@@ -1520,7 +1520,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
if (!fn) {
llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
}
@@ -1607,7 +1607,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
if (isa<llvm::ConstantPointerNull>(value)) return value;
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
value = Builder.CreateBitCast(value, Int8PtrTy);
value = EmitARCRetainBlock(value);
value = EmitARCAutorelease(value);
@@ -1674,7 +1674,7 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
}
@@ -1709,7 +1709,7 @@ void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
if (!fn) {
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Int8PtrTy, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
}
@@ -1728,7 +1728,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
// We don't want to use a weak import here; instead we should not
@@ -1917,7 +1917,7 @@ static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// The desired result type, if it differs from the type of the
// ultimate opaque expression.
- const llvm::Type *resultType = 0;
+ llvm::Type *resultType = 0;
// If we're loading retained from a __strong xvalue, we can avoid
// an extra retain/release pair by zeroing out the source of this
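The ARC hunks above all follow one pattern: the runtime entry point is declared lazily, cached in a llvm::Constant*& slot, and its llvm::FunctionType no longer needs to be const. A hedged sketch of that pattern, where Slot stands in for a field such as ARCEntrypoints::objc_release:

    llvm::Constant *getObjCRelease(CodeGenModule &CGM, llvm::Constant *&Slot,
                                   llvm::Type *Int8PtrTy, llvm::Type *VoidTy) {
      if (!Slot) {
        llvm::Type *Args[] = { Int8PtrTy };          // void objc_release(id)
        llvm::FunctionType *FnType =
            llvm::FunctionType::get(VoidTy, Args, /*isVarArg=*/false);
        Slot = CGM.CreateRuntimeFunction(FnType, "objc_release");
      }
      return Slot;
    }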
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 61027feb5c..730357ff4f 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -82,7 +82,7 @@ class LazyRuntimeFunction {
if (!Function) {
if (0 == FunctionName) return 0;
// We put the return type on the end of the vector, so pop it back off
- const llvm::Type *RetTy = ArgTys.back();
+ llvm::Type *RetTy = ArgTys.back();
ArgTys.pop_back();
llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
Function =
@@ -111,17 +111,17 @@ protected:
llvm::Module &TheModule;
/// struct objc_super. Used for sending messages to super. This structure
/// contains the receiver (object) and the expected class.
- const llvm::StructType *ObjCSuperTy;
+ llvm::StructType *ObjCSuperTy;
/// struct objc_super*. The type of the argument to the superclass message
/// lookup functions.
- const llvm::PointerType *PtrToObjCSuperTy;
+ llvm::PointerType *PtrToObjCSuperTy;
/// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
/// SEL is included in a header somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
llvm::PointerType *SelectorTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
- const llvm::IntegerType *Int8Ty;
+ llvm::IntegerType *Int8Ty;
/// Pointer to i8 - LLVM type of char*, for all of the places where the
/// runtime needs to deal with C strings.
llvm::PointerType *PtrToInt8Ty;
@@ -138,7 +138,7 @@ protected:
llvm::PointerType *IdTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
- const llvm::PointerType *PtrToIdTy;
+ llvm::PointerType *PtrToIdTy;
/// The clang type of id. Used when using the clang CGCall infrastructure to
/// call Objective-C methods.
CanQualType ASTIdTy;
@@ -153,14 +153,14 @@ protected:
/// compatibility with GCC...
llvm::IntegerType *LongTy;
/// LLVM type for C size_t. Used in various runtime data structures.
- const llvm::IntegerType *SizeTy;
+ llvm::IntegerType *SizeTy;
/// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *PtrDiffTy;
/// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
/// variables.
- const llvm::PointerType *PtrToIntTy;
+ llvm::PointerType *PtrToIntTy;
/// LLVM type for Objective-C BOOL type.
- const llvm::Type *BoolTy;
+ llvm::Type *BoolTy;
/// Metadata kind used to tie method lookups to message sends. The GNUstep
/// runtime provides some LLVM passes that can use this to do things like
/// automatic IMP caching and speculative inlining.
@@ -191,7 +191,7 @@ protected:
/// Generates a global structure, initialized by the elements in the vector.
/// The element types must match the types of the structure elements in the
/// first argument.
- llvm::GlobalVariable *MakeGlobal(const llvm::StructType *Ty,
+ llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
std::vector<llvm::Constant*> &V,
llvm::StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
@@ -203,7 +203,7 @@ protected:
/// Generates a global array. The vector must contain the same number of
/// elements that the array type declares, of the type specified as the array
/// element type.
- llvm::GlobalVariable *MakeGlobal(const llvm::ArrayType *Ty,
+ llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
std::vector<llvm::Constant*> &V,
llvm::StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
@@ -214,7 +214,7 @@ protected:
}
/// Generates a global array, inferring the array type from the specified
/// element type and the size of the initialiser.
- llvm::GlobalVariable *MakeGlobalArray(const llvm::Type *Ty,
+ llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
std::vector<llvm::Constant*> &V,
llvm::StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
@@ -225,7 +225,7 @@ protected:
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
- llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, llvm::Type *Ty){
if (V->getType() == Ty) return V;
return B.CreateBitCast(V, Ty);
}
@@ -1048,7 +1048,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
- const llvm::FunctionType *impType =
+ llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
// Get the IMP
@@ -1148,7 +1148,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
- const llvm::FunctionType *impType =
+ llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
@@ -1175,7 +1175,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
} else if (msgRet.isAggregate()) {
llvm::Value *v = msgRet.getAggregateAddr();
llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
llvm::AllocaInst *NullVal =
CGF.CreateTempAlloca(RetTy->getElementType(), "null");
CGF.InitTempAlloca(NullVal,
@@ -1438,7 +1438,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
- const llvm::Type *T =
+ llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
@@ -1963,7 +1963,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// setting up the alias. These are: The base address for the global, the
// ivar array (second field), the ivar in this list (set for each ivar), and
// the offset (third field in ivar structure)
- const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
llvm::ConstantInt::get(IndexTy, 1), 0,
llvm::ConstantInt::get(IndexTy, 2) };
@@ -2033,7 +2033,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
llvm::Type *SelStructPtrTy = SelectorTy;
if (SelStructTy == 0) {
@@ -2225,7 +2225,7 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
bool isClassMethod = !OMD->isInstanceMethod();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
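LazyRuntimeFunction, whose cached types are de-constified at the top of this file's hunks, keeps the return type as the last element of ArgTys and pops it off before building the FunctionType. A standalone approximation of that idiom against a plain llvm::Module:

    llvm::Constant *lazyDeclare(llvm::Module &M, llvm::StringRef Name,
                                std::vector<llvm::Type*> ArgTys /*by value*/) {
      llvm::Type *RetTy = ArgTys.back();   // return type rides at the end
      ArgTys.pop_back();
      llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
      return M.getOrInsertFunction(Name, FTy);
    }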
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 010b9e174e..dbf95d4d73 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -212,7 +212,7 @@ public:
Params.push_back(SelType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
FunctionType::ExtInfo()),
false);
@@ -232,7 +232,7 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -250,7 +250,7 @@ public:
Params.push_back(Ctx.LongTy);
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -263,7 +263,7 @@ public:
// void objc_enumerationMutation (id)
llvm::SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -1510,7 +1510,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
- const llvm::Type *ClassTy =
+ llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
@@ -1557,7 +1557,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
if (Method)
@@ -2501,7 +2501,7 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
GetNameForMethod(OMD, CD, Name);
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
llvm::Function *Method =
llvm::Function::Create(MethodTy,
@@ -2519,7 +2519,7 @@ CGObjCCommonMac::CreateMetadataVar(llvm::Twine Name,
const char *Section,
unsigned Align,
bool AddToUsed) {
- const llvm::Type *Ty = Init->getType();
+ llvm::Type *Ty = Init->getType();
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Ty, false,
llvm::GlobalValue::InternalLinkage, Init, Name);
@@ -3230,7 +3230,7 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
///
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
- const llvm::Type* DestTy =
+ llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
ObjCTypes.PtrObjectPtrTy);
@@ -3245,7 +3245,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
///
void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3266,7 +3266,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
bool threadlocal) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3292,7 +3292,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
llvm::Value *ivarOffset) {
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3312,7 +3312,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
///
void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3394,7 +3394,7 @@ void CGObjCCommonMac::EmitImageInfo() {
// We never allow @synthesize of a superclass property.
flags |= eImageInfo_CorrectedSynthesize;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
// Emitted as int[2];
llvm::Constant *values[2] = {
@@ -3575,7 +3575,7 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
const RecordDecl *RD = RT->getDecl();
// FIXME - Use iterator.
llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
- const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
@@ -3754,7 +3754,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
/// filled already by the caller.
llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
unsigned int WordsToScan, WordsToSkip;
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
// Build the string of skip/scan nibbles
llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
@@ -3898,7 +3898,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool ForStrongLayout) {
bool hasUnion = false;
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
!CGM.getLangOptions().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
@@ -4125,7 +4125,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// FIXME: It would be nice to unify this with the opaque type, so that the IR
// comes out a bit cleaner.
- const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
// I'm not sure I like this. The implicit coordination is a bit
@@ -5273,7 +5273,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
ComputeIvarBaseOffset(CGM, ID, IVD));
Ivar[1] = GetMethodVarName(IVD->getIdentifier());
Ivar[2] = GetMethodVarType(IVD);
- const llvm::Type *FieldTy =
+ llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
@@ -5669,7 +5669,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
bool variadic = method ? method->isVariadic() : false;
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
CGF.getTypes().GetFunctionType(fnInfo, variadic);
callee = CGF.Builder.CreateBitCast(callee,
llvm::PointerType::getUnqual(fnType));
@@ -5847,7 +5847,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
- const llvm::Type *ClassTy =
+ llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
@@ -5890,7 +5890,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src,
llvm::Value *dst,
llvm::Value *ivarOffset) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5911,7 +5911,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5944,7 +5944,7 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
- const llvm::Type* DestTy =
+ llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
@@ -5958,7 +5958,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
///
void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5979,7 +5979,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
bool threadlocal) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
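The hunks above all follow one mechanical pattern: only the declared pointer type changes, never the call site itself. A minimal sketch of that pattern (illustrative only, not an additional hunk from the patch):

    // Before the patch: converted/queried types were held through const pointers.
    const llvm::Type *SrcTy = src->getType();
    // After the patch: the const qualifier is dropped; the rest of the statement is unchanged.
    llvm::Type *SrcTy = src->getType();

The same substitution is applied to llvm::FunctionType, llvm::StructType, llvm::ArrayType, llvm::PointerType and llvm::IntegerType in the files below.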
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 09c8d0b28f..52338606b8 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -86,9 +86,9 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
QualType IvarTy = Ivar->getType();
- const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
@@ -244,7 +244,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Bind the catch parameter if it exists.
if (const VarDecl *CatchParam = Handler.Variable) {
- const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index e564c70705..23e6a42e5d 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -26,7 +26,7 @@ class RTTIBuilder {
CodeGenModule &CGM; // Per-module state.
llvm::LLVMContext &VMContext;
- const llvm::Type *Int8PtrTy;
+ llvm::Type *Int8PtrTy;
/// Fields - The fields of the RTTI descriptor currently being built.
llvm::SmallVector<llvm::Constant *, 16> Fields;
@@ -479,7 +479,7 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm::Constant *VTable =
CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
// The vtable address point is 2.
@@ -580,7 +580,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// And the name.
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
switch (Ty->getTypeClass()) {
@@ -822,7 +822,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
// Itanium C++ ABI 2.9.5p6c:
@@ -840,7 +840,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
if (!RD->getNumBases())
return;
- const llvm::Type *LongLTy =
+ llvm::Type *LongLTy =
CGM.getTypes().ConvertType(CGM.getContext().LongTy);
// Now add the base class descriptions.
@@ -916,7 +916,7 @@ void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
@@ -953,7 +953,7 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
if (IsIncompleteClassType(ClassType))
Flags |= PTI_ContainingClassIncomplete;
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
@@ -977,7 +977,7 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
if (!ForEH && !getContext().getLangOptions().RTTI) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
return llvm::Constant::getNullValue(Int8PtrTy);
}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 2b07bafa00..df65ba88a3 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -174,7 +174,7 @@ private:
/// the passed size.
void AppendTailPadding(CharUnits RecordSize);
- CharUnits getTypeAlignment(const llvm::Type *Ty) const;
+ CharUnits getTypeAlignment(llvm::Type *Ty) const;
/// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
/// LLVM element types.
@@ -230,7 +230,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
- const llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
+ llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
@@ -672,10 +672,10 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
// Check if we need to add a vtable pointer.
if (RD->isDynamicClass()) {
if (!PrimaryBase) {
- const llvm::Type *FunctionType =
+ llvm::Type *FunctionType =
llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
/*isVarArg=*/true);
- const llvm::Type *VTableTy = FunctionType->getPointerTo();
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
@@ -882,7 +882,7 @@ void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
AppendField(NextFieldOffset, getByteArrayType(numBytes));
}
-CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
if (Packed)
return CharUnits::One();
@@ -983,7 +983,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
}
// Verify that the LLVM and AST field offsets agree.
- const llvm::StructType *ST =
+ llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
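One likely motivation for dropping const in the record-layout code above is LLVM's reworked type system, in which a struct type can be created as an opaque named type and have its body installed later; that mutation needs a non-const StructType pointer. A hedged sketch of the idiom, assuming an llvm::LLVMContext &Context in scope (the struct name and fields here are invented for illustration, not taken from the patch):

    llvm::StructType *STy = llvm::StructType::create(Context, "struct.Example");
    llvm::SmallVector<llvm::Type *, 2> Body;
    Body.push_back(llvm::Type::getInt32Ty(Context));    // e.g. an int field
    Body.push_back(llvm::Type::getInt8PtrTy(Context));  // e.g. an i8* field
    STy->setBody(Body, /*isPacked=*/false);              // mutates STy, so it cannot be const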
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 07bddb7972..d56c4bbe01 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -1301,7 +1301,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
- const llvm::Type *Ty = ConvertType(InputType);
+ llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
@@ -1530,14 +1530,14 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Use ptrtoint as appropriate so that we can do our extension.
if (isa<llvm::PointerType>(Arg->getType()))
Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
- const llvm::Type *OutputTy = ConvertType(OutputType);
+ llvm::Type *OutputTy = ConvertType(OutputType);
if (isa<llvm::IntegerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, OutputTy);
else
Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
- if (const llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
@@ -1577,7 +1577,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Constraints += MachineClobbers;
}
- const llvm::Type *ResultType;
+ llvm::Type *ResultType;
if (ResultRegTypes.empty())
ResultType = llvm::Type::getVoidTy(getLLVMContext());
else if (ResultRegTypes.size() == 1)
@@ -1585,7 +1585,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
else
ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
llvm::InlineAsm *IA =
@@ -1615,7 +1615,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// If the result type of the LLVM IR asm doesn't match the result type of
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
- const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
// Truncate the integer result to the right size, note that TruncTy can be
// a pointer.
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index cec02cdfc2..f9709da8af 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -210,7 +210,7 @@ void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
llvm::Constant *Init =
llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
VTTComponents.push_back(Init);
@@ -385,8 +385,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true, Linkage);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::ArrayType *ArrayType =
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
llvm::Constant *Init =
@@ -414,9 +414,9 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
- const llvm::Type *Int8PtrTy =
+ llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::ArrayType *ArrayType =
+ llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
llvm::GlobalVariable *GV =
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index c161b79fd3..1e5c56f7fc 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -2532,7 +2532,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
Out.flush();
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}
@@ -2543,7 +2543,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
- const llvm::Type *Int8PtrTy =
+ llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
@@ -2554,7 +2554,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
}
if (VirtualAdjustment) {
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Do the virtual adjustment.
@@ -2704,7 +2704,7 @@ void CodeGenFunction::GenerateVarArgsThunk(
QualType ResultType = FPT->getResultType();
// Get the original function
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(FnInfo, /*IsVariadic*/true);
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
@@ -2811,7 +2811,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
}
// Get our callee.
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
FPT->isVariadic());
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
@@ -3066,9 +3066,9 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
const VTableThunksTy &VTableThunks) {
llvm::SmallVector<llvm::Constant *, 64> Inits;
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
QualType ClassType = CGM.getContext().getTagDeclType(RD);
@@ -3126,7 +3126,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- const llvm::FunctionType *Ty =
+ llvm::FunctionType *Ty =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
/*isVarArg=*/false);
PureVirtualFn =
@@ -3147,7 +3147,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
NextVTableThunkIndex++;
} else {
- const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
}
@@ -3178,7 +3178,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
ComputeVTableRelatedInformation(RD, /*VTableRequired=*/true);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
@@ -3246,7 +3246,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
Out.flush();
llvm::StringRef Name = OutName.str();
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents());
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 702897a7c4..d580ce3fcb 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -215,7 +215,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
llvm::PointerType *PointerTy = Int8PtrTy;
llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
- const llvm::FunctionType *FunctionTy =
+ llvm::FunctionType *FunctionTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
ProfileFuncArgs, false);
@@ -645,7 +645,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *baseSizeInChars
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
- const llvm::Type *i8p = Builder.getInt8PtrTy();
+ llvm::Type *i8p = Builder.getInt8PtrTy();
llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
@@ -690,7 +690,7 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Cast the dest ptr to the appropriate i8 pointer type.
unsigned DestAS =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
+ llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
@@ -828,7 +828,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// constant-length arrays than to re-evaluate the array bounds.
uint64_t countFromCLAs = 1;
- const llvm::ArrayType *llvmArrayType =
+ llvm::ArrayType *llvmArrayType =
cast<llvm::ArrayType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
while (true) {
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f27ed947b8..1ce9c7966c 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -1135,7 +1135,7 @@ private:
/// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
unsigned> > ByRefValueInfo;
llvm::BasicBlock *TerminateLandingPad;
@@ -1269,7 +1269,7 @@ public:
llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
- const llvm::StructType *,
+ llvm::StructType *,
llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -1298,7 +1298,7 @@ public:
return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
}
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
- const llvm::Type *BuildByRefType(const VarDecl *var);
+ llvm::Type *BuildByRefType(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
@@ -1352,7 +1352,7 @@ public:
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
@@ -1486,7 +1486,7 @@ public:
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
/// the alloca.
- llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
const llvm::Twine &Name = "tmp");
/// InitTempAlloca - Provide an initial value for the given alloca.
@@ -2080,12 +2080,12 @@ public:
const llvm::Twine &Name = "");
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
- const llvm::Type *Ty);
+ llvm::Type *Ty);
llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *This, llvm::Type *Ty);
llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
- const llvm::Type *Ty);
+ llvm::Type *Ty);
llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -2126,7 +2126,7 @@ public:
const char *name,
unsigned shift = 0, bool rightshift = false);
llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
- llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
@@ -2439,7 +2439,7 @@ private:
void EmitDeclMetadata();
CodeGenModule::ByrefHelpers *
- buildByrefHelpers(const llvm::StructType &byrefType,
+ buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission);
};
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 0668039a89..2bd5df23eb 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -570,7 +570,7 @@ void CodeGenModule::EmitLLVMUsed() {
if (LLVMUsed.empty())
return;
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
// Convert LLVMUsed to what ConstantArray needs.
std::vector<llvm::Constant*> UsedArray;
@@ -653,7 +653,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
// get [N x i8] constants for the annotation string, and the filename string
// which are the 2nd and 3rd elements of the global annotation structure.
- const llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
AA->getAnnotation(), true);
llvm::Constant *unit = llvm::ConstantArray::get(VMContext,
@@ -695,7 +695,7 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
- const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
@@ -839,7 +839,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
/// to set the attributes on the function when it is first created.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
GlobalDecl D, bool ForVTable,
llvm::Attributes ExtraAttrs) {
// Lookup the entry, lazily creating it if necessary.
@@ -865,7 +865,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
// sure not to try to set attributes.
bool IsIncompleteFunction = false;
- const llvm::FunctionType *FTy;
+ llvm::FunctionType *FTy;
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
@@ -935,7 +935,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
bool ForVTable) {
// If there was no specific requested type, just convert it now.
if (!Ty)
@@ -948,7 +948,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *
-CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
llvm::StringRef Name,
llvm::Attributes ExtraAttrs) {
return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
@@ -980,7 +980,7 @@ static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
/// to set the attributes on the global when it is first created.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
- const llvm::PointerType *Ty,
+ llvm::PointerType *Ty,
const VarDecl *D,
bool UnnamedAddr) {
// Lookup the entry, lazily creating it if necessary.
@@ -1050,7 +1050,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
llvm::GlobalVariable *
CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
llvm::GlobalVariable *OldGV = 0;
@@ -1092,13 +1092,13 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
/// then it will be greated with the specified type instead of whatever the
/// normal requested type would be.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
assert(D->hasGlobalStorage() && "Not a global variable");
QualType ASTTy = D->getType();
if (Ty == 0)
Ty = getTypes().ConvertTypeForMem(ASTTy);
- const llvm::PointerType *PTy =
+ llvm::PointerType *PTy =
llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
llvm::StringRef MangledName = getMangledName(D);
@@ -1108,7 +1108,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *
-CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
llvm::StringRef Name) {
return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
true);
@@ -1207,7 +1207,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::LinkOnceODRLinkage;
}
-CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
+CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
return Context.toCharUnitsFromBits(
TheTargetData.getTypeStoreSizeInBits(Ty));
}
@@ -1253,7 +1253,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
}
}
- const llvm::Type* InitType = Init->getType();
+ llvm::Type* InitType = Init->getType();
llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
// Strip off a bitcast if we got one back.
@@ -1377,7 +1377,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
if (OldFn == 0) return;
- const llvm::Type *NewRetTy = NewFn->getReturnType();
+ llvm::Type *NewRetTy = NewFn->getReturnType();
llvm::SmallVector<llvm::Value*, 4> ArgList;
for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
@@ -1440,7 +1440,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
bool variadic = false;
if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
variadic = fpt->isVariadic();
- const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1525,7 +1525,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
if (Entry && !Entry->isDeclaration())
return;
- const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
// Create a reference to the named value. This ensures that it is emitted
// if a deferred decl.
@@ -1605,7 +1605,7 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
Name = Context.BuiltinInfo.GetName(BuiltinID);
- const llvm::FunctionType *Ty =
+ llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
@@ -1696,7 +1696,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
- const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
llvm::Constant *GV = CreateRuntimeVariable(Ty,
"__CFConstantStringClassReference");
@@ -1707,7 +1707,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
QualType CFTy = getContext().getCFConstantStringType();
- const llvm::StructType *STy =
+ llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(CFTy));
std::vector<llvm::Constant*> Fields(4);
@@ -1716,7 +1716,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields[0] = CFConstantStringClassRef;
// Flags.
- const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
llvm::ConstantInt::get(Ty, 0x07C8);
@@ -1784,7 +1784,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
std::string StringClass(getLangOptions().ObjCConstantStringClass);
- const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
if (Features.ObjCNonFragileABI) {
std::string str =
@@ -1792,14 +1792,14 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
: "OBJC_CLASS_$_" + StringClass;
GV = getObjCRuntime().GetClassGlobal(str);
// Make sure the result is of the correct type.
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
ConstantStringClassRef =
llvm::ConstantExpr::getBitCast(GV, PTy);
} else {
std::string str =
StringClass.empty() ? "_NSConstantStringClassReference"
: "_" + StringClass + "ClassReference";
- const llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
GV = CreateRuntimeVariable(PTy, str);
// Decay array -> ptr
ConstantStringClassRef =
@@ -1809,7 +1809,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
QualType NSTy = getContext().getNSConstantStringType();
- const llvm::StructType *STy =
+ llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(NSTy));
std::vector<llvm::Constant*> Fields(3);
@@ -1834,7 +1834,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
// String length.
- const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
@@ -2191,7 +2191,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
- const llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
return llvm::ConstantInt::get(i64, PtrInt);
}
@@ -2293,7 +2293,7 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
// Otherwise construct the function by hand.
llvm::Type *args[] = { Int8PtrTy, Int32Ty };
- const llvm::FunctionType *fty
+ llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectDispose =
CreateRuntimeFunction(fty, "_Block_object_dispose");
@@ -2312,7 +2312,7 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
// Otherwise construct the function by hand.
llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
- const llvm::FunctionType *fty
+ llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectAssign =
CreateRuntimeFunction(fty, "_Block_object_assign");
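As a usage note for the de-constified CreateRuntimeFunction signature above: callers now hand over a plain llvm::FunctionType*, exactly as the _Block_object_* helpers do. A short sketch, assuming the CodeGenModule members Int8PtrTy, Int32Ty and VoidTy already used in the hunks above (the runtime function name here is hypothetical):

    llvm::Type *args[] = { Int8PtrTy, Int32Ty };
    llvm::FunctionType *fty =
      llvm::FunctionType::get(VoidTy, args, /*isVarArg=*/false);
    llvm::Constant *fn = CreateRuntimeFunction(fty, "_example_runtime_helper");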
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 86fb6d4d03..76c59d42bc 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -433,7 +433,7 @@ public:
/// variable with the right type will be created and all uses of the old
/// variable will be replaced with a bitcast to the new variable.
llvm::GlobalVariable *
- CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name, const llvm::Type *Ty,
+ CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage);
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
@@ -441,14 +441,14 @@ public:
/// then it will be greated with the specified type instead of whatever the
/// normal requested type would be.
llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
- const llvm::Type *Ty = 0);
+ llvm::Type *Ty = 0);
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty = 0,
+ llvm::Type *Ty = 0,
bool ForVTable = false);
/// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
@@ -594,13 +594,13 @@ public:
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
- llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+ llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
llvm::StringRef Name,
llvm::Attributes ExtraAttrs =
llvm::Attribute::None);
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
- llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+ llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
llvm::StringRef Name);
///@name Custom Blocks Runtime Interfaces
@@ -709,7 +709,7 @@ public:
/// GetTargetTypeStoreSize - Return the store size, in character units, of
/// the given LLVM type.
- CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
+ CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const;
/// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
/// variable.
@@ -723,13 +723,13 @@ private:
llvm::GlobalValue *GetGlobalValue(llvm::StringRef Ref);
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
GlobalDecl D,
bool ForVTable,
llvm::Attributes ExtraAttrs =
llvm::Attribute::None);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
- const llvm::PointerType *PTy,
+ llvm::PointerType *PTy,
const VarDecl *D,
bool UnnamedAddr = false);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 764688fcb1..5859ef543f 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -418,7 +418,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::ConstantArray: {
const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
- const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+ llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
break;
}
@@ -511,7 +511,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// Protocol qualifications do not influence the LLVM type, we just return a
// pointer to the underlying interface type. We don't need to worry about
// recursive conversion.
- const llvm::Type *T =
+ llvm::Type *T =
ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
ResultType = T->getPointerTo();
break;
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 7c0fb81643..7682f2af7d 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -138,7 +138,7 @@ public:
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method to has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
- const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+ llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 0c86080fa8..2832168eb3 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -215,11 +215,11 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
FPT->isVariadic());
- const llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::IntegerType *ptrdiff = getPtrDiffTy();
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
@@ -259,7 +259,7 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
CGF.EmitBlock(FnVirtual);
// Cast the adjusted this to a pointer to vtable pointer and load.
- const llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Type *VTableTy = Builder.getInt8PtrTy();
llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
VTable = Builder.CreateLoad(VTable, "memptr.vtable");
@@ -307,7 +307,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
- const llvm::Type *PType
+ llvm::Type *PType
= CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
return Builder.CreateBitCast(Addr, PType);
}
@@ -478,7 +478,7 @@ ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- const llvm::Type *ptrdiff_t = getPtrDiffTy();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
@@ -504,7 +504,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
MD = MD->getCanonicalDecl();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::Type *ptrdiff_t = getPtrDiffTy();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
// Get the function pointer (or index if this is a virtual function).
llvm::Constant *MemPtr[2];
@@ -535,7 +535,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
}
} else {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- const llvm::Type *Ty;
+ llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
@@ -784,7 +784,7 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
// Destructor thunks in the ARM ABI have indeterminate results.
- const llvm::Type *T =
+ llvm::Type *T =
cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
RValue Undef = RValue::get(llvm::UndefValue::get(T));
return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
@@ -907,7 +907,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
CharUnits &CookieSize) {
// Derive a char* in the same address space as the pointer.
unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
if (!NeedsArrayCookie(expr, ElementType)) {
@@ -919,7 +919,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
QualType SizeTy = getContext().getSizeType();
CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
CookieSize
= std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
@@ -968,7 +968,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
ASTContext &Ctx = getContext();
CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
- const llvm::IntegerType *SizeTy =
+ llvm::IntegerType *SizeTy =
cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
// The cookie is always at the start of the buffer.
@@ -1000,7 +1000,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
CharUnits &CookieSize) {
// Derive a char* in the same address space as the pointer.
unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
if (!NeedsArrayCookie(expr, ElementType)) {
@@ -1012,7 +1012,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
QualType SizeTy = getContext().getSizeType();
CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
// The cookie size is always 2 * sizeof(size_t).
CookieSize = 2 * SizeSize;
@@ -1037,7 +1037,7 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
ArgTys, /*isVarArg=*/false);
@@ -1048,7 +1048,7 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
ArgTys, /*isVarArg=*/false);
@@ -1059,7 +1059,7 @@ static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
ArgTys, /*isVarArg=*/false);
@@ -1090,7 +1090,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
bool threadsafe =
(getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
- const llvm::IntegerType *GuardTy;
+ llvm::IntegerType *GuardTy;
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
@@ -1152,7 +1152,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// }
} else {
// Load the first byte of the guard variable.
- const llvm::Type *PtrTy = Builder.getInt8PtrTy();
+ llvm::Type *PtrTy = Builder.getInt8PtrTy();
llvm::Value *V =
Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index df2c1bd98c..58efe179e4 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -62,7 +62,7 @@ void ABIArgInfo::dump() const {
switch (TheKind) {
case Direct:
OS << "Direct Type=";
- if (const llvm::Type *Ty = getCoerceToType())
+ if (llvm::Type *Ty = getCoerceToType())
Ty->print(OS);
else
OS << "null";
@@ -348,7 +348,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
/// UseX86_MMXType - Return true if this is an MMX type that should use the special
/// x86_mmx type.
-bool UseX86_MMXType(const llvm::Type *IRType) {
+bool UseX86_MMXType(llvm::Type *IRType) {
// If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
// special x86_mmx type.
return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
@@ -724,8 +724,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -765,7 +765,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-7 are the eight integer registers; the order is different
@@ -932,7 +932,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
// 0-15 are the 16 integer registers.
@@ -964,7 +964,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
// 0-15 are the 16 integer registers.
@@ -1489,14 +1489,14 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
-static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
+static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
const llvm::TargetData &TD) {
// Base case if we find a float.
if (IROffset == 0 && IRType->isFloatTy())
return true;
// If this is a struct, recurse into the field at the specified offset.
- if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
const llvm::StructLayout *SL = TD.getStructLayout(STy);
unsigned Elt = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(Elt);
@@ -1504,8 +1504,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
}
// If this is an array, recurse into the field at the specified offset.
- if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- const llvm::Type *EltTy = ATy->getElementType();
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = TD.getTypeAllocSize(EltTy);
IROffset -= IROffset/EltSize*EltSize;
return ContainsFloatAtOffset(EltTy, IROffset, TD);
@@ -1578,7 +1578,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
}
}
- if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
// If this is a struct, recurse into the field at the specified offset.
const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
if (IROffset < SL->getSizeInBytes()) {
@@ -1590,7 +1590,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
}
}
- if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
@@ -1970,7 +1970,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
}
// AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Res =
CGF.Builder.CreateBitCast(overflow_arg_area,
llvm::PointerType::getUnqual(LTy));
@@ -2061,22 +2061,22 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// collect arguments from different places; often what should result in a
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *RegAddr =
CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
"reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
- const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- const llvm::Type *TyLo = ST->getElementType(0);
- const llvm::Type *TyHi = ST->getElementType(1);
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
"Unexpected ABI info for mixed regs");
- const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
@@ -2104,9 +2104,9 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
- const llvm::Type *DblPtrTy =
+ llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy,
DoubleTy, NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2198,8 +2198,8 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -2246,7 +2246,7 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
@@ -2330,7 +2330,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-15 are the 16 integer registers.
@@ -2401,7 +2401,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
// Otherwise, pass by coercing to a structure of the appropriate size.
//
// FIXME: This doesn't handle alignment > 64 bits.
- const llvm::Type* ElemTy;
+ llvm::Type* ElemTy;
unsigned SizeRegs;
if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
@@ -2580,8 +2580,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// FIXME: Need to handle alignment
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -2987,7 +2987,7 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// Everything on MIPS is 4 bytes. Double-precision FP registers
// are aliased to pairs of single-precision FP registers.
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-31 are the general purpose registers, $0 - $31.