-rw-r--r-- include/llvm-c/Core.h | 2
-rw-r--r-- include/llvm/Analysis/AliasAnalysis.h | 2
-rw-r--r-- include/llvm/Analysis/ConstantFolding.h | 2
-rw-r--r-- include/llvm/Analysis/FindUsedTypes.h | 6
-rw-r--r-- include/llvm/Analysis/MemoryBuiltins.h | 4
-rw-r--r-- include/llvm/Analysis/ScalarEvolution.h | 38
-rw-r--r-- include/llvm/Analysis/ScalarEvolutionExpander.h | 16
-rw-r--r-- include/llvm/Analysis/ScalarEvolutionExpressions.h | 28
-rw-r--r-- include/llvm/Argument.h | 2
-rw-r--r-- include/llvm/Attributes.h | 2
-rw-r--r-- include/llvm/CodeGen/Analysis.h | 6
-rw-r--r-- include/llvm/CodeGen/FunctionLoweringInfo.h | 2
-rw-r--r-- include/llvm/CodeGen/MachineConstantPool.h | 8
-rw-r--r-- include/llvm/CodeGen/SelectionDAGNodes.h | 2
-rw-r--r-- include/llvm/CodeGen/ValueTypes.h | 6
-rw-r--r-- include/llvm/Constant.h | 8
-rw-r--r-- include/llvm/Constants.h | 132
-rw-r--r-- include/llvm/DerivedTypes.h | 38
-rw-r--r-- include/llvm/ExecutionEngine/ExecutionEngine.h | 4
-rw-r--r-- include/llvm/Function.h | 4
-rw-r--r-- include/llvm/GlobalAlias.h | 2
-rw-r--r-- include/llvm/GlobalValue.h | 2
-rw-r--r-- include/llvm/GlobalVariable.h | 4
-rw-r--r-- include/llvm/InlineAsm.h | 6
-rw-r--r-- include/llvm/InstrTypes.h | 80
-rw-r--r-- include/llvm/Instruction.h | 4
-rw-r--r-- include/llvm/Instructions.h | 116
-rw-r--r-- include/llvm/IntrinsicInst.h | 2
-rw-r--r-- include/llvm/Intrinsics.h | 2
-rw-r--r-- include/llvm/Module.h | 12
-rw-r--r-- include/llvm/Operator.h | 4
-rw-r--r-- include/llvm/Support/CallSite.h | 2
-rw-r--r-- include/llvm/Support/ConstantFolder.h | 20
-rw-r--r-- include/llvm/Support/GetElementPtrTypeIterator.h | 22
-rw-r--r-- include/llvm/Support/IRBuilder.h | 46
-rw-r--r-- include/llvm/Support/NoFolder.h | 20
-rw-r--r-- include/llvm/Support/TargetFolder.h | 18
-rw-r--r-- include/llvm/Target/TargetData.h | 28
-rw-r--r-- include/llvm/Target/TargetIntrinsicInfo.h | 4
-rw-r--r-- include/llvm/Target/TargetLowering.h | 16
-rw-r--r-- include/llvm/Transforms/Utils/AddrModeMatcher.h | 6
-rw-r--r-- include/llvm/Transforms/Utils/SSAUpdater.h | 4
-rw-r--r-- include/llvm/Type.h | 12
-rw-r--r-- include/llvm/User.h | 2
-rw-r--r-- include/llvm/Value.h | 2
-rw-r--r-- lib/Analysis/AliasAnalysis.cpp | 2
-rw-r--r-- lib/Analysis/AliasAnalysisEvaluator.cpp | 6
-rw-r--r-- lib/Analysis/BasicAliasAnalysis.cpp | 4
-rw-r--r-- lib/Analysis/ConstantFolding.cpp | 62
-rw-r--r-- lib/Analysis/IPA/FindUsedTypes.cpp | 4
-rw-r--r-- lib/Analysis/InstructionSimplify.cpp | 18
-rw-r--r-- lib/Analysis/LazyValueInfo.cpp | 6
-rw-r--r-- lib/Analysis/Lint.cpp | 8
-rw-r--r-- lib/Analysis/Loads.cpp | 6
-rw-r--r-- lib/Analysis/MemoryBuiltins.cpp | 16
-rw-r--r-- lib/Analysis/MemoryDependenceAnalysis.cpp | 2
-rw-r--r-- lib/Analysis/ScalarEvolution.cpp | 142
-rw-r--r-- lib/Analysis/ScalarEvolutionExpander.cpp | 64
-rw-r--r-- lib/Analysis/ValueTracking.cpp | 30
-rw-r--r-- lib/AsmParser/LLParser.cpp | 46
-rw-r--r-- lib/AsmParser/LLParser.h | 18
-rw-r--r-- lib/Bitcode/Reader/BitcodeReader.cpp | 90
-rw-r--r-- lib/Bitcode/Reader/BitcodeReader.h | 8
-rw-r--r-- lib/Bitcode/Writer/BitcodeWriter.cpp | 24
-rw-r--r-- lib/Bitcode/Writer/ValueEnumerator.cpp | 4
-rw-r--r-- lib/Bitcode/Writer/ValueEnumerator.h | 8
-rw-r--r-- lib/CodeGen/Analysis.cpp | 16
-rw-r--r-- lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 4
-rw-r--r-- lib/CodeGen/ELFWriter.cpp | 4
-rw-r--r-- lib/CodeGen/IntrinsicLowering.cpp | 10
-rw-r--r-- lib/CodeGen/MachineFunction.cpp | 2
-rw-r--r-- lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6
-rw-r--r-- lib/CodeGen/SelectionDAG/FastISel.cpp | 8
-rw-r--r-- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 6
-rw-r--r-- lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 2
-rw-r--r-- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 28
-rw-r--r-- lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 6
-rw-r--r-- lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 6
-rw-r--r-- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 2
-rw-r--r-- lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 18
-rw-r--r-- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 68
-rw-r--r-- lib/CodeGen/SelectionDAG/TargetLowering.cpp | 14
-rw-r--r-- lib/CodeGen/ShadowStackGC.cpp | 12
-rw-r--r-- lib/CodeGen/SjLjEHPrepare.cpp | 8
-rw-r--r-- lib/CodeGen/StackProtector.cpp | 4
-rw-r--r-- lib/ExecutionEngine/ExecutionEngine.cpp | 18
-rw-r--r-- lib/ExecutionEngine/Interpreter/Execution.cpp | 118
-rw-r--r-- lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 34
-rw-r--r-- lib/ExecutionEngine/Interpreter/Interpreter.h | 28
-rw-r--r-- lib/ExecutionEngine/JIT/JIT.cpp | 8
-rw-r--r-- lib/ExecutionEngine/JIT/JITEmitter.cpp | 4
-rw-r--r-- lib/ExecutionEngine/MCJIT/MCJIT.cpp | 4
-rw-r--r-- lib/Target/ARM/ARMConstantPoolValue.cpp | 6
-rw-r--r-- lib/Target/ARM/ARMFastISel.cpp | 44
-rw-r--r-- lib/Target/ARM/ARMGlobalMerge.cpp | 8
-rw-r--r-- lib/Target/ARM/ARMISelLowering.cpp | 12
-rw-r--r-- lib/Target/ARM/ARMISelLowering.h | 2
-rw-r--r-- lib/Target/ARM/ARMSelectionDAGInfo.cpp | 2
-rw-r--r-- lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp | 8
-rw-r--r-- lib/Target/Blackfin/BlackfinIntrinsicInfo.h | 4
-rw-r--r-- lib/Target/CBackend/CBackend.cpp | 128
-rw-r--r-- lib/Target/CellSPU/SPUISelLowering.cpp | 8
-rw-r--r-- lib/Target/CellSPU/SPUISelLowering.h | 4
-rw-r--r-- lib/Target/CppBackend/CPPBackend.cpp | 44
-rw-r--r-- lib/Target/MBlaze/MBlazeISelLowering.cpp | 2
-rw-r--r-- lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp | 8
-rw-r--r-- lib/Target/MBlaze/MBlazeIntrinsicInfo.h | 4
-rw-r--r-- lib/Target/MBlaze/MBlazeTargetObjectFile.cpp | 2
-rw-r--r-- lib/Target/MSP430/MSP430ISelLowering.cpp | 6
-rw-r--r-- lib/Target/MSP430/MSP430ISelLowering.h | 4
-rw-r--r-- lib/Target/Mangler.cpp | 4
-rw-r--r-- lib/Target/Mips/MipsISelLowering.cpp | 6
-rw-r--r-- lib/Target/Mips/MipsTargetObjectFile.cpp | 2
-rw-r--r-- lib/Target/PTX/PTXAsmPrinter.cpp | 12
-rw-r--r-- lib/Target/PowerPC/PPCISelLowering.cpp | 10
-rw-r--r-- lib/Target/PowerPC/PPCISelLowering.h | 6
-rw-r--r-- lib/Target/Sparc/SparcISelLowering.cpp | 4
-rw-r--r-- lib/Target/Target.cpp | 4
-rw-r--r-- lib/Target/TargetData.cpp | 34
-rw-r--r-- lib/Target/TargetLoweringObjectFile.cpp | 6
-rw-r--r-- lib/Target/X86/X86FastISel.cpp | 20
-rw-r--r-- lib/Target/X86/X86ISelLowering.cpp | 30
-rw-r--r-- lib/Target/X86/X86ISelLowering.h | 8
-rw-r--r-- lib/Target/X86/X86InstrInfo.cpp | 2
-rw-r--r-- lib/Target/X86/X86SelectionDAGInfo.cpp | 2
-rw-r--r-- lib/Target/XCore/XCoreAsmPrinter.cpp | 2
-rw-r--r-- lib/Target/XCore/XCoreISelLowering.cpp | 12
-rw-r--r-- lib/Target/XCore/XCoreISelLowering.h | 2
-rw-r--r-- lib/Transforms/IPO/ArgumentPromotion.cpp | 26
-rw-r--r-- lib/Transforms/IPO/DeadArgumentElimination.cpp | 12
-rw-r--r-- lib/Transforms/IPO/GlobalOpt.cpp | 80
-rw-r--r-- lib/Transforms/IPO/IPConstantPropagation.cpp | 2
-rw-r--r-- lib/Transforms/IPO/Inliner.cpp | 4
-rw-r--r-- lib/Transforms/IPO/LowerSetJmp.cpp | 10
-rw-r--r-- lib/Transforms/IPO/MergeFunctions.cpp | 26
-rw-r--r-- lib/Transforms/InstCombine/InstCombine.h | 10
-rw-r--r-- lib/Transforms/InstCombine/InstCombineAddSub.cpp | 8
-rw-r--r-- lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 6
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCalls.cpp | 68
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCasts.cpp | 78
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCompares.cpp | 34
-rw-r--r-- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 28
-rw-r--r-- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp | 2
-rw-r--r-- lib/Transforms/InstCombine/InstCombinePHI.cpp | 10
-rw-r--r-- lib/Transforms/InstCombine/InstCombineSelect.cpp | 4
-rw-r--r-- lib/Transforms/InstCombine/InstCombineShifts.cpp | 2
-rw-r--r-- lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 12
-rw-r--r-- lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 8
-rw-r--r-- lib/Transforms/InstCombine/InstructionCombining.cpp | 34
-rw-r--r-- lib/Transforms/Instrumentation/EdgeProfiling.cpp | 2
-rw-r--r-- lib/Transforms/Instrumentation/GCOVProfiling.cpp | 20
-rw-r--r-- lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp | 4
-rw-r--r-- lib/Transforms/Instrumentation/PathProfiling.cpp | 14
-rw-r--r-- lib/Transforms/Instrumentation/ProfilingUtils.cpp | 6
-rw-r--r-- lib/Transforms/Scalar/CodeGenPrepare.cpp | 8
-rw-r--r-- lib/Transforms/Scalar/DeadStoreElimination.cpp | 2
-rw-r--r-- lib/Transforms/Scalar/GVN.cpp | 32
-rw-r--r-- lib/Transforms/Scalar/IndVarSimplify.cpp | 22
-rw-r--r-- lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 4
-rw-r--r-- lib/Transforms/Scalar/LoopStrengthReduce.cpp | 74
-rw-r--r-- lib/Transforms/Scalar/MemCpyOptimizer.cpp | 8
-rw-r--r-- lib/Transforms/Scalar/ObjCARC.cpp | 42
-rw-r--r-- lib/Transforms/Scalar/SCCP.cpp | 20
-rw-r--r-- lib/Transforms/Scalar/ScalarReplAggregates.cpp | 182
-rw-r--r-- lib/Transforms/Scalar/SimplifyLibCalls.cpp | 66
-rw-r--r-- lib/Transforms/Utils/AddrModeMatcher.cpp | 4
-rw-r--r-- lib/Transforms/Utils/BuildLibCalls.cpp | 10
-rw-r--r-- lib/Transforms/Utils/CodeExtractor.cpp | 6
-rw-r--r-- lib/Transforms/Utils/InlineFunction.cpp | 6
-rw-r--r-- lib/Transforms/Utils/LowerExpectIntrinsic.cpp | 4
-rw-r--r-- lib/Transforms/Utils/LowerInvoke.cpp | 6
-rw-r--r-- lib/Transforms/Utils/SSAUpdater.cpp | 2
-rw-r--r-- lib/Transforms/Utils/SimplifyCFG.cpp | 2
-rw-r--r-- lib/VMCore/AsmWriter.cpp | 2
-rw-r--r-- lib/VMCore/Attributes.cpp | 2
-rw-r--r-- lib/VMCore/AutoUpgrade.cpp | 8
-rw-r--r-- lib/VMCore/ConstantFold.cpp | 130
-rw-r--r-- lib/VMCore/ConstantFold.h | 2
-rw-r--r-- lib/VMCore/Constants.cpp | 166
-rw-r--r-- lib/VMCore/ConstantsContext.h | 32
-rw-r--r-- lib/VMCore/Core.cpp | 6
-rw-r--r-- lib/VMCore/DebugLoc.cpp | 2
-rw-r--r-- lib/VMCore/Function.cpp | 12
-rw-r--r-- lib/VMCore/Globals.cpp | 6
-rw-r--r-- lib/VMCore/IRBuilder.cpp | 2
-rw-r--r-- lib/VMCore/InlineAsm.cpp | 8
-rw-r--r-- lib/VMCore/Instruction.cpp | 4
-rw-r--r-- lib/VMCore/Instructions.cpp | 180
-rw-r--r-- lib/VMCore/LLVMContextImpl.h | 4
-rw-r--r-- lib/VMCore/Module.cpp | 14
-rw-r--r-- lib/VMCore/Type.cpp | 45
-rw-r--r-- lib/VMCore/Value.cpp | 8
-rw-r--r-- lib/VMCore/ValueTypes.cpp | 10
-rw-r--r-- lib/VMCore/Verifier.cpp | 126
-rw-r--r-- tools/bugpoint/ExtractFunction.cpp | 2
-rw-r--r-- tools/bugpoint/Miscompilation.cpp | 6
-rw-r--r-- tools/lto/LTOCodeGenerator.cpp | 2
-rw-r--r-- unittests/ADT/SmallVectorTest.cpp | 2
-rw-r--r-- unittests/Analysis/ScalarEvolutionTest.cpp | 4
-rw-r--r-- unittests/ExecutionEngine/ExecutionEngineTest.cpp | 2
-rw-r--r-- unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp | 2
-rw-r--r-- unittests/ExecutionEngine/JIT/JITTest.cpp | 12
-rw-r--r-- unittests/Support/TypeBuilderTest.cpp | 8
-rw-r--r-- unittests/VMCore/ConstantsTest.cpp | 8
-rw-r--r-- unittests/VMCore/InstructionsTest.cpp | 14
-rw-r--r-- unittests/VMCore/VerifierTest.cpp | 2
206 files changed, 2029 insertions(+), 2030 deletions(-)
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index a4456dd13e..9e20ed6f74 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -1136,7 +1136,7 @@ namespace llvm {
return reinterpret_cast<Type**>(Tys);
}
- inline LLVMTypeRef *wrap(const Type **Tys) {
+ inline LLVMTypeRef *wrap(Type **Tys) {
return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
}
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index 5d8edd1bf4..2701b52393 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -88,7 +88,7 @@ public:
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
- uint64_t getTypeStoreSize(const Type *Ty);
+ uint64_t getTypeStoreSize(Type *Ty);
//===--------------------------------------------------------------------===//
/// Alias Queries...
diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h
index f6b1f5ab99..8d968c0ae2 100644
--- a/include/llvm/Analysis/ConstantFolding.h
+++ b/include/llvm/Analysis/ConstantFolding.h
@@ -47,7 +47,7 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
/// fold instructions like loads and stores, which have no constant expression
/// form.
///
-Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
+Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant *const *Ops, unsigned NumOps,
const TargetData *TD = 0);
diff --git a/include/llvm/Analysis/FindUsedTypes.h b/include/llvm/Analysis/FindUsedTypes.h
index 3e5da572b2..b22cb88135 100644
--- a/include/llvm/Analysis/FindUsedTypes.h
+++ b/include/llvm/Analysis/FindUsedTypes.h
@@ -23,7 +23,7 @@ class Type;
class Value;
class FindUsedTypes : public ModulePass {
- SetVector<const Type *> UsedTypes;
+ SetVector<Type *> UsedTypes;
public:
static char ID; // Pass identification, replacement for typeid
FindUsedTypes() : ModulePass(ID) {
@@ -33,7 +33,7 @@ public:
/// getTypes - After the pass has been run, return the set containing all of
/// the types used in the module.
///
- const SetVector<const Type *> &getTypes() const { return UsedTypes; }
+ const SetVector<Type *> &getTypes() const { return UsedTypes; }
/// Print the types found in the module. If the optional Module parameter is
/// passed in, then the types are printed symbolically if possible, using the
@@ -45,7 +45,7 @@ private:
/// IncorporateType - Incorporate one type and all of its subtypes into the
/// collection of used types.
///
- void IncorporateType(const Type *Ty);
+ void IncorporateType(Type *Ty);
/// IncorporateValue - Incorporate all of the types used by this value.
///
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 22493f6f8b..865d236f6f 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -51,14 +51,14 @@ const CallInst *isArrayMalloc(const Value *I, const TargetData *TD);
/// 0: PointerType is the malloc call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const PointerType *getMallocType(const CallInst *CI);
+PointerType *getMallocType(const CallInst *CI);
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const Type *getMallocAllocatedType(const CallInst *CI);
+Type *getMallocAllocatedType(const CallInst *CI);
/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
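
As the comments above note, the pointee type is only recoverable when the call has at most one bitcast use. A minimal caller-side sketch of the updated signatures, assuming a Value *V is in scope and using this header's extractMallocCall helper:

    #include "llvm/Analysis/MemoryBuiltins.h"
    using namespace llvm;

    void inspectMalloc(Value *V) {
      if (const CallInst *CI = extractMallocCall(V)) {
        // Non-const result under the new signature; null when more than
        // one distinct bitcast use makes the type ambiguous.
        if (Type *AllocTy = getMallocAllocatedType(CI))
          (void)AllocTy; // e.g. feed into size or alias reasoning
      }
    }
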
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 554524a127..49e8fa3663 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -103,7 +103,7 @@ namespace llvm {
/// getType - Return the LLVM type of this SCEV expression.
///
- const Type *getType() const;
+ Type *getType() const;
/// isZero - Return true if the expression is a constant zero.
///
@@ -479,17 +479,17 @@ namespace llvm {
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
- bool isSCEVable(const Type *Ty) const;
+ bool isSCEVable(Type *Ty) const;
/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
- uint64_t getTypeSizeInBits(const Type *Ty) const;
+ uint64_t getTypeSizeInBits(Type *Ty) const;
/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
- const Type *getEffectiveSCEVType(const Type *Ty) const;
+ Type *getEffectiveSCEVType(Type *Ty) const;
/// getSCEV - Return a SCEV expression for the full generality of the
/// specified expression.
@@ -497,11 +497,11 @@ namespace llvm {
const SCEV *getConstant(ConstantInt *V);
const SCEV *getConstant(const APInt& Val);
- const SCEV *getConstant(const Type *Ty, uint64_t V, bool isSigned = false);
- const SCEV *getTruncateExpr(const SCEV *Op, const Type *Ty);
- const SCEV *getZeroExtendExpr(const SCEV *Op, const Type *Ty);
- const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty);
- const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty);
+ const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+ const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
@@ -550,19 +550,19 @@ namespace llvm {
/// getSizeOfExpr - Return an expression for sizeof on the given type.
///
- const SCEV *getSizeOfExpr(const Type *AllocTy);
+ const SCEV *getSizeOfExpr(Type *AllocTy);
/// getAlignOfExpr - Return an expression for alignof on the given type.
///
- const SCEV *getAlignOfExpr(const Type *AllocTy);
+ const SCEV *getAlignOfExpr(Type *AllocTy);
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
///
- const SCEV *getOffsetOfExpr(const StructType *STy, unsigned FieldNo);
+ const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
///
- const SCEV *getOffsetOfExpr(const Type *CTy, Constant *FieldNo);
+ const SCEV *getOffsetOfExpr(Type *CTy, Constant *FieldNo);
/// getNegativeSCEV - Return the SCEV object corresponding to -V.
///
@@ -579,33 +579,33 @@ namespace llvm {
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
/// of the input value to the specified type. If the type must be
/// extended, it is zero extended.
- const SCEV *getTruncateOrZeroExtend(const SCEV *V, const Type *Ty);
+ const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion
/// of the input value to the specified type. If the type must be
/// extended, it is sign extended.
- const SCEV *getTruncateOrSignExtend(const SCEV *V, const Type *Ty);
+ const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is zero extended. The conversion must not be narrowing.
- const SCEV *getNoopOrZeroExtend(const SCEV *V, const Type *Ty);
+ const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is sign extended. The conversion must not be narrowing.
- const SCEV *getNoopOrSignExtend(const SCEV *V, const Type *Ty);
+ const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
- const SCEV *getNoopOrAnyExtend(const SCEV *V, const Type *Ty);
+ const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be
/// widening.
- const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
+ const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
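
For orientation, a sketch of the conversion helpers declared above with their new non-const Type parameters, assuming a ScalarEvolution &SE and an integer-typed Value *V are in scope (the usual headers are implied):

    const SCEV *S = SE.getSCEV(V);
    // Pointer types map to the pointer-sized integer type.
    Type *EffTy = SE.getEffectiveSCEVType(S->getType());
    // Widen (or truncate) S to i64; the NoopOr* variants instead
    // assert that no narrowing is required.
    Type *I64 = Type::getInt64Ty(V->getContext());
    const SCEV *Wide = SE.getTruncateOrZeroExtend(S, I64);
    (void)EffTy;
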
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index a8c03b2219..c883d7f17e 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -89,12 +89,12 @@ namespace llvm {
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *getOrInsertCanonicalInductionVariable(const Loop *L,
- const Type *Ty);
+ Type *Ty);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
/// specified block.
- Value *expandCodeFor(const SCEV *SH, const Type *Ty, Instruction *I);
+ Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);
/// setIVIncInsertPos - Set the current IV increment loop and position.
void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
@@ -145,20 +145,20 @@ namespace llvm {
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
- Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+ Value *ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP);
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to
/// share the casts.
- Value *InsertNoopCastOfTo(Value *V, const Type *Ty);
+ Value *InsertNoopCastOfTo(Value *V, Type *Ty);
/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
/// instead of using ptrtoint+arithmetic+inttoptr.
Value *expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
- const PointerType *PTy, const Type *Ty, Value *V);
+ PointerType *PTy, Type *Ty, Value *V);
Value *expand(const SCEV *S);
@@ -166,7 +166,7 @@ namespace llvm {
/// expression into the program. The inserted code is inserted into the
/// SCEVExpander's current insertion point. If a type is specified, the
/// result will be expanded to have that type, with a cast if necessary.
- Value *expandCodeFor(const SCEV *SH, const Type *Ty = 0);
+ Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);
/// isInsertedInstruction - Return true if the specified instruction was
/// inserted by the code rewriter. If so, the client should not modify the
@@ -211,8 +211,8 @@ namespace llvm {
Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
- const Type *ExpandTy,
- const Type *IntTy);
+ Type *ExpandTy,
+ Type *IntTy);
};
}
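
Continuing the sketch above, expandCodeFor then materializes such an expression as IR, assuming a SCEVExpander Exp and an insertion point InsertPt (both hypothetical names):

    // Emit instructions computing Wide before InsertPt, coerced to
    // I64 with a cast if one is needed.
    Value *Expanded = Exp.expandCodeFor(Wide, I64, InsertPt);
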
diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 856d92c97c..b6f0ae54cf 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -42,7 +42,7 @@ namespace llvm {
public:
ConstantInt *getValue() const { return V; }
- const Type *getType() const { return V->getType(); }
+ Type *getType() const { return V->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVConstant *S) { return true; }
@@ -57,14 +57,14 @@ namespace llvm {
class SCEVCastExpr : public SCEV {
protected:
const SCEV *Op;
- const Type *Ty;
+ Type *Ty;
SCEVCastExpr(const FoldingSetNodeIDRef ID,
- unsigned SCEVTy, const SCEV *op, const Type *ty);
+ unsigned SCEVTy, const SCEV *op, Type *ty);
public:
const SCEV *getOperand() const { return Op; }
- const Type *getType() const { return Ty; }
+ Type *getType() const { return Ty; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVCastExpr *S) { return true; }
@@ -83,7 +83,7 @@ namespace llvm {
friend class ScalarEvolution;
SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty);
+ const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -101,7 +101,7 @@ namespace llvm {
friend class ScalarEvolution;
SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty);
+ const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -119,7 +119,7 @@ namespace llvm {
friend class ScalarEvolution;
SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty);
+ const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -158,7 +158,7 @@ namespace llvm {
op_iterator op_begin() const { return Operands; }
op_iterator op_end() const { return Operands + NumOperands; }
- const Type *getType() const { return getOperand(0)->getType(); }
+ Type *getType() const { return getOperand(0)->getType(); }
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
return (NoWrapFlags)(SubclassData & Mask);
@@ -214,7 +214,7 @@ namespace llvm {
}
public:
- const Type *getType() const {
+ Type *getType() const {
// Use the type of the last operand, which is likely to be a pointer
// type, if there is one. This doesn't usually matter, but it can help
// reduce casts when the expressions are expanded.
@@ -263,7 +263,7 @@ namespace llvm {
const SCEV *getLHS() const { return LHS; }
const SCEV *getRHS() const { return RHS; }
- const Type *getType() const {
+ Type *getType() const {
// In most cases the types of LHS and RHS will be the same, but in some
// crazy cases one or the other may be a pointer. ScalarEvolution doesn't
// depend on the type for correctness, but handling types carefully can
@@ -441,11 +441,11 @@ namespace llvm {
/// folded with other operations into something unrecognizable. This
/// is mainly only useful for pretty-printing and other situations
/// where it isn't absolutely required for these to succeed.
- bool isSizeOf(const Type *&AllocTy) const;
- bool isAlignOf(const Type *&AllocTy) const;
- bool isOffsetOf(const Type *&STy, Constant *&FieldNo) const;
+ bool isSizeOf(Type *&AllocTy) const;
+ bool isAlignOf(Type *&AllocTy) const;
+ bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
- const Type *getType() const { return getValPtr()->getType(); }
+ Type *getType() const { return getValPtr()->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVUnknown *S) { return true; }
diff --git a/include/llvm/Argument.h b/include/llvm/Argument.h
index ff8637881e..cd74882662 100644
--- a/include/llvm/Argument.h
+++ b/include/llvm/Argument.h
@@ -39,7 +39,7 @@ public:
/// Argument ctor - If Function argument is specified, this argument is
/// inserted at the end of the argument list for the function.
///
- explicit Argument(const Type *Ty, const Twine &Name = "", Function *F = 0);
+ explicit Argument(Type *Ty, const Twine &Name = "", Function *F = 0);
inline const Function *getParent() const { return Parent; }
inline Function *getParent() { return Parent; }
diff --git a/include/llvm/Attributes.h b/include/llvm/Attributes.h
index 233eab8bf1..cd3ee35d74 100644
--- a/include/llvm/Attributes.h
+++ b/include/llvm/Attributes.h
@@ -107,7 +107,7 @@ const Attributes MutuallyIncompatible[4] = {
};
/// @brief Which attributes cannot be applied to a type.
-Attributes typeIncompatible(const Type *Ty);
+Attributes typeIncompatible(Type *Ty);
/// This turns an int alignment (a power of 2, normally) into the
/// form used internally in Attributes.
diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h
index f8a70293c1..d8e64071a1 100644
--- a/include/llvm/CodeGen/Analysis.h
+++ b/include/llvm/CodeGen/Analysis.h
@@ -33,12 +33,12 @@ class SelectionDAG;
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
-unsigned ComputeLinearIndex(const Type *Ty,
+unsigned ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex = 0);
-inline unsigned ComputeLinearIndex(const Type *Ty,
+inline unsigned ComputeLinearIndex(Type *Ty,
ArrayRef<unsigned> Indices,
unsigned CurIndex = 0) {
return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
@@ -51,7 +51,7 @@ inline unsigned ComputeLinearIndex(const Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
-void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets = 0,
uint64_t StartingOffset = 0);
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 84bbf48047..7eb21b5c66 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -139,7 +139,7 @@ public:
unsigned CreateReg(EVT VT);
- unsigned CreateRegs(const Type *Ty);
+ unsigned CreateRegs(Type *Ty);
unsigned InitializeRegForValue(const Value *V) {
unsigned &R = ValueMap[V];
diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h
index beb16a2824..8ce0e0590d 100644
--- a/include/llvm/CodeGen/MachineConstantPool.h
+++ b/include/llvm/CodeGen/MachineConstantPool.h
@@ -34,15 +34,15 @@ class raw_ostream;
/// Abstract base class for all machine specific constantpool value subclasses.
///
class MachineConstantPoolValue {
- const Type *Ty;
+ Type *Ty;
public:
- explicit MachineConstantPoolValue(const Type *ty) : Ty(ty) {}
+ explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
virtual ~MachineConstantPoolValue() {}
/// getType - get type of this MachineConstantPoolValue.
///
- const Type *getType() const { return Ty; }
+ Type *getType() const { return Ty; }
/// getRelocationInfo - This method classifies the entry according to
@@ -104,7 +104,7 @@ public:
return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
}
- const Type *getType() const;
+ Type *getType() const;
/// getRelocationInfo - This method classifies the entry according to
/// whether or not it may generate a relocation entry. This must be
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index a5c4201ef6..d3ce7934d8 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1291,7 +1291,7 @@ public:
unsigned getAlignment() const { return Alignment; }
unsigned char getTargetFlags() const { return TargetFlags; }
- const Type *getType() const;
+ Type *getType() const;
static bool classof(const ConstantPoolSDNode *) { return true; }
static bool classof(const SDNode *N) {
diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h
index 424721bf64..1676483893 100644
--- a/include/llvm/CodeGen/ValueTypes.h
+++ b/include/llvm/CodeGen/ValueTypes.h
@@ -380,7 +380,7 @@ namespace llvm {
struct EVT {
private:
MVT V;
- const Type *LLVMTy;
+ Type *LLVMTy;
public:
EVT() : V((MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE)),
@@ -645,12 +645,12 @@ namespace llvm {
/// getTypeForEVT - This method returns an LLVM type corresponding to the
/// specified EVT. For integer types, this returns an unsigned type. Note
/// that this will abort for types that cannot be represented.
- const Type *getTypeForEVT(LLVMContext &Context) const;
+ Type *getTypeForEVT(LLVMContext &Context) const;
/// getEVT - Return the value type corresponding to the specified type.
/// This returns all pointers as iPTR. If HandleUnknown is true, unknown
/// types are returned as Other, otherwise they are invalid.
- static EVT getEVT(const Type *Ty, bool HandleUnknown = false);
+ static EVT getEVT(Type *Ty, bool HandleUnknown = false);
intptr_t getRawBits() {
if (isSimple())
diff --git a/include/llvm/Constant.h b/include/llvm/Constant.h
index 5e351c4ec5..601b37bb64 100644
--- a/include/llvm/Constant.h
+++ b/include/llvm/Constant.h
@@ -43,7 +43,7 @@ class Constant : public User {
Constant(const Constant &); // Do not implement
protected:
- Constant(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
+ Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
: User(ty, vty, Ops, NumOps) {}
void destroyConstantImpl();
@@ -128,16 +128,16 @@ public:
assert(0 && "Constants that do not have operands cannot be using 'From'!");
}
- static Constant *getNullValue(const Type* Ty);
+ static Constant *getNullValue(Type* Ty);
/// @returns the value for an integer constant of the given type that has all
/// its bits set to true.
/// @brief Get the all ones value
- static Constant *getAllOnesValue(const Type* Ty);
+ static Constant *getAllOnesValue(Type* Ty);
/// getIntegerValue - Return the value for an integer or pointer constant,
/// or a vector thereof, with the given scalar value.
- static Constant *getIntegerValue(const Type* Ty, const APInt &V);
+ static Constant *getIntegerValue(Type* Ty, const APInt &V);
/// removeDeadConstantUsers - If there are any dead constant users dangling
/// off of this constant, remove them. This method is useful for clients
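
A brief sketch of the factory methods above under the de-constified signatures, assuming an LLVMContext Ctx and the usual llvm/Constants.h include:

    Type *I32 = Type::getInt32Ty(Ctx);
    Constant *Zero  = Constant::getNullValue(I32);                   // i32 0
    Constant *Ones  = Constant::getAllOnesValue(I32);                // i32 -1
    Constant *Forty = Constant::getIntegerValue(I32, APInt(32, 40)); // i32 40
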
diff --git a/include/llvm/Constants.h b/include/llvm/Constants.h
index 01fca29184..1302a01534 100644
--- a/include/llvm/Constants.h
+++ b/include/llvm/Constants.h
@@ -47,7 +47,7 @@ struct ConvertConstantType;
class ConstantInt : public Constant {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT
- ConstantInt(const IntegerType *Ty, const APInt& V);
+ ConstantInt(IntegerType *Ty, const APInt& V);
APInt Val;
protected:
// allocate space for exactly zero operands
@@ -57,12 +57,12 @@ protected:
public:
static ConstantInt *getTrue(LLVMContext &Context);
static ConstantInt *getFalse(LLVMContext &Context);
- static Constant *getTrue(const Type *Ty);
- static Constant *getFalse(const Type *Ty);
+ static Constant *getTrue(Type *Ty);
+ static Constant *getFalse(Type *Ty);
/// If Ty is a vector type, return a Constant with a splat of the given
/// value. Otherwise return a ConstantInt for the given value.
- static Constant *get(const Type *Ty, uint64_t V, bool isSigned = false);
+ static Constant *get(Type *Ty, uint64_t V, bool isSigned = false);
/// Return a ConstantInt with the specified integer value for the specified
/// type. If the type is wider than 64 bits, the value will be zero-extended
@@ -70,7 +70,7 @@ public:
/// be interpreted as a 64-bit signed integer and sign-extended to fit
/// the type.
/// @brief Get a ConstantInt for a specific value.
- static ConstantInt *get(const IntegerType *Ty, uint64_t V,
+ static ConstantInt *get(IntegerType *Ty, uint64_t V,
bool isSigned = false);
/// Return a ConstantInt with the specified value for the specified type. The
@@ -78,8 +78,8 @@ public:
/// either getSExtValue() or getZExtValue() will yield a correctly sized and
/// signed value for the type Ty.
/// @brief Get a ConstantInt for a specific signed value.
- static ConstantInt *getSigned(const IntegerType *Ty, int64_t V);
- static Constant *getSigned(const Type *Ty, int64_t V);
+ static ConstantInt *getSigned(IntegerType *Ty, int64_t V);
+ static Constant *getSigned(Type *Ty, int64_t V);
/// Return a ConstantInt with the specified value and an implied Type. The
/// type is the integer type that corresponds to the bit width of the value.
@@ -87,12 +87,12 @@ public:
/// Return a ConstantInt constructed from the string strStart with the given
/// radix.
- static ConstantInt *get(const IntegerType *Ty, StringRef Str,
+ static ConstantInt *get(IntegerType *Ty, StringRef Str,
uint8_t radix);
/// If Ty is a vector type, return a Constant with a splat of the given
/// value. Otherwise return a ConstantInt for the given value.
- static Constant *get(const Type* Ty, const APInt& V);
+ static Constant *get(Type* Ty, const APInt& V);
/// Return the constant as an APInt value reference. This allows clients to
/// obtain a copy of the value, with all its precision intact.
@@ -133,8 +133,8 @@ public:
/// getType - Specialize the getType() method to always return an IntegerType,
/// which reduces the amount of casting needed in parts of the compiler.
///
- inline const IntegerType *getType() const {
- return reinterpret_cast<const IntegerType*>(Value::getType());
+ inline IntegerType *getType() const {
+ return reinterpret_cast<IntegerType*>(Value::getType());
}
/// This static method returns true if the type Ty is big enough to
@@ -146,8 +146,8 @@ public:
/// to the appropriate unsigned type before calling the method.
/// @returns true if V is a valid value for type Ty
/// @brief Determine if the value is in range for the given type.
- static bool isValueValidForType(const Type *Ty, uint64_t V);
- static bool isValueValidForType(const Type *Ty, int64_t V);
+ static bool isValueValidForType(Type *Ty, uint64_t V);
+ static bool isValueValidForType(Type *Ty, int64_t V);
bool isNegative() const { return Val.isNegative(); }
@@ -233,7 +233,7 @@ class ConstantFP : public Constant {
ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT
friend class LLVMContextImpl;
protected:
- ConstantFP(const Type *Ty, const APFloat& V);
+ ConstantFP(Type *Ty, const APFloat& V);
protected:
// allocate space for exactly zero operands
void *operator new(size_t s) {
@@ -243,20 +243,20 @@ public:
/// Floating point negation must be implemented with f(x) = -0.0 - x. This
/// method returns the negative zero constant for floating point or vector
/// floating point types; for all other types, it returns the null value.
- static Constant *getZeroValueForNegation(const Type *Ty);
+ static Constant *getZeroValueForNegation(Type *Ty);
/// get() - This returns a ConstantFP, or a vector containing a splat of a
/// ConstantFP, for the specified value in the specified type. This should
/// only be used for simple constant values like 2.0/1.0 etc, that are
/// known-valid both as host double and as the target format.
- static Constant *get(const Type* Ty, double V);
- static Constant *get(const Type* Ty, StringRef Str);
+ static Constant *get(Type* Ty, double V);
+ static Constant *get(Type* Ty, StringRef Str);
static ConstantFP *get(LLVMContext &Context, const APFloat &V);
- static ConstantFP *getNegativeZero(const Type* Ty);
- static ConstantFP *getInfinity(const Type *Ty, bool Negative = false);
+ static ConstantFP *getNegativeZero(Type* Ty);
+ static ConstantFP *getInfinity(Type *Ty, bool Negative = false);
/// isValueValidForType - return true if Ty is big enough to represent V.
- static bool isValueValidForType(const Type *Ty, const APFloat &V);
+ static bool isValueValidForType(Type *Ty, const APFloat &V);
inline const APFloat &getValueAPF() const { return Val; }
/// isZero - Return true if the value is positive or negative zero.
@@ -300,7 +300,7 @@ class ConstantAggregateZero : public Constant {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT
protected:
- explicit ConstantAggregateZero(const Type *ty)
+ explicit ConstantAggregateZero(Type *ty)
: Constant(ty, ConstantAggregateZeroVal, 0, 0) {}
protected:
// allocate space for exactly zero operands
@@ -308,7 +308,7 @@ protected:
return User::operator new(s, 0);
}
public:
- static ConstantAggregateZero* get(const Type *Ty);
+ static ConstantAggregateZero* get(Type *Ty);
virtual void destroyConstant();
@@ -329,10 +329,10 @@ class ConstantArray : public Constant {
std::vector<Constant*> >;
ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT
protected:
- ConstantArray(const ArrayType *T, const std::vector<Constant*> &Val);
+ ConstantArray(ArrayType *T, const std::vector<Constant*> &Val);
public:
// ConstantArray accessors
- static Constant *get(const ArrayType *T, ArrayRef<Constant*> V);
+ static Constant *get(ArrayType *T, ArrayRef<Constant*> V);
/// This method constructs a ConstantArray and initializes it with a text
/// string. The default behavior (AddNull==true) causes a null terminator to
@@ -349,8 +349,8 @@ public:
/// getType - Specialize the getType() method to always return an ArrayType,
/// which reduces the amount of casting needed in parts of the compiler.
///
- inline const ArrayType *getType() const {
- return reinterpret_cast<const ArrayType*>(Value::getType());
+ inline ArrayType *getType() const {
+ return reinterpret_cast<ArrayType*>(Value::getType());
}
/// isString - This method returns true if the array is an array of i8 and
@@ -400,11 +400,11 @@ class ConstantStruct : public Constant {
std::vector<Constant*> >;
ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT
protected:
- ConstantStruct(const StructType *T, const std::vector<Constant*> &Val);
+ ConstantStruct(StructType *T, const std::vector<Constant*> &Val);
public:
// ConstantStruct accessors
- static Constant *get(const StructType *T, ArrayRef<Constant*> V);
- static Constant *get(const StructType *T, ...) END_WITH_NULL;
+ static Constant *get(StructType *T, ArrayRef<Constant*> V);
+ static Constant *get(StructType *T, ...) END_WITH_NULL;
/// getAnon - Return an anonymous struct that has the specified
/// elements. If the struct is possibly empty, then you must specify a
@@ -431,8 +431,8 @@ public:
/// getType() specialization - Reduce amount of casting...
///
- inline const StructType *getType() const {
- return reinterpret_cast<const StructType*>(Value::getType());
+ inline StructType *getType() const {
+ return reinterpret_cast<StructType*>(Value::getType());
}
virtual void destroyConstant();
@@ -461,7 +461,7 @@ class ConstantVector : public Constant {
std::vector<Constant*> >;
ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT
protected:
- ConstantVector(const VectorType *T, const std::vector<Constant*> &Val);
+ ConstantVector(VectorType *T, const std::vector<Constant*> &Val);
public:
// ConstantVector accessors
static Constant *get(ArrayRef<Constant*> V);
@@ -472,8 +472,8 @@ public:
/// getType - Specialize the getType() method to always return a VectorType,
/// which reduces the amount of casting needed in parts of the compiler.
///
- inline const VectorType *getType() const {
- return reinterpret_cast<const VectorType*>(Value::getType());
+ inline VectorType *getType() const {
+ return reinterpret_cast<VectorType*>(Value::getType());
}
/// This function will return true iff every element in this vector constant
@@ -511,8 +511,8 @@ class ConstantPointerNull : public Constant {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT
protected:
- explicit ConstantPointerNull(const PointerType *T)
- : Constant(reinterpret_cast<const Type*>(T),
+ explicit ConstantPointerNull(PointerType *T)
+ : Constant(reinterpret_cast<Type*>(T),
Value::ConstantPointerNullVal, 0, 0) {}
protected:
@@ -522,15 +522,15 @@ protected:
}
public:
/// get() - Static factory methods - Return objects of the specified value
- static ConstantPointerNull *get(const PointerType *T);
+ static ConstantPointerNull *get(PointerType *T);
virtual void destroyConstant();
/// getType - Specialize the getType() method to always return a PointerType,
/// which reduces the amount of casting needed in parts of the compiler.
///
- inline const PointerType *getType() const {
- return reinterpret_cast<const PointerType*>(Value::getType());
+ inline PointerType *getType() const {
+ return reinterpret_cast<PointerType*>(Value::getType());
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -591,7 +591,7 @@ class ConstantExpr : public Constant {
friend struct ConvertConstantType<ConstantExpr, Type>;
protected:
- ConstantExpr(const Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
+ ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
: Constant(ty, ConstantExprVal, Ops, NumOps) {
// Operation type (an Instruction opcode) is stored as the SubclassData.
setValueSubclassData(Opcode);
@@ -605,23 +605,23 @@ public:
/// getAlignOf constant expr - computes the alignment of a type in a target
/// independent way (Note: the return type is an i64).
- static Constant *getAlignOf(const Type *Ty);
+ static Constant *getAlignOf(Type *Ty);
/// getSizeOf constant expr - computes the (alloc) size of a type (in
/// address-units, not bits) in a target independent way (Note: the return
/// type is an i64).
///
- static Constant *getSizeOf(const Type *Ty);
+ static Constant *getSizeOf(Type *Ty);
/// getOffsetOf constant expr - computes the offset of a struct field in a
/// target independent way (Note: the return type is an i64).
///
- static Constant *getOffsetOf(const StructType *STy, unsigned FieldNo);
+ static Constant *getOffsetOf(StructType *STy, unsigned FieldNo);
/// getOffsetOf constant expr - This is a generalized form of getOffsetOf,
/// which supports any aggregate type, and any Constant index.
///
- static Constant *getOffsetOf(const Type *Ty, Constant *FieldNo);
+ static Constant *getOffsetOf(Type *Ty, Constant *FieldNo);
static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW =false);
static Constant *getFNeg(Constant *C);
@@ -648,18 +648,18 @@ public:
bool HasNUW = false, bool HasNSW = false);
static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
- static Constant *getTrunc (Constant *C, const Type *Ty);
- static Constant *getSExt (Constant *C, const Type *Ty);
- static Constant *getZExt (Constant *C, const Type *Ty);
- static Constant *getFPTrunc (Constant *C, const Type *Ty);
- static Constant *getFPExtend(Constant *C, const Type *Ty);
- static Constant *getUIToFP (Constant *C, const Type *Ty);
- static Constant *getSIToFP (Constant *C, const Type *Ty);
- static Constant *getFPToUI (Constant *C, const Type *Ty);
- static Constant *getFPToSI (Constant *C, const Type *Ty);
- static Constant *getPtrToInt(Constant *C, const Type *Ty);
- static Constant *getIntToPtr(Constant *C, const Type *Ty);
- static Constant *getBitCast (Constant *C, const Type *Ty);
+ static Constant *getTrunc (Constant *C, Type *Ty);
+ static Constant *getSExt (Constant *C, Type *Ty);
+ static Constant *getZExt (Constant *C, Type *Ty);
+ static Constant *getFPTrunc (Constant *C, Type *Ty);
+ static Constant *getFPExtend(Constant *C, Type *Ty);
+ static Constant *getUIToFP (Constant *C, Type *Ty);
+ static Constant *getSIToFP (Constant *C, Type *Ty);
+ static Constant *getFPToUI (Constant *C, Type *Ty);
+ static Constant *getFPToSI (Constant *C, Type *Ty);
+ static Constant *getPtrToInt(Constant *C, Type *Ty);
+ static Constant *getIntToPtr(Constant *C, Type *Ty);
+ static Constant *getBitCast (Constant *C, Type *Ty);
static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
@@ -708,44 +708,44 @@ public:
static Constant *getCast(
unsigned ops, ///< The opcode for the conversion
Constant *C, ///< The constant to be converted
- const Type *Ty ///< The type to which the constant is converted
+ Type *Ty ///< The type to which the constant is converted
);
// @brief Create a ZExt or BitCast cast constant expression
static Constant *getZExtOrBitCast(
Constant *C, ///< The constant to zext or bitcast
- const Type *Ty ///< The type to zext or bitcast C to
+ Type *Ty ///< The type to zext or bitcast C to
);
// @brief Create a SExt or BitCast cast constant expression
static Constant *getSExtOrBitCast(
Constant *C, ///< The constant to sext or bitcast
- const Type *Ty ///< The type to sext or bitcast C to
+ Type *Ty ///< The type to sext or bitcast C to
);
// @brief Create a Trunc or BitCast cast constant expression
static Constant *getTruncOrBitCast(
Constant *C, ///< The constant to trunc or bitcast
- const Type *Ty ///< The type to trunc or bitcast C to
+ Type *Ty ///< The type to trunc or bitcast C to
);
/// @brief Create a BitCast or a PtrToInt cast constant expression
static Constant *getPointerCast(
Constant *C, ///< The pointer value to be cast (operand 0)
- const Type *Ty ///< The type to which cast should be made
+ Type *Ty ///< The type to which cast should be made
);
/// @brief Create a ZExt, Bitcast or Trunc for integer -> integer casts
static Constant *getIntegerCast(
Constant *C, ///< The integer constant to be cast
- const Type *Ty, ///< The integer type to cast to
+ Type *Ty, ///< The integer type to cast to
bool isSigned ///< Whether C should be treated as signed or not
);
/// @brief Create a FPExt, Bitcast or FPTrunc for fp -> fp casts
static Constant *getFPCast(
Constant *C, ///< The floating point constant to be cast
- const Type *Ty ///< The integer type to cast to
+ Type *Ty ///< The integer type to cast to
);
/// @brief Return true if this is a convert constant expression
@@ -845,7 +845,7 @@ public:
/// operands replaced with the specified values and with the specified result
/// type. The specified array must have the same number of operands as our
/// current one.
- Constant *getWithOperands(ArrayRef<Constant*> Ops, const Type *Ty) const;
+ Constant *getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const;
virtual void destroyConstant();
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
@@ -886,7 +886,7 @@ class UndefValue : public Constant {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
UndefValue(const UndefValue &); // DO NOT IMPLEMENT
protected:
- explicit UndefValue(const Type *T) : Constant(T, UndefValueVal, 0, 0) {}
+ explicit UndefValue(Type *T) : Constant(T, UndefValueVal, 0, 0) {}
protected:
// allocate space for exactly zero operands
void *operator new(size_t s) {
@@ -896,7 +896,7 @@ public:
/// get() - Static factory methods - Return an 'undef' object of the specified
/// type.
///
- static UndefValue *get(const Type *T);
+ static UndefValue *get(Type *T);
virtual void destroyConstant();
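
The splat behavior documented above means one call covers both scalars and whole vectors; a sketch with the same assumed Ctx:

    Type *I8 = Type::getInt8Ty(Ctx);
    Constant *C7    = ConstantInt::get(I8, 7);                     // i8 7
    Constant *Splat = ConstantInt::get(VectorType::get(I8, 4), 7); // <4 x i8> splat of 7
    // Cast constant expressions take a non-const result type as well.
    Constant *C7i32 = ConstantExpr::getZExt(C7, Type::getInt32Ty(Ctx));
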
diff --git a/include/llvm/DerivedTypes.h b/include/llvm/DerivedTypes.h
index acb28deada..f90eb42581 100644
--- a/include/llvm/DerivedTypes.h
+++ b/include/llvm/DerivedTypes.h
@@ -96,26 +96,26 @@ public:
class FunctionType : public Type {
FunctionType(const FunctionType &); // Do not implement
const FunctionType &operator=(const FunctionType &); // Do not implement
- FunctionType(const Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
+ FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
public:
/// FunctionType::get - This static method is the primary way of constructing
/// a FunctionType.
///
- static FunctionType *get(const Type *Result,
+ static FunctionType *get(Type *Result,
ArrayRef<Type*> Params, bool isVarArg);
/// FunctionType::get - Create a FunctionType taking no parameters.
///
- static FunctionType *get(const Type *Result, bool isVarArg);
+ static FunctionType *get(Type *Result, bool isVarArg);
/// isValidReturnType - Return true if the specified type is valid as a return
/// type.
- static bool isValidReturnType(const Type *RetTy);
+ static bool isValidReturnType(Type *RetTy);
/// isValidArgumentType - Return true if the specified type is valid as an
/// argument type.
- static bool isValidArgumentType(const Type *ArgTy);
+ static bool isValidArgumentType(Type *ArgTy);
bool isVarArg() const { return getSubclassData(); }
Type *getReturnType() const { return ContainedTys[0]; }
@@ -150,8 +150,8 @@ public:
/// getTypeAtIndex - Given an index value into the type, return the type of
/// the element.
///
- Type *getTypeAtIndex(const Value *V) const;
- Type *getTypeAtIndex(unsigned Idx) const;
+ Type *getTypeAtIndex(const Value *V);
+ Type *getTypeAtIndex(unsigned Idx);
bool indexValid(const Value *V) const;
bool indexValid(unsigned Idx) const;
@@ -250,7 +250,7 @@ public:
/// isValidElementType - Return true if the specified type is valid as an
/// element type.
- static bool isValidElementType(const Type *ElemTy);
+ static bool isValidElementType(Type *ElemTy);
// Iterator access to the elements.
@@ -260,7 +260,7 @@ public:
/// isLayoutIdentical - Return true if this is layout identical to the
/// specified struct.
- bool isLayoutIdentical(const StructType *Other) const;
+ bool isLayoutIdentical(StructType *Other) const;
// Random access to the elements
unsigned getNumElements() const { return NumContainedTys; }
@@ -321,11 +321,11 @@ public:
/// ArrayType::get - This static method is the primary way to construct an
/// ArrayType
///
- static ArrayType *get(const Type *ElementType, uint64_t NumElements);
+ static ArrayType *get(Type *ElementType, uint64_t NumElements);
/// isValidElementType - Return true if the specified type is valid as an
/// element type.
- static bool isValidElementType(const Type *ElemTy);
+ static bool isValidElementType(Type *ElemTy);
uint64_t getNumElements() const { return NumElements; }
@@ -348,13 +348,13 @@ public:
/// VectorType::get - This static method is the primary way to construct an
/// VectorType.
///
- static VectorType *get(const Type *ElementType, unsigned NumElements);
+ static VectorType *get(Type *ElementType, unsigned NumElements);
/// VectorType::getInteger - This static method gets a VectorType with the
/// same number of elements as the input type, and the element type is an
/// integer type of the same width as the input element type.
///
- static VectorType *getInteger(const VectorType *VTy) {
+ static VectorType *getInteger(VectorType *VTy) {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
return VectorType::get(EltTy, VTy->getNumElements());
@@ -364,7 +364,7 @@ public:
/// getInteger except that the element types are twice as wide as the
/// elements in the input type.
///
- static VectorType *getExtendedElementVectorType(const VectorType *VTy) {
+ static VectorType *getExtendedElementVectorType(VectorType *VTy) {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
return VectorType::get(EltTy, VTy->getNumElements());
@@ -374,7 +374,7 @@ public:
/// getInteger except that the element types are half as wide as the
/// elements in the input type.
///
- static VectorType *getTruncatedElementVectorType(const VectorType *VTy) {
+ static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
assert((EltBits & 1) == 0 &&
"Cannot truncate vector element with odd bit-width");
@@ -384,7 +384,7 @@ public:
/// isValidElementType - Return true if the specified type is valid as an
/// element type.
- static bool isValidElementType(const Type *ElemTy);
+ static bool isValidElementType(Type *ElemTy);
/// @brief Return the number of elements in the Vector type.
unsigned getNumElements() const { return NumElements; }
@@ -411,17 +411,17 @@ class PointerType : public SequentialType {
public:
/// PointerType::get - This constructs a pointer to an object of the specified
/// type in a numbered address space.
- static PointerType *get(const Type *ElementType, unsigned AddressSpace);
+ static PointerType *get(Type *ElementType, unsigned AddressSpace);
/// PointerType::getUnqual - This constructs a pointer to an object of the
/// specified type in the generic address space (address space zero).
- static PointerType *getUnqual(const Type *ElementType) {
+ static PointerType *getUnqual(Type *ElementType) {
return PointerType::get(ElementType, 0);
}
/// isValidElementType - Return true if the specified type is valid as an
/// element type.
- static bool isValidElementType(const Type *ElemTy);
+ static bool isValidElementType(Type *ElemTy);
/// @brief Return the address space of the Pointer type.
inline unsigned getAddressSpace() const { return getSubclassData(); }
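
Composing the type factories above, still with an assumed Ctx; a std::vector<Type*> converts implicitly to ArrayRef<Type*>:

    Type *I32 = Type::getInt32Ty(Ctx);
    PointerType *I8Ptr = PointerType::getUnqual(Type::getInt8Ty(Ctx));
    std::vector<Type*> Params;
    Params.push_back(I32);
    Params.push_back(I8Ptr);
    // i32 (i32, i8*), not vararg.
    FunctionType *FTy = FunctionType::get(I32, Params, /*isVarArg=*/false);
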
diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h
index 88b21cd4d2..f66f9eca03 100644
--- a/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -314,7 +314,7 @@ public:
/// GenericValue *. It is not a pointer to a GenericValue containing the
/// address at which to store Val.
void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
- const Type *Ty);
+ Type *Ty);
void InitializeMemory(const Constant *Init, void *Addr);
@@ -440,7 +440,7 @@ protected:
GenericValue getConstantValue(const Constant *C);
void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
- const Type *Ty);
+ Type *Ty);
};
namespace EngineKind {
diff --git a/include/llvm/Function.h b/include/llvm/Function.h
index 0aa5b2a9fa..678651bbf1 100644
--- a/include/llvm/Function.h
+++ b/include/llvm/Function.h
@@ -117,11 +117,11 @@ private:
/// function is automatically inserted into the end of the function list for
/// the module.
///
- Function(const FunctionType *Ty, LinkageTypes Linkage,
+ Function(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = 0);
public:
- static Function *Create(const FunctionType *Ty, LinkageTypes Linkage,
+ static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = 0) {
return new(0) Function(Ty, Linkage, N, M);
}
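
With the FunctionType sketch above and an assumed Module *M, the Create factory now takes the type non-const and, given M, appends the new function to the module's list:

    Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
                                   "my_fn", M); // "my_fn" is a made-up name
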
diff --git a/include/llvm/GlobalAlias.h b/include/llvm/GlobalAlias.h
index c3d3c38bd3..33561b9dce 100644
--- a/include/llvm/GlobalAlias.h
+++ b/include/llvm/GlobalAlias.h
@@ -41,7 +41,7 @@ public:
}
/// GlobalAlias ctor - If a parent module is specified, the alias is
/// automatically inserted into the end of the specified module's alias list.
- GlobalAlias(const Type *Ty, LinkageTypes Linkage, const Twine &Name = "",
+ GlobalAlias(Type *Ty, LinkageTypes Linkage, const Twine &Name = "",
Constant* Aliasee = 0, Module *Parent = 0);
/// Provide fast operand accessors
diff --git a/include/llvm/GlobalValue.h b/include/llvm/GlobalValue.h
index d0f0888a22..63dc4ab6ba 100644
--- a/include/llvm/GlobalValue.h
+++ b/include/llvm/GlobalValue.h
@@ -57,7 +57,7 @@ public:
};
protected:
- GlobalValue(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
+ GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
LinkageTypes linkage, const Twine &Name)
: Constant(ty, vty, Ops, NumOps), Parent(0),
Linkage(linkage), Visibility(DefaultVisibility), Alignment(0),
diff --git a/include/llvm/GlobalVariable.h b/include/llvm/GlobalVariable.h
index bbc09c177e..034ade1fb0 100644
--- a/include/llvm/GlobalVariable.h
+++ b/include/llvm/GlobalVariable.h
@@ -50,12 +50,12 @@ public:
}
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified module's global list.
- GlobalVariable(const Type *Ty, bool isConstant, LinkageTypes Linkage,
+ GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
Constant *Initializer = 0, const Twine &Name = "",
bool ThreadLocal = false, unsigned AddressSpace = 0);
/// GlobalVariable ctor - This creates a global and inserts it before the
/// specified other global.
- GlobalVariable(Module &M, const Type *Ty, bool isConstant,
+ GlobalVariable(Module &M, Type *Ty, bool isConstant,
LinkageTypes Linkage, Constant *Initializer,
const Twine &Name,
GlobalVariable *InsertBefore = 0, bool ThreadLocal = false,
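A hedged sketch of the Module-taking constructor (Module &M and LLVMContext &Ctx assumed):

  Type *I32 = Type::getInt32Ty(Ctx);
  GlobalVariable *GV =
      new GlobalVariable(M, I32, /*isConstant=*/false,
                         GlobalValue::ExternalLinkage,
                         ConstantInt::get(I32, 0), "counter");
  // With InsertBefore left null, GV is appended to M's global list.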
diff --git a/include/llvm/InlineAsm.h b/include/llvm/InlineAsm.h
index a98aff178c..fe8a714dd9 100644
--- a/include/llvm/InlineAsm.h
+++ b/include/llvm/InlineAsm.h
@@ -43,7 +43,7 @@ class InlineAsm : public Value {
bool HasSideEffects;
bool IsAlignStack;
- InlineAsm(const PointerType *Ty, const std::string &AsmString,
+ InlineAsm(PointerType *Ty, const std::string &AsmString,
const std::string &Constraints, bool hasSideEffects,
bool isAlignStack);
virtual ~InlineAsm();
@@ -55,7 +55,7 @@ public:
/// InlineAsm::get - Return the specified uniqued inline asm string.
///
- static InlineAsm *get(const FunctionType *Ty, StringRef AsmString,
+ static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
bool isAlignStack = false);
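A hedged usage sketch (LLVMContext &Ctx assumed); the result is usable as the callee of a CallInst:

  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                        /*isVarArg=*/false);
  InlineAsm *IA = InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true);
  // Identical arguments return the same uniqued InlineAsm object.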
@@ -79,7 +79,7 @@ public:
/// the specified constraint string is legal for the type. This returns true
/// if legal, false if not.
///
- static bool Verify(const FunctionType *Ty, StringRef Constraints);
+ static bool Verify(FunctionType *Ty, StringRef Constraints);
// Constraint String Parsing
enum ConstraintPrefix {
diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h
index cc9ec3ac76..a1492f3c14 100644
--- a/include/llvm/InstrTypes.h
+++ b/include/llvm/InstrTypes.h
@@ -34,12 +34,12 @@ class LLVMContext;
///
class TerminatorInst : public Instruction {
protected:
- TerminatorInst(const Type *Ty, Instruction::TermOps iType,
+ TerminatorInst(Type *Ty, Instruction::TermOps iType,
Use *Ops, unsigned NumOps,
Instruction *InsertBefore = 0)
: Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}
- TerminatorInst(const Type *Ty, Instruction::TermOps iType,
+ TerminatorInst(Type *Ty, Instruction::TermOps iType,
Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
: Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}
@@ -91,12 +91,12 @@ class UnaryInstruction : public Instruction {
void *operator new(size_t, unsigned); // Do not implement
protected:
- UnaryInstruction(const Type *Ty, unsigned iType, Value *V,
+ UnaryInstruction(Type *Ty, unsigned iType, Value *V,
Instruction *IB = 0)
: Instruction(Ty, iType, &Op<0>(), 1, IB) {
Op<0>() = V;
}
- UnaryInstruction(const Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
+ UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
: Instruction(Ty, iType, &Op<0>(), 1, IAE) {
Op<0>() = V;
}
@@ -141,9 +141,9 @@ class BinaryOperator : public Instruction {
void *operator new(size_t, unsigned); // Do not implement
protected:
void init(BinaryOps iType);
- BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty,
+ BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
- BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty,
+ BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
virtual BinaryOperator *clone_impl() const;
public:
@@ -390,13 +390,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
class CastInst : public UnaryInstruction {
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
- CastInst(const Type *Ty, unsigned iType, Value *S,
+ CastInst(Type *Ty, unsigned iType, Value *S,
const Twine &NameStr = "", Instruction *InsertBefore = 0)
: UnaryInstruction(Ty, iType, S, InsertBefore) {
setName(NameStr);
}
/// @brief Constructor with insert-at-end-of-block semantics for subclasses
- CastInst(const Type *Ty, unsigned iType, Value *S,
+ CastInst(Type *Ty, unsigned iType, Value *S,
const Twine &NameStr, BasicBlock *InsertAtEnd)
: UnaryInstruction(Ty, iType, S, InsertAtEnd) {
setName(NameStr);
@@ -411,7 +411,7 @@ public:
static CastInst *Create(
Instruction::CastOps, ///< The opcode of the cast instruction
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
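A hedged sketch (an i8-typed Value *V8, an Instruction *InsertPt, and an LLVMContext &Ctx assumed):

  CastInst *Z = CastInst::Create(Instruction::ZExt, V8,
                                 Type::getInt32Ty(Ctx), "wide", InsertPt);
  // castIsValid (declared below) can pre-check the opcode/type combination.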
@@ -424,7 +424,7 @@ public:
static CastInst *Create(
Instruction::CastOps, ///< The opcode for the cast instruction
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which operand is casted
+ Type *Ty, ///< The type to which operand is casted
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -432,7 +432,7 @@ public:
/// @brief Create a ZExt or BitCast cast instruction
static CastInst *CreateZExtOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
@@ -440,7 +440,7 @@ public:
/// @brief Create a ZExt or BitCast cast instruction
static CastInst *CreateZExtOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which operand is casted
+ Type *Ty, ///< The type to which operand is casted
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -448,7 +448,7 @@ public:
/// @brief Create a SExt or BitCast cast instruction
static CastInst *CreateSExtOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
@@ -456,7 +456,7 @@ public:
/// @brief Create a SExt or BitCast cast instruction
static CastInst *CreateSExtOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which operand is casted
+ Type *Ty, ///< The type to which operand is casted
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -464,7 +464,7 @@ public:
/// @brief Create a BitCast or a PtrToInt cast instruction
static CastInst *CreatePointerCast(
Value *S, ///< The pointer value to be casted (operand 0)
- const Type *Ty, ///< The type to which operand is casted
+ Type *Ty, ///< The type to which operand is casted
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -472,7 +472,7 @@ public:
/// @brief Create a BitCast or a PtrToInt cast instruction
static CastInst *CreatePointerCast(
Value *S, ///< The pointer value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
@@ -480,7 +480,7 @@ public:
/// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst *CreateIntegerCast(
Value *S, ///< The integer value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
bool isSigned, ///< Whether to regard S as signed or not
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
@@ -489,7 +489,7 @@ public:
/// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst *CreateIntegerCast(
Value *S, ///< The integer value to be casted (operand 0)
- const Type *Ty, ///< The integer type to which operand is casted
+ Type *Ty, ///< The integer type to which operand is casted
bool isSigned, ///< Whether to regard S as signed or not
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
@@ -498,7 +498,7 @@ public:
/// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
static CastInst *CreateFPCast(
Value *S, ///< The floating point value to be casted
- const Type *Ty, ///< The floating point type to cast to
+ Type *Ty, ///< The floating point type to cast to
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
@@ -506,7 +506,7 @@ public:
/// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
static CastInst *CreateFPCast(
Value *S, ///< The floating point value to be casted
- const Type *Ty, ///< The floating point type to cast to
+ Type *Ty, ///< The floating point type to cast to
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -514,7 +514,7 @@ public:
/// @brief Create a Trunc or BitCast cast instruction
static CastInst *CreateTruncOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which cast should be made
+ Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
Instruction *InsertBefore = 0 ///< Place to insert the instruction
);
@@ -522,15 +522,15 @@ public:
/// @brief Create a Trunc or BitCast cast instruction
static CastInst *CreateTruncOrBitCast(
Value *S, ///< The value to be casted (operand 0)
- const Type *Ty, ///< The type to which operand is casted
+ Type *Ty, ///< The type to which operand is casted
const Twine &Name, ///< The name for the instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
/// @brief Check whether it is valid to call getCastOpcode for these types.
static bool isCastable(
- const Type *SrcTy, ///< The Type from which the value should be cast.
- const Type *DestTy ///< The Type to which the value should be cast.
+ Type *SrcTy, ///< The Type from which the value should be cast.
+ Type *DestTy ///< The Type to which the value should be cast.
);
/// Returns the opcode necessary to cast Val into Ty using usual casting
@@ -539,7 +539,7 @@ public:
static Instruction::CastOps getCastOpcode(
const Value *Val, ///< The value to cast
bool SrcIsSigned, ///< Whether to treat the source as signed
- const Type *Ty, ///< The Type to which the value should be casted
+ Type *Ty, ///< The Type to which the value should be casted
bool DstIsSigned ///< Whether to treat the destination as signed
);
@@ -568,14 +568,14 @@ public:
/// @brief Determine if the described cast is a no-op cast.
static bool isNoopCast(
Instruction::CastOps Opcode, ///< Opcode of cast
- const Type *SrcTy, ///< SrcTy of cast
- const Type *DstTy, ///< DstTy of cast
- const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ Type *SrcTy, ///< SrcTy of cast
+ Type *DstTy, ///< DstTy of cast
+ Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
);
/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
- const Type *IntPtrTy ///< Integer type corresponding to pointer
+ Type *IntPtrTy ///< Integer type corresponding to pointer
) const;
/// Determine how a pair of casts can be eliminated, if they can be at all.
@@ -587,10 +587,10 @@ public:
static unsigned isEliminableCastPair(
Instruction::CastOps firstOpcode, ///< Opcode of first cast
Instruction::CastOps secondOpcode, ///< Opcode of second cast
- const Type *SrcTy, ///< SrcTy of 1st cast
- const Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
- const Type *DstTy, ///< DstTy of 2nd cast
- const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ Type *SrcTy, ///< SrcTy of 1st cast
+ Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
+ Type *DstTy, ///< DstTy of 2nd cast
+ Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
);
/// @brief Return the opcode of this CastInst
@@ -599,15 +599,15 @@ public:
}
/// @brief Return the source type, as a convenience
- const Type* getSrcTy() const { return getOperand(0)->getType(); }
+ Type* getSrcTy() const { return getOperand(0)->getType(); }
/// @brief Return the destination type, as a convenience
- const Type* getDestTy() const { return getType(); }
+ Type* getDestTy() const { return getType(); }
/// This method can be used to determine if a cast from S to DstTy using
/// Opcode op is valid or not.
/// @returns true iff the proposed cast is valid.
/// @brief Determine if a cast is valid without creating one.
- static bool castIsValid(Instruction::CastOps op, Value *S, const Type *DstTy);
+ static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const CastInst *) { return true; }
@@ -629,11 +629,11 @@ class CmpInst : public Instruction {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
CmpInst(); // do not implement
protected:
- CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred,
+ CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name = "",
Instruction *InsertBefore = 0);
- CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred,
+ CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd);
@@ -825,8 +825,8 @@ public:
}
/// @brief Create a result type for fcmp/icmp
- static const Type* makeCmpResultType(const Type* opnd_type) {
- if (const VectorType* vt = dyn_cast<const VectorType>(opnd_type)) {
+ static Type* makeCmpResultType(Type* opnd_type) {
+ if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
vt->getNumElements());
}
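The scalar/vector split above in action (LLVMContext &Ctx assumed):

  Type *V4I32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
  Type *VR = CmpInst::makeCmpResultType(V4I32);                 // <4 x i1>
  Type *SR = CmpInst::makeCmpResultType(Type::getInt64Ty(Ctx)); // i1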
diff --git a/include/llvm/Instruction.h b/include/llvm/Instruction.h
index 89bb9fdf42..2459ef6076 100644
--- a/include/llvm/Instruction.h
+++ b/include/llvm/Instruction.h
@@ -365,9 +365,9 @@ protected:
return getSubclassDataFromValue() & ~HasMetadataBit;
}
- Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+ Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
Instruction *InsertBefore = 0);
- Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+ Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd);
virtual Instruction *clone_impl() const = 0;
diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h
index 0bc9a3b643..2eadba98ca 100644
--- a/include/llvm/Instructions.h
+++ b/include/llvm/Instructions.h
@@ -41,17 +41,17 @@ class AllocaInst : public UnaryInstruction {
protected:
virtual AllocaInst *clone_impl() const;
public:
- explicit AllocaInst(const Type *Ty, Value *ArraySize = 0,
+ explicit AllocaInst(Type *Ty, Value *ArraySize = 0,
const Twine &Name = "", Instruction *InsertBefore = 0);
- AllocaInst(const Type *Ty, Value *ArraySize,
+ AllocaInst(Type *Ty, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd);
- AllocaInst(const Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
- AllocaInst(const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
+ AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
+ AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
- AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+ AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
const Twine &Name = "", Instruction *InsertBefore = 0);
- AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+ AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
const Twine &Name, BasicBlock *InsertAtEnd);
// Out of line virtual method, so the vtable, etc. has a home.
@@ -70,8 +70,8 @@ public:
/// getType - Overload to return most specific pointer type
///
- const PointerType *getType() const {
- return reinterpret_cast<const PointerType*>(Instruction::getType());
+ PointerType *getType() const {
+ return reinterpret_cast<PointerType*>(Instruction::getType());
}
/// getAllocatedType - Return the type that is being allocated by the
@@ -275,7 +275,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
-static inline const Type *checkGEPType(const Type *Ty) {
+static inline Type *checkGEPType(Type *Ty) {
assert(Ty && "Invalid GetElementPtrInst indices for type!");
return Ty;
}
@@ -316,7 +316,7 @@ class GetElementPtrInst : public Instruction {
/// pointer type.
///
template<typename RandomAccessIterator>
- static Type *getIndexedType(const Type *Ptr,
+ static Type *getIndexedType(Type *Ptr,
RandomAccessIterator IdxBegin,
RandomAccessIterator IdxEnd,
// This argument ensures that we
@@ -436,8 +436,8 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// getType - Overload to return most specific pointer type...
- const PointerType *getType() const {
- return reinterpret_cast<const PointerType*>(Instruction::getType());
+ PointerType *getType() const {
+ return reinterpret_cast<PointerType*>(Instruction::getType());
}
/// getIndexedType - Returns the type of the element that would be loaded with
@@ -447,7 +447,7 @@ public:
/// pointer type.
///
template<typename RandomAccessIterator>
- static Type *getIndexedType(const Type *Ptr, RandomAccessIterator IdxBegin,
+ static Type *getIndexedType(Type *Ptr, RandomAccessIterator IdxBegin,
RandomAccessIterator IdxEnd) {
return getIndexedType(Ptr, IdxBegin, IdxEnd,
typename std::iterator_traits<RandomAccessIterator>::
@@ -455,14 +455,14 @@ public:
}
// FIXME: Use ArrayRef
- static Type *getIndexedType(const Type *Ptr,
+ static Type *getIndexedType(Type *Ptr,
Value* const *Idx, unsigned NumIdx);
- static Type *getIndexedType(const Type *Ptr,
+ static Type *getIndexedType(Type *Ptr,
Constant* const *Idx, unsigned NumIdx);
- static Type *getIndexedType(const Type *Ptr,
+ static Type *getIndexedType(Type *Ptr,
uint64_t const *Idx, unsigned NumIdx);
- static Type *getIndexedType(const Type *Ptr, Value *Idx);
+ static Type *getIndexedType(Type *Ptr, Value *Idx);
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
@@ -485,8 +485,8 @@ public:
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
- const PointerType *getPointerOperandType() const {
- return reinterpret_cast<const PointerType*>(getPointerOperand()->getType());
+ PointerType *getPointerOperandType() const {
+ return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
}
@@ -893,12 +893,12 @@ public:
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
static Instruction *CreateMalloc(Instruction *InsertBefore,
- const Type *IntPtrTy, const Type *AllocTy,
+ Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize = 0,
Function* MallocF = 0,
const Twine &Name = "");
static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
- const Type *IntPtrTy, const Type *AllocTy,
+ Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize = 0,
Function* MallocF = 0,
const Twine &Name = "");
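A hedged sketch of the steps above (TargetData *TD, LLVMContext &Ctx, and Instruction *InsertPt assumed); getSizeOf's result is coerced to IntPtrTy, which CreateMalloc expects for AllocSize:

  Type *IntPtrTy = TD->getIntPtrType(Ctx);
  Type *AllocTy  = Type::getInt32Ty(Ctx);
  Constant *AllocSize = ConstantExpr::getSizeOf(AllocTy);
  AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
  Instruction *Malloc =
      CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy, AllocSize);
  // Malloc is the final bitcast, or the call itself if no cast was needed.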
@@ -1165,12 +1165,12 @@ protected:
virtual VAArgInst *clone_impl() const;
public:
- VAArgInst(Value *List, const Type *Ty, const Twine &NameStr = "",
+ VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
Instruction *InsertBefore = 0)
: UnaryInstruction(Ty, VAArg, List, InsertBefore) {
setName(NameStr);
}
- VAArgInst(Value *List, const Type *Ty, const Twine &NameStr,
+ VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
setName(NameStr);
@@ -1226,8 +1226,8 @@ public:
const Value *getVectorOperand() const { return Op<0>(); }
const Value *getIndexOperand() const { return Op<1>(); }
- const VectorType *getVectorOperandType() const {
- return reinterpret_cast<const VectorType*>(getVectorOperand()->getType());
+ VectorType *getVectorOperandType() const {
+ return reinterpret_cast<VectorType*>(getVectorOperand()->getType());
}
@@ -1286,8 +1286,8 @@ public:
/// getType - Overload to return most specific vector type.
///
- const VectorType *getType() const {
- return reinterpret_cast<const VectorType*>(Instruction::getType());
+ VectorType *getType() const {
+ return reinterpret_cast<VectorType*>(Instruction::getType());
}
/// Transparently provide more efficient getOperand methods.
@@ -1339,8 +1339,8 @@ public:
/// getType - Overload to return most specific vector type.
///
- const VectorType *getType() const {
- return reinterpret_cast<const VectorType*>(Instruction::getType());
+ VectorType *getType() const {
+ return reinterpret_cast<VectorType*>(Instruction::getType());
}
/// Transparently provide more efficient getOperand methods.
@@ -1419,7 +1419,7 @@ public:
/// with an extractvalue instruction with the specified parameters.
///
/// Null is returned if the indices are invalid for the specified type.
- static Type *getIndexedType(const Type *Agg, ArrayRef<unsigned> Idxs);
+ static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
typedef const unsigned* idx_iterator;
inline idx_iterator idx_begin() const { return Indices.begin(); }
@@ -1625,7 +1625,7 @@ class PHINode : public Instruction {
void *operator new(size_t s) {
return User::operator new(s, 0);
}
- explicit PHINode(const Type *Ty, unsigned NumReservedValues,
+ explicit PHINode(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr = "", Instruction *InsertBefore = 0)
: Instruction(Ty, Instruction::PHI, 0, 0, InsertBefore),
ReservedSpace(NumReservedValues) {
@@ -1633,7 +1633,7 @@ class PHINode : public Instruction {
OperandList = allocHungoffUses(ReservedSpace);
}
- PHINode(const Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
+ PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(Ty, Instruction::PHI, 0, 0, InsertAtEnd),
ReservedSpace(NumReservedValues) {
@@ -1650,12 +1650,12 @@ protected:
public:
/// Constructors - NumReservedValues is a hint for the number of incoming
/// edges that this phi node will have (use 0 if you really have no idea).
- static PHINode *Create(const Type *Ty, unsigned NumReservedValues,
+ static PHINode *Create(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
}
- static PHINode *Create(const Type *Ty, unsigned NumReservedValues,
+ static PHINode *Create(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
}
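A hedged sketch (values A/B arriving from blocks BBA/BBB, an insertion block Merge, and LLVMContext &Ctx assumed):

  PHINode *PN = PHINode::Create(Type::getInt32Ty(Ctx),
                                /*NumReservedValues=*/2, "phi", Merge);
  PN->addIncoming(A, BBA);
  PN->addIncoming(B, BBB);
  // Reserving 2 up front avoids regrowing the hung-off use list.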
@@ -2543,7 +2543,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
TruncInst(
Value *S, ///< The value to be truncated
- const Type *Ty, ///< The (smaller) type to truncate to
+ Type *Ty, ///< The (smaller) type to truncate to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2551,7 +2551,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
TruncInst(
Value *S, ///< The value to be truncated
- const Type *Ty, ///< The (smaller) type to truncate to
+ Type *Ty, ///< The (smaller) type to truncate to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2580,7 +2580,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
ZExtInst(
Value *S, ///< The value to be zero extended
- const Type *Ty, ///< The type to zero extend to
+ Type *Ty, ///< The type to zero extend to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2588,7 +2588,7 @@ public:
/// @brief Constructor with insert-at-end semantics.
ZExtInst(
Value *S, ///< The value to be zero extended
- const Type *Ty, ///< The type to zero extend to
+ Type *Ty, ///< The type to zero extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2617,7 +2617,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
SExtInst(
Value *S, ///< The value to be sign extended
- const Type *Ty, ///< The type to sign extend to
+ Type *Ty, ///< The type to sign extend to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2625,7 +2625,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
SExtInst(
Value *S, ///< The value to be sign extended
- const Type *Ty, ///< The type to sign extend to
+ Type *Ty, ///< The type to sign extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2654,7 +2654,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
- const Type *Ty, ///< The type to truncate to
+ Type *Ty, ///< The type to truncate to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2662,7 +2662,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
- const Type *Ty, ///< The type to truncate to
+ Type *Ty, ///< The type to truncate to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2691,7 +2691,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
FPExtInst(
Value *S, ///< The value to be extended
- const Type *Ty, ///< The type to extend to
+ Type *Ty, ///< The type to extend to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2699,7 +2699,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
FPExtInst(
Value *S, ///< The value to be extended
- const Type *Ty, ///< The type to extend to
+ Type *Ty, ///< The type to extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2728,7 +2728,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
UIToFPInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2736,7 +2736,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
UIToFPInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2765,7 +2765,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
SIToFPInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2773,7 +2773,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
SIToFPInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2802,7 +2802,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
FPToUIInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2810,7 +2810,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
FPToUIInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2839,7 +2839,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
FPToSIInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2847,7 +2847,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
FPToSIInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2872,7 +2872,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
IntToPtrInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2880,7 +2880,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
IntToPtrInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2912,7 +2912,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
PtrToIntInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2920,7 +2920,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
PtrToIntInst(
Value *S, ///< The value to be converted
- const Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2949,7 +2949,7 @@ public:
/// @brief Constructor with insert-before-instruction semantics
BitCastInst(
Value *S, ///< The value to be casted
- const Type *Ty, ///< The type to casted to
+ Type *Ty, ///< The type to cast to
const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2957,7 +2957,7 @@ public:
/// @brief Constructor with insert-at-end-of-block semantics
BitCastInst(
Value *S, ///< The value to be casted
- const Type *Ty, ///< The type to casted to
+ Type *Ty, ///< The type to cast to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
diff --git a/include/llvm/IntrinsicInst.h b/include/llvm/IntrinsicInst.h
index 24e5fe7845..42862011ac 100644
--- a/include/llvm/IntrinsicInst.h
+++ b/include/llvm/IntrinsicInst.h
@@ -170,7 +170,7 @@ namespace llvm {
setArgOperand(4, V);
}
- const Type *getAlignmentType() const {
+ Type *getAlignmentType() const {
return getArgOperand(3)->getType();
}
diff --git a/include/llvm/Intrinsics.h b/include/llvm/Intrinsics.h
index 46361ca0c2..3703825603 100644
--- a/include/llvm/Intrinsics.h
+++ b/include/llvm/Intrinsics.h
@@ -49,7 +49,7 @@ namespace Intrinsic {
/// Intrinsic::getType(ID) - Return the function type for an intrinsic.
///
- const FunctionType *getType(LLVMContext &Context, ID id,
+ FunctionType *getType(LLVMContext &Context, ID id,
ArrayRef<Type*> Tys = ArrayRef<Type*>());
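A hedged sketch for an overloaded intrinsic (LLVMContext &Ctx assumed; the three Tys entries are believed to match llvm.memcpy's overloaded slots):

  Type *Tys[] = { Type::getInt8PtrTy(Ctx), Type::getInt8PtrTy(Ctx),
                  Type::getInt64Ty(Ctx) };
  FunctionType *FTy = Intrinsic::getType(Ctx, Intrinsic::memcpy, Tys);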
/// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be
diff --git a/include/llvm/Module.h b/include/llvm/Module.h
index 47d23f36c1..7eb34b6896 100644
--- a/include/llvm/Module.h
+++ b/include/llvm/Module.h
@@ -272,10 +272,10 @@ public:
/// the existing function.
/// 4. Finally, the function exists but has the wrong prototype: return the
/// function with a constantexpr cast to the right prototype.
- Constant *getOrInsertFunction(StringRef Name, const FunctionType *T,
+ Constant *getOrInsertFunction(StringRef Name, FunctionType *T,
AttrListPtr AttributeList);
- Constant *getOrInsertFunction(StringRef Name, const FunctionType *T);
+ Constant *getOrInsertFunction(StringRef Name, FunctionType *T);
/// getOrInsertFunction - Look up the specified function in the module symbol
/// table. If it does not exist, add a prototype for the function and return
@@ -286,14 +286,14 @@ public:
/// clients to use.
Constant *getOrInsertFunction(StringRef Name,
AttrListPtr AttributeList,
- const Type *RetTy, ...) END_WITH_NULL;
+ Type *RetTy, ...) END_WITH_NULL;
/// getOrInsertFunction - Same as above, but without the attributes.
- Constant *getOrInsertFunction(StringRef Name, const Type *RetTy, ...)
+ Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ...)
END_WITH_NULL;
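A hedged sketch of the NULL-terminated form (Module *M and LLVMContext &Ctx assumed); the result is a Constant* because the wrong-prototype case above may return a constantexpr cast rather than a Function*:

  Constant *C = M->getOrInsertFunction("puts",
                                       Type::getInt32Ty(Ctx),
                                       Type::getInt8PtrTy(Ctx),
                                       (Type *)0); // END_WITH_NULL sentinel
  Function *Puts = dyn_cast<Function>(C); // null if a cast came back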
Constant *getOrInsertTargetIntrinsic(StringRef Name,
- const FunctionType *Ty,
+ FunctionType *Ty,
AttrListPtr AttributeList);
/// getFunction - Look up the specified function in the module symbol table.
@@ -325,7 +325,7 @@ public:
/// with a constantexpr cast to the right type.
/// 3. Finally, if the existing global is the correct declaration, return
/// the existing global.
- Constant *getOrInsertGlobal(StringRef Name, const Type *Ty);
+ Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
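A hedged one-liner (Module *M and LLVMContext &Ctx assumed):

  Constant *Flag = M->getOrInsertGlobal("flag", Type::getInt1Ty(Ctx));
  // Per the constantexpr-cast case above, Flag need not be a GlobalVariable.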
/// @}
/// @name Global Alias Accessors
diff --git a/include/llvm/Operator.h b/include/llvm/Operator.h
index e9aa4997f2..48a5796383 100644
--- a/include/llvm/Operator.h
+++ b/include/llvm/Operator.h
@@ -261,8 +261,8 @@ public:
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
- const PointerType *getPointerOperandType() const {
- return reinterpret_cast<const PointerType*>(getPointerOperand()->getType());
+ PointerType *getPointerOperandType() const {
+ return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
}
unsigned getNumIndices() const { // Note: always non-negative
diff --git a/include/llvm/Support/CallSite.h b/include/llvm/Support/CallSite.h
index 8a998a8cd0..04b8c4e69c 100644
--- a/include/llvm/Support/CallSite.h
+++ b/include/llvm/Support/CallSite.h
@@ -147,7 +147,7 @@ public:
/// getType - Return the type of the instruction that generated this call site
///
- const Type *getType() const { return (*this)->getType(); }
+ Type *getType() const { return (*this)->getType(); }
/// getCaller - Return the caller function for this call site
///
diff --git a/include/llvm/Support/ConstantFolder.h b/include/llvm/Support/ConstantFolder.h
index 733023566a..699c6281b3 100644
--- a/include/llvm/Support/ConstantFolder.h
+++ b/include/llvm/Support/ConstantFolder.h
@@ -141,37 +141,37 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateCast(Instruction::CastOps Op, Constant *C,
- const Type *DestTy) const {
+ Type *DestTy) const {
return ConstantExpr::getCast(Op, C, DestTy);
}
- Constant *CreatePointerCast(Constant *C, const Type *DestTy) const {
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getPointerCast(C, DestTy);
}
- Constant *CreateIntCast(Constant *C, const Type *DestTy,
+ Constant *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const {
return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
}
- Constant *CreateFPCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getFPCast(C, DestTy);
}
- Constant *CreateBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
- Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
- Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
- Constant *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getZExtOrBitCast(C, DestTy);
}
- Constant *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getSExtOrBitCast(C, DestTy);
}
- Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getTruncOrBitCast(C, DestTy);
}
diff --git a/include/llvm/Support/GetElementPtrTypeIterator.h b/include/llvm/Support/GetElementPtrTypeIterator.h
index e5e7fc7409..526afd40c6 100644
--- a/include/llvm/Support/GetElementPtrTypeIterator.h
+++ b/include/llvm/Support/GetElementPtrTypeIterator.h
@@ -21,16 +21,16 @@
namespace llvm {
template<typename ItTy = User::const_op_iterator>
class generic_gep_type_iterator
- : public std::iterator<std::forward_iterator_tag, const Type *, ptrdiff_t> {
+ : public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
typedef std::iterator<std::forward_iterator_tag,
- const Type *, ptrdiff_t> super;
+ Type *, ptrdiff_t> super;
ItTy OpIt;
- const Type *CurTy;
+ Type *CurTy;
generic_gep_type_iterator() {}
public:
- static generic_gep_type_iterator begin(const Type *Ty, ItTy It) {
+ static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
generic_gep_type_iterator I;
I.CurTy = Ty;
I.OpIt = It;
@@ -50,23 +50,23 @@ namespace llvm {
return !operator==(x);
}
- const Type *operator*() const {
+ Type *operator*() const {
return CurTy;
}
- const Type *getIndexedType() const {
- const CompositeType *CT = cast<CompositeType>(CurTy);
+ Type *getIndexedType() const {
+ CompositeType *CT = cast<CompositeType>(CurTy);
return CT->getTypeAtIndex(getOperand());
}
// This is a non-standard operator->. It allows you to call methods on the
// current type directly.
- const Type *operator->() const { return operator*(); }
+ Type *operator->() const { return operator*(); }
Value *getOperand() const { return *OpIt; }
generic_gep_type_iterator& operator++() { // Preincrement
- if (const CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
+ if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
CurTy = CT->getTypeAtIndex(getOperand());
} else {
CurTy = 0;
@@ -99,13 +99,13 @@ namespace llvm {
template<typename ItTy>
inline generic_gep_type_iterator<ItTy>
- gep_type_begin(const Type *Op0, ItTy I, ItTy E) {
+ gep_type_begin(Type *Op0, ItTy I, ItTy E) {
return generic_gep_type_iterator<ItTy>::begin(Op0, I);
}
template<typename ItTy>
inline generic_gep_type_iterator<ItTy>
- gep_type_end(const Type *Op0, ItTy I, ItTy E) {
+ gep_type_end(Type *Op0, ItTy I, ItTy E) {
return generic_gep_type_iterator<ItTy>::end(E);
}
} // end namespace llvm
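A hedged walk over a GEP's indexed types (a getelementptr User *GEP and the gep_type_begin/gep_type_end(User*) overloads from this header assumed):

  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    Type *Ty  = *GTI;             // composite type indexed at this step
    Value *Ix = GTI.getOperand(); // the matching index operand
    (void)Ty; (void)Ix;
  }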
diff --git a/include/llvm/Support/IRBuilder.h b/include/llvm/Support/IRBuilder.h
index 91cd78e7f6..e166b199dd 100644
--- a/include/llvm/Support/IRBuilder.h
+++ b/include/llvm/Support/IRBuilder.h
@@ -744,7 +744,7 @@ public:
// Instruction creation methods: Memory Instructions
//===--------------------------------------------------------------------===//
- AllocaInst *CreateAlloca(const Type *Ty, Value *ArraySize = 0,
+ AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = 0,
const Twine &Name = "") {
return Insert(new AllocaInst(Ty, ArraySize), Name);
}
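A hedged sketch (an IRBuilder<> B already positioned at an insertion point assumed):

  AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), 0, "slot");
  B.CreateStore(B.getInt32(7), Slot);
  Value *V = B.CreateLoad(Slot, "v");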
@@ -910,47 +910,47 @@ public:
// Instruction creation methods: Cast/Conversion Operators
//===--------------------------------------------------------------------===//
- Value *CreateTrunc(Value *V, const Type *DestTy, const Twine &Name = "") {
+ Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::Trunc, V, DestTy, Name);
}
- Value *CreateZExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+ Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::ZExt, V, DestTy, Name);
}
- Value *CreateSExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+ Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::SExt, V, DestTy, Name);
}
- Value *CreateFPToUI(Value *V, const Type *DestTy, const Twine &Name = ""){
+ Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::FPToUI, V, DestTy, Name);
}
- Value *CreateFPToSI(Value *V, const Type *DestTy, const Twine &Name = ""){
+ Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::FPToSI, V, DestTy, Name);
}
- Value *CreateUIToFP(Value *V, const Type *DestTy, const Twine &Name = ""){
+ Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::UIToFP, V, DestTy, Name);
}
- Value *CreateSIToFP(Value *V, const Type *DestTy, const Twine &Name = ""){
+ Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::SIToFP, V, DestTy, Name);
}
- Value *CreateFPTrunc(Value *V, const Type *DestTy,
+ Value *CreateFPTrunc(Value *V, Type *DestTy,
const Twine &Name = "") {
return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
}
- Value *CreateFPExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+ Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::FPExt, V, DestTy, Name);
}
- Value *CreatePtrToInt(Value *V, const Type *DestTy,
+ Value *CreatePtrToInt(Value *V, Type *DestTy,
const Twine &Name = "") {
return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
}
- Value *CreateIntToPtr(Value *V, const Type *DestTy,
+ Value *CreateIntToPtr(Value *V, Type *DestTy,
const Twine &Name = "") {
return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
}
- Value *CreateBitCast(Value *V, const Type *DestTy,
+ Value *CreateBitCast(Value *V, Type *DestTy,
const Twine &Name = "") {
return CreateCast(Instruction::BitCast, V, DestTy, Name);
}
- Value *CreateZExtOrBitCast(Value *V, const Type *DestTy,
+ Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -958,7 +958,7 @@ public:
return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
}
- Value *CreateSExtOrBitCast(Value *V, const Type *DestTy,
+ Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -966,7 +966,7 @@ public:
return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
}
- Value *CreateTruncOrBitCast(Value *V, const Type *DestTy,
+ Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -974,7 +974,7 @@ public:
return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
}
- Value *CreateCast(Instruction::CastOps Op, Value *V, const Type *DestTy,
+ Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -982,7 +982,7 @@ public:
return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
return Insert(CastInst::Create(Op, V, DestTy), Name);
}
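The same-type/constant/instruction cascade above means constant operands never produce cast instructions; a hedged illustration with the default ConstantFolder (IRBuilder<> B assumed):

  Value *Seven = B.CreateZExt(B.getInt8(7), B.getInt32Ty());
  // Seven is simply the i32 constant 7; nothing was inserted.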
- Value *CreatePointerCast(Value *V, const Type *DestTy,
+ Value *CreatePointerCast(Value *V, Type *DestTy,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -990,7 +990,7 @@ public:
return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
}
- Value *CreateIntCast(Value *V, const Type *DestTy, bool isSigned,
+ Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
@@ -1001,9 +1001,9 @@ public:
private:
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a compile-time
// error instead of converting the string to bool for the isSigned parameter.
- Value *CreateIntCast(Value *, const Type *, const char *); // DO NOT IMPLEMENT
+ Value *CreateIntCast(Value *, Type *, const char *); // DO NOT IMPLEMENT
public:
- Value *CreateFPCast(Value *V, const Type *DestTy, const Twine &Name = "") {
+ Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
@@ -1108,7 +1108,7 @@ public:
// Instruction creation methods: Other Instructions
//===--------------------------------------------------------------------===//
- PHINode *CreatePHI(const Type *Ty, unsigned NumReservedValues,
+ PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
const Twine &Name = "") {
return Insert(PHINode::Create(Ty, NumReservedValues), Name);
}
@@ -1154,7 +1154,7 @@ public:
return Insert(SelectInst::Create(C, True, False), Name);
}
- VAArgInst *CreateVAArg(Value *List, const Type *Ty, const Twine &Name = "") {
+ VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
return Insert(new VAArgInst(List, Ty), Name);
}
diff --git a/include/llvm/Support/NoFolder.h b/include/llvm/Support/NoFolder.h
index 94359a5328..28447f7497 100644
--- a/include/llvm/Support/NoFolder.h
+++ b/include/llvm/Support/NoFolder.h
@@ -200,37 +200,37 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
- const Type *DestTy) const {
+ Type *DestTy) const {
return CastInst::Create(Op, C, DestTy);
}
- Instruction *CreatePointerCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
return CastInst::CreatePointerCast(C, DestTy);
}
- Instruction *CreateIntCast(Constant *C, const Type *DestTy,
+ Instruction *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const {
return CastInst::CreateIntegerCast(C, DestTy, isSigned);
}
- Instruction *CreateFPCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
return CastInst::CreateFPCast(C, DestTy);
}
- Instruction *CreateBitCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
- Instruction *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+ Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
- Instruction *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+ Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
- Instruction *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
return CastInst::CreateZExtOrBitCast(C, DestTy);
}
- Instruction *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
return CastInst::CreateSExtOrBitCast(C, DestTy);
}
- Instruction *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+ Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
return CastInst::CreateTruncOrBitCast(C, DestTy);
}
diff --git a/include/llvm/Support/TargetFolder.h b/include/llvm/Support/TargetFolder.h
index 3233a98da9..c13e741739 100644
--- a/include/llvm/Support/TargetFolder.h
+++ b/include/llvm/Support/TargetFolder.h
@@ -153,40 +153,40 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateCast(Instruction::CastOps Op, Constant *C,
- const Type *DestTy) const {
+ Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getCast(Op, C, DestTy));
}
- Constant *CreateIntCast(Constant *C, const Type *DestTy,
+ Constant *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
}
- Constant *CreatePointerCast(Constant *C, const Type *DestTy) const {
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getPointerCast(C, DestTy);
}
- Constant *CreateBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
- Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
- Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
- Constant *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
}
- Constant *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
}
- Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
index c28081000d..e210b27be0 100644
--- a/include/llvm/Target/TargetData.h
+++ b/include/llvm/Target/TargetData.h
@@ -90,9 +90,9 @@ private:
void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, const Type *Ty) const;
+ bool ABIAlign, Type *Ty) const;
//! Internal helper method that returns requested alignment for type.
- unsigned getAlignment(const Type *Ty, bool abi_or_pref) const;
+ unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
/// Valid alignment predicate.
///
@@ -200,19 +200,19 @@ public:
/// getTypeSizeInBits - Return the number of bits necessary to hold the
/// specified type. For example, returns 36 for i36 and 80 for x86_fp80.
- uint64_t getTypeSizeInBits(const Type* Ty) const;
+ uint64_t getTypeSizeInBits(Type* Ty) const;
/// getTypeStoreSize - Return the maximum number of bytes that may be
/// overwritten by storing the specified type. For example, returns 5
/// for i36 and 10 for x86_fp80.
- uint64_t getTypeStoreSize(const Type *Ty) const {
+ uint64_t getTypeStoreSize(Type *Ty) const {
return (getTypeSizeInBits(Ty)+7)/8;
}
/// getTypeStoreSizeInBits - Return the maximum number of bits that may be
/// overwritten by storing the specified type; always a multiple of 8. For
/// example, returns 40 for i36 and 80 for x86_fp80.
- uint64_t getTypeStoreSizeInBits(const Type *Ty) const {
+ uint64_t getTypeStoreSizeInBits(Type *Ty) const {
return 8*getTypeStoreSize(Ty);
}
@@ -220,7 +220,7 @@ public:
/// of the specified type, including alignment padding. This is the amount
/// that alloca reserves for this type. For example, returns 12 or 16 for
/// x86_fp80, depending on alignment.
- uint64_t getTypeAllocSize(const Type* Ty) const {
+ uint64_t getTypeAllocSize(Type* Ty) const {
// Round up to the next alignment boundary.
return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
}
@@ -229,13 +229,13 @@ public:
/// objects of the specified type, including alignment padding; always a
/// multiple of 8. This is the amount that alloca reserves for this type.
/// For example, returns 96 or 128 for x86_fp80, depending on alignment.
- uint64_t getTypeAllocSizeInBits(const Type* Ty) const {
+ uint64_t getTypeAllocSizeInBits(Type* Ty) const {
return 8*getTypeAllocSize(Ty);
}
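The i36 figures quoted above, as a hedged sketch (TargetData *TD and LLVMContext &Ctx assumed):

  Type *I36 = IntegerType::get(Ctx, 36);
  uint64_t Bits  = TD->getTypeSizeInBits(I36); // 36
  uint64_t Store = TD->getTypeStoreSize(I36);  // (36+7)/8 == 5
  uint64_t Alloc = TD->getTypeAllocSize(I36);  // 5 rounded up to ABI alignment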
/// getABITypeAlignment - Return the minimum ABI-required alignment for the
/// specified type.
- unsigned getABITypeAlignment(const Type *Ty) const;
+ unsigned getABITypeAlignment(Type *Ty) const;
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
@@ -244,17 +244,17 @@ public:
/// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
/// for the specified type when it is part of a call frame.
- unsigned getCallFrameTypeAlignment(const Type *Ty) const;
+ unsigned getCallFrameTypeAlignment(Type *Ty) const;
/// getPrefTypeAlignment - Return the preferred stack/global alignment for
/// the specified type. This is always at least as good as the ABI alignment.
- unsigned getPrefTypeAlignment(const Type *Ty) const;
+ unsigned getPrefTypeAlignment(Type *Ty) const;
/// getPreferredTypeAlignmentShift - Return the preferred alignment for the
/// specified type, returned as log2 of the value (a shift amount).
///
- unsigned getPreferredTypeAlignmentShift(const Type *Ty) const;
+ unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
/// getIntPtrType - Return an unsigned integer type that is the same size as
/// or greater than the host pointer size.
@@ -264,13 +264,13 @@ public:
/// getIndexedOffset - return the offset from the beginning of the type for
/// the specified indices. This is used to implement getelementptr.
///
- uint64_t getIndexedOffset(const Type *Ty,
+ uint64_t getIndexedOffset(Type *Ty,
Value* const* Indices, unsigned NumIndices) const;
/// getStructLayout - Return a StructLayout object, indicating the alignment
/// of the struct, its size, and the offsets of its fields. Note that this
/// information is lazily cached.
- const StructLayout *getStructLayout(const StructType *Ty) const;
+ const StructLayout *getStructLayout(StructType *Ty) const;
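A hedged sketch (TargetData *TD and LLVMContext &Ctx assumed; the i32 field's offset is typically 4 after padding, but is target-dependent):

  Type *Fields[] = { Type::getInt8Ty(Ctx), Type::getInt32Ty(Ctx) };
  StructType *ST = StructType::get(Ctx, Fields);
  const StructLayout *SL = TD->getStructLayout(ST);
  uint64_t Off = SL->getElementOffset(1);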
/// getPreferredAlignment - Return the preferred alignment of the specified
/// global. This includes an explicitly requested alignment (if the global
@@ -333,7 +333,7 @@ public:
private:
friend class TargetData; // Only TargetData can create this class
- StructLayout(const StructType *ST, const TargetData &TD);
+ StructLayout(StructType *ST, const TargetData &TD);
};
} // End llvm namespace
diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h
index ad8ac925e9..c44b9230c0 100644
--- a/include/llvm/Target/TargetIntrinsicInfo.h
+++ b/include/llvm/Target/TargetIntrinsicInfo.h
@@ -39,7 +39,7 @@ public:
/// intrinsic, Tys should point to an array of numTys pointers to Type,
/// and must provide exactly one type for each overloaded type in the
/// intrinsic.
- virtual std::string getName(unsigned IID, const Type **Tys = 0,
+ virtual std::string getName(unsigned IID, Type **Tys = 0,
unsigned numTys = 0) const = 0;
/// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
@@ -55,7 +55,7 @@ public:
/// Create or insert an LLVM Function declaration for an intrinsic,
/// and return it. The Tys and numTys are for intrinsics with overloaded
/// types. See above for more information.
- virtual Function *getDeclaration(Module *M, unsigned ID, const Type **Tys = 0,
+ virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
unsigned numTys = 0) const = 0;
};
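
A hypothetical call site for the two hooks above, assuming TII is a target's
TargetIntrinsicInfo, M is the current Module, Ctx an LLVMContext, and IID the
ID of an intrinsic overloaded on a single type:

    Type *Tys[] = { Type::getInt32Ty(Ctx) };
    std::string Name = TII.getName(IID, Tys, 1);          // mangled name
    Function *Decl   = TII.getDeclaration(M, IID, Tys, 1);

The overload arrays are now passed as 'Type**' rather than 'const Type**'.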
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 533c3ac878..8bb497ef05 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -501,7 +501,7 @@ public:
/// This is fixed by the LLVM operations except for the pointer size. If
/// AllowUnknown is true, this will return MVT::Other for types with no EVT
/// counterpart (e.g. structs), otherwise it will assert.
- EVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
+ EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
EVT VT = EVT::getEVT(Ty, AllowUnknown);
return VT == MVT::iPTR ? PointerTy : VT;
}
@@ -509,7 +509,7 @@ public:
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
- virtual unsigned getByValTypeAlignment(const Type *Ty) const;
+ virtual unsigned getByValTypeAlignment(Type *Ty) const;
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
@@ -1166,7 +1166,7 @@ public:
/// lowering.
struct ArgListEntry {
SDValue Node;
- const Type* Ty;
+ Type* Ty;
bool isSExt : 1;
bool isZExt : 1;
bool isInReg : 1;
@@ -1180,7 +1180,7 @@ public:
};
typedef std::vector<ArgListEntry> ArgListTy;
std::pair<SDValue, SDValue>
- LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
+ LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt,
bool isVarArg, bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
bool isReturnValueUsed, SDValue Callee, ArgListTy &Args,
@@ -1485,12 +1485,12 @@ public:
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
- virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
- virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const {
return false;
}
@@ -1506,7 +1506,7 @@ public:
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
  /// all instructions that define 32-bit values implicitly zero-extend the
/// result out to 64 bits.
- virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const {
+ virtual bool isZExtFree(Type *Ty1, Type *Ty2) const {
return false;
}
@@ -1963,7 +1963,7 @@ private:
/// GetReturnInfo - Given an LLVM IR type and return type attributes,
/// compute the return value EVTs and flags, and optionally also
/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(const Type* ReturnType, Attributes attr,
+void GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI,
SmallVectorImpl<uint64_t> *Offsets = 0);
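
A sketch of how a pass might consult these hooks after the change, assuming
TLI is the target's TargetLowering and Ctx an LLVMContext; the x86 answers
follow the comments above:

    EVT VT = TLI.getValueType(Type::getInt32Ty(Ctx));           // MVT::i32
    bool TruncFree = TLI.isTruncateFree(Type::getInt32Ty(Ctx),
                                        Type::getInt16Ty(Ctx)); // true on x86
    TargetLowering::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = 4;                                            // [reg + 4]
    bool Legal = TLI.isLegalAddressingMode(AM, Type::getInt8Ty(Ctx));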
diff --git a/include/llvm/Transforms/Utils/AddrModeMatcher.h b/include/llvm/Transforms/Utils/AddrModeMatcher.h
index 0678eccb5d..90485eb4c6 100644
--- a/include/llvm/Transforms/Utils/AddrModeMatcher.h
+++ b/include/llvm/Transforms/Utils/AddrModeMatcher.h
@@ -58,7 +58,7 @@ class AddressingModeMatcher {
/// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
/// the memory instruction that we're computing this address for.
- const Type *AccessTy;
+ Type *AccessTy;
Instruction *MemoryInst;
/// AddrMode - This is the addressing mode that we're building up. This is
@@ -71,7 +71,7 @@ class AddressingModeMatcher {
bool IgnoreProfitability;
AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
- const TargetLowering &T, const Type *AT,
+ const TargetLowering &T, Type *AT,
Instruction *MI, ExtAddrMode &AM)
: AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
IgnoreProfitability = false;
@@ -81,7 +81,7 @@ public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
- static ExtAddrMode Match(Value *V, const Type *AccessTy,
+ static ExtAddrMode Match(Value *V, Type *AccessTy,
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
const TargetLowering &TLI) {
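
For context, the matcher's entry point is used roughly as follows (mirroring
CodeGenPrepare); Addr (the address Value*), LI (a LoadInst) and TLI are
assumed to be in scope:

    SmallVector<Instruction*, 16> AddrModeInsts;
    ExtAddrMode AM = AddressingModeMatcher::Match(Addr, LI->getType(), LI,
                                                  AddrModeInsts, TLI);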
diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h
index 063d413989..064e5501a4 100644
--- a/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -39,7 +39,7 @@ private:
void *AV;
/// ProtoType holds the type of the values being rewritten.
- const Type *ProtoType;
+ Type *ProtoType;
// PHI nodes are given a name based on ProtoName.
std::string ProtoName;
@@ -56,7 +56,7 @@ public:
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates with type 'Ty'. PHI nodes get a name based on 'Name'.
- void Initialize(const Type *Ty, StringRef Name);
+ void Initialize(Type *Ty, StringRef Name);
/// AddAvailableValue - Indicate that a rewritten value is available at the
/// end of the specified block with the specified value.
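
A minimal sketch of the updater's protocol with the new signature, assuming
i32 definitions V1/V2 in blocks BB1/BB2 and a block MergeBB reachable from
both:

    SSAUpdater SSA;
    SSA.Initialize(Type::getInt32Ty(Ctx), "val");  // sets ProtoType/ProtoName
    SSA.AddAvailableValue(BB1, V1);
    SSA.AddAvailableValue(BB2, V2);
    Value *Live = SSA.GetValueInMiddleOfBlock(MergeBB); // builds PHIs on demand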
diff --git a/include/llvm/Type.h b/include/llvm/Type.h
index e4ff3e1c8d..43b7dc5788 100644
--- a/include/llvm/Type.h
+++ b/include/llvm/Type.h
@@ -193,7 +193,7 @@ public:
/// are valid for types of the same size only where no re-interpretation of
/// the bits is done.
/// @brief Determine if this type could be losslessly bitcast to Ty
- bool canLosslesslyBitCastTo(const Type *Ty) const;
+ bool canLosslesslyBitCastTo(Type *Ty) const;
/// isEmptyTy - Return true if this type is empty, that is, it has no
/// elements or all its elements are empty.
@@ -262,7 +262,7 @@ public:
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
- unsigned getScalarSizeInBits() const;
+ unsigned getScalarSizeInBits();
/// getFPMantissaWidth - Return the width of the mantissa of this type. This
/// is only valid on floating point types. If the FP type does not
@@ -271,7 +271,7 @@ public:
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return 'this'.
- const Type *getScalarType() const;
+ Type *getScalarType();
//===--------------------------------------------------------------------===//
// Type Iteration support.
@@ -342,7 +342,7 @@ public:
/// getPointerTo - Return a pointer to the current type. This is equivalent
/// to PointerType::get(Foo, AddrSpace).
- PointerType *getPointerTo(unsigned AddrSpace = 0) const;
+ PointerType *getPointerTo(unsigned AddrSpace = 0);
private:
/// isSizedDerivedType - Derived types like structures and arrays are sized
@@ -352,7 +352,7 @@ private:
};
// Printing of types.
-static inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
+static inline raw_ostream &operator<<(raw_ostream &OS, Type &T) {
T.print(OS);
return OS;
}
@@ -387,7 +387,7 @@ template <> struct GraphTraits<const Type*> {
typedef const Type NodeType;
typedef Type::subtype_iterator ChildIteratorType;
- static inline NodeType *getEntryNode(const Type *T) { return T; }
+ static inline NodeType *getEntryNode(NodeType *T) { return T; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->subtype_begin();
}
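
The de-constified query methods compose exactly as before; a sketch assuming
only an LLVMContext Ctx:

    Type *V4F32 = VectorType::get(Type::getFloatTy(Ctx), 4);
    Type *Elt   = V4F32->getScalarType();            // float
    unsigned Sz = V4F32->getScalarSizeInBits();      // 32
    PointerType *Ptr = V4F32->getPointerTo();        // <4 x float>*
    bool Lossless = V4F32->canLosslesslyBitCastTo(
        VectorType::get(Type::getInt32Ty(Ctx), 4));  // true: both 128 bits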
diff --git a/include/llvm/User.h b/include/llvm/User.h
index 3f9c28e7b3..62bc9f0346 100644
--- a/include/llvm/User.h
+++ b/include/llvm/User.h
@@ -47,7 +47,7 @@ protected:
unsigned NumOperands;
void *operator new(size_t s, unsigned Us);
- User(const Type *ty, unsigned vty, Use *OpList, unsigned NumOps)
+ User(Type *ty, unsigned vty, Use *OpList, unsigned NumOps)
: Value(ty, vty), OperandList(OpList), NumOperands(NumOps) {}
Use *allocHungoffUses(unsigned) const;
void dropHungoffUses() {
diff --git a/include/llvm/Value.h b/include/llvm/Value.h
index 08fa1c9034..9d274db1a8 100644
--- a/include/llvm/Value.h
+++ b/include/llvm/Value.h
@@ -91,7 +91,7 @@ protected:
/// printing behavior.
virtual void printCustom(raw_ostream &O) const;
- Value(const Type *Ty, unsigned scid);
+ Value(Type *Ty, unsigned scid);
public:
virtual ~Value();
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index c189a00429..bfa02e0e1f 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -341,7 +341,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
-uint64_t AliasAnalysis::getTypeStoreSize(const Type *Ty) {
+uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
}
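
The conservative fallback in one line, assuming AliasAnalysis &AA and an
LLVMContext Ctx:

    uint64_t Sz = AA.getTypeStoreSize(Type::getInt64Ty(Ctx));
    // 8 when TargetData is available, AliasAnalysis::UnknownSize otherwise.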
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 1afc1b71d9..37271b94a2 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -171,12 +171,12 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
uint64_t I1Size = AliasAnalysis::UnknownSize;
- const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
+ Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
uint64_t I2Size = AliasAnalysis::UnknownSize;
- const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
+      Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
@@ -207,7 +207,7 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
uint64_t Size = AliasAnalysis::UnknownSize;
- const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
+ Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(*C, *V, Size)) {
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 8330ea7c70..116076ce2a 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -100,7 +100,7 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
- const Type *AccessTy;
+ Type *AccessTy;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
@@ -317,7 +317,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
E = GEPOp->op_end(); I != E; ++I) {
Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
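
The struct-versus-sequential split above is the standard way a constant GEP
offset is accumulated. A trimmed sketch that mirrors (but does not copy)
DecomposeGEPExpression, assuming a GEPOperator *GEP with all-constant indices
and a TargetData TD:

    int64_t Offset = 0;
    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
         I != E; ++I, ++GTI) {
      ConstantInt *CI = cast<ConstantInt>(*I);       // constant by assumption
      if (StructType *STy = dyn_cast<StructType>(*GTI))  // field offset
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      else                                               // scaled array index
        Offset += TD.getTypeAllocSize(
                      cast<SequentialType>(*GTI)->getElementType()) *
                  CI->getSExtValue();
    }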
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 7fca17eb69..171f924f62 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -43,11 +43,11 @@ using namespace llvm;
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// TargetData. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
-static Constant *FoldBitCast(Constant *C, const Type *DestTy,
+static Constant *FoldBitCast(Constant *C, Type *DestTy,
const TargetData &TD) {
// This only handles casts to vectors currently.
- const VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
+ VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
@@ -69,8 +69,8 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
- const Type *SrcEltTy = CV->getType()->getElementType();
- const Type *DstEltTy = DestVTy->getElementType();
+ Type *SrcEltTy = CV->getType()->getElementType();
+ Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
@@ -85,7 +85,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
- const Type *DestIVTy =
+ Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
@@ -99,7 +99,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
- const Type *SrcIVTy =
+ Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
@@ -212,11 +212,11 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
- if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
+ if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
- const SequentialType *SQT = cast<SequentialType>(*GTI);
+ SequentialType *SQT = cast<SequentialType>(*GTI);
Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
}
}
@@ -354,8 +354,8 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const TargetData &TD) {
- const Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
- const IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
+ Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
+ IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
@@ -363,7 +363,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we don't end up creating
    // an actual new load.
- const Type *MapTy;
+ Type *MapTy;
if (LoadTy->isFloatTy())
MapTy = Type::getInt32PtrTy(C->getContext());
else if (LoadTy->isDoubleTy())
@@ -443,7 +443,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
std::string Str;
if (TD && GetConstantStringInfo(CE, Str) && !Str.empty()) {
unsigned StrLen = Str.length();
- const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
+ Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
// value.
@@ -478,7 +478,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
if (GlobalVariable *GV =
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
- const Type *ResTy = cast<PointerType>(C->getType())->getElementType();
+ Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(ResTy);
if (isa<UndefValue>(GV->getInitializer()))
@@ -537,10 +537,10 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly cast by the
/// getelementptr.
static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
- const Type *ResultTy,
+ Type *ResultTy,
const TargetData *TD) {
if (!TD) return 0;
- const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@@ -572,13 +572,13 @@ static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
- const Type *ResultTy,
+ Type *ResultTy,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;
- const Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -649,10 +649,10 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
- const Type *Ty = Ptr->getType();
+ Type *Ty = Ptr->getType();
SmallVector<Constant*, 32> NewIdxs;
do {
- if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
+ if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
if (ATy->isPointerTy()) {
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
@@ -665,7 +665,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
- const IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@@ -679,7 +679,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
}
Ty = ATy->getElementType();
- } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
// getZExtValue is at least as safe as the StructLayout API because we
// know the offset is within the struct at this point.
@@ -814,7 +814,7 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant* const* Ops, unsigned NumOps,
const TargetData *TD) {
// Handle easy binops first.
@@ -912,7 +912,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (TD && Ops1->isNullValue()) {
- const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
@@ -934,7 +934,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
- const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
@@ -987,7 +987,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
// addressing...
gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
for (++I; I != E; ++I)
- if (const StructType *STy = dyn_cast<StructType>(*I)) {
+ if (StructType *STy = dyn_cast<StructType>(*I)) {
ConstantInt *CU = cast<ConstantInt>(I.getOperand());
assert(CU->getZExtValue() < STy->getNumElements() &&
"Struct index out of range!");
@@ -1002,7 +1002,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
return 0;
}
} else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
if (CI->getZExtValue() >= ATy->getNumElements())
return 0;
if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
@@ -1013,7 +1013,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
C = UndefValue::get(ATy->getElementType());
else
return 0;
- } else if (const VectorType *VTy = dyn_cast<VectorType>(*I)) {
+ } else if (VectorType *VTy = dyn_cast<VectorType>(*I)) {
if (CI->getZExtValue() >= VTy->getNumElements())
return 0;
if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
@@ -1101,7 +1101,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
- const Type *Ty) {
+ Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
if (sys::llvm_fenv_testexcept()) {
@@ -1118,7 +1118,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
- double V, double W, const Type *Ty) {
+ double V, double W, Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V, W);
if (sys::llvm_fenv_testexcept()) {
@@ -1143,7 +1143,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
- const Type *Ty) {
+ Type *Ty) {
assert(Op && "Called with NULL operand");
APFloat Val(Op->getValueAPF());
@@ -1172,7 +1172,7 @@ llvm::ConstantFoldCall(Function *F,
if (!F->hasName()) return 0;
StringRef Name = F->getName();
- const Type *Ty = F->getReturnType();
+ Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
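
A hypothetical driver for the folding entry point above, assuming F is a
declaration of 'double cos(double)' and Ctx an LLVMContext:

    Constant *Op = ConstantFP::get(Type::getDoubleTy(Ctx), 0.0);
    if (Constant *C = ConstantFoldCall(F, &Op, 1))
      errs() << *C << "\n";  // ConstantFP 1.0: cos(0.0) folded via host libm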
diff --git a/lib/Analysis/IPA/FindUsedTypes.cpp b/lib/Analysis/IPA/FindUsedTypes.cpp
index 6535786668..e9df3ca010 100644
--- a/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -29,7 +29,7 @@ INITIALIZE_PASS(FindUsedTypes, "print-used-types",
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
//
-void FindUsedTypes::IncorporateType(const Type *Ty) {
+void FindUsedTypes::IncorporateType(Type *Ty) {
  // If Ty doesn't already exist in the used types map, add it now, otherwise
  // return.
  if (!UsedTypes.insert(Ty)) return;  // Already contains Ty.
@@ -94,7 +94,7 @@ bool FindUsedTypes::runOnModule(Module &m) {
//
void FindUsedTypes::print(raw_ostream &OS, const Module *M) const {
OS << "Types in use by this module:\n";
- for (SetVector<const Type *>::const_iterator I = UsedTypes.begin(),
+ for (SetVector<Type *>::const_iterator I = UsedTypes.begin(),
E = UsedTypes.end(); I != E; ++I) {
OS << " " << **I << '\n';
}
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 8709f6bf9d..135be6d8b1 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1372,7 +1372,7 @@ Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
}
-static const Type *GetCompareTy(Value *Op) {
+static Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
@@ -1413,8 +1413,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Pred = CmpInst::getSwappedPredicate(Pred);
}
- const Type *ITy = GetCompareTy(LHS); // The return type.
- const Type *OpTy = LHS->getType(); // The operand type.
+ Type *ITy = GetCompareTy(LHS); // The return type.
+ Type *OpTy = LHS->getType(); // The operand type.
// icmp X, X -> true/false
// X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
@@ -1593,8 +1593,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
Instruction *LI = cast<CastInst>(LHS);
Value *SrcOp = LI->getOperand(0);
- const Type *SrcTy = SrcOp->getType();
- const Type *DstTy = LI->getType();
+ Type *SrcTy = SrcOp->getType();
+ Type *DstTy = LI->getType();
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
@@ -2222,7 +2222,7 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
const TargetData *TD, const DominatorTree *) {
// The type of the GEP pointer operand.
- const PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
+ PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
// getelementptr P -> P.
if (NumOps == 1)
@@ -2230,9 +2230,9 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
if (isa<UndefValue>(Ops[0])) {
// Compute the (pointer) type returned by the GEP instruction.
- const Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
+ Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
NumOps-1);
- const Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
+ Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
return UndefValue::get(GEPTy);
}
@@ -2243,7 +2243,7 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
if (TD) {
- const Type *Ty = PtrTy->getElementType();
+ Type *Ty = PtrTy->getElementType();
if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
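
The 'getelementptr P, 0 -> P' family above can be exercised directly; a
sketch assuming a pointer-typed Value *P, a const TargetData *TD and an
LLVMContext Ctx:

    Value *Ops[] = { P, ConstantInt::get(Type::getInt64Ty(Ctx), 0) };
    Value *Simplified = SimplifyGEPInst(Ops, 2, TD, /*DT=*/0);
    // Simplified == P: a zero index (or a zero-sized pointee) folds away.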
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index 6e27597827..f80595c7db 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -630,7 +630,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (BB == &BB->getParent()->getEntryBlock()) {
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
if (NotNull) {
- const PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
} else {
Result.markOverdefined();
@@ -658,7 +658,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// If we previously determined that this is a pointer that can't be null
// then return that rather than giving up entirely.
if (NotNull) {
- const PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
}
@@ -728,7 +728,7 @@ bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,
ConstantRange LHSRange = LHSVal.getConstantRange();
ConstantRange RHSRange(1);
- const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
+ IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
if (isa<BinaryOperator>(BBI)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(BBI->getOperand(1))) {
RHSRange = ConstantRange(RHS->getValue());
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 89755da850..38d677d502 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -71,7 +71,7 @@ namespace {
void visitCallSite(CallSite CS);
void visitMemoryReference(Instruction &I, Value *Ptr,
uint64_t Size, unsigned Align,
- const Type *Ty, unsigned Flags);
+ Type *Ty, unsigned Flags);
void visitCallInst(CallInst &I);
void visitInvokeInst(InvokeInst &I);
@@ -201,7 +201,7 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: Caller and callee calling convention differ",
&I);
- const FunctionType *FT = F->getFunctionType();
+ FunctionType *FT = F->getFunctionType();
unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
Assert1(FT->isVarArg() ?
@@ -240,7 +240,7 @@ void Lint::visitCallSite(CallSite CS) {
// Check that an sret argument points to valid memory.
if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
TD ? TD->getABITypeAlignment(Ty) : 0,
@@ -364,7 +364,7 @@ void Lint::visitReturnInst(ReturnInst &I) {
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
Value *Ptr, uint64_t Size, unsigned Align,
- const Type *Ty, unsigned Flags) {
+ Type *Ty, unsigned Flags) {
// If no memory is being referenced, it doesn't matter if the pointer
// is valid.
if (Size == 0)
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index c5c676b526..1f554a3da2 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -90,7 +90,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
if (TD)
Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
- const Type *BaseType = 0;
+ Type *BaseType = 0;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
    // An alloca is safe to load from as long as it is suitably aligned.
@@ -114,7 +114,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
return true; // Loading directly from an alloca or global is OK.
// Check if the load is within the bounds of the underlying object.
- const PointerType *AddrTy = cast<PointerType>(V->getType());
+ PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
@@ -169,7 +169,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// If we're using alias analysis to disambiguate get the size of *Ptr.
uint64_t AccessSize = 0;
if (AA) {
- const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
+ Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
AccessSize = AA->getTypeStoreSize(AccessTy);
}
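
Callers use the predicate above when deciding whether to speculate a load; a
hedged sketch, assuming a LoadInst *LI and a const TargetData *TD:

    if (isSafeToLoadUnconditionally(LI->getPointerOperand(), LI,
                                    LI->getAlignment(), TD)) {
      // Safe to hoist: the pointer is known dereferenceable and aligned.
    }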
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 53d4304911..8d451c46f9 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -47,7 +47,7 @@ static bool isMallocCall(const CallInst *CI) {
// Check malloc prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
- const FunctionType *FTy = Callee->getFunctionType();
+ FunctionType *FTy = Callee->getFunctionType();
if (FTy->getNumParams() != 1)
return false;
return FTy->getParamType(0)->isIntegerTy(32) ||
@@ -94,12 +94,12 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
return NULL;
// The size of the malloc's result type must be known to determine array size.
- const Type *T = getMallocAllocatedType(CI);
+ Type *T = getMallocAllocatedType(CI);
if (!T || !T->isSized() || !TD)
return NULL;
unsigned ElementSize = TD->getTypeAllocSize(T);
- if (const StructType *ST = dyn_cast<StructType>(T))
+ if (StructType *ST = dyn_cast<StructType>(T))
ElementSize = TD->getStructLayout(ST)->getSizeInBytes();
  // If the malloc call's arg can be determined to be a multiple of ElementSize,
@@ -133,10 +133,10 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const PointerType *llvm::getMallocType(const CallInst *CI) {
+PointerType *llvm::getMallocType(const CallInst *CI) {
assert(isMalloc(CI) && "getMallocType and not malloc call");
- const PointerType *MallocType = NULL;
+ PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
// Determine if CallInst has a bitcast use.
@@ -164,8 +164,8 @@ const PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const Type *llvm::getMallocAllocatedType(const CallInst *CI) {
- const PointerType *PT = getMallocType(CI);
+Type *llvm::getMallocAllocatedType(const CallInst *CI) {
+ PointerType *PT = getMallocType(CI);
return PT ? PT->getElementType() : NULL;
}
@@ -201,7 +201,7 @@ const CallInst *llvm::isFreeCall(const Value *I) {
// Check free prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
- const FunctionType *FTy = Callee->getFunctionType();
+ FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
return 0;
if (FTy->getNumParams() != 1)
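
Putting the malloc-introspection helpers together, assuming CI is a call to
malloc whose only bitcast use casts the result to 'Foo*', and TD is a
TargetData*:

    PointerType *PT = getMallocType(CI);          // Foo*, from the bitcast use
    Type *AllocTy   = getMallocAllocatedType(CI); // Foo
    if (isArrayMalloc(CI, TD)) {
      // The size argument is a provable multiple of
      // TD->getTypeAllocSize(AllocTy).
    }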
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index bba4482f4d..34ba92509e 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -382,7 +382,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
- if (const IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI, TD))
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 025718e09f..05267d12d8 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -197,7 +197,7 @@ void SCEV::print(raw_ostream &OS) const {
}
case scUnknown: {
const SCEVUnknown *U = cast<SCEVUnknown>(this);
- const Type *AllocTy;
+ Type *AllocTy;
if (U->isSizeOf(AllocTy)) {
OS << "sizeof(" << *AllocTy << ")";
return;
@@ -207,7 +207,7 @@ void SCEV::print(raw_ostream &OS) const {
return;
}
- const Type *CTy;
+ Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo)) {
OS << "offsetof(" << *CTy << ", ";
@@ -228,7 +228,7 @@ void SCEV::print(raw_ostream &OS) const {
llvm_unreachable("Unknown SCEV kind!");
}
-const Type *SCEV::getType() const {
+Type *SCEV::getType() const {
switch (getSCEVType()) {
case scConstant:
return cast<SCEVConstant>(this)->getType();
@@ -297,17 +297,17 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
}
const SCEV *
-ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
- const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
+ IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
- unsigned SCEVTy, const SCEV *op, const Type *ty)
+ unsigned SCEVTy, const SCEV *op, Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -315,7 +315,7 @@ SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
}
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -323,7 +323,7 @@ SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
}
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -354,7 +354,7 @@ void SCEVUnknown::allUsesReplacedWith(Value *New) {
setValPtr(New);
}
-bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
+bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@@ -371,15 +371,15 @@ bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
return false;
}
-bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
+bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
CE->getOperand(0)->isNullValue()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked() &&
CE->getNumOperands() == 3 &&
CE->getOperand(1)->isNullValue()) {
@@ -396,7 +396,7 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
return false;
}
-bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@@ -404,7 +404,7 @@ bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
CE->getNumOperands() == 3 &&
CE->getOperand(0)->isNullValue() &&
CE->getOperand(1)->isNullValue()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
// Ignore vector types here so that ScalarEvolutionExpander doesn't
// emit getelementptrs that index into vectors.
@@ -652,7 +652,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
ScalarEvolution &SE,
- const Type* ResultTy) {
+ Type* ResultTy) {
// Handle the simplest case efficiently.
if (K == 1)
return SE.getTruncateOrZeroExtend(It, ResultTy);
@@ -742,7 +742,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
MultiplyFactor = MultiplyFactor.trunc(W);
// Calculate the product, at width T+W
- const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
+ IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
@@ -790,7 +790,7 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
@@ -877,7 +877,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
}
const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -954,7 +954,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, ZMul);
@@ -1062,7 +1062,7 @@ static const SCEV *getOverflowLimitForStep(const SCEV *Step,
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)"
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution *SE) {
const Loop *L = AR->getLoop();
const SCEV *Start = AR->getStart();
@@ -1086,7 +1086,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// 2. Direct overflow check on the step operation's expression.
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
- const Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
SE->getSignExtendExpr(Step, WideTy));
@@ -1112,7 +1112,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution *SE) {
const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
if (!PreStart)
@@ -1123,7 +1123,7 @@ static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
}
const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1208,7 +1208,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, SMul);
@@ -1275,7 +1275,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1438,7 +1438,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
@@ -1488,7 +1488,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
// sorted the list, these values are required to be adjacent.
- const Type *Ty = Ops[0]->getType();
+ Type *Ty = Ops[0]->getType();
bool FoundMatch = false;
for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
@@ -1515,8 +1515,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// if the contents of the resulting outer trunc fold to something simple.
for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
- const Type *DstType = Trunc->getType();
- const Type *SrcType = Trunc->getOperand()->getType();
+ Type *DstType = Trunc->getType();
+ Type *SrcType = Trunc->getOperand()->getType();
SmallVector<const SCEV *, 8> LargeOps;
bool Ok = true;
// Check all the operands to see if they can be represented in the
@@ -1809,7 +1809,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVMulExpr operand types don't match!");
@@ -2042,14 +2042,14 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// Determine if the division can be folded into the operands of
// its operands.
// TODO: Generalize this to non-constants by using known-bits information.
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
// For non-power-of-two values, effectively round the value up to the
// nearest power of two.
if (!RHSC->getValue()->getValue().isPowerOf2())
++MaxShiftAmt;
- const IntegerType *ExtTy =
+ IntegerType *ExtTy =
IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
// {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
@@ -2151,7 +2151,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
@@ -2269,7 +2269,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty smax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVSMaxExpr operand types don't match!");
@@ -2373,7 +2373,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty umax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVUMaxExpr operand types don't match!");
@@ -2476,7 +2476,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
@@ -2488,20 +2488,20 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
@@ -2514,17 +2514,17 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2558,14 +2558,14 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
-bool ScalarEvolution::isSCEVable(const Type *Ty) const {
+bool ScalarEvolution::isSCEVable(Type *Ty) const {
// Integers and pointers are always SCEVable.
return Ty->isIntegerTy() || Ty->isPointerTy();
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
-uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
+uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a TargetData, use it!
@@ -2586,7 +2586,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
-const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
+Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
if (Ty->isIntegerTy())
@@ -2628,7 +2628,7 @@ const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
return getMulExpr(V,
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
@@ -2640,7 +2640,7 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
const SCEV *AllOnes =
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
@@ -2664,8 +2664,8 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
-ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -2681,8 +2681,8 @@ ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
- const Type *Ty) {
- const Type *SrcTy = V->getType();
+ Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -2697,8 +2697,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
/// input value to the specified type. If the type must be extended, it is zero
/// extended. The conversion must not be narrowing.
const SCEV *
-ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or zero extend with non-integer arguments!");
@@ -2713,8 +2713,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
/// input value to the specified type. If the type must be extended, it is sign
/// extended. The conversion must not be narrowing.
const SCEV *
-ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or sign extend with non-integer arguments!");
@@ -2730,8 +2730,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
-ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or any extend with non-integer arguments!");
@@ -2745,8 +2745,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be widening.
const SCEV *
-ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or noop with non-integer arguments!");
@@ -3032,7 +3032,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// context.
bool isInBounds = GEP->isInBounds();
- const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
+ Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
@@ -3044,7 +3044,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
I != E; ++I) {
Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
@@ -3244,7 +3244,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
// TODO: non-affine addrec
if (AddRec->isAffine()) {
- const Type *Ty = AddRec->getType();
+ Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
@@ -3396,7 +3396,7 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
// TODO: non-affine addrec
if (AddRec->isAffine()) {
- const Type *Ty = AddRec->getType();
+ Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
@@ -3601,9 +3601,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
LCI->getValue() == CI->getValue())
if (const SCEVZeroExtendExpr *Z =
dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
- const Type *UTy = U->getType();
+ Type *UTy = U->getType();
const SCEV *Z0 = Z->getOperand();
- const Type *Z0Ty = Z0->getType();
+ Type *Z0Ty = Z0->getType();
unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
// If C is a low-bits mask, the zero extend is serving to
@@ -4321,10 +4321,10 @@ GetAddressedElementFromGlobal(GlobalVariable *GV,
if (Idx >= CA->getNumOperands()) return 0; // Bogus program
Init = cast<Constant>(CA->getOperand(Idx));
} else if (isa<ConstantAggregateZero>(Init)) {
- if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
assert(Idx < STy->getNumElements() && "Bad struct index!");
Init = Constant::getNullValue(STy->getElementType(Idx));
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
if (Idx >= ATy->getNumElements()) return 0; // Bogus program
Init = Constant::getNullValue(ATy->getElementType());
} else {
@@ -5741,7 +5741,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
assert(!isKnownNegative(Step) &&
"This code doesn't handle negative strides yet!");
- const Type *Ty = Start->getType();
+ Type *Ty = Start->getType();
// When Start == End, we have an exact BECount == 0. Short-circuit this case
// here because SCEV may not be able to determine that the unsigned division
@@ -5760,7 +5760,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
if (!NoWrap) {
// Check Add for unsigned overflow.
// TODO: More sophisticated things could be done here.
- const Type *WideTy = IntegerType::get(getContext(),
+ Type *WideTy = IntegerType::get(getContext(),
getTypeSizeInBits(Ty) + 1);
const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
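
A minimal sketch of how the de-constified ScalarEvolution helpers read at a
call site; the wrapper function is invented for illustration and is not part
of the patch:

    // Widen a value's SCEV to i64. Per the doc comments above,
    // getNoopOrSignExtend() asserts if the conversion would narrow.
    const SCEV *widenToI64(ScalarEvolution &SE, Value *V) {
      const SCEV *S = SE.getSCEV(V);
      Type *I64 = IntegerType::get(SE.getContext(), 64); // plain Type* now
      return SE.getNoopOrSignExtend(S, I64);
    }
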
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index befe6d2599..1904bdc586 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
-Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
+Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
// Check to see if there is already a cast!
@@ -62,7 +62,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
-Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
+Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
assert((Op == Instruction::BitCast ||
Op == Instruction::PtrToInt ||
@@ -277,7 +277,7 @@ static bool FactorOutConstant(const SCEV *&S,
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution &SE) {
unsigned NumAddRecs = 0;
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
@@ -306,7 +306,7 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution &SE) {
// Find the addrecs.
SmallVector<const SCEV *, 8> AddRecs;
@@ -365,10 +365,10 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
- const PointerType *PTy,
- const Type *Ty,
+ PointerType *PTy,
+ Type *Ty,
Value *V) {
- const Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getElementType();
SmallVector<Value *, 4> GepIndices;
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
@@ -423,7 +423,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
GepIndices.push_back(Scaled);
// Collect struct field index operands.
- while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ while (StructType *STy = dyn_cast<StructType>(ElTy)) {
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
@@ -451,7 +451,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
- const Type *CTy;
+ Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
GepIndices.push_back(FieldNo);
@@ -474,7 +474,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
- if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
else
break;
@@ -691,7 +691,7 @@ public:
}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the add operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal, and
@@ -717,7 +717,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// This is the first operand. Just expand it.
Sum = expand(Op);
++I;
- } else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
+ } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
@@ -731,7 +731,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
NewOps.push_back(X);
}
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
- } else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
+ } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
// Try to form a getelementptr. If the running sum is instructions,
// use a SCEVUnknown to avoid re-analyzing them.
@@ -762,7 +762,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the mul operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal.
@@ -804,7 +804,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *LHS = expandCodeFor(S->getLHS(), Ty);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
@@ -847,8 +847,8 @@ static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
- const Type *ExpandTy,
- const Type *IntTy) {
+ Type *ExpandTy,
+ Type *IntTy) {
assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
// Reuse a previously-inserted PHI, if present.
@@ -969,7 +969,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
Value *IncV;
// If the PHI is a pointer, use a GEP, otherwise use an add or sub.
if (isPointer) {
- const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+ PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
// If the step isn't constant, don't use an implicitly scaled GEP, because
// that would require a multiply inside the loop.
if (!isa<ConstantInt>(StepV))
@@ -1001,8 +1001,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
- const Type *STy = S->getType();
- const Type *IntTy = SE.getEffectiveSCEVType(STy);
+ Type *STy = S->getType();
+ Type *IntTy = SE.getEffectiveSCEVType(STy);
const Loop *L = S->getLoop();
// Determine a normalized form of this expression, which is the expression
@@ -1045,7 +1045,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
- const Type *ExpandTy = PostLoopScale ? IntTy : STy;
+ Type *ExpandTy = PostLoopScale ? IntTy : STy;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
// Accommodate post-inc mode, if necessary.
@@ -1069,7 +1069,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
- if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
+ if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
@@ -1086,7 +1086,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (!CanonicalMode) return expandAddRecExprLiterally(S);
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
@@ -1132,7 +1132,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// Dig into the expression to find the pointer base for a GEP.
ExposePointerBase(Base, RestArray[0], SE);
// If we found a pointer, expand the AddRec with a GEP.
- if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
+ if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
// Make sure the Base isn't something exotic, such as a multiplied
// or divided pointer value. In those cases, the result type isn't
// actually a pointer type.
@@ -1216,7 +1216,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateTrunc(V, Ty, "tmp");
@@ -1225,7 +1225,7 @@ Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateZExt(V, Ty, "tmp");
@@ -1234,7 +1234,7 @@ Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateSExt(V, Ty, "tmp");
@@ -1244,7 +1244,7 @@ Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@@ -1268,7 +1268,7 @@ Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@@ -1290,7 +1290,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
Instruction *I) {
BasicBlock::iterator IP = I;
while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
@@ -1299,7 +1299,7 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
return expandCodeFor(SH, Ty);
}
-Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
if (Ty) {
@@ -1384,7 +1384,7 @@ void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
- const Type *Ty) {
+ Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
// Build a SCEV for {0,+,1}<L>.
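
A sketch of the updated SCEVExpander entry points; it assumes the caller
already owns an expander, and both wrappers are hypothetical:

    // Materialize S as IR before IP, coerced to Ty (Ty may be null to
    // accept whatever type expansion produces).
    Value *emitAt(SCEVExpander &Exp, const SCEV *S, Type *Ty, Instruction *IP) {
      return Exp.expandCodeFor(S, Ty, IP);
    }
    // Fetch (or create) the {0,+,1}<L> counter; Ty must be an integer type.
    PHINode *counterFor(SCEVExpander &Exp, const Loop *L, Type *Ty) {
      return Exp.getOrInsertCanonicalInductionVariable(L, Ty);
    }
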
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 455c91077d..3662582b67 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -34,7 +34,7 @@ const unsigned MaxDepth = 6;
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(const Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -103,7 +103,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
- const Type *ObjectType = GV->getType()->getElementType();
+ Type *ObjectType = GV->getType()->getElementType();
// If the object is defined in the current Module, we'll be giving
// it the preferred alignment. Otherwise, we have to assume that it
// may only have the minimum ABI alignment.
@@ -268,7 +268,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// FALL THROUGH and handle them the same as zext/trunc.
case Instruction::ZExt:
case Instruction::Trunc: {
- const Type *SrcTy = I->getOperand(0)->getType();
+ Type *SrcTy = I->getOperand(0)->getType();
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
@@ -291,7 +291,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::BitCast: {
- const Type *SrcTy = I->getOperand(0)->getType();
+ Type *SrcTy = I->getOperand(0)->getType();
if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
@@ -559,7 +559,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
Value *Index = I->getOperand(i);
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
if (!TD) return;
const StructLayout *SL = TD->getStructLayout(STy);
@@ -569,7 +569,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
CountTrailingZeros_64(Offset));
} else {
// Handle array index arithmetic.
- const Type *IndexedTy = GTI.getIndexedType();
+ Type *IndexedTy = GTI.getIndexedType();
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
@@ -898,7 +898,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
"ComputeNumSignBits requires a TargetData object to operate "
"on non-integer values!");
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
Ty->getScalarSizeInBits();
unsigned Tmp, Tmp2;
@@ -1078,7 +1078,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");
- const Type *T = V->getType();
+ Type *T = V->getType();
ConstantInt *CI = dyn_cast<ConstantInt>(V);
@@ -1315,11 +1315,11 @@ Value *llvm::isBytewiseValue(Value *V) {
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
-static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
+static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
Instruction *InsertBefore) {
- const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
+ llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
// Save the original To argument so we can modify it
Value *OrigTo = To;
@@ -1378,7 +1378,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
Instruction *InsertBefore) {
assert(InsertBefore && "Must have someplace to insert!");
- const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
+ Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
idx_range);
Value *To = UndefValue::get(IndexedType);
SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
@@ -1404,7 +1404,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
&& "Not looking at a struct or array?");
assert(ExtractValueInst::getIndexedType(V->getType(), idx_range)
&& "Invalid indices for type?");
- const CompositeType *PTy = cast<CompositeType>(V->getType());
+ CompositeType *PTy = cast<CompositeType>(V->getType());
if (isa<UndefValue>(V))
return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
@@ -1506,7 +1506,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
if (OpC->isZero()) continue;
// Handle a struct and array indices which add their offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -1557,8 +1557,8 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
return false;
// Make sure the index-ee is a pointer to array of i8.
- const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
- const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
+ PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
+ ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
return false;
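
A sketch of GetPointerBaseWithConstantOffset() as seen from a client; `Ptr`
and `TD` are assumed to be in scope:

    // Strip constant GEPs and bitcasts, accumulating the byte offset.
    // TargetData is required because struct layouts are target-specific.
    int64_t Offset = 0;
    Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD);
    // Base == Ptr and Offset == 0 when nothing could be stripped.
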
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index cfc31f3db8..baf1bc9ae8 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -26,7 +26,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-static std::string getTypeString(const Type *T) {
+static std::string getTypeString(Type *T) {
std::string Result;
raw_string_ostream Tmp(Result);
Tmp << *T;
@@ -744,9 +744,9 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
/// GetGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
-GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
+GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
LocTy Loc) {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ PointerType *PTy = dyn_cast<PointerType>(Ty);
if (PTy == 0) {
Error(Loc, "global variable reference must have pointer type");
return 0;
@@ -775,7 +775,7 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
// Otherwise, create a new forward reference for this value and remember it.
GlobalValue *FwdVal;
- if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
+ if (FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
@@ -785,8 +785,8 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
return FwdVal;
}
-GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
+ PointerType *PTy = dyn_cast<PointerType>(Ty);
if (PTy == 0) {
Error(Loc, "global variable reference must have pointer type");
return 0;
@@ -813,7 +813,7 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {
// Otherwise, create a new forward reference for this value and remember it.
GlobalValue *FwdVal;
- if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
+ if (FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, "", M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
@@ -1668,7 +1668,7 @@ bool LLParser::PerFunctionState::FinishFunction() {
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
- const Type *Ty, LocTy Loc) {
+ Type *Ty, LocTy Loc) {
// Look this name up in the normal function symbol table.
Value *Val = F.getValueSymbolTable().lookup(Name);
@@ -1709,7 +1709,7 @@ Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
return FwdVal;
}
-Value *LLParser::PerFunctionState::GetVal(unsigned ID, const Type *Ty,
+Value *LLParser::PerFunctionState::GetVal(unsigned ID, Type *Ty,
LocTy Loc) {
// Look this name up in the normal function symbol table.
Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
@@ -2323,7 +2323,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
}
/// ParseGlobalValue - Parse a global value with the specified type.
-bool LLParser::ParseGlobalValue(const Type *Ty, Constant *&C) {
+bool LLParser::ParseGlobalValue(Type *Ty, Constant *&C) {
C = 0;
ValID ID;
Value *V = NULL;
@@ -2410,7 +2410,7 @@ bool LLParser::ParseMetadataValue(ValID &ID, PerFunctionState *PFS) {
// Function Parsing.
//===----------------------------------------------------------------------===//
-bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
+bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS) {
if (Ty->isFunctionTy())
return Error(ID.Loc, "functions are not values, refer to them as pointers");
@@ -2426,8 +2426,8 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
V = PFS->GetVal(ID.StrVal, Ty, ID.Loc);
return (V == 0);
case ValID::t_InlineAsm: {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
- const FunctionType *FTy =
+ PointerType *PTy = dyn_cast<PointerType>(Ty);
+ FunctionType *FTy =
PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
return Error(ID.Loc, "invalid type for inline asm constraint string");
@@ -2506,7 +2506,7 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
return false;
case ValID::t_ConstantStruct:
case ValID::t_PackedConstantStruct:
- if (const StructType *ST = dyn_cast<StructType>(Ty)) {
+ if (StructType *ST = dyn_cast<StructType>(Ty)) {
if (ST->getNumElements() != ID.UIntVal)
return Error(ID.Loc,
"initializer with struct type has wrong # elements");
@@ -2527,7 +2527,7 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
}
}
-bool LLParser::ParseValue(const Type *Ty, Value *&V, PerFunctionState *PFS) {
+bool LLParser::ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS) {
V = 0;
ValID ID;
return ParseValID(ID, PFS) ||
@@ -2671,9 +2671,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
return Error(RetTypeLoc, "functions with 'sret' argument must return void");
- const FunctionType *FT =
+ FunctionType *FT =
FunctionType::get(RetType, ParamTypeList, isVarArg);
- const PointerType *PFT = PointerType::getUnqual(FT);
+ PointerType *PFT = PointerType::getUnqual(FT);
Fn = 0;
if (!FunctionName.empty()) {
@@ -3162,8 +3162,8 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
- const PointerType *PFTy = 0;
- const FunctionType *Ty = 0;
+ PointerType *PFTy = 0;
+ FunctionType *Ty = 0;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@@ -3194,7 +3194,7 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- const Type *ExpectedTy = 0;
+ Type *ExpectedTy = 0;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
@@ -3498,8 +3498,8 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
- const PointerType *PFTy = 0;
- const FunctionType *Ty = 0;
+ PointerType *PFTy = 0;
+ FunctionType *Ty = 0;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@@ -3530,7 +3530,7 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- const Type *ExpectedTy = 0;
+ Type *ExpectedTy = 0;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
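
The short call syntax above infers the callee's type from the arguments that
are actually present; a reduced sketch of that inference (hypothetical code,
following the parser's ArgList convention):

    // Pull the types out of the parsed argument values and build the
    // FunctionType the short syntax left implicit.
    std::vector<Type*> ParamTys;
    for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
      ParamTys.push_back(ArgList[i].V->getType());
    FunctionType *Ty = FunctionType::get(RetType, ParamTys, /*isVarArg=*/false);
    PointerType *PFTy = PointerType::getUnqual(Ty);
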
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index 9630657850..41e42cad19 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -142,8 +142,8 @@ namespace llvm {
/// GetGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
- GlobalValue *GetGlobalVal(const std::string &N, const Type *Ty, LocTy Loc);
- GlobalValue *GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc);
+ GlobalValue *GetGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
+ GlobalValue *GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc);
// Helper Routines.
bool ParseToken(lltok::Kind T, const char *ErrMsg);
@@ -249,8 +249,8 @@ namespace llvm {
/// GetVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
- Value *GetVal(const std::string &Name, const Type *Ty, LocTy Loc);
- Value *GetVal(unsigned ID, const Type *Ty, LocTy Loc);
+ Value *GetVal(const std::string &Name, Type *Ty, LocTy Loc);
+ Value *GetVal(unsigned ID, Type *Ty, LocTy Loc);
/// SetInstName - After an instruction is parsed and inserted into its
/// basic block, this installs its name.
@@ -269,14 +269,14 @@ namespace llvm {
BasicBlock *DefineBB(const std::string &Name, LocTy Loc);
};
- bool ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
+ bool ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS);
- bool ParseValue(const Type *Ty, Value *&V, PerFunctionState *PFS);
- bool ParseValue(const Type *Ty, Value *&V, PerFunctionState &PFS) {
+ bool ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
+ bool ParseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
return ParseValue(Ty, V, &PFS);
}
- bool ParseValue(const Type *Ty, Value *&V, LocTy &Loc,
+ bool ParseValue(Type *Ty, Value *&V, LocTy &Loc,
PerFunctionState &PFS) {
Loc = Lex.getLoc();
return ParseValue(Ty, V, &PFS);
@@ -310,7 +310,7 @@ namespace llvm {
// Constant Parsing.
bool ParseValID(ValID &ID, PerFunctionState *PFS = NULL);
- bool ParseGlobalValue(const Type *Ty, Constant *&V);
+ bool ParseGlobalValue(Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
bool ParseMetadataListValue(ValID &ID, PerFunctionState *PFS);
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 24c29941cf..e49cecf3ba 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -107,7 +107,7 @@ static int GetDecodedCastOpcode(unsigned Val) {
case bitc::CAST_BITCAST : return Instruction::BitCast;
}
}
-static int GetDecodedBinaryOpcode(unsigned Val, const Type *Ty) {
+static int GetDecodedBinaryOpcode(unsigned Val, Type *Ty) {
switch (Val) {
default: return -1;
case bitc::BINOP_ADD:
@@ -142,7 +142,7 @@ namespace {
void *operator new(size_t s) {
return User::operator new(s, 1);
}
- explicit ConstantPlaceHolder(const Type *Ty, LLVMContext& Context)
+ explicit ConstantPlaceHolder(Type *Ty, LLVMContext& Context)
: ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
}
@@ -198,7 +198,7 @@ void BitcodeReaderValueList::AssignValue(Value *V, unsigned Idx) {
Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx,
- const Type *Ty) {
+ Type *Ty) {
if (Idx >= size())
resize(Idx + 1);
@@ -213,7 +213,7 @@ Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx,
return C;
}
-Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, const Type *Ty) {
+Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
if (Idx >= size())
resize(Idx + 1);
@@ -1063,7 +1063,7 @@ bool BitcodeReader::ParseMetadata() {
unsigned Size = Record.size();
SmallVector<Value*, 8> Elts;
for (unsigned i = 0; i != Size; i += 2) {
- const Type *Ty = getTypeByID(Record[i]);
+ Type *Ty = getTypeByID(Record[i]);
if (!Ty) return Error("Invalid METADATA_NODE record");
if (Ty->isMetadataTy())
Elts.push_back(MDValueList.getValueFwdRef(Record[i+1]));
@@ -1163,7 +1163,7 @@ bool BitcodeReader::ParseConstants() {
SmallVector<uint64_t, 64> Record;
// Read all the records for this value table.
- const Type *CurTy = Type::getInt32Ty(Context);
+ Type *CurTy = Type::getInt32Ty(Context);
unsigned NextCstNo = ValueList.size();
while (1) {
unsigned Code = Stream.ReadCode();
@@ -1250,18 +1250,18 @@ bool BitcodeReader::ParseConstants() {
unsigned Size = Record.size();
std::vector<Constant*> Elts;
- if (const StructType *STy = dyn_cast<StructType>(CurTy)) {
+ if (StructType *STy = dyn_cast<StructType>(CurTy)) {
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i],
STy->getElementType(i)));
V = ConstantStruct::get(STy, Elts);
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
- const Type *EltTy = ATy->getElementType();
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
+ Type *EltTy = ATy->getElementType();
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
V = ConstantArray::get(ATy, Elts);
- } else if (const VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
- const Type *EltTy = VTy->getElementType();
+ } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
+ Type *EltTy = VTy->getElementType();
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
V = ConstantVector::get(Elts);
@@ -1274,8 +1274,8 @@ bool BitcodeReader::ParseConstants() {
if (Record.empty())
return Error("Invalid CST_AGGREGATE record");
- const ArrayType *ATy = cast<ArrayType>(CurTy);
- const Type *EltTy = ATy->getElementType();
+ ArrayType *ATy = cast<ArrayType>(CurTy);
+ Type *EltTy = ATy->getElementType();
unsigned Size = Record.size();
std::vector<Constant*> Elts;
@@ -1288,8 +1288,8 @@ bool BitcodeReader::ParseConstants() {
if (Record.empty())
return Error("Invalid CST_AGGREGATE record");
- const ArrayType *ATy = cast<ArrayType>(CurTy);
- const Type *EltTy = ATy->getElementType();
+ ArrayType *ATy = cast<ArrayType>(CurTy);
+ Type *EltTy = ATy->getElementType();
unsigned Size = Record.size();
std::vector<Constant*> Elts;
@@ -1335,7 +1335,7 @@ bool BitcodeReader::ParseConstants() {
if (Opc < 0) {
V = UndefValue::get(CurTy); // Unknown cast.
} else {
- const Type *OpTy = getTypeByID(Record[1]);
+ Type *OpTy = getTypeByID(Record[1]);
if (!OpTy) return Error("Invalid CE_CAST record");
Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
V = ConstantExpr::getCast(Opc, Op, CurTy);
@@ -1347,7 +1347,7 @@ bool BitcodeReader::ParseConstants() {
if (Record.size() & 1) return Error("Invalid CE_GEP record");
SmallVector<Constant*, 16> Elts;
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- const Type *ElTy = getTypeByID(Record[i]);
+ Type *ElTy = getTypeByID(Record[i]);
if (!ElTy) return Error("Invalid CE_GEP record");
Elts.push_back(ValueList.getConstantFwdRef(Record[i+1], ElTy));
}
@@ -1368,7 +1368,7 @@ bool BitcodeReader::ParseConstants() {
break;
case bitc::CST_CODE_CE_EXTRACTELT: { // CE_EXTRACTELT: [opty, opval, opval]
if (Record.size() < 3) return Error("Invalid CE_EXTRACTELT record");
- const VectorType *OpTy =
+ VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (OpTy == 0) return Error("Invalid CE_EXTRACTELT record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
@@ -1377,7 +1377,7 @@ bool BitcodeReader::ParseConstants() {
break;
}
case bitc::CST_CODE_CE_INSERTELT: { // CE_INSERTELT: [opval, opval, opval]
- const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+ VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || OpTy == 0)
return Error("Invalid CE_INSERTELT record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
@@ -1388,26 +1388,26 @@ bool BitcodeReader::ParseConstants() {
break;
}
case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
- const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+ VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || OpTy == 0)
return Error("Invalid CE_SHUFFLEVEC record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
- const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
+ Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
OpTy->getNumElements());
Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy);
V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
break;
}
case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
- const VectorType *RTy = dyn_cast<VectorType>(CurTy);
- const VectorType *OpTy =
+ VectorType *RTy = dyn_cast<VectorType>(CurTy);
+ VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (Record.size() < 4 || RTy == 0 || OpTy == 0)
return Error("Invalid CE_SHUFVEC_EX record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
- const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
+ Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
RTy->getNumElements());
Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy);
V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
@@ -1415,7 +1415,7 @@ bool BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred]
if (Record.size() < 4) return Error("Invalid CE_CMP record");
- const Type *OpTy = getTypeByID(Record[0]);
+ Type *OpTy = getTypeByID(Record[0]);
if (OpTy == 0) return Error("Invalid CE_CMP record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
@@ -1442,14 +1442,14 @@ bool BitcodeReader::ParseConstants() {
AsmStr += (char)Record[2+i];
for (unsigned i = 0; i != ConstStrSize; ++i)
ConstrStr += (char)Record[3+AsmStrSize+i];
- const PointerType *PTy = cast<PointerType>(CurTy);
+ PointerType *PTy = cast<PointerType>(CurTy);
V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
break;
}
case bitc::CST_CODE_BLOCKADDRESS:{
if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record");
- const Type *FnTy = getTypeByID(Record[0]);
+ Type *FnTy = getTypeByID(Record[0]);
if (FnTy == 0) return Error("Invalid CE_BLOCKADDRESS record");
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
@@ -1662,7 +1662,7 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_GLOBALVAR: {
if (Record.size() < 6)
return Error("Invalid MODULE_CODE_GLOBALVAR record");
- const Type *Ty = getTypeByID(Record[0]);
+ Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_CODE_GLOBALVAR record");
if (!Ty->isPointerTy())
return Error("Global not a pointer type!");
@@ -1711,11 +1711,11 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_FUNCTION: {
if (Record.size() < 8)
return Error("Invalid MODULE_CODE_FUNCTION record");
- const Type *Ty = getTypeByID(Record[0]);
+ Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_CODE_FUNCTION record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
- const FunctionType *FTy =
+ FunctionType *FTy =
dyn_cast<FunctionType>(cast<PointerType>(Ty)->getElementType());
if (!FTy)
return Error("Function not a pointer to function type!");
@@ -1757,7 +1757,7 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_ALIAS: {
if (Record.size() < 3)
return Error("Invalid MODULE_ALIAS record");
- const Type *Ty = getTypeByID(Record[0]);
+ Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_ALIAS record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
@@ -2160,7 +2160,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
OpNum+2 != Record.size())
return Error("Invalid CAST record");
- const Type *ResTy = getTypeByID(Record[OpNum]);
+ Type *ResTy = getTypeByID(Record[OpNum]);
int Opc = GetDecodedCastOpcode(Record[OpNum+1]);
if (Opc == -1 || ResTy == 0)
return Error("Invalid CAST record");
@@ -2261,8 +2261,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid SELECT record");
// select condition can be either i1 or <N x i1>
- if (const VectorType* vector_type =
- dyn_cast<const VectorType>(Cond->getType())) {
+ if (VectorType* vector_type =
+ dyn_cast<VectorType>(Cond->getType())) {
// expect <n x i1>
if (vector_type->getElementType() != Type::getInt1Ty(Context))
return Error("Invalid SELECT condition type");
@@ -2381,7 +2381,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
if (Record.size() < 3 || (Record.size() & 1) == 0)
return Error("Invalid SWITCH record");
- const Type *OpTy = getTypeByID(Record[0]);
+ Type *OpTy = getTypeByID(Record[0]);
Value *Cond = getFnValueByID(Record[1], OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
if (OpTy == 0 || Cond == 0 || Default == 0)
@@ -2405,7 +2405,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
if (Record.size() < 2)
return Error("Invalid INDIRECTBR record");
- const Type *OpTy = getTypeByID(Record[0]);
+ Type *OpTy = getTypeByID(Record[0]);
Value *Address = getFnValueByID(Record[1], OpTy);
if (OpTy == 0 || Address == 0)
return Error("Invalid INDIRECTBR record");
@@ -2437,8 +2437,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
return Error("Invalid INVOKE record");
- const PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
- const FunctionType *FTy = !CalleeTy ? 0 :
+ PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
+ FunctionType *FTy = !CalleeTy ? 0 :
dyn_cast<FunctionType>(CalleeTy->getElementType());
// Check that the right number of fixed parameters are here.
@@ -2483,7 +2483,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
if (Record.size() < 1 || ((Record.size()-1)&1))
return Error("Invalid PHI record");
- const Type *Ty = getTypeByID(Record[0]);
+ Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid PHI record");
PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2);
@@ -2502,9 +2502,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
if (Record.size() != 4)
return Error("Invalid ALLOCA record");
- const PointerType *Ty =
+ PointerType *Ty =
dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
- const Type *OpTy = getTypeByID(Record[1]);
+ Type *OpTy = getTypeByID(Record[1]);
Value *Size = getFnValueByID(Record[2], OpTy);
unsigned Align = Record[3];
if (!Ty || !Size) return Error("Invalid ALLOCA record");
@@ -2549,8 +2549,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
return Error("Invalid CALL record");
- const PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
- const FunctionType *FTy = 0;
+ PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+ FunctionType *FTy = 0;
if (OpTy) FTy = dyn_cast<FunctionType>(OpTy->getElementType());
if (!FTy || Record.size() < FTy->getNumParams()+OpNum)
return Error("Invalid CALL record");
@@ -2589,9 +2589,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
if (Record.size() < 3)
return Error("Invalid VAARG record");
- const Type *OpTy = getTypeByID(Record[0]);
+ Type *OpTy = getTypeByID(Record[0]);
Value *Op = getFnValueByID(Record[1], OpTy);
- const Type *ResTy = getTypeByID(Record[2]);
+ Type *ResTy = getTypeByID(Record[2]);
if (!OpTy || !Op || !ResTy)
return Error("Invalid VAARG record");
I = new VAArgInst(Op, ResTy);
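
A sketch of the forward-reference discipline used throughout the reader;
`Idx` and `Ty` are assumed values:

    // Operand #Idx may not have been parsed yet. getConstantFwdRef() then
    // returns a typed ConstantPlaceHolder, and AssignValue() replaces all
    // of its uses once the real constant arrives.
    Constant *Op = ValueList.getConstantFwdRef(Idx, Ty);
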
diff --git a/lib/Bitcode/Reader/BitcodeReader.h b/lib/Bitcode/Reader/BitcodeReader.h
index 1b3bf1a185..6e6118cac0 100644
--- a/lib/Bitcode/Reader/BitcodeReader.h
+++ b/lib/Bitcode/Reader/BitcodeReader.h
@@ -76,8 +76,8 @@ public:
ValuePtrs.resize(N);
}
- Constant *getConstantFwdRef(unsigned Idx, const Type *Ty);
- Value *getValueFwdRef(unsigned Idx, const Type *Ty);
+ Constant *getConstantFwdRef(unsigned Idx, Type *Ty);
+ Value *getValueFwdRef(unsigned Idx, Type *Ty);
void AssignValue(Value *V, unsigned Idx);
@@ -212,7 +212,7 @@ public:
private:
Type *getTypeByID(unsigned ID);
Type *getTypeByIDOrNull(unsigned ID);
- Value *getFnValueByID(unsigned ID, const Type *Ty) {
+ Value *getFnValueByID(unsigned ID, Type *Ty) {
if (Ty && Ty->isMetadataTy())
return MDValueList.getValueFwdRef(ID);
return ValueList.getValueFwdRef(ID, Ty);
@@ -248,7 +248,7 @@ private:
return ResVal == 0;
}
bool getValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
- const Type *Ty, Value *&ResVal) {
+ Type *Ty, Value *&ResVal) {
if (Slot == Record.size()) return true;
unsigned ValNo = (unsigned)Record[Slot++];
ResVal = getFnValueByID(ValNo, Ty);
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 85d67ce62b..4dfa0ba4df 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -216,7 +216,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
// Loop over all of the types, emitting each in turn.
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
- const Type *T = TypeList[i];
+ Type *T = TypeList[i];
int AbbrevToUse = 0;
unsigned Code = 0;
@@ -237,7 +237,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
break;
case Type::PointerTyID: {
- const PointerType *PTy = cast<PointerType>(T);
+ PointerType *PTy = cast<PointerType>(T);
// POINTER: [pointee type, address space]
Code = bitc::TYPE_CODE_POINTER;
TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
@@ -247,7 +247,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::FunctionTyID: {
- const FunctionType *FT = cast<FunctionType>(T);
+ FunctionType *FT = cast<FunctionType>(T);
// FUNCTION: [isvararg, attrid, retty, paramty x N]
Code = bitc::TYPE_CODE_FUNCTION;
TypeVals.push_back(FT->isVarArg());
@@ -259,7 +259,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::StructTyID: {
- const StructType *ST = cast<StructType>(T);
+ StructType *ST = cast<StructType>(T);
// STRUCT: [ispacked, eltty x N]
TypeVals.push_back(ST->isPacked());
// Output all of the element types.
@@ -286,7 +286,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::ArrayTyID: {
- const ArrayType *AT = cast<ArrayType>(T);
+ ArrayType *AT = cast<ArrayType>(T);
// ARRAY: [numelts, eltty]
Code = bitc::TYPE_CODE_ARRAY;
TypeVals.push_back(AT->getNumElements());
@@ -295,7 +295,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::VectorTyID: {
- const VectorType *VT = cast<VectorType>(T);
+ VectorType *VT = cast<VectorType>(T);
// VECTOR [numelts, eltty]
Code = bitc::TYPE_CODE_VECTOR;
TypeVals.push_back(VT->getNumElements());
@@ -716,7 +716,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
SmallVector<uint64_t, 64> Record;
const ValueEnumerator::ValueList &Vals = VE.getValues();
- const Type *LastTy = 0;
+ Type *LastTy = 0;
for (unsigned i = FirstVal; i != LastVal; ++i) {
const Value *V = Vals[i].first;
// If we need to switch types, do so now.
@@ -781,7 +781,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
}
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
- const Type *Ty = CFP->getType();
+ Type *Ty = CFP->getType();
if (Ty->isFloatTy() || Ty->isDoubleTy()) {
Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
} else if (Ty->isX86_FP80Ty()) {
@@ -1083,8 +1083,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::Invoke: {
const InvokeInst *II = cast<InvokeInst>(&I);
const Value *Callee(II->getCalledValue());
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
Code = bitc::FUNC_CODE_INST_INVOKE;
Vals.push_back(VE.getAttributeID(II->getAttributes()));
@@ -1149,8 +1149,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;
case Instruction::Call: {
const CallInst &CI = cast<CallInst>(I);
- const PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
Code = bitc::FUNC_CODE_INST_CALL;
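
A reduced form of the PointerTyID case from WriteTypeTable() above; the free
function is hypothetical:

    // POINTER: [pointee type id, address space]
    static void writePointerRecord(const ValueEnumerator &VE, PointerType *PTy,
                                   SmallVectorImpl<uint64_t> &TypeVals) {
      TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
      TypeVals.push_back(PTy->getAddressSpace());
    }
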
diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp
index b68bf92d51..db766b1bf0 100644
--- a/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -315,7 +315,7 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
}
-void ValueEnumerator::EnumerateType(const Type *Ty) {
+void ValueEnumerator::EnumerateType(Type *Ty) {
unsigned *TypeID = &TypeMap[Ty];
// We've already seen this type.
@@ -325,7 +325,7 @@ void ValueEnumerator::EnumerateType(const Type *Ty) {
// If it is a non-anonymous struct, mark the type as being visited so that we
// don't recursively visit it. This is safe because we allow forward
// references of these in the bitcode reader.
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isAnonymous())
*TypeID = ~0U;
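
A simplified sketch of the visited-marker trick above (the real code
pre-marks only named structs, since only those can be recursive):

    void enumerate(DenseMap<Type*, unsigned> &TypeMap,
                   std::vector<Type*> &Types, Type *Ty) {
      unsigned *TypeID = &TypeMap[Ty];
      if (*TypeID) return;             // already enumerated or in progress
      *TypeID = ~0U;                   // sentinel breaks recursive cycles
      for (Type::subtype_iterator I = Ty->subtype_begin(),
           E = Ty->subtype_end(); I != E; ++I)
        enumerate(TypeMap, Types, *I);
      Types.push_back(Ty);
      TypeMap[Ty] = Types.size();      // ids are 1-based; 0 means unseen
    }
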
diff --git a/lib/Bitcode/Writer/ValueEnumerator.h b/lib/Bitcode/Writer/ValueEnumerator.h
index 6617b60deb..b6fc920e41 100644
--- a/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/lib/Bitcode/Writer/ValueEnumerator.h
@@ -35,12 +35,12 @@ class MDSymbolTable;
class ValueEnumerator {
public:
- typedef std::vector<const Type*> TypeList;
+ typedef std::vector<Type*> TypeList;
// For each value, we remember its Value* and occurrence frequency.
typedef std::vector<std::pair<const Value*, unsigned> > ValueList;
private:
- typedef DenseMap<const Type*, unsigned> TypeMapType;
+ typedef DenseMap<Type*, unsigned> TypeMapType;
TypeMapType TypeMap;
TypeList Types;
@@ -85,7 +85,7 @@ public:
unsigned getValueID(const Value *V) const;
- unsigned getTypeID(const Type *T) const {
+ unsigned getTypeID(Type *T) const {
TypeMapType::const_iterator I = TypeMap.find(T);
assert(I != TypeMap.end() && "Type not in ValueEnumerator!");
return I->second-1;
@@ -140,7 +140,7 @@ private:
void EnumerateFunctionLocalMetadata(const MDNode *N);
void EnumerateNamedMDNode(const NamedMDNode *NMD);
void EnumerateValue(const Value *V);
- void EnumerateType(const Type *T);
+ void EnumerateType(Type *T);
void EnumerateOperandType(const Value *V);
void EnumerateAttributes(const AttrListPtr &PAL);
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 125e64196f..fafc01044d 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -31,7 +31,7 @@ using namespace llvm;
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
-unsigned llvm::ComputeLinearIndex(const Type *Ty,
+unsigned llvm::ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex) {
@@ -40,7 +40,7 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;
// Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
@@ -52,8 +52,8 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;
}
// Given an array type, recursively traverse the elements.
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
+ else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Type *EltTy = ATy->getElementType();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
@@ -72,12 +72,12 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
@@ -88,8 +88,8 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
return;
}
// Given an array type, recursively traverse the elements.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Type *EltTy = ATy->getElementType();
uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
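
A worked example of the linearization described above; `STy` is an assumed
StructType* for the type shown:

    // For { i32, [2 x float], i8 } the members linearize as
    //   i32 -> 0, float #0 -> 1, float #1 -> 2, i8 -> 3,
    // so the member path {1, 1} (second element of the array) is index 2.
    unsigned Path[] = { 1, 1 };
    unsigned Linear = ComputeLinearIndex(STy, Path, Path + 2); // == 2
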
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 7f314eed3a..fbf9867191 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1009,7 +1009,7 @@ void AsmPrinter::EmitConstantPool() {
unsigned NewOffset = (Offset + AlignMask) & ~AlignMask;
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
- const Type *Ty = CPE.getType();
+ Type *Ty = CPE.getType();
Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI));
@@ -1447,7 +1447,7 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
- const Type *Ty = CE->getType();
+ Type *Ty = CE->getType();
const MCExpr *OpExpr = LowerConstant(Op, AP);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index d977651c32..eef3d8a65b 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -482,7 +482,7 @@ void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
EmitGlobalConstantLargeInt(CI, GblS);
return;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
- const VectorType *PTy = CP->getType();
+ VectorType *PTy = CP->getType();
for (unsigned I = 0, E = PTy->getNumElements(); I < E; ++I)
EmitGlobalConstant(CP->getOperand(I), GblS);
return;
@@ -552,7 +552,7 @@ CstExprResTy ELFWriter::ResolveConstantExpr(const Constant *CV) {
}
case Instruction::PtrToInt: {
Constant *Op = CE->getOperand(0);
- const Type *Ty = CE->getType();
+ Type *Ty = CE->getType();
// We can emit the pointer value into this slot if the slot is an
// integer slot greater or equal to the size of the pointer.
diff --git a/lib/CodeGen/IntrinsicLowering.cpp b/lib/CodeGen/IntrinsicLowering.cpp
index 611886ff16..0f92c2d06b 100644
--- a/lib/CodeGen/IntrinsicLowering.cpp
+++ b/lib/CodeGen/IntrinsicLowering.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
template <class ArgIt>
static void EnsureFunctionExists(Module &M, const char *Name,
ArgIt ArgBegin, ArgIt ArgEnd,
- const Type *RetTy) {
+ Type *RetTy) {
// Insert a correctly-typed definition now.
std::vector<Type *> ParamTys;
for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
@@ -64,7 +64,7 @@ static void EnsureFPIntrinsicsExist(Module &M, Function *Fn,
template <class ArgIt>
static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
ArgIt ArgBegin, ArgIt ArgEnd,
- const Type *RetTy) {
+ Type *RetTy) {
// If we haven't already looked up this function, check to see if the
// program already contains a function with this name.
Module *M = CI->getParent()->getParent()->getParent();
@@ -462,7 +462,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -473,7 +473,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -484,7 +484,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memset: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
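
The memcpy, memmove and memset cases above share one step: the length operand is canonicalized to the target's pointer-sized integer before the libcall is emitted. A hedged sketch of just that step, assuming the LLVM 3.0-era IRBuilder and TargetData APIs; LowerMemIntrinsicLength is a hypothetical helper name.

#include "llvm/Instructions.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// memcpy/memmove/memset all carry the length as their third argument;
// widen or truncate it to the pointer-sized integer the libc call expects.
static Value *LowerMemIntrinsicLength(CallInst *CI, const TargetData &TD,
                                      IRBuilder<> &Builder) {
  IntegerType *IntPtr = TD.getIntPtrType(CI->getContext());
  return Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                               /* isSigned */ false);
}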
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index cd25156528..20066a067b 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -619,7 +619,7 @@ void MachineJumpTableInfo::dump() const { print(dbgs()); }
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//
-const Type *MachineConstantPoolEntry::getType() const {
+Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4f0d2caca2..5a7319f9d6 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6479,7 +6479,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
- const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
+ Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
return SDValue();
@@ -6542,7 +6542,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
- const Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
+ Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
@@ -7447,7 +7447,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(FV->getConstantFPValue()),
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
- const Type *FPTy = Elts[0]->getType();
+ Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData();
// Create a ConstantArray of the two constants.
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 54a7d43f46..b8d7e82fe3 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -422,12 +422,12 @@ bool FastISel::SelectGetElementPtr(const User *I) {
bool NIsKill = hasTrivialKill(I->getOperand(0));
- const Type *Ty = I->getOperand(0)->getType();
+ Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
E = I->op_end(); OI != E; ++OI) {
const Value *Idx = *OI;
- if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
+ if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@@ -839,7 +839,7 @@ FastISel::SelectExtractValue(const User *U) {
return false;
const Value *Op0 = EVI->getOperand(0);
- const Type *AggTy = Op0->getType();
+ Type *AggTy = Op0->getType();
// Get the base result register.
unsigned ResultReg;
@@ -1074,7 +1074,7 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
if (MaterialReg == 0) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
- const IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
+ IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
}
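
FastISel::SelectGetElementPtr above, like SelectionDAGBuilder::visitGetElementPtr further below, walks GEP operands with the same two-way split: a struct index selects a field whose byte offset comes from StructLayout, while a sequential index scales by the element's alloc size. A constant-index-only model of that walk, assuming the LLVM 3.0-era headers; AccumulateConstantGEPOffset is a hypothetical name.

#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// Fold a GEP whose indices are all constant into a single byte offset.
static uint64_t AccumulateConstantGEPOffset(const TargetData &TD,
                                            const User *I) {
  uint64_t Offset = 0;
  Type *Ty = I->getOperand(0)->getType();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
         E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    const ConstantInt *CIdx = cast<ConstantInt>(Idx);
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      // A struct index picks a field; its offset comes from StructLayout.
      unsigned Field = CIdx->getZExtValue();
      Offset += TD.getStructLayout(StTy)->getElementOffset(Field);
      Ty = StTy->getElementType(Field);
    } else {
      // A sequential index (pointer or array step) scales by alloc size.
      Ty = cast<SequentialType>(Ty)->getElementType();
      Offset += CIdx->getSExtValue() * TD.getTypeAllocSize(Ty);
    }
  }
  return Offset;
}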
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index d518b5d346..d5bf12055e 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -78,7 +78,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
- const Type *Ty = AI->getAllocatedType();
+ Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@@ -216,7 +216,7 @@ unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
-unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
+unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, Ty, ValueVTs);
@@ -260,7 +260,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
- const Type *Ty = PN->getType();
+ Type *Ty = PN->getType();
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index f0f4743298..568f66c039 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -356,7 +356,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
int Offset = CP->getOffset();
unsigned Align = CP->getAlignment();
- const Type *Type = CP->getType();
+ Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
Align = TM->getTargetData()->getPrefTypeAlignment(Type);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index d06e2bdce0..aadfa26420 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -365,7 +365,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
- const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
+ Type *SType = SVT.getTypeForEVT(*DAG.getContext());
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
VT = SVT;
Extend = true;
@@ -1124,7 +1124,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
@@ -1311,7 +1311,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty =
+ Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty);
@@ -1491,7 +1491,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@@ -1596,7 +1596,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@@ -1999,7 +1999,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
- const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
+ Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is
@@ -2106,7 +2106,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
}
} else {
assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
- const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
+ Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
}
@@ -2159,7 +2159,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
@@ -2169,7 +2169,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
// isTailCall may be true since the callee does not reference caller stack
// frame. Check if it's in the right position.
@@ -2210,7 +2210,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@@ -2237,7 +2237,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@@ -2248,7 +2248,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
@@ -2360,13 +2360,13 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
SDValue InChain = DAG.getEntryNode();
EVT RetVT = Node->getValueType(0);
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
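
Several of the LegalizeDAG.cpp hunks above repeat one predicate: translate the memory EVT back to an IR type, ask TargetData for that type's ABI alignment, and expand the access only when its alignment falls short. A sketch of the shared test, assuming the LLVM 3.0-era API; NeedsUnalignedExpansion is a hypothetical name.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Expand a load/store only when the target lacks hardware support and the
// access is aligned below the ABI alignment of the equivalent IR type.
static bool NeedsUnalignedExpansion(const TargetLowering &TLI,
                                    SelectionDAG &DAG, EVT MemVT,
                                    unsigned AccessAlign) {
  if (TLI.allowsUnalignedMemoryAccesses(MemVT))
    return false;
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
  return AccessAlign < ABIAlignment;
}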
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 20fb9c32be..41197d8b5d 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2176,9 +2176,9 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
- const Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
EVT PtrVT = TLI.getPointerTy();
- const Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
+ Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
DebugLoc dl = N->getDebugLoc();
// A divide for UMULO should be faster than a function call.
@@ -2222,7 +2222,7 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
EVT ArgVT = N->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = N->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = true;
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index ba658b0220..80f555bde9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -1046,7 +1046,7 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@@ -1067,7 +1067,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@@ -1078,7 +1078,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index b5698f9c67..85a169261a 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -670,7 +670,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- const Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
+ Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 35ea0bb940..b72a47d0f6 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -821,7 +821,7 @@ static void VerifyMachineNode(SDNode *N) {
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
- const Type *Ty = VT == MVT::iPTR ?
+ Type *Ty = VT == MVT::iPTR ?
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
@@ -1432,7 +1432,7 @@ SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
- const Type *Ty = VT.getTypeForEVT(*getContext());
+ Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
@@ -1445,8 +1445,8 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
VT2.getStoreSizeInBits())/8;
- const Type *Ty1 = VT1.getTypeForEVT(*getContext());
- const Type *Ty2 = VT2.getTypeForEVT(*getContext());
+ Type *Ty1 = VT1.getTypeForEVT(*getContext());
+ Type *Ty2 = VT2.getTypeForEVT(*getContext());
const TargetData *TD = TLI.getTargetData();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
@@ -3425,7 +3425,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3514,7 +3514,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3589,7 +3589,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3782,7 +3782,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
+ Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -6528,7 +6528,7 @@ unsigned GlobalAddressSDNode::getAddressSpace() const {
}
-const Type *ConstantPoolSDNode::getType() const {
+Type *ConstantPoolSDNode::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
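
The three DstAlignCanChange hunks above (memcpy, memmove, memset expansion) all realign the destination the same way: take the widest value type the expansion will store through, and raise the frame object's alignment to that type's ABI alignment. A sketch under the LLVM 3.0-era API; RealignForMemOps is a hypothetical name, and std::vector stands in for the SmallVector the real code uses.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>
using namespace llvm;

// MemOps[0] is the widest value type the expansion stores through; give
// the destination slot at least that type's ABI alignment.
static unsigned RealignForMemOps(const TargetLowering &TLI,
                                 SelectionDAG &DAG,
                                 const std::vector<EVT> &MemOps,
                                 unsigned Align) {
  Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
  unsigned NewAlign =
      (unsigned)TLI.getTargetData()->getABITypeAlignment(Ty);
  return NewAlign > Align ? NewAlign : Align;
}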
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 81b03ee76a..ba18465c0d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -578,7 +578,7 @@ namespace {
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
RegsForValue(LLVMContext &Context, const TargetLowering &tli,
- unsigned Reg, const Type *Ty) {
+ unsigned Reg, Type *Ty) {
ComputeValueVTs(tli, Ty, ValueVTs);
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
@@ -1069,7 +1069,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
- const VectorType *VecTy = cast<VectorType>(V->getType());
+ VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = VecTy->getNumElements();
// Now that we know the number and type of the elements, get that number of
@@ -2458,7 +2458,7 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (isa<Constant>(I.getOperand(0)) &&
I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
SDValue Op2 = getValue(I.getOperand(1));
@@ -2886,8 +2886,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
- const Type *AggTy = I.getType();
- const Type *ValTy = Op1->getType();
+ Type *AggTy = I.getType();
+ Type *ValTy = Op1->getType();
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);
@@ -2927,8 +2927,8 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
- const Type *AggTy = Op0->getType();
- const Type *ValTy = I.getType();
+ Type *AggTy = Op0->getType();
+ Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
@@ -2961,12 +2961,12 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
- if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
+ if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@@ -3037,7 +3037,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
if (FuncInfo.StaticAllocaMap.count(&I))
return; // getValue will auto-populate this.
- const Type *Ty = I.getAllocatedType();
+ Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@@ -3087,7 +3087,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
@@ -3290,7 +3290,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
if (!I.getType()->isVoidTy()) {
- if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
+ if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
}
@@ -4918,9 +4918,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
- const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- const Type *RetTy = FTy->getReturnType();
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ Type *RetTy = FTy->getReturnType();
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
MCSymbol *BeginLabel = 0;
@@ -4949,7 +4949,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
- const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
+ Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
Entry.Node = DemoteStackSlot;
@@ -5037,7 +5037,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// The instruction result is the result of loading from the
// hidden sret parameter.
SmallVector<EVT, 1> PVTs;
- const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
+ Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
@@ -5130,7 +5130,7 @@ static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
}
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
- const Type *LoadTy,
+ Type *LoadTy,
SelectionDAGBuilder &Builder) {
// Check to see if this load can be trivially constant folded, e.g. if the
@@ -5193,7 +5193,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
bool ActuallyDoIt = true;
MVT LoadVT;
- const Type *LoadTy;
+ Type *LoadTy;
switch (Size->getZExtValue()) {
default:
LoadVT = MVT::Other;
@@ -5261,14 +5261,14 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// See if any floating point values are being passed to this function. This is
// used to emit an undefined reference to fltused on Windows.
- const FunctionType *FT =
+ FunctionType *FT =
cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (FT->isVarArg() &&
!MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
- const Type* T = I.getArgOperand(i)->getType();
- for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
+ Type* T = I.getArgOperand(i)->getType();
+ for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
i != e; ++i) {
if (!i->isFloatingPointTy()) continue;
MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
@@ -5412,20 +5412,20 @@ public:
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy();
- const llvm::Type *OpTy = CallOperandVal->getType();
+ llvm::Type *OpTy = CallOperandVal->getType();
// FIXME: code duplicated from TargetLowering::ParseConstraints().
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
- const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
- if (const StructType *STy = dyn_cast<StructType>(OpTy))
+ if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
@@ -5639,7 +5639,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
- if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+ if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpVT = TLI.getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@@ -5750,7 +5750,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
- const Type *Ty = OpVal->getType();
+ Type *Ty = OpVal->getType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
@@ -6111,7 +6111,7 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
-TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
+TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
@@ -6128,7 +6128,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
+ Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
@@ -6145,8 +6145,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
Flags.setSRet();
if (Args[i].isByVal) {
Flags.setByVal();
- const PointerType *Ty = cast<PointerType>(Args[i].Ty);
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(Args[i].Ty);
+ Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
@@ -6356,7 +6356,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
TD->getABITypeAlignment(ArgTy);
@@ -6371,8 +6371,8 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
Flags.setSRet();
if (F.paramHasAttr(Idx, Attribute::ByVal)) {
Flags.setByVal();
- const PointerType *Ty = cast<PointerType>(I->getType());
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(I->getType());
+ Type *ElementTy = Ty->getElementType();
Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2626ac3bbb..335eca787f 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -996,7 +996,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
+void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI,
SmallVectorImpl<uint64_t> *Offsets) {
@@ -1054,7 +1054,7 @@ void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
-unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
+unsigned TargetLowering::getByValTypeAlignment(Type *Ty) const {
return TD->getCallFrameTypeAlignment(Ty);
}
@@ -2840,7 +2840,7 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
- if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+ if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@@ -2857,16 +2857,16 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
}
if (OpInfo.CallOperandVal) {
- const llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
+ llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
if (OpInfo.isIndirect) {
- const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
- if (const StructType *STy = dyn_cast<StructType>(OpTy))
+ if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
@@ -3187,7 +3187,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
// The default implementation of this implements a conservative RISCy, r+r and
// r+i addr mode.
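
The inline-asm hunks in TargetLowering.cpp and SelectionDAGBuilder.cpp (the duplication the FIXME above notes) normalize an operand's type in two steps: an indirect operand is a pointer to the accessed type, and a single-element struct wrapper around a vector is unwrapped. A sketch of that normalization, assuming the LLVM 3.0-era API; ResolveAsmOperandType is a hypothetical name.

#include "llvm/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Value.h"
using namespace llvm;

// Normalize an inline-asm operand to the type the constraint refers to.
static Type *ResolveAsmOperandType(Value *CallOperandVal, bool isIndirect) {
  Type *OpTy = CallOperandVal->getType();
  // An indirect operand is a pointer to the accessed type.
  if (isIndirect) {
    PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
    if (!PtrTy)
      report_fatal_error("Indirect operand for inline asm not a pointer!");
    OpTy = PtrTy->getElementType();
  }
  // Unwrap a vector wrapped in a struct, e.g. { <16 x i8> }.
  if (StructType *STy = dyn_cast<StructType>(OpTy))
    if (STy->getNumElements() == 1)
      OpTy = STy->getElementType(0);
  return OpTy;
}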
diff --git a/lib/CodeGen/ShadowStackGC.cpp b/lib/CodeGen/ShadowStackGC.cpp
index 5a253a4d97..2f1d223cf9 100644
--- a/lib/CodeGen/ShadowStackGC.cpp
+++ b/lib/CodeGen/ShadowStackGC.cpp
@@ -61,7 +61,7 @@ namespace {
private:
bool IsNullValue(Value *V);
Constant *GetFrameMap(Function &F);
- const Type* GetConcreteStackEntryType(Function &F);
+ Type* GetConcreteStackEntryType(Function &F);
void CollectRoots(Function &F);
static GetElementPtrInst *CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Value *BasePtr,
@@ -190,7 +190,7 @@ ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
Constant *ShadowStackGC::GetFrameMap(Function &F) {
// doInitialization creates the abstract type of this value.
- const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
+ Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
// Truncate the ShadowStackDescriptor if some metadata is null.
unsigned NumMeta = 0;
@@ -203,7 +203,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
}
Metadata.resize(NumMeta);
- const Type *Int32Ty = Type::getInt32Ty(F.getContext());
+ Type *Int32Ty = Type::getInt32Ty(F.getContext());
Constant *BaseElts[] = {
ConstantInt::get(Int32Ty, Roots.size(), false),
@@ -244,7 +244,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}
-const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
+Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
// doInitialization creates the generic version of this type.
std::vector<Type*> EltTys;
EltTys.push_back(StackEntryTy);
@@ -282,7 +282,7 @@ bool ShadowStackGC::initializeCustomLowering(Module &M) {
EltTys.push_back(PointerType::getUnqual(StackEntryTy));
EltTys.push_back(FrameMapPtrTy);
StackEntryTy->setBody(EltTys);
- const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
+ PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
// Get the root chain if it already exists.
Head = M.getGlobalVariable("llvm_gc_root_chain");
@@ -373,7 +373,7 @@ bool ShadowStackGC::performCustomLowering(Function &F) {
// Build the constant map and figure the type of the shadow stack entry.
Value *FrameMap = GetFrameMap(F);
- const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
+ Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 65a33da93a..07ec857608 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -40,7 +40,7 @@ namespace {
const TargetLowering *TLI;
- const Type *FunctionContextTy;
+ Type *FunctionContextTy;
Constant *RegisterFn;
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
@@ -204,7 +204,7 @@ splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- const Type *Ty = AI->getType();
+ Type *Ty = AI->getType();
// Aggregate types can't be cast, but are legal argument types, so we have
// to handle them differently. We use an extract/insert pair as a
// lightweight method to achieve the same goal.
@@ -381,7 +381,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
"fcn_context", F.begin()->begin());
Value *Idxs[2];
- const Type *Int32Ty = Type::getInt32Ty(F.getContext());
+ Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
// We need to also keep around a reference to the call_site field
Idxs[0] = Zero;
@@ -423,7 +423,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// instruction hasn't already been removed.
if (!I->getParent()) continue;
Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
- const Type *Ty = Type::getInt8PtrTy(F.getContext());
+ Type *Ty = Type::getInt8PtrTy(F.getContext());
Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);
I->replaceAllUsesWith(Val);
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index d3cbd15b64..1f0e5a2711 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -123,7 +123,7 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;
- if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
// We apparently only care about character arrays.
if (!AT->getElementType()->isIntegerTy(8))
continue;
@@ -165,7 +165,7 @@ bool StackProtector::InsertStackProtectors() {
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
//
- const PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
+ PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
unsigned AddressSpace, Offset;
if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
Constant *OffsetVal =
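
The StackProtector hunk above keys the heuristic on allocas of i8 arrays, i.e. character buffers. A minimal sketch of that test under the LLVM 3.0-era API; IsCharArrayAlloca is a hypothetical name.

#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Stack protectors fire on allocas of i8 arrays (character buffers).
static bool IsCharArrayAlloca(const AllocaInst *AI) {
  if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
    return AT->getElementType()->isIntegerTy(8);
  return false;
}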
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 7652090af8..1b1b498f39 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -93,7 +93,7 @@ public:
/// \brief Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const TargetData& TD) {
- const Type *ElTy = GV->getType()->getElementType();
+ Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new(
TargetData::RoundUpAlignment(sizeof(GVMemoryBlock),
@@ -272,7 +272,7 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
Array = new char[(InputArgv.size()+1)*PtrSize];
DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
- const Type *SBytePtr = Type::getInt8PtrTy(C);
+ Type *SBytePtr = Type::getInt8PtrTy(C);
for (unsigned i = 0; i != InputArgv.size(); ++i) {
unsigned Size = InputArgv[i].size()+1;
@@ -361,8 +361,8 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
// Check main() type
unsigned NumArgs = Fn->getFunctionType()->getNumParams();
- const FunctionType *FTy = Fn->getFunctionType();
- const Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
+ FunctionType *FTy = Fn->getFunctionType();
+ Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
// Check the argument types.
if (NumArgs > 3)
@@ -651,7 +651,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::BitCast: {
GenericValue GV = getConstantValue(Op0);
- const Type* DestTy = CE->getType();
+ Type* DestTy = CE->getType();
switch (Op0->getType()->getTypeID()) {
default: llvm_unreachable("Invalid bitcast operand");
case Type::IntegerTyID:
@@ -847,7 +847,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
}
void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
- GenericValue *Ptr, const Type *Ty) {
+ GenericValue *Ptr, Type *Ty) {
const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
@@ -909,7 +909,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
///
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
GenericValue *Ptr,
- const Type *Ty) {
+ Type *Ty) {
const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
@@ -986,7 +986,7 @@ void ExecutionEngine::emitGlobals() {
// Loop over all of the global variables in the program, allocating the memory
// to hold them. If there is more than one module, do a prepass over globals
// to figure out how the different modules should link together.
- std::map<std::pair<std::string, const Type*>,
+ std::map<std::pair<std::string, Type*>,
const GlobalValue*> LinkedGlobalsMap;
if (Modules.size() != 1) {
@@ -1101,7 +1101,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
if (!GV->isThreadLocal())
InitializeMemory(GV->getInitializer(), GA);
- const Type *ElTy = GV->getType()->getElementType();
+ Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
++NumGlobals;
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 498063bf65..28fbf2b159 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -51,7 +51,7 @@ static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
break
static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(+, Float);
IMPLEMENT_BINARY_OPERATOR(+, Double);
@@ -62,7 +62,7 @@ static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
}
static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(-, Float);
IMPLEMENT_BINARY_OPERATOR(-, Double);
@@ -73,7 +73,7 @@ static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
}
static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(*, Float);
IMPLEMENT_BINARY_OPERATOR(*, Double);
@@ -84,7 +84,7 @@ static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
}
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(/, Float);
IMPLEMENT_BINARY_OPERATOR(/, Double);
@@ -95,7 +95,7 @@ static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
}
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
@@ -125,7 +125,7 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
break;
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(eq,Ty);
@@ -138,7 +138,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ne,Ty);
@@ -151,7 +151,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ult,Ty);
@@ -164,7 +164,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(slt,Ty);
@@ -177,7 +177,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ugt,Ty);
@@ -190,7 +190,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sgt,Ty);
@@ -203,7 +203,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ule,Ty);
@@ -216,7 +216,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sle,Ty);
@@ -229,7 +229,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(uge,Ty);
@@ -242,7 +242,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sge,Ty);
@@ -256,7 +256,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
void Interpreter::visitICmpInst(ICmpInst &I) {
ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
@@ -286,7 +286,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
break
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(==, Float);
@@ -299,7 +299,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(!=, Float);
@@ -313,7 +313,7 @@ static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<=, Float);
@@ -326,7 +326,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>=, Float);
@@ -339,7 +339,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<, Float);
@@ -352,7 +352,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>, Float);
@@ -377,49 +377,49 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OEQ(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_ONE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OLE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OGE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OLT(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OGT(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
@@ -431,7 +431,7 @@ static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
}
static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
+ Type *Ty) {
GenericValue Dest;
if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
@@ -444,7 +444,7 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
void Interpreter::visitFCmpInst(FCmpInst &I) {
ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
@@ -475,7 +475,7 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
}
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
+ GenericValue Src2, Type *Ty) {
GenericValue Result;
switch (predicate) {
case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
@@ -520,7 +520,7 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
@@ -585,7 +585,7 @@ void Interpreter::exitCalled(GenericValue GV) {
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
-void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
+void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
GenericValue Result) {
// Pop the current stack frame.
ECStack.pop_back();
@@ -613,7 +613,7 @@ void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
void Interpreter::visitReturnInst(ReturnInst &I) {
ExecutionContext &SF = ECStack.back();
- const Type *RetTy = Type::getVoidTy(I.getContext());
+ Type *RetTy = Type::getVoidTy(I.getContext());
GenericValue Result;
// Save away the return value... (if we are not 'ret void')
@@ -663,7 +663,7 @@ void Interpreter::visitBranchInst(BranchInst &I) {
void Interpreter::visitSwitchInst(SwitchInst &I) {
ExecutionContext &SF = ECStack.back();
GenericValue CondVal = getOperandValue(I.getOperand(0), SF);
- const Type *ElTy = I.getOperand(0)->getType();
+ Type *ElTy = I.getOperand(0)->getType();
// Check to see if any of the cases match...
BasicBlock *Dest = 0;
@@ -730,7 +730,7 @@ void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
void Interpreter::visitAllocaInst(AllocaInst &I) {
ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getType()->getElementType(); // Type to be allocated
+ Type *Ty = I.getType()->getElementType(); // Type to be allocated
// Get the number of elements being allocated by the array...
unsigned NumElements =
@@ -767,7 +767,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
uint64_t Total = 0;
for (; I != E; ++I) {
- if (const StructType *STy = dyn_cast<StructType>(*I)) {
+ if (StructType *STy = dyn_cast<StructType>(*I)) {
const StructLayout *SLO = TD.getStructLayout(STy);
const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -775,7 +775,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
Total += SLO->getElementOffset(Index);
} else {
- const SequentialType *ST = cast<SequentialType>(*I);
+ SequentialType *ST = cast<SequentialType>(*I);
// Get the index number for the array... which must be long type...
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
@@ -929,34 +929,34 @@ void Interpreter::visitAShr(BinaryOperator &I) {
SetValue(&I, Dest, SF);
}
-GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
+ IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.trunc(DBitWidth);
return Dest;
}
-GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
+ IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.sext(DBitWidth);
return Dest;
}
-GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
+ IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.zext(DBitWidth);
return Dest;
}
-GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
@@ -965,7 +965,7 @@ GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
@@ -974,9 +974,9 @@ GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
+ Type *SrcTy = SrcVal->getType();
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
@@ -988,9 +988,9 @@ GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
+ Type *SrcTy = SrcVal->getType();
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
@@ -1002,7 +1002,7 @@ GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
@@ -1014,7 +1014,7 @@ GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
@@ -1027,7 +1027,7 @@ GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
}
-GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
@@ -1037,7 +1037,7 @@ GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
@@ -1050,10 +1050,10 @@ GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
return Dest;
}
-GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
+ Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (DstTy->isPointerTy()) {
assert(SrcTy->isPointerTy() && "Invalid BitCast");
@@ -1155,7 +1155,7 @@ void Interpreter::visitVAArgInst(VAArgInst &I) {
GenericValue Dest;
GenericValue Src = ECStack[VAList.UIntPairVal.first]
.VarArgs[VAList.UIntPairVal.second];
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
switch (Ty->getTypeID()) {
case Type::IntegerTyID: Dest.IntVal = Src.IntVal; break;
IMPLEMENT_VAARG(Pointer);
@@ -1222,7 +1222,7 @@ GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
GenericValue Dest;
- const Type * Ty = CE->getOperand(0)->getType();
+ Type * Ty = CE->getOperand(0)->getType();
switch (CE->getOpcode()) {
case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
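
Aside on the Execution.cpp hunks above: the GEP walk resolves a struct index through the target's StructLayout, while a sequential (array/pointer) index scales by the element's allocation size. A minimal standalone sketch of the same computation, simplified to a constant-index GEP (GEP and the TargetData reference TD are stand-ins; this mirrors the code above rather than copying it):

uint64_t Total = 0;
for (gep_type_iterator I = gep_type_begin(GEP), E = gep_type_end(GEP);
     I != E; ++I) {
  if (StructType *STy = dyn_cast<StructType>(*I)) {
    // Field index -> byte offset, taken from the target's struct layout.
    const StructLayout *SLO = TD.getStructLayout(STy);
    unsigned Field = cast<ConstantInt>(I.getOperand())->getZExtValue();
    Total += SLO->getElementOffset(Field);
  } else {
    // Array/pointer step: signed index times sizeof(element).
    SequentialType *ST = cast<SequentialType>(*I);
    int64_t Idx = cast<ConstantInt>(I.getOperand())->getSExtValue();
    Total += uint64_t(Idx) * TD.getTypeAllocSize(ST->getElementType());
  }
}

In the interpreter itself the index is a runtime GenericValue rather than a ConstantInt, but the offset arithmetic is the same.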
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index f7e2a4df95..055875c945 100644
--- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -48,7 +48,7 @@ using namespace llvm;
static ManagedStatic<sys::Mutex> FunctionsLock;
-typedef GenericValue (*ExFunc)(const FunctionType *,
+typedef GenericValue (*ExFunc)(FunctionType *,
const std::vector<GenericValue> &);
static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
static std::map<std::string, ExFunc> FuncNames;
@@ -60,7 +60,7 @@ static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
static Interpreter *TheInterpreter;
-static char getTypeID(const Type *Ty) {
+static char getTypeID(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: return 'V';
case Type::IntegerTyID:
@@ -91,7 +91,7 @@ static ExFunc lookupFunction(const Function *F) {
// Function not found, look it up... start by figuring out what the
// composite function name should be.
std::string ExtName = "lle_";
- const FunctionType *FT = F->getFunctionType();
+ FunctionType *FT = F->getFunctionType();
for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
ExtName += getTypeID(FT->getContainedType(i));
ExtName += "_" + F->getNameStr();
@@ -109,7 +109,7 @@ static ExFunc lookupFunction(const Function *F) {
}
#ifdef USE_LIBFFI
-static ffi_type *ffiTypeFor(const Type *Ty) {
+static ffi_type *ffiTypeFor(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: return &ffi_type_void;
case Type::IntegerTyID:
@@ -129,7 +129,7 @@ static ffi_type *ffiTypeFor(const Type *Ty) {
return NULL;
}
-static void *ffiValueFor(const Type *Ty, const GenericValue &AV,
+static void *ffiValueFor(Type *Ty, const GenericValue &AV,
void *ArgDataPtr) {
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
@@ -181,7 +181,7 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
const std::vector<GenericValue> &ArgVals,
const TargetData *TD, GenericValue &Result) {
ffi_cif cif;
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
const unsigned NumArgs = F->arg_size();
// TODO: We don't have type information about the remaining arguments, because
@@ -197,7 +197,7 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
A != E; ++A) {
const unsigned ArgNo = A->getArgNo();
- const Type *ArgTy = FTy->getParamType(ArgNo);
+ Type *ArgTy = FTy->getParamType(ArgNo);
args[ArgNo] = ffiTypeFor(ArgTy);
ArgBytes += TD->getTypeStoreSize(ArgTy);
}
@@ -209,12 +209,12 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
A != E; ++A) {
const unsigned ArgNo = A->getArgNo();
- const Type *ArgTy = FTy->getParamType(ArgNo);
+ Type *ArgTy = FTy->getParamType(ArgNo);
values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
ArgDataPtr += TD->getTypeStoreSize(ArgTy);
}
- const Type *RetTy = FTy->getReturnType();
+ Type *RetTy = FTy->getReturnType();
ffi_type *rtype = ffiTypeFor(RetTy);
if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) {
@@ -304,7 +304,7 @@ GenericValue Interpreter::callExternalFunction(Function *F,
extern "C" { // Don't add C++ manglings to llvm mangling :)
// void atexit(Function*)
-GenericValue lle_X_atexit(const FunctionType *FT,
+GenericValue lle_X_atexit(FunctionType *FT,
const std::vector<GenericValue> &Args) {
assert(Args.size() == 1);
TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
@@ -314,14 +314,14 @@ GenericValue lle_X_atexit(const FunctionType *FT,
}
// void exit(int)
-GenericValue lle_X_exit(const FunctionType *FT,
+GenericValue lle_X_exit(FunctionType *FT,
const std::vector<GenericValue> &Args) {
TheInterpreter->exitCalled(Args[0]);
return GenericValue();
}
// void abort(void)
-GenericValue lle_X_abort(const FunctionType *FT,
+GenericValue lle_X_abort(FunctionType *FT,
const std::vector<GenericValue> &Args) {
//FIXME: should we report or raise here?
//report_fatal_error("Interpreted program raised SIGABRT");
@@ -331,7 +331,7 @@ GenericValue lle_X_abort(const FunctionType *FT,
// int sprintf(char *, const char *, ...) - a very rough implementation to make
// output useful.
-GenericValue lle_X_sprintf(const FunctionType *FT,
+GenericValue lle_X_sprintf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
char *OutputBuffer = (char *)GVTOP(Args[0]);
const char *FmtStr = (const char *)GVTOP(Args[1]);
@@ -413,7 +413,7 @@ GenericValue lle_X_sprintf(const FunctionType *FT,
// int printf(const char *, ...) - a very rough implementation to make output
// useful.
-GenericValue lle_X_printf(const FunctionType *FT,
+GenericValue lle_X_printf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
char Buffer[10000];
std::vector<GenericValue> NewArgs;
@@ -425,7 +425,7 @@ GenericValue lle_X_printf(const FunctionType *FT,
}
// int sscanf(const char *format, ...);
-GenericValue lle_X_sscanf(const FunctionType *FT,
+GenericValue lle_X_sscanf(FunctionType *FT,
const std::vector<GenericValue> &args) {
assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
@@ -440,7 +440,7 @@ GenericValue lle_X_sscanf(const FunctionType *FT,
}
// int scanf(const char *format, ...);
-GenericValue lle_X_scanf(const FunctionType *FT,
+GenericValue lle_X_scanf(FunctionType *FT,
const std::vector<GenericValue> &args) {
assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
@@ -456,7 +456,7 @@ GenericValue lle_X_scanf(const FunctionType *FT,
// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
// output useful.
-GenericValue lle_X_fprintf(const FunctionType *FT,
+GenericValue lle_X_fprintf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
assert(Args.size() >= 2);
char Buffer[10000];
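
For context on the ExFunc hunks above: lookupFunction() composes "lle_" plus one getTypeID() letter per contained type plus "_" plus the function name, and falls back to the generic lle_X_* handlers defined in this file for the common C calls. A hypothetical handler in the same shape (putchar is not among the built-ins; this only illustrates the signature):

#include <cstdio>

static GenericValue lle_X_putchar(FunctionType *FT,
                                  const std::vector<GenericValue> &Args) {
  assert(Args.size() == 1);
  GenericValue GV;
  // Echo the character and hand putchar's int result back as an i32.
  GV.IntVal = APInt(32, putchar((char)Args[0].IntVal.getZExtValue()));
  return GV;
}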
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h
index bfebe3debf..60b9cb7a5b 100644
--- a/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -207,33 +207,33 @@ private: // Helper functions
void initializeExternalFunctions();
GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
GenericValue getOperandValue(Value *V, ExecutionContext &SF);
- GenericValue executeTruncInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeSExtInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeZExtInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeFPTruncInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeFPExtInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeFPToUIInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeFPToSIInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeUIToFPInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeSIToFPInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executePtrToIntInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeBitCastInst(Value *SrcVal, const Type *DstTy,
+ GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
- const Type *Ty, ExecutionContext &SF);
- void popStackAndReturnValueToCaller(const Type *RetTy, GenericValue Result);
+ Type *Ty, ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
};
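
All of the execute*Inst helpers declared above funnel through one dispatcher; roughly, as a condensed sketch of Execution.cpp's executeCastOperation (opcode list trimmed, includes elided):

GenericValue Interpreter::executeCastOperation(Instruction::CastOps opcode,
                                               Value *SrcVal, Type *Ty,
                                               ExecutionContext &SF) {
  switch (opcode) {
  case Instruction::Trunc:   return executeTruncInst(SrcVal, Ty, SF);
  case Instruction::ZExt:    return executeZExtInst(SrcVal, Ty, SF);
  case Instruction::SExt:    return executeSExtInst(SrcVal, Ty, SF);
  case Instruction::BitCast: return executeBitCastInst(SrcVal, Ty, SF);
  // ...FPTrunc, FPExt, FPToUI/SI, UI/SIToFP, PtrToInt, IntToPtr likewise...
  default: llvm_unreachable("Unhandled cast opcode");
  }
}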
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
index 445d2d0670..d773009065 100644
--- a/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -390,8 +390,8 @@ GenericValue JIT::runFunction(Function *F,
void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
- const FunctionType *FTy = F->getFunctionType();
- const Type *RetTy = FTy->getReturnType();
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
assert((FTy->getNumParams() == ArgValues.size() ||
(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
@@ -500,7 +500,7 @@ GenericValue JIT::runFunction(Function *F,
SmallVector<Value*, 8> Args;
for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
Constant *C = 0;
- const Type *ArgTy = FTy->getParamType(i);
+ Type *ArgTy = FTy->getParamType(i);
const GenericValue &AV = ArgValues[i];
switch (ArgTy->getTypeID()) {
default: llvm_unreachable("Unknown argument type for function call!");
@@ -788,7 +788,7 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
// be allocated into the same buffer, but in general globals are allocated
// through the memory manager which puts them near the code but not in the
// same buffer.
- const Type *GlobalType = GV->getType()->getElementType();
+ Type *GlobalType = GV->getType()->getElementType();
size_t S = getTargetData()->getTypeAllocSize(GlobalType);
size_t A = getTargetData()->getPreferredAlignment(GV);
if (GV->isThreadLocal()) {
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
index d046b8aea6..b08969197f 100644
--- a/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -770,7 +770,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
MachineConstantPoolEntry CPE = Constants[i];
unsigned AlignMask = CPE.getAlignment() - 1;
Size = (Size + AlignMask) & ~AlignMask;
- const Type *Ty = CPE.getType();
+ Type *Ty = CPE.getType();
Size += TD->getTypeAllocSize(Ty);
}
return Size;
@@ -1098,7 +1098,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
DEBUG(dbgs() << "JIT: CP" << i << " at [0x";
dbgs().write_hex(CAddr) << "]\n");
- const Type *Ty = CPE.Val.ConstVal->getType();
+ Type *Ty = CPE.Val.ConstVal->getType();
Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
}
}
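
Both JITEmitter hunks sit inside the same constant-pool walk, which rounds the running Size up to each entry's alignment with the usual power-of-two trick; a worked instance:

unsigned Align     = 8;           // entry alignment, must be a power of two
unsigned AlignMask = Align - 1;   // 7 == 0b111
unsigned Size      = 13;
Size = (Size + AlignMask) & ~AlignMask;  // (13 + 7) & ~7 == 20 & ~7 == 16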
diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 4475f4d5c0..561c1504b1 100644
--- a/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -124,8 +124,8 @@ GenericValue MCJIT::runFunction(Function *F,
void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
- const FunctionType *FTy = F->getFunctionType();
- const Type *RetTy = FTy->getReturnType();
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
assert((FTy->getNumParams() == ArgValues.size() ||
(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
diff --git a/lib/Target/ARM/ARMConstantPoolValue.cpp b/lib/Target/ARM/ARMConstantPoolValue.cpp
index 165a1d849a..eb85aa3c72 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -26,7 +26,7 @@ ARMConstantPoolValue::ARMConstantPoolValue(const Constant *cval, unsigned id,
unsigned char PCAdj,
ARMCP::ARMCPModifier Modif,
bool AddCA)
- : MachineConstantPoolValue((const Type*)cval->getType()),
+ : MachineConstantPoolValue((Type*)cval->getType()),
CVal(cval), S(NULL), LabelId(id), Kind(K), PCAdjust(PCAdj),
Modifier(Modif), AddCurrentAddress(AddCA) {}
@@ -35,13 +35,13 @@ ARMConstantPoolValue::ARMConstantPoolValue(LLVMContext &C,
unsigned char PCAdj,
ARMCP::ARMCPModifier Modif,
bool AddCA)
- : MachineConstantPoolValue((const Type*)Type::getInt32Ty(C)),
+ : MachineConstantPoolValue((Type*)Type::getInt32Ty(C)),
CVal(NULL), S(strdup(s)), LabelId(id), Kind(ARMCP::CPExtSymbol),
PCAdjust(PCAdj), Modifier(Modif), AddCurrentAddress(AddCA) {}
ARMConstantPoolValue::ARMConstantPoolValue(const GlobalValue *gv,
ARMCP::ARMCPModifier Modif)
- : MachineConstantPoolValue((const Type*)Type::getInt32Ty(gv->getContext())),
+ : MachineConstantPoolValue((Type*)Type::getInt32Ty(gv->getContext())),
CVal(gv), S(NULL), LabelId(0), Kind(ARMCP::CPValue), PCAdjust(0),
Modifier(Modif), AddCurrentAddress(false) {}
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index f469d7efe1..050b8c1bc4 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -171,8 +171,8 @@ class ARMFastISel : public FastISel {
// Utility routines.
private:
- bool isTypeLegal(const Type *Ty, MVT &VT);
- bool isLoadTypeLegal(const Type *Ty, MVT &VT);
+ bool isTypeLegal(Type *Ty, MVT &VT);
+ bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
@@ -673,7 +673,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
return 0;
}
-bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
+bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
EVT evt = TLI.getValueType(Ty, true);
// Only handle simple types.
@@ -685,7 +685,7 @@ bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
return TLI.isTypeLegal(VT);
}
-bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
+bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
if (isTypeLegal(Ty, VT)) return true;
// If this is a type that can be sign- or zero-extended to a basic operation
@@ -714,7 +714,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
U = C;
}
- if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
if (Ty->getAddressSpace() > 255)
// Fast instruction selection doesn't support the special
// address spaces.
@@ -749,7 +749,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
@@ -1085,7 +1085,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// TODO: Factor this out.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
MVT SourceVT;
- const Type *Ty = CI->getOperand(0)->getType();
+ Type *Ty = CI->getOperand(0)->getType();
if (CI->hasOneUse() && (CI->getParent() == I->getParent())
&& isTypeLegal(Ty, SourceVT)) {
bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
@@ -1201,7 +1201,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
MVT VT;
- const Type *Ty = CI->getOperand(0)->getType();
+ Type *Ty = CI->getOperand(0)->getType();
if (!isTypeLegal(Ty, VT))
return false;
@@ -1309,7 +1309,7 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) {
if (!Subtarget->hasVFP2()) return false;
MVT DstVT;
- const Type *Ty = I->getType();
+ Type *Ty = I->getType();
if (!isTypeLegal(Ty, DstVT))
return false;
@@ -1343,7 +1343,7 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) {
if (!Subtarget->hasVFP2()) return false;
MVT DstVT;
- const Type *RetTy = I->getType();
+ Type *RetTy = I->getType();
if (!isTypeLegal(RetTy, DstVT))
return false;
@@ -1351,7 +1351,7 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) {
if (Op == 0) return false;
unsigned Opc;
- const Type *OpTy = I->getOperand(0)->getType();
+ Type *OpTy = I->getOperand(0)->getType();
if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
else return false;
@@ -1401,7 +1401,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
bool ARMFastISel::SelectSDiv(const Instruction *I) {
MVT VT;
- const Type *Ty = I->getType();
+ Type *Ty = I->getType();
if (!isTypeLegal(Ty, VT))
return false;
@@ -1429,7 +1429,7 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) {
bool ARMFastISel::SelectSRem(const Instruction *I) {
MVT VT;
- const Type *Ty = I->getType();
+ Type *Ty = I->getType();
if (!isTypeLegal(Ty, VT))
return false;
@@ -1456,7 +1456,7 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
// operations, but can't figure out how to. Just use the vfp instructions
// if we have them.
// FIXME: It'd be nice to use NEON instructions.
- const Type *Ty = I->getType();
+ Type *Ty = I->getType();
bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
if (isFloat && !Subtarget->hasVFP2())
return false;
@@ -1778,7 +1778,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
// Handle *simple* calls for now.
- const Type *RetTy = I->getType();
+ Type *RetTy = I->getType();
MVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
@@ -1802,7 +1802,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
unsigned Arg = getRegForValue(Op);
if (Arg == 0) return false;
- const Type *ArgTy = Op->getType();
+ Type *ArgTy = Op->getType();
MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT)) return false;
@@ -1870,13 +1870,13 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
// TODO: Avoid some calling conventions?
// Let SDISel handle vararg functions.
- const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
if (FTy->isVarArg())
return false;
// Handle *simple* calls for now.
- const Type *RetTy = I->getType();
+ Type *RetTy = I->getType();
MVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
@@ -1915,7 +1915,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
CS.paramHasAttr(AttrInd, Attribute::ByVal))
return false;
- const Type *ArgTy = (*i)->getType();
+ Type *ArgTy = (*i)->getType();
MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT))
return false;
@@ -1969,9 +1969,9 @@ bool ARMFastISel::SelectIntCast(const Instruction *I) {
// On ARM, in general, integer casts don't involve legal types; this code
// handles promotable integers. The high bits for a type smaller than
// the register size are assumed to be undefined.
- const Type *DestTy = I->getType();
+ Type *DestTy = I->getType();
Value *Op = I->getOperand(0);
- const Type *SrcTy = Op->getType();
+ Type *SrcTy = Op->getType();
EVT SrcVT, DestVT;
SrcVT = TLI.getValueType(SrcTy, true);
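
A recurring pattern in the ARMFastISel hunks above: map the IR type to a value type, insist it is a simple (single-register) type, then defer to the target. Sketched on its own, assuming TLI is the TargetLowering instance:

bool isTypeLegal(Type *Ty, TargetLowering &TLI, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, /*AllowUnknown=*/true);
  // Reject aggregates and odd bit widths; FastISel only handles simple VTs.
  if (evt == MVT::Other || !evt.isSimple())
    return false;
  VT = evt.getSimpleVT();
  return TLI.isTypeLegal(VT);
}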
diff --git a/lib/Target/ARM/ARMGlobalMerge.cpp b/lib/Target/ARM/ARMGlobalMerge.cpp
index 8d77b2d838..e4b732ca1e 100644
--- a/lib/Target/ARM/ARMGlobalMerge.cpp
+++ b/lib/Target/ARM/ARMGlobalMerge.cpp
@@ -100,8 +100,8 @@ namespace {
GlobalCmp(const TargetData *td) : TD(td) { }
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
- const Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
- const Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
+ Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
+ Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
}
@@ -123,7 +123,7 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
// FIXME: Find better heuristics
std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));
- const Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ Type *Int32Ty = Type::getInt32Ty(M.getContext());
for (size_t i = 0, e = Globals.size(); i != e; ) {
size_t j = 0;
@@ -176,7 +176,7 @@ bool ARMGlobalMerge::doInitialization(Module &M) {
// Ignore fancy-aligned globals for now.
unsigned Alignment = I->getAlignment();
- const Type *Ty = I->getType()->getElementType();
+ Type *Ty = I->getType()->getElementType();
if (Alignment > TD->getABITypeAlignment(Ty))
continue;
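
One subtlety in GlobalCmp above: a GlobalVariable's own type is always a pointer, so the comparator has to size the pointee, not the global itself. The same measurement in isolation (GV and TD as in the surrounding code):

Type *Pointee = cast<PointerType>(GV->getType())->getElementType();
uint64_t Bytes = TD->getTypeAllocSize(Pointee);  // what the global occupies

std::stable_sort then orders the merge candidates by this size; its stability keeps the original order among equal-sized globals, so the resulting merge groups stay deterministic.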
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index cf8c5baa8e..45fac88b4d 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1982,11 +1982,11 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
ArgListTy Args;
ArgListEntry Entry;
Entry.Node = Argument;
- Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
+ Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
Args.push_back(Entry);
// FIXME: is there useful debug info available here?
std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
+ LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
false, false, false, false,
0, CallingConv::C, false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
@@ -7235,7 +7235,7 @@ bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
EVT VT = getValueType(Ty, true);
if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
return false;
@@ -7536,7 +7536,7 @@ bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
if (AsmPieces.size() == 3 &&
AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (Ty && Ty->getBitWidth() == 32)
return IntrinsicLowering::LowerToByteSwap(CI);
}
@@ -7582,7 +7582,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
// but allow it at the lowest weight.
if (CallOperandVal == NULL)
return CW_Default;
- const Type *type = CallOperandVal->getType();
+ Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
@@ -7933,7 +7933,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
// Conservatively set memVT to the entire set of vectors stored.
unsigned NumElts = 0;
for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
- const Type *ArgTy = I.getArgOperand(ArgI)->getType();
+ Type *ArgTy = I.getArgOperand(ArgI)->getType();
if (!ArgTy->isVectorTy())
break;
NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 980fb40488..61aa56179c 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -256,7 +256,7 @@ namespace llvm {
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index ef0aaf2a59..4b2c5c5388 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -155,7 +155,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
TargetLowering::ArgListEntry Entry;
// First argument: data pointer
- const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
Entry.Node = Dst;
Entry.Ty = IntPtrTy;
Args.push_back(Entry);
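
The memset lowering above builds its call the way every libcall is built: one ArgListEntry per formal, then LowerCallTo splices the call into the DAG. A trimmed sketch, with Dst, Ctx, and TLI standing in for the values in scope above:

TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;

Entry.Node = Dst;                                      // SDValue argument
Entry.Ty   = TLI.getTargetData()->getIntPtrType(Ctx);  // its IR-level type
Args.push_back(Entry);
// ...push the remaining formals, then:
// std::pair<SDValue, SDValue> CallResult =
//     TLI.LowerCallTo(Chain, RetTy, /*isSExt*/false, /*isZExt*/false, ...);
// CallResult.first is the returned value, .second the new chain.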
diff --git a/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp b/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp
index ae8ee9e2a1..71356768dd 100644
--- a/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp
+++ b/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp
@@ -34,7 +34,7 @@ namespace bfinIntrinsic {
}
-std::string BlackfinIntrinsicInfo::getName(unsigned IntrID, const Type **Tys,
+std::string BlackfinIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
unsigned numTys) const {
static const char *const names[] = {
#define GET_INTRINSIC_NAME_TABLE
@@ -81,8 +81,8 @@ bool BlackfinIntrinsicInfo::isOverloaded(unsigned IntrID) const {
#include "BlackfinGenIntrinsics.inc"
#undef GET_INTRINSIC_ATTRIBUTES
-static const FunctionType *getType(LLVMContext &Context, unsigned id) {
- const Type *ResultTy = NULL;
+static FunctionType *getType(LLVMContext &Context, unsigned id) {
+ Type *ResultTy = NULL;
std::vector<Type*> ArgTys;
bool IsVarArg = false;
@@ -94,7 +94,7 @@ static const FunctionType *getType(LLVMContext &Context, unsigned id) {
}
Function *BlackfinIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
- const Type **Tys,
+ Type **Tys,
unsigned numTy) const {
assert(!isOverloaded(IntrID) && "Blackfin intrinsics are not overloaded");
AttrListPtr AList = getAttributes((bfinIntrinsic::ID) IntrID);
diff --git a/lib/Target/Blackfin/BlackfinIntrinsicInfo.h b/lib/Target/Blackfin/BlackfinIntrinsicInfo.h
index 7c4b5a9967..f05db5ad7c 100644
--- a/lib/Target/Blackfin/BlackfinIntrinsicInfo.h
+++ b/lib/Target/Blackfin/BlackfinIntrinsicInfo.h
@@ -19,11 +19,11 @@ namespace llvm {
class BlackfinIntrinsicInfo : public TargetIntrinsicInfo {
public:
- std::string getName(unsigned IntrID, const Type **Tys = 0,
+ std::string getName(unsigned IntrID, Type **Tys = 0,
unsigned numTys = 0) const;
unsigned lookupName(const char *Name, unsigned Len) const;
bool isOverloaded(unsigned IID) const;
- Function *getDeclaration(Module *M, unsigned ID, const Type **Tys = 0,
+ Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
unsigned numTys = 0) const;
};
diff --git a/lib/Target/CBackend/CBackend.cpp b/lib/Target/CBackend/CBackend.cpp
index 415beb1dd1..f00fcc4cb0 100644
--- a/lib/Target/CBackend/CBackend.cpp
+++ b/lib/Target/CBackend/CBackend.cpp
@@ -99,7 +99,7 @@ namespace {
/// UnnamedStructIDs - This contains a unique ID for each struct that is
/// either anonymous or has no name.
- DenseMap<const StructType*, unsigned> UnnamedStructIDs;
+ DenseMap<StructType*, unsigned> UnnamedStructIDs;
public:
static char ID;
@@ -152,20 +152,20 @@ namespace {
return false;
}
- raw_ostream &printType(raw_ostream &Out, const Type *Ty,
+ raw_ostream &printType(raw_ostream &Out, Type *Ty,
bool isSigned = false,
const std::string &VariableName = "",
bool IgnoreName = false,
const AttrListPtr &PAL = AttrListPtr());
- raw_ostream &printSimpleType(raw_ostream &Out, const Type *Ty,
+ raw_ostream &printSimpleType(raw_ostream &Out, Type *Ty,
bool isSigned,
const std::string &NameSoFar = "");
void printStructReturnPointerFunctionType(raw_ostream &Out,
const AttrListPtr &PAL,
- const PointerType *Ty);
+ PointerType *Ty);
- std::string getStructName(const StructType *ST);
+ std::string getStructName(StructType *ST);
/// writeOperandDeref - Print the result of dereferencing the specified
/// operand with '*'. This is equivalent to printing '*' then using
@@ -188,7 +188,7 @@ namespace {
void writeOperandWithCast(Value* Operand, const ICmpInst &I);
bool writeInstructionCast(const Instruction &I);
- void writeMemoryAccess(Value *Operand, const Type *OperandType,
+ void writeMemoryAccess(Value *Operand, Type *OperandType,
bool IsVolatile, unsigned Alignment);
private :
@@ -200,7 +200,7 @@ namespace {
void printIntrinsicDefinition(const Function &F, raw_ostream &Out);
void printModuleTypes();
- void printContainedStructs(const Type *Ty, SmallPtrSet<const Type *, 16> &);
+ void printContainedStructs(Type *Ty, SmallPtrSet<Type *, 16> &);
void printFloatingPointConstants(Function &F);
void printFloatingPointConstants(const Constant *C);
void printFunctionSignature(const Function *F, bool Prototype);
@@ -209,7 +209,7 @@ namespace {
void printBasicBlock(BasicBlock *BB);
void printLoop(Loop *L);
- void printCast(unsigned opcode, const Type *SrcTy, const Type *DstTy);
+ void printCast(unsigned opcode, Type *SrcTy, Type *DstTy);
void printConstant(Constant *CPV, bool Static);
void printConstantWithCast(Constant *CPV, unsigned Opcode);
bool printConstExprCast(const ConstantExpr *CE, bool Static);
@@ -360,7 +360,7 @@ static std::string CBEMangle(const std::string &S) {
return Result;
}
-std::string CWriter::getStructName(const StructType *ST) {
+std::string CWriter::getStructName(StructType *ST) {
if (!ST->isAnonymous() && !ST->getName().empty())
return CBEMangle("l_"+ST->getName().str());
@@ -373,20 +373,20 @@ std::string CWriter::getStructName(const StructType *ST) {
/// print it as "Struct (*)(...)", for struct return functions.
void CWriter::printStructReturnPointerFunctionType(raw_ostream &Out,
const AttrListPtr &PAL,
- const PointerType *TheTy) {
- const FunctionType *FTy = cast<FunctionType>(TheTy->getElementType());
+ PointerType *TheTy) {
+ FunctionType *FTy = cast<FunctionType>(TheTy->getElementType());
std::string tstr;
raw_string_ostream FunctionInnards(tstr);
FunctionInnards << " (*) (";
bool PrintedType = false;
FunctionType::param_iterator I = FTy->param_begin(), E = FTy->param_end();
- const Type *RetTy = cast<PointerType>(*I)->getElementType();
+ Type *RetTy = cast<PointerType>(*I)->getElementType();
unsigned Idx = 1;
for (++I, ++Idx; I != E; ++I, ++Idx) {
if (PrintedType)
FunctionInnards << ", ";
- const Type *ArgTy = *I;
+ Type *ArgTy = *I;
if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
assert(ArgTy->isPointerTy());
ArgTy = cast<PointerType>(ArgTy)->getElementType();
@@ -408,7 +408,7 @@ void CWriter::printStructReturnPointerFunctionType(raw_ostream &Out,
}
raw_ostream &
-CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
+CWriter::printSimpleType(raw_ostream &Out, Type *Ty, bool isSigned,
const std::string &NameSoFar) {
assert((Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) &&
"Invalid type for printSimpleType");
@@ -444,7 +444,7 @@ CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
" __attribute__((vector_size(64))) " + NameSoFar);
case Type::VectorTyID: {
- const VectorType *VTy = cast<VectorType>(Ty);
+ VectorType *VTy = cast<VectorType>(Ty);
return printSimpleType(Out, VTy->getElementType(), isSigned,
" __attribute__((vector_size(" +
utostr(TD->getTypeAllocSize(VTy)) + " ))) " + NameSoFar);
@@ -461,7 +461,7 @@ CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
// Pass the Type* and the variable name and this prints out the variable
// declaration.
//
-raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
+raw_ostream &CWriter::printType(raw_ostream &Out, Type *Ty,
bool isSigned, const std::string &NameSoFar,
bool IgnoreName, const AttrListPtr &PAL) {
if (Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) {
@@ -471,14 +471,14 @@ raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
switch (Ty->getTypeID()) {
case Type::FunctionTyID: {
- const FunctionType *FTy = cast<FunctionType>(Ty);
+ FunctionType *FTy = cast<FunctionType>(Ty);
std::string tstr;
raw_string_ostream FunctionInnards(tstr);
FunctionInnards << " (" << NameSoFar << ") (";
unsigned Idx = 1;
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I) {
- const Type *ArgTy = *I;
+ Type *ArgTy = *I;
if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
assert(ArgTy->isPointerTy());
ArgTy = cast<PointerType>(ArgTy)->getElementType();
@@ -502,7 +502,7 @@ raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
return Out;
}
case Type::StructTyID: {
- const StructType *STy = cast<StructType>(Ty);
+ StructType *STy = cast<StructType>(Ty);
// Check to see if the type is named.
if (!IgnoreName)
@@ -523,7 +523,7 @@ raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
}
case Type::PointerTyID: {
- const PointerType *PTy = cast<PointerType>(Ty);
+ PointerType *PTy = cast<PointerType>(Ty);
std::string ptrName = "*" + NameSoFar;
if (PTy->getElementType()->isArrayTy() ||
@@ -537,7 +537,7 @@ raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
}
case Type::ArrayTyID: {
- const ArrayType *ATy = cast<ArrayType>(Ty);
+ ArrayType *ATy = cast<ArrayType>(Ty);
unsigned NumElements = ATy->getNumElements();
if (NumElements == 0) NumElements = 1;
// Arrays are wrapped in structs to allow them to have normal
@@ -560,7 +560,7 @@ void CWriter::printConstantArray(ConstantArray *CPA, bool Static) {
// As a special case, print the array as a string if it is an array of
// ubytes or an array of sbytes with positive values.
//
- const Type *ETy = CPA->getType()->getElementType();
+ Type *ETy = CPA->getType()->getElementType();
bool isString = (ETy == Type::getInt8Ty(CPA->getContext()));
@@ -682,7 +682,7 @@ static bool isFPCSafeToPrint(const ConstantFP *CFP) {
/// Print out the casting for a cast operation. This does the double casting
/// necessary for conversion to the destination type, if necessary.
/// @brief Print a cast
-void CWriter::printCast(unsigned opc, const Type *SrcTy, const Type *DstTy) {
+void CWriter::printCast(unsigned opc, Type *SrcTy, Type *DstTy) {
// Print the destination type cast
switch (opc) {
case Instruction::UIToFP:
@@ -917,7 +917,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
- const Type* Ty = CI->getType();
+ Type* Ty = CI->getType();
if (Ty == Type::getInt1Ty(CPV->getContext()))
Out << (CI->getZExtValue() ? '1' : '0');
else if (Ty == Type::getInt32Ty(CPV->getContext()))
@@ -1027,7 +1027,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
printConstantArray(CA, Static);
} else {
assert(isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV));
- const ArrayType *AT = cast<ArrayType>(CPV->getType());
+ ArrayType *AT = cast<ArrayType>(CPV->getType());
Out << '{';
if (AT->getNumElements()) {
Out << ' ';
@@ -1054,7 +1054,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
printConstantVector(CV, Static);
} else {
assert(isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV));
- const VectorType *VT = cast<VectorType>(CPV->getType());
+ VectorType *VT = cast<VectorType>(CPV->getType());
Out << "{ ";
Constant *CZ = Constant::getNullValue(VT->getElementType());
printConstant(CZ, Static);
@@ -1074,7 +1074,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
Out << ")";
}
if (isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV)) {
- const StructType *ST = cast<StructType>(CPV->getType());
+ StructType *ST = cast<StructType>(CPV->getType());
Out << '{';
if (ST->getNumElements()) {
Out << ' ';
@@ -1123,7 +1123,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
// care of detecting that case and printing the cast for the ConstantExpr.
bool CWriter::printConstExprCast(const ConstantExpr* CE, bool Static) {
bool NeedsExplicitCast = false;
- const Type *Ty = CE->getOperand(0)->getType();
+ Type *Ty = CE->getOperand(0)->getType();
bool TypeIsSigned = false;
switch (CE->getOpcode()) {
case Instruction::Add:
@@ -1175,7 +1175,7 @@ bool CWriter::printConstExprCast(const ConstantExpr* CE, bool Static) {
void CWriter::printConstantWithCast(Constant* CPV, unsigned Opcode) {
// Extract the operand's type, we'll need it.
- const Type* OpTy = CPV->getType();
+ Type* OpTy = CPV->getType();
// Indicate whether to do the cast or not.
bool shouldCast = false;
@@ -1267,7 +1267,7 @@ std::string CWriter::GetValueName(const Value *Operand) {
void CWriter::writeInstComputationInline(Instruction &I) {
// We can't currently support integer types other than 1, 8, 16, 32, 64.
// Validate this.
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (Ty->isIntegerTy() && (Ty!=Type::getInt1Ty(I.getContext()) &&
Ty!=Type::getInt8Ty(I.getContext()) &&
Ty!=Type::getInt16Ty(I.getContext()) &&
@@ -1330,7 +1330,7 @@ void CWriter::writeOperand(Value *Operand, bool Static) {
// This function takes care of detecting that case and printing the cast
// for the Instruction.
bool CWriter::writeInstructionCast(const Instruction &I) {
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
switch (I.getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
@@ -1362,7 +1362,7 @@ bool CWriter::writeInstructionCast(const Instruction &I) {
void CWriter::writeOperandWithCast(Value* Operand, unsigned Opcode) {
// Extract the operand's type, we'll need it.
- const Type* OpTy = Operand->getType();
+ Type* OpTy = Operand->getType();
// Indicate whether to do the cast or not.
bool shouldCast = false;
@@ -1430,7 +1430,7 @@ void CWriter::writeOperandWithCast(Value* Operand, const ICmpInst &Cmp) {
bool castIsSigned = Cmp.isSigned();
// If the operand was a pointer, convert to a large integer type.
- const Type* OpTy = Operand->getType();
+ Type* OpTy = Operand->getType();
if (OpTy->isPointerTy())
OpTy = TD->getIntPtrType(Operand->getContext());
@@ -2060,7 +2060,7 @@ void CWriter::printModuleTypes() {
Out << '\n';
// Keep track of which structures have been printed so far.
- SmallPtrSet<const Type *, 16> StructPrinted;
+ SmallPtrSet<Type *, 16> StructPrinted;
// Loop over all structures, then push them onto the stack so they are
// printed in the correct order.
@@ -2077,8 +2077,8 @@ void CWriter::printModuleTypes() {
//
// TODO: Make this work properly with vector types
//
-void CWriter::printContainedStructs(const Type *Ty,
- SmallPtrSet<const Type *, 16> &StructPrinted) {
+void CWriter::printContainedStructs(Type *Ty,
+ SmallPtrSet<Type *, 16> &StructPrinted) {
// Don't walk through pointers.
if (Ty->isPointerTy() || Ty->isPrimitiveType() || Ty->isIntegerTy())
return;
@@ -2088,7 +2088,7 @@ void CWriter::printContainedStructs(const Type *Ty,
E = Ty->subtype_end(); I != E; ++I)
printContainedStructs(*I, StructPrinted);
- if (const StructType *ST = dyn_cast<StructType>(Ty)) {
+ if (StructType *ST = dyn_cast<StructType>(Ty)) {
// Check to see if we have already printed this struct.
if (!StructPrinted.insert(Ty)) return;
@@ -2120,7 +2120,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
}
// Loop over the arguments, printing them...
- const FunctionType *FT = cast<FunctionType>(F->getFunctionType());
+ FunctionType *FT = cast<FunctionType>(F->getFunctionType());
const AttrListPtr &PAL = F->getAttributes();
std::string tstr;
@@ -2150,7 +2150,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
ArgName = GetValueName(I);
else
ArgName = "";
- const Type *ArgTy = I->getType();
+ Type *ArgTy = I->getType();
if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
ArgTy = cast<PointerType>(ArgTy)->getElementType();
ByValParams.insert(I);
@@ -2177,7 +2177,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
for (; I != E; ++I) {
if (PrintedArg) FunctionInnards << ", ";
- const Type *ArgTy = *I;
+ Type *ArgTy = *I;
if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
assert(ArgTy->isPointerTy());
ArgTy = cast<PointerType>(ArgTy)->getElementType();
@@ -2205,7 +2205,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
FunctionInnards << ')';
// Get the return type for the function.
- const Type *RetTy;
+ Type *RetTy;
if (!isStructReturn)
RetTy = F->getReturnType();
else {
@@ -2222,8 +2222,8 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
static inline bool isFPIntBitCast(const Instruction &I) {
if (!isa<BitCastInst>(I))
return false;
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DstTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DstTy = I.getType();
return (SrcTy->isFloatingPointTy() && DstTy->isIntegerTy()) ||
(DstTy->isFloatingPointTy() && SrcTy->isIntegerTy());
}
@@ -2237,7 +2237,7 @@ void CWriter::printFunction(Function &F) {
// If this is a struct return function, handle the result with magic.
if (isStructReturn) {
- const Type *StructTy =
+ Type *StructTy =
cast<PointerType>(F.arg_begin()->getType())->getElementType();
Out << " ";
printType(Out, StructTy, false, "StructReturn");
@@ -2656,7 +2656,7 @@ void CWriter::visitFCmpInst(FCmpInst &I) {
Out << ")";
}
-static const char * getFloatBitCastField(const Type *Ty) {
+static const char * getFloatBitCastField(Type *Ty) {
switch (Ty->getTypeID()) {
default: llvm_unreachable("Invalid Type");
case Type::FloatTyID: return "Float";
@@ -2672,8 +2672,8 @@ static const char * getFloatBitCastField(const Type *Ty) {
}
void CWriter::visitCastInst(CastInst &I) {
- const Type *DstTy = I.getType();
- const Type *SrcTy = I.getOperand(0)->getType();
+ Type *DstTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
if (isFPIntBitCast(I)) {
Out << '(';
// These int<->float and long<->double casts need to be handled specially
@@ -2719,7 +2719,7 @@ void CWriter::visitSelectInst(SelectInst &I) {
// Returns the macro name or value of the max or min of an integer type
// (as defined in limits.h).
-static void printLimitValue(const IntegerType &Ty, bool isSigned, bool isMax,
+static void printLimitValue(IntegerType &Ty, bool isSigned, bool isMax,
raw_ostream &Out) {
const char* type;
const char* sprefix = "";
@@ -2745,16 +2745,16 @@ static void printLimitValue(const IntegerType &Ty, bool isSigned, bool isMax,
}
#ifndef NDEBUG
-static bool isSupportedIntegerSize(const IntegerType &T) {
+static bool isSupportedIntegerSize(IntegerType &T) {
return T.getBitWidth() == 8 || T.getBitWidth() == 16 ||
T.getBitWidth() == 32 || T.getBitWidth() == 64;
}
#endif
void CWriter::printIntrinsicDefinition(const Function &F, raw_ostream &Out) {
- const FunctionType *funT = F.getFunctionType();
- const Type *retT = F.getReturnType();
- const IntegerType *elemT = cast<IntegerType>(funT->getParamType(1));
+ FunctionType *funT = F.getFunctionType();
+ Type *retT = F.getReturnType();
+ IntegerType *elemT = cast<IntegerType>(funT->getParamType(1));
assert(isSupportedIntegerSize(*elemT) &&
"CBackend does not support arbitrary size integers.");
@@ -2908,8 +2908,8 @@ void CWriter::visitCallInst(CallInst &I) {
Value *Callee = I.getCalledValue();
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
// If this is a call to a struct-return function, assign to the first
// parameter instead of passing it to the call.
@@ -3217,7 +3217,7 @@ void CWriter::visitInlineAsm(CallInst &CI) {
std::vector<std::pair<Value*, int> > ResultVals;
if (CI.getType() == Type::getVoidTy(CI.getContext()))
;
- else if (const StructType *ST = dyn_cast<StructType>(CI.getType())) {
+ else if (StructType *ST = dyn_cast<StructType>(CI.getType())) {
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i)
ResultVals.push_back(std::make_pair(&CI, (int)i));
} else {
@@ -3348,7 +3348,7 @@ void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
// Find out if the last index is into a vector. If so, we have to print this
// specially. Since vectors can't have elements of indexable type, only the
// last index could possibly be of a vector element.
- const VectorType *LastIndexIsVector = 0;
+ VectorType *LastIndexIsVector = 0;
{
for (gep_type_iterator TmpI = I; TmpI != E; ++TmpI)
LastIndexIsVector = dyn_cast<VectorType>(*TmpI);
@@ -3421,7 +3421,7 @@ void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
Out << ")";
}
-void CWriter::writeMemoryAccess(Value *Operand, const Type *OperandType,
+void CWriter::writeMemoryAccess(Value *Operand, Type *OperandType,
bool IsVolatile, unsigned Alignment) {
bool IsUnaligned = Alignment &&
@@ -3463,7 +3463,7 @@ void CWriter::visitStoreInst(StoreInst &I) {
Out << " = ";
Value *Operand = I.getOperand(0);
Constant *BitMask = 0;
- if (const IntegerType* ITy = dyn_cast<IntegerType>(Operand->getType()))
+ if (IntegerType* ITy = dyn_cast<IntegerType>(Operand->getType()))
if (!ITy->isPowerOf2ByteWidth())
// We have a bit width that doesn't match an even power-of-2 byte
// size. Consequently we must & the value with the type's bit mask
@@ -3492,7 +3492,7 @@ void CWriter::visitVAArgInst(VAArgInst &I) {
}
void CWriter::visitInsertElementInst(InsertElementInst &I) {
- const Type *EltTy = I.getType()->getElementType();
+ Type *EltTy = I.getType()->getElementType();
writeOperand(I.getOperand(0));
Out << ";\n ";
Out << "((";
@@ -3507,7 +3507,7 @@ void CWriter::visitInsertElementInst(InsertElementInst &I) {
void CWriter::visitExtractElementInst(ExtractElementInst &I) {
// We know that our operand is not inlined.
Out << "((";
- const Type *EltTy =
+ Type *EltTy =
cast<VectorType>(I.getOperand(0)->getType())->getElementType();
printType(Out, PointerType::getUnqual(EltTy));
Out << ")(&" << GetValueName(I.getOperand(0)) << "))[";
@@ -3519,9 +3519,9 @@ void CWriter::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Out << "(";
printType(Out, SVI.getType());
Out << "){ ";
- const VectorType *VT = SVI.getType();
+ VectorType *VT = SVI.getType();
unsigned NumElts = VT->getNumElements();
- const Type *EltTy = VT->getElementType();
+ Type *EltTy = VT->getElementType();
for (unsigned i = 0; i != NumElts; ++i) {
if (i) Out << ", ";
@@ -3557,7 +3557,7 @@ void CWriter::visitInsertValueInst(InsertValueInst &IVI) {
Out << GetValueName(&IVI);
for (const unsigned *b = IVI.idx_begin(), *i = b, *e = IVI.idx_end();
i != e; ++i) {
- const Type *IndexedTy =
+ Type *IndexedTy =
ExtractValueInst::getIndexedType(IVI.getOperand(0)->getType(),
ArrayRef<unsigned>(b, i+1));
if (IndexedTy->isArrayTy())
@@ -3579,7 +3579,7 @@ void CWriter::visitExtractValueInst(ExtractValueInst &EVI) {
Out << GetValueName(EVI.getOperand(0));
for (const unsigned *b = EVI.idx_begin(), *i = b, *e = EVI.idx_end();
i != e; ++i) {
- const Type *IndexedTy =
+ Type *IndexedTy =
ExtractValueInst::getIndexedType(EVI.getOperand(0)->getType(),
ArrayRef<unsigned>(b, i+1));
if (IndexedTy->isArrayTy())
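
printContainedStructs above is effectively a post-order type walk: field types are emitted before the struct that contains them, and pointers are deliberately not followed so recursive types terminate. The same idea in isolation (a sketch, not the CWriter code):

void emitInOrder(Type *Ty, SmallPtrSet<Type *, 16> &Done, raw_ostream &OS) {
  if (Ty->isPointerTy() || Ty->isPrimitiveType() || Ty->isIntegerTy())
    return;                                  // don't walk through pointers
  for (Type::subtype_iterator I = Ty->subtype_begin(),
                              E = Ty->subtype_end(); I != E; ++I)
    emitInOrder(*I, Done, OS);               // children first (post-order)
  if (StructType *ST = dyn_cast<StructType>(Ty))
    if (Done.insert(ST))                     // each struct printed once
      OS << "struct " /* mangled name, fields... */ << ";\n";
}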
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index f0ceee2141..1c533a9ad1 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -69,7 +69,7 @@ namespace {
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
EVT ArgVT = Op.getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Op.getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@@ -80,7 +80,7 @@ namespace {
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy =
+ Type *RetTy =
Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
@@ -3216,7 +3216,7 @@ SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
- const Type *Ty) const {
+ Type *Ty) const {
// SPU's addresses are 256K:
return (V > -(1 << 18) && V < (1 << 18) - 1);
}
@@ -3239,7 +3239,7 @@ bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
bool
SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type * ) const{
+ Type * ) const{
// A-form: 18bit absolute address.
if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
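
The A-form check above ("SPU's addresses are 256K") is a signed 18-bit range test; spelled out:

// V > -(1 << 18) && V < (1 << 18) - 1
//   -(1 << 18)     == -262144  (exclusive lower bound)
//    (1 << 18) - 1 ==  262143  (exclusive upper bound)
// so the accepted immediates are -262143 .. 262142.
bool fitsAForm(int64_t V) {
  return V > -(1 << 18) && V < (1 << 18) - 1;
}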
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index d23f6cc48c..91bbdf26d8 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -147,7 +147,7 @@ namespace llvm {
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
- virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;
+ virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;
virtual bool isLegalAddressImmediate(GlobalValue *) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
@@ -179,7 +179,7 @@ namespace llvm {
virtual bool isLegalICmpImmediate(int64_t Imm) const;
virtual bool isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const;
+ Type *Ty) const;
};
}
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
index 10d18f61c7..85b07d1fa1 100644
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ b/lib/Target/CppBackend/CPPBackend.cpp
@@ -88,11 +88,11 @@ extern "C" void LLVMInitializeCppBackendMCSubtargetInfo() {
}
namespace {
- typedef std::vector<const Type*> TypeList;
- typedef std::map<const Type*,std::string> TypeMap;
+ typedef std::vector<Type*> TypeList;
+ typedef std::map<Type*,std::string> TypeMap;
typedef std::map<const Value*,std::string> ValueMap;
typedef std::set<std::string> NameSet;
- typedef std::set<const Type*> TypeSet;
+ typedef std::set<Type*> TypeSet;
typedef std::set<const Value*> ValueSet;
typedef std::map<const Value*,std::string> ForwardRefMap;
@@ -143,14 +143,14 @@ namespace {
void printEscapedString(const std::string& str);
void printCFP(const ConstantFP* CFP);
- std::string getCppName(const Type* val);
- inline void printCppName(const Type* val);
+ std::string getCppName(Type* val);
+ inline void printCppName(Type* val);
std::string getCppName(const Value* val);
inline void printCppName(const Value* val);
void printAttributes(const AttrListPtr &PAL, const std::string &name);
- void printType(const Type* Ty);
+ void printType(Type* Ty);
void printTypes(const Module* M);
void printConstant(const Constant *CPV);
@@ -184,7 +184,7 @@ static inline void sanitize(std::string &str) {
str[i] = '_';
}
-static std::string getTypePrefix(const Type *Ty) {
+static std::string getTypePrefix(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: return "void_";
case Type::IntegerTyID:
@@ -339,7 +339,7 @@ void CppWriter::printEscapedString(const std::string &Str) {
}
}
-std::string CppWriter::getCppName(const Type* Ty) {
+std::string CppWriter::getCppName(Type* Ty) {
// First, handle the primitive types .. easy
if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
switch (Ty->getTypeID()) {
@@ -379,7 +379,7 @@ std::string CppWriter::getCppName(const Type* Ty) {
// See if the type has a name in the symboltable and build accordingly
std::string name;
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (STy->hasName())
name = STy->getName();
@@ -393,7 +393,7 @@ std::string CppWriter::getCppName(const Type* Ty) {
return TypeNames[Ty] = name;
}
-void CppWriter::printCppName(const Type* Ty) {
+void CppWriter::printCppName(Type* Ty) {
printEscapedString(getCppName(Ty));
}
@@ -499,7 +499,7 @@ void CppWriter::printAttributes(const AttrListPtr &PAL,
}
}
-void CppWriter::printType(const Type* Ty) {
+void CppWriter::printType(Type* Ty) {
// We don't print definitions for primitive types
if (Ty->isPrimitiveType() || Ty->isIntegerTy())
return;
@@ -514,13 +514,13 @@ void CppWriter::printType(const Type* Ty) {
// Print the type definition
switch (Ty->getTypeID()) {
case Type::FunctionTyID: {
- const FunctionType* FT = cast<FunctionType>(Ty);
+ FunctionType* FT = cast<FunctionType>(Ty);
Out << "std::vector<Type*>" << typeName << "_args;";
nl(Out);
FunctionType::param_iterator PI = FT->param_begin();
FunctionType::param_iterator PE = FT->param_end();
for (; PI != PE; ++PI) {
- const Type* argTy = static_cast<const Type*>(*PI);
+ Type* argTy = static_cast<Type*>(*PI);
printType(argTy);
std::string argName(getCppName(argTy));
Out << typeName << "_args.push_back(" << argName;
@@ -539,7 +539,7 @@ void CppWriter::printType(const Type* Ty) {
break;
}
case Type::StructTyID: {
- const StructType* ST = cast<StructType>(Ty);
+ StructType* ST = cast<StructType>(Ty);
if (!ST->isAnonymous()) {
Out << "StructType *" << typeName << " = ";
Out << "StructType::createNamed(mod->getContext(), \"";
@@ -555,7 +555,7 @@ void CppWriter::printType(const Type* Ty) {
StructType::element_iterator EI = ST->element_begin();
StructType::element_iterator EE = ST->element_end();
for (; EI != EE; ++EI) {
- const Type* fieldTy = static_cast<const Type*>(*EI);
+ Type* fieldTy = static_cast<Type*>(*EI);
printType(fieldTy);
std::string fieldName(getCppName(fieldTy));
Out << typeName << "_fields.push_back(" << fieldName;
@@ -576,8 +576,8 @@ void CppWriter::printType(const Type* Ty) {
break;
}
case Type::ArrayTyID: {
- const ArrayType* AT = cast<ArrayType>(Ty);
- const Type* ET = AT->getElementType();
+ ArrayType* AT = cast<ArrayType>(Ty);
+ Type* ET = AT->getElementType();
printType(ET);
if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
std::string elemName(getCppName(ET));
@@ -589,8 +589,8 @@ void CppWriter::printType(const Type* Ty) {
break;
}
case Type::PointerTyID: {
- const PointerType* PT = cast<PointerType>(Ty);
- const Type* ET = PT->getElementType();
+ PointerType* PT = cast<PointerType>(Ty);
+ Type* ET = PT->getElementType();
printType(ET);
if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
std::string elemName(getCppName(ET));
@@ -602,8 +602,8 @@ void CppWriter::printType(const Type* Ty) {
break;
}
case Type::VectorTyID: {
- const VectorType* PT = cast<VectorType>(Ty);
- const Type* ET = PT->getElementType();
+ VectorType* PT = cast<VectorType>(Ty);
+ Type* ET = PT->getElementType();
printType(ET);
if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
std::string elemName(getCppName(ET));
@@ -1873,7 +1873,7 @@ void CppWriter::printVariable(const std::string& fname,
void CppWriter::printType(const std::string &fname,
const std::string &typeName) {
- const Type* Ty = TheModule->getTypeByName(typeName);
+ Type* Ty = TheModule->getTypeByName(typeName);
if (!Ty) {
error(std::string("Type '") + typeName + "' not found in input module");
return;
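
For orientation, a minimal sketch of the kind of construction code CppWriter emits with the de-constified signatures above, using the StructType::createNamed/setBody calls visible in the hunks. The function and struct names here (buildPairTy, "pair") are illustrative, not from the patch.

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include <vector>
using namespace llvm;

static StructType *buildPairTy(Module *mod) {
  // Create the named struct first, then fill in its body; the element
  // vector is now std::vector<Type*> rather than std::vector<const Type*>.
  StructType *pairTy = StructType::createNamed(mod->getContext(), "pair");
  std::vector<Type*> pairTy_fields;
  pairTy_fields.push_back(Type::getInt32Ty(mod->getContext()));
  pairTy_fields.push_back(Type::getInt32Ty(mod->getContext()));
  pairTy->setBody(pairTy_fields, /*isPacked=*/false);
  return pairTy;
}
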
diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 62dfdcc2fd..85efbf30b7 100644
--- a/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -1096,7 +1096,7 @@ MBlazeTargetLowering::getSingleConstraintMatchWeight(
// but allow it at the lowest weight.
if (CallOperandVal == NULL)
return CW_Default;
- const Type *type = CallOperandVal->getType();
+ Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
index 32d67b264a..ea81dd63d1 100644
--- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
+++ b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
@@ -37,7 +37,7 @@ namespace mblazeIntrinsic {
#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
}
-std::string MBlazeIntrinsicInfo::getName(unsigned IntrID, const Type **Tys,
+std::string MBlazeIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
unsigned numTys) const {
static const char *const names[] = {
#define GET_INTRINSIC_NAME_TABLE
@@ -90,8 +90,8 @@ bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const {
#include "MBlazeGenIntrinsics.inc"
#undef GET_INTRINSIC_ATTRIBUTES
-static const FunctionType *getType(LLVMContext &Context, unsigned id) {
- const Type *ResultTy = NULL;
+static FunctionType *getType(LLVMContext &Context, unsigned id) {
+ Type *ResultTy = NULL;
std::vector<Type*> ArgTys;
bool IsVarArg = false;
@@ -103,7 +103,7 @@ static const FunctionType *getType(LLVMContext &Context, unsigned id) {
}
Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
- const Type **Tys,
+ Type **Tys,
unsigned numTy) const {
assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded");
AttrListPtr AList = getAttributes((mblazeIntrinsic::ID) IntrID);
diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h b/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
index 9804c7723b..80760d87e0 100644
--- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
+++ b/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
@@ -19,12 +19,12 @@ namespace llvm {
class MBlazeIntrinsicInfo : public TargetIntrinsicInfo {
public:
- std::string getName(unsigned IntrID, const Type **Tys = 0,
+ std::string getName(unsigned IntrID, Type **Tys = 0,
unsigned numTys = 0) const;
unsigned lookupName(const char *Name, unsigned Len) const;
unsigned lookupGCCName(const char *Name) const;
bool isOverloaded(unsigned IID) const;
- Function *getDeclaration(Module *M, unsigned ID, const Type **Tys = 0,
+ Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
unsigned numTys = 0) const;
};
diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
index abd1b0b62c..f66ea302d9 100644
--- a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
+++ b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
@@ -69,7 +69,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
if (Kind.isMergeable1ByteCString())
return false;
- const Type *Ty = GV->getType()->getElementType();
+ Type *Ty = GV->getType()->getElementType();
return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
}
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 0a3eab1f7a..8405789a06 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -987,8 +987,8 @@ const char *MSP430TargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
-bool MSP430TargetLowering::isTruncateFree(const Type *Ty1,
- const Type *Ty2) const {
+bool MSP430TargetLowering::isTruncateFree(Type *Ty1,
+ Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
@@ -1002,7 +1002,7 @@ bool MSP430TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
return (VT1.getSizeInBits() > VT2.getSizeInBits());
}
-bool MSP430TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
+bool MSP430TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// MSP430 implicitly zero-extends 8-bit results in 16-bit registers.
return 0 && Ty1->isIntegerTy(8) && Ty2->isIntegerTy(16);
}
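
A hedged usage sketch for the two hooks above: callers can now hand in Type* taken straight from IR values with no const_cast. TLI stands for any constructed MSP430TargetLowering instance; the helper name is illustrative.

#include "llvm/Type.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

static bool truncIsFree(const TargetLowering &TLI, LLVMContext &Ctx) {
  Type *I16 = Type::getInt16Ty(Ctx);
  Type *I8  = Type::getInt8Ty(Ctx);
  // On msp430 an i16 value in R15W can be read as i8 through R15B,
  // so the MSP430 lowering is expected to answer true here.
  return TLI.isTruncateFree(I16, I8);
}
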
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index bd660a0bb0..237f604357 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -102,7 +102,7 @@ namespace llvm {
/// isTruncateFree - Return true if it's free to truncate a value of type
/// Ty1 to type Ty2. e.g. On msp430 it's free to truncate a i16 value in
/// register R15W to i8 by referencing its sub-register R15B.
- virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
/// isZExtFree - Return true if any actual instruction that defines a value
@@ -113,7 +113,7 @@ namespace llvm {
/// necessarily apply to truncate instructions. e.g. on msp430, all
/// instructions that define 8-bit values implicit zero-extend the result
/// out to 16 bits.
- virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
+ virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI,
diff --git a/lib/Target/Mangler.cpp b/lib/Target/Mangler.cpp
index 46c687b640..53ad155f37 100644
--- a/lib/Target/Mangler.cpp
+++ b/lib/Target/Mangler.cpp
@@ -159,7 +159,7 @@ static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
unsigned ArgWords = 0;
for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
AI != AE; ++AI) {
- const Type *Ty = AI->getType();
+ Type *Ty = AI->getType();
// 'Dereference' type in case of byval parameter attribute
if (AI->hasByValAttr())
Ty = cast<PointerType>(Ty)->getElementType();
@@ -214,7 +214,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
// fastcall and stdcall functions usually need @42 at the end to specify
// the argument info.
- const FunctionType *FT = F->getFunctionType();
+ FunctionType *FT = F->getFunctionType();
if ((CC == CallingConv::X86_FastCall || CC == CallingConv::X86_StdCall) &&
// "Pure" variadic functions do not receive @0 suffix.
(!FT->isVarArg() || FT->getNumParams() == 0 ||
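
The suffix logic above as one standalone sketch, assuming the same TargetData rounding as the hunk: each parameter is sized (byval parameters by their pointee), rounded up to a pointer-size multiple, and the byte total becomes the @N decoration.

#include "llvm/Function.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

static unsigned fastCallArgBytes(const TargetData &TD, const Function *F) {
  unsigned PtrSize = TD.getPointerSize();
  unsigned ArgWords = 0;
  for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
       AI != AE; ++AI) {
    Type *Ty = AI->getType();
    if (AI->hasByValAttr())          // size the pointee, not the pointer
      Ty = cast<PointerType>(Ty)->getElementType();
    // Round each argument up to a multiple of the pointer width.
    ArgWords += ((TD.getTypeAllocSize(Ty) + PtrSize - 1) / PtrSize) * PtrSize;
  }
  return ArgWords;                   // emitted as the "@<ArgWords>" suffix
}
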
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index b4f4b1b4bf..617a85f319 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -1361,11 +1361,11 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
ArgListTy Args;
ArgListEntry Entry;
Entry.Node = Argument;
- Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
+ Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
Args.push_back(Entry);
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(DAG.getEntryNode(),
- (const Type *) Type::getInt32Ty(*DAG.getContext()),
+ (Type *) Type::getInt32Ty(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C, false, true,
DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG,
dl);
@@ -2313,7 +2313,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
// but allow it at the lowest weight.
if (CallOperandVal == NULL)
return CW_Default;
- const Type *type = CallOperandVal->getType();
+ Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
diff --git a/lib/Target/Mips/MipsTargetObjectFile.cpp b/lib/Target/Mips/MipsTargetObjectFile.cpp
index cf5d1b58ad..05c46f5c97 100644
--- a/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -79,7 +79,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
if (Kind.isMergeable1ByteCString())
return false;
- const Type *Ty = GV->getType()->getElementType();
+ Type *Ty = GV->getType()->getElementType();
return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
}
diff --git a/lib/Target/PTX/PTXAsmPrinter.cpp b/lib/Target/PTX/PTXAsmPrinter.cpp
index 2848d5460e..bb48e0ab4b 100644
--- a/lib/Target/PTX/PTXAsmPrinter.cpp
+++ b/lib/Target/PTX/PTXAsmPrinter.cpp
@@ -115,7 +115,7 @@ static const char *getStateSpaceName(unsigned addressSpace) {
return NULL;
}
-static const char *getTypeName(const Type* type) {
+static const char *getTypeName(Type* type) {
while (true) {
switch (type->getTypeID()) {
default: llvm_unreachable("Unknown type");
@@ -130,7 +130,7 @@ static const char *getTypeName(const Type* type) {
}
case Type::ArrayTyID:
case Type::PointerTyID:
- type = dyn_cast<const SequentialType>(type)->getElementType();
+ type = dyn_cast<SequentialType>(type)->getElementType();
break;
}
}
@@ -406,8 +406,8 @@ void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
if (PointerType::classof(gv->getType())) {
- const PointerType* pointerTy = dyn_cast<const PointerType>(gv->getType());
- const Type* elementTy = pointerTy->getElementType();
+ PointerType* pointerTy = dyn_cast<PointerType>(gv->getType());
+ Type* elementTy = pointerTy->getElementType();
decl += ".b8 ";
decl += gvsym->getName();
@@ -417,14 +417,14 @@ void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
{
assert(elementTy->isArrayTy() && "Only pointers to arrays are supported");
- const ArrayType* arrayTy = dyn_cast<const ArrayType>(elementTy);
+ ArrayType* arrayTy = dyn_cast<ArrayType>(elementTy);
elementTy = arrayTy->getElementType();
unsigned numElements = arrayTy->getNumElements();
while (elementTy->isArrayTy()) {
- arrayTy = dyn_cast<const ArrayType>(elementTy);
+ arrayTy = dyn_cast<ArrayType>(elementTy);
elementTy = arrayTy->getElementType();
numElements *= arrayTy->getNumElements();
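
The element-type walk in getTypeName/EmitVariableDeclaration above, reduced to a standalone sketch: sequential types are peeled until a scalar is reached. scalarBase is an illustrative name; note this version also peels vectors, while the printer itself only handles arrays and pointers.

#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static Type *scalarBase(Type *Ty) {
  // ArrayType, PointerType and VectorType all derive from SequentialType,
  // so a single dyn_cast covers every peelable case.
  while (SequentialType *STy = dyn_cast<SequentialType>(Ty))
    Ty = STy->getElementType();
  return Ty;
}
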
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 9741a3902a..f97c46773f 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -406,7 +406,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
-unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
+unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
const TargetMachine &TM = getTargetMachine();
// Darwin passes everything on 4 byte boundary.
if (TM.getSubtarget<PPCSubtarget>().isDarwin())
@@ -1378,7 +1378,7 @@ SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
- const Type *IntPtrTy =
+ Type *IntPtrTy =
DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
*DAG.getContext());
@@ -5504,7 +5504,7 @@ PPCTargetLowering::getSingleConstraintMatchWeight(
// but allow it at the lowest weight.
if (CallOperandVal == NULL)
return CW_Default;
- const Type *type = CallOperandVal->getType();
+ Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
@@ -5634,7 +5634,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
// FIXME: PPC does not allow r+i addressing modes for vectors!
// PPC allows a sign-extended 16-bit immediate field.
@@ -5670,7 +5670,7 @@ bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
-bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{
+bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,Type *Ty) const{
// PPC allows a sign-extended 16-bit immediate field.
return (V > -(1 << 16) && V < (1 << 16)-1);
}
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 986b4e73fb..a4f8e2a839 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -323,7 +323,7 @@ namespace llvm {
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
- unsigned getByValTypeAlignment(const Type *Ty) const;
+ unsigned getByValTypeAlignment(Type *Ty) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
@@ -334,12 +334,12 @@ namespace llvm {
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
- virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;
+ virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;
/// isLegalAddressImmediate - Return true if the GlobalValue can be used as
/// the offset of the target addressing mode.
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 6f30d3fd6c..fb194907e2 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -631,8 +631,8 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
assert(CalleeFn->hasStructRetAttr() &&
"Callee does not have the StructRet attribute.");
- const PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
+ Type *ElementTy = Ty->getElementType();
return getTargetData()->getTypeAllocSize(ElementTy);
}
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index a42ce548c8..1216ccee87 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -87,13 +87,13 @@ unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef TD,
unsigned LLVMElementAtOffset(LLVMTargetDataRef TD, LLVMTypeRef StructTy,
unsigned long long Offset) {
- const StructType *STy = unwrap<StructType>(StructTy);
+ StructType *STy = unwrap<StructType>(StructTy);
return unwrap(TD)->getStructLayout(STy)->getElementContainingOffset(Offset);
}
unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef TD, LLVMTypeRef StructTy,
unsigned Element) {
- const StructType *STy = unwrap<StructType>(StructTy);
+ StructType *STy = unwrap<StructType>(StructTy);
return unwrap(TD)->getStructLayout(STy)->getElementOffset(Element);
}
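
A small consumer sketch for the two C-API entry points above; TD and StructTy are assumed to be a valid LLVMTargetDataRef and a sized struct type obtained elsewhere through the C bindings.

#include "llvm-c/Target.h"

/* Map a byte offset to its containing field, then back to that field's
   start offset; round-tripping through the index this way is the usual
   pattern with these two calls. */
unsigned long long fieldStartAt(LLVMTargetDataRef TD, LLVMTypeRef StructTy,
                                unsigned long long Offset) {
  unsigned Idx = LLVMElementAtOffset(TD, StructTy, Offset);
  return LLVMOffsetOfElement(TD, StructTy, Idx);
}
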
diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp
index 17d022a339..4e95abad8f 100644
--- a/lib/Target/TargetData.cpp
+++ b/lib/Target/TargetData.cpp
@@ -41,7 +41,7 @@ char TargetData::ID = 0;
// Support for StructLayout
//===----------------------------------------------------------------------===//
-StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
+StructLayout::StructLayout(StructType *ST, const TargetData &TD) {
assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
StructAlignment = 0;
StructSize = 0;
@@ -49,7 +49,7 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
// Loop over each of the elements, placing them in memory.
for (unsigned i = 0, e = NumElements; i != e; ++i) {
- const Type *Ty = ST->getElementType(i);
+ Type *Ty = ST->getElementType(i);
unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty);
// Add padding if necessary to align the data element properly.
@@ -261,7 +261,7 @@ TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
/// preferred if ABIInfo = false) the target wants for the specified datatype.
unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
uint32_t BitWidth, bool ABIInfo,
- const Type *Ty) const {
+ Type *Ty) const {
// Check to see if we have an exact match and remember the best match we see.
int BestMatchIdx = -1;
int LargestInt = -1;
@@ -315,7 +315,7 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
namespace {
class StructLayoutMap {
- typedef DenseMap<const StructType*, StructLayout*> LayoutInfoTy;
+ typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
LayoutInfoTy LayoutInfo;
public:
@@ -329,7 +329,7 @@ public:
}
}
- StructLayout *&operator[](const StructType *STy) {
+ StructLayout *&operator[](StructType *STy) {
return LayoutInfo[STy];
}
@@ -343,7 +343,7 @@ TargetData::~TargetData() {
delete static_cast<StructLayoutMap*>(LayoutMap);
}
-const StructLayout *TargetData::getStructLayout(const StructType *Ty) const {
+const StructLayout *TargetData::getStructLayout(StructType *Ty) const {
if (!LayoutMap)
LayoutMap = new StructLayoutMap();
@@ -389,14 +389,14 @@ std::string TargetData::getStringRepresentation() const {
}
-uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
+uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
case Type::PointerTyID:
return getPointerSizeInBits();
case Type::ArrayTyID: {
- const ArrayType *ATy = cast<ArrayType>(Ty);
+ ArrayType *ATy = cast<ArrayType>(Ty);
return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements();
}
case Type::StructTyID:
@@ -435,7 +435,7 @@ uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-unsigned TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
+unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
int AlignType = -1;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -485,7 +485,7 @@ unsigned TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
abi_or_pref, Ty);
}
-unsigned TargetData::getABITypeAlignment(const Type *Ty) const {
+unsigned TargetData::getABITypeAlignment(Type *Ty) const {
return getAlignment(Ty, true);
}
@@ -496,7 +496,7 @@ unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
}
-unsigned TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
+unsigned TargetData::getCallFrameTypeAlignment(Type *Ty) const {
for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
if (Alignments[i].AlignType == STACK_ALIGN)
return Alignments[i].ABIAlign;
@@ -504,11 +504,11 @@ unsigned TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
return getABITypeAlignment(Ty);
}
-unsigned TargetData::getPrefTypeAlignment(const Type *Ty) const {
+unsigned TargetData::getPrefTypeAlignment(Type *Ty) const {
return getAlignment(Ty, false);
}
-unsigned TargetData::getPreferredTypeAlignmentShift(const Type *Ty) const {
+unsigned TargetData::getPreferredTypeAlignmentShift(Type *Ty) const {
unsigned Align = getPrefTypeAlignment(Ty);
assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
return Log2_32(Align);
@@ -521,16 +521,16 @@ IntegerType *TargetData::getIntPtrType(LLVMContext &C) const {
}
-uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
+uint64_t TargetData::getIndexedOffset(Type *ptrTy, Value* const* Indices,
unsigned NumIndices) const {
- const Type *Ty = ptrTy;
+ Type *Ty = ptrTy;
assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()");
uint64_t Result = 0;
generic_gep_type_iterator<Value* const*>
TI = gep_type_begin(ptrTy, Indices, Indices+NumIndices);
for (unsigned CurIDX = 0; CurIDX != NumIndices; ++CurIDX, ++TI) {
- if (const StructType *STy = dyn_cast<StructType>(*TI)) {
+ if (StructType *STy = dyn_cast<StructType>(*TI)) {
assert(Indices[CurIDX]->getType() ==
Type::getInt32Ty(ptrTy->getContext()) &&
"Illegal struct idx");
@@ -561,7 +561,7 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
/// global. This includes an explicitly requested alignment (if the global
/// has one).
unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const {
- const Type *ElemType = GV->getType()->getElementType();
+ Type *ElemType = GV->getType()->getElementType();
unsigned Alignment = getPrefTypeAlignment(ElemType);
unsigned GVAlignment = GV->getAlignment();
if (GVAlignment >= Alignment) {
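
For reference, how the de-constified getStructLayout/getElementOffset pair above is typically consumed; TD is assumed to be a TargetData built from the module's layout string, and STy a sized, non-opaque struct.

#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

static uint64_t secondFieldOffset(const TargetData &TD, StructType *STy) {
  // getStructLayout() now takes a plain StructType*; the returned layout
  // is still owned and cached by the TargetData (see StructLayoutMap above).
  const StructLayout *SL = TD.getStructLayout(STy);
  return SL->getElementOffset(1);   // byte offset of field #1
}
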
diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp
index 703431b380..20f87448dc 100644
--- a/lib/Target/TargetLoweringObjectFile.cpp
+++ b/lib/Target/TargetLoweringObjectFile.cpp
@@ -93,7 +93,7 @@ static bool isSuitableForBSS(const GlobalVariable *GV) {
/// known to have a type that is an array of 1/2/4 byte elements) ends with a
/// nul value and contains no other nuls in it.
static bool IsNullTerminatedString(const Constant *C) {
- const ArrayType *ATy = cast<ArrayType>(C->getType());
+ ArrayType *ATy = cast<ArrayType>(C->getType());
// First check: is we have constant array of i8 terminated with zero
if (const ConstantArray *CVA = dyn_cast<ConstantArray>(C)) {
@@ -188,8 +188,8 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// If initializer is a null-terminated string, put it in a "cstring"
// section of the right width.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
- if (const IntegerType *ITy =
+ if (ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
+ if (IntegerType *ITy =
dyn_cast<IntegerType>(ATy->getElementType())) {
if ((ITy->getBitWidth() == 8 || ITy->getBitWidth() == 16 ||
ITy->getBitWidth() == 32) &&
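
The width test in getKindForGlobal above, as a standalone predicate; ATy is assumed to be the global initializer's array type.

#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool isMergeableCStringWidth(ArrayType *ATy) {
  IntegerType *ITy = dyn_cast<IntegerType>(ATy->getElementType());
  // Only strings of 1/2/4-byte elements qualify for a "cstring" section.
  return ITy && (ITy->getBitWidth() == 8 ||
                 ITy->getBitWidth() == 16 ||
                 ITy->getBitWidth() == 32);
}
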
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 21e163a300..545d880f9f 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -134,7 +134,7 @@ private:
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
+ bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
bool IsMemcpySmall(uint64_t Len);
@@ -144,7 +144,7 @@ private:
} // end anonymous namespace.
-bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
if (evt == MVT::Other || !evt.isSimple())
// Unhandled type. Halt "fast" selection and bail.
@@ -336,7 +336,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
U = C;
}
- if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
if (Ty->getAddressSpace() > 255)
// Fast instruction selection doesn't support the special
// address spaces.
@@ -399,7 +399,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
@@ -1411,7 +1411,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// Replace "add with overflow" intrinsics with an "add" instruction followed
// by a seto/setc instruction.
const Function *Callee = I.getCalledFunction();
- const Type *RetTy =
+ Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
MVT VT;
@@ -1484,8 +1484,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
return false;
- const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
bool isVarArg = FTy->isVarArg();
// Don't know how to handle Win64 varargs yet. Nothing special needed for
@@ -1547,8 +1547,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
Flags.setZExt();
if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
- const PointerType *Ty = cast<PointerType>(ArgVal->getType());
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(ArgVal->getType());
+ Type *ElementTy = Ty->getElementType();
unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
unsigned FrameAlign = CS.getParamAlignment(AttrInd);
if (!FrameAlign)
@@ -1600,7 +1600,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (ArgReg == 0) return false;
- const Type *ArgTy = ArgVal->getType();
+ Type *ArgTy = ArgVal->getType();
MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT))
return false;
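
The byval sizing pattern from DoSelectCall above, isolated: peel the parameter's pointer type, then ask TargetData for the pointee's allocation size. Names are illustrative.

#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

static uint64_t byValFrameSize(const TargetData &TD, PointerType *ArgTy) {
  Type *ElementTy = ArgTy->getElementType();  // plain Type*, no const needed
  return TD.getTypeAllocSize(ElementTy);      // bytes the argument occupies
}
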
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5096d9ae2e..1d953bc22d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1131,18 +1131,18 @@ MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
-static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
+static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
if (MaxAlign == 16)
return;
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getBitWidth() == 128)
MaxAlign = 16;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
unsigned EltAlign = 0;
getMaxByValAlign(ATy->getElementType(), EltAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
- } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
unsigned EltAlign = 0;
getMaxByValAlign(STy->getElementType(i), EltAlign);
@@ -1159,7 +1159,7 @@ static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
-unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
+unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
if (Subtarget->is64Bit()) {
// Max of 8 and alignment of type.
unsigned TyAlign = TD->getABITypeAlignment(Ty);
@@ -8118,7 +8118,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
EVT ArgVT = Op.getNode()->getValueType(0);
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
uint8_t ArgMode;
@@ -8619,7 +8619,7 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
NestReg = X86::ECX;
// Check that ECX wasn't needed by an 'inreg' parameter.
- const FunctionType *FTy = Func->getFunctionType();
+ FunctionType *FTy = Func->getFunctionType();
const AttrListPtr &Attrs = Func->getAttributes();
if (!Attrs.isEmpty() && !Func->isVarArg()) {
@@ -9619,7 +9619,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
Reloc::Model R = getTargetMachine().getRelocationModel();
@@ -9671,7 +9671,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
}
-bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
+bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
@@ -9691,7 +9691,7 @@ bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
return true;
}
-bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
+bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}
@@ -12551,7 +12551,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[1] == "${0:q}")) {
// No need to check constraints, nothing other than the equivalent of
// "=r,0" would be valid here.
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
return IntrinsicLowering::LowerToByteSwap(CI);
@@ -12572,7 +12572,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[1] == "~{dirflag}" &&
AsmPieces[2] == "~{flags}" &&
AsmPieces[3] == "~{fpsr}") {
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
return IntrinsicLowering::LowerToByteSwap(CI);
@@ -12603,7 +12603,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[1] == "~{dirflag}" &&
AsmPieces[2] == "~{flags}" &&
AsmPieces[3] == "~{fpsr}") {
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
return IntrinsicLowering::LowerToByteSwap(CI);
@@ -12629,7 +12629,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
SplitString(AsmPieces[2], Words, " \t,");
if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
Words[2] == "%edx") {
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
return IntrinsicLowering::LowerToByteSwap(CI);
@@ -12700,7 +12700,7 @@ TargetLowering::ConstraintWeight
// but allow it at the lowest weight.
if (CallOperandVal == NULL)
return CW_Default;
- const Type *type = CallOperandVal->getType();
+ Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
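
The test repeated through ExpandInlineAsm above, as one predicate: the xchg/bswap asm is only rewritten to llvm.bswap when the call produces an integer whose width is a multiple of 16. CI is any call instruction.

#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool bswapWidthOK(const CallInst *CI) {
  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  return Ty && Ty->getBitWidth() % 16 == 0;
}
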
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b6036782b8..376aa8a440 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -505,7 +505,7 @@ namespace llvm {
/// function arguments in the caller parameter area. For X86, aggregates
/// that contains are placed at 16-byte boundaries while the rest are at
/// 4-byte boundaries.
- virtual unsigned getByValTypeAlignment(const Type *Ty) const;
+ virtual unsigned getByValTypeAlignment(Type *Ty) const;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
@@ -617,12 +617,12 @@ namespace llvm {
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
- virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
/// isZExtFree - Return true if any actual instruction that defines a
@@ -633,7 +633,7 @@ namespace llvm {
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicit zero-extend the
/// result out to 64 bits.
- virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
+ virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
/// isNarrowingProfitable - Return true if it's profitable to narrow
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 55b5835f52..8dc682255d 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2515,7 +2515,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
- const Type *Ty;
+ Type *Ty;
unsigned Opc = LoadMI->getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::VFsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index 02754f9ae5..6406bce311 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -54,7 +54,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
if (const char *bzeroEntry = V &&
V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
EVT IntPtr = TLI.getPointerTy();
- const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst;
diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp
index 1a43714d63..6efa41f4dc 100644
--- a/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -88,7 +88,7 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
assert(((GV->hasExternalLinkage() ||
GV->hasWeakLinkage()) ||
GV->hasLinkOnceLinkage()) && "Unexpected linkage");
- if (const ArrayType *ATy = dyn_cast<ArrayType>(
+ if (ArrayType *ATy = dyn_cast<ArrayType>(
cast<PointerType>(GV->getType())->getElementType())) {
OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global);
// FIXME: MCStreamerize.
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 6d040e0526..21a119e376 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -252,8 +252,8 @@ static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}
-static inline bool isZeroLengthArray(const Type *Ty) {
- const ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
+static inline bool isZeroLengthArray(Type *Ty) {
+ ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
return AT && (AT->getNumElements() == 0);
}
@@ -275,7 +275,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
llvm_unreachable("Thread local object not a GlobalVariable?");
return SDValue();
}
- const Type *Ty = cast<PointerType>(GV->getType())->getElementType();
+ Type *Ty = cast<PointerType>(GV->getType())->getElementType();
if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
errs() << "Size of thread local object " << GVar->getName()
@@ -465,7 +465,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
}
// Lower to a call to __misaligned_load(BasePtr).
- const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -524,7 +524,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
}
// Lower to a call to __misaligned_store(BasePtr, Value).
- const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -1548,7 +1548,7 @@ static inline bool isImmUs4(int64_t val)
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
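
A sketch of the two checks LowerGlobalTLSAddress applies above, under the assumption that GV is the thread-local global being lowered: the object type behind the global's pointer must be sized and must not be a zero-length array.

#include "llvm/GlobalValue.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool isZeroLengthArray(Type *Ty) {
  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && AT->getNumElements() == 0;
}

static bool tlsObjectHasKnownSize(const GlobalValue *GV) {
  // A global's value type is always reached through its pointer type.
  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  return Ty->isSized() && !isZeroLengthArray(Ty);
}
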
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 9c803bef6d..246da9eee5 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -101,7 +101,7 @@ namespace llvm {
MachineBasicBlock *MBB) const;
virtual bool isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const;
+ Type *Ty) const;
private:
const XCoreTargetMachine &TM;
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index fa007cfc65..d92c45ff6a 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -155,12 +155,12 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
for (unsigned i = 0; i != PointerArgs.size(); ++i) {
bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal);
Argument *PtrArg = PointerArgs[i].first;
- const Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
+ Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
// If this is a byval argument, and if the aggregate type is small, just
// pass the elements, which is always safe.
if (isByVal) {
- if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
+ if (StructType *STy = dyn_cast<StructType>(AgTy)) {
if (maxElements > 0 && STy->getNumElements() > maxElements) {
DEBUG(dbgs() << "argpromotion disable promoting argument '"
<< PtrArg->getName() << "' because it would require adding more"
@@ -190,7 +190,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// If the argument is a recursive type and we're in a recursive
// function, we could end up infinitely peeling the function argument.
if (isSelfRecursive) {
- if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
+ if (StructType *STy = dyn_cast<StructType>(AgTy)) {
bool RecursiveType = false;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
if (STy->getElementType(i) == PtrArg->getType()) {
@@ -492,7 +492,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Start by computing a new prototype for the function, which is the same as
// the old function, but has modified arguments.
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
std::vector<Type*> Params;
typedef std::set<IndicesVector> ScalarizeTable;
@@ -527,8 +527,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
++I, ++ArgIndex) {
if (ByValArgsToTransform.count(I)) {
// Simple byval argument? Just add all the struct element types.
- const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
- const StructType *STy = cast<StructType>(AgTy);
+ Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+ StructType *STy = cast<StructType>(AgTy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
Params.push_back(STy->getElementType(i));
++NumByValArgsPromoted;
@@ -593,7 +593,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
if (Attributes attrs = PAL.getFnAttributes())
AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));
- const Type *RetTy = FTy->getReturnType();
+ Type *RetTy = FTy->getReturnType();
// Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
// have zero fixed arguments.
@@ -662,8 +662,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
} else if (ByValArgsToTransform.count(I)) {
// Emit a GEP and load for each element of the struct.
- const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
- const StructType *STy = cast<StructType>(AgTy);
+ Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+ StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -686,12 +686,12 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
LoadInst *OrigLoad = OriginalLoads[*SI];
if (!SI->empty()) {
Ops.reserve(SI->size());
- const Type *ElTy = V->getType();
+ Type *ElTy = V->getType();
for (IndicesVector::const_iterator II = SI->begin(),
IE = SI->end(); II != IE; ++II) {
// Use i32 to index structs, and i64 for others (pointers/arrays).
// This satisfies GEP constraints.
- const Type *IdxTy = (ElTy->isStructTy() ?
+ Type *IdxTy = (ElTy->isStructTy() ?
Type::getInt32Ty(F->getContext()) :
Type::getInt64Ty(F->getContext()));
Ops.push_back(ConstantInt::get(IdxTy, *II));
@@ -792,9 +792,9 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Instruction *InsertPt = NF->begin()->begin();
// Just add all the struct element types.
- const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+ Type *AgTy = cast<PointerType>(I->getType())->getElementType();
Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
- const StructType *STy = cast<StructType>(AgTy);
+ StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
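
The byval expansion above in isolation: a promoted struct argument contributes one scalar parameter per field to the new prototype. STy is the aggregate type behind the byval pointer.

#include "llvm/DerivedTypes.h"
#include <vector>
using namespace llvm;

static void appendFieldTypes(StructType *STy, std::vector<Type*> &Params) {
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
    Params.push_back(STy->getElementType(i));  // element types are Type* now
}
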
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 15177650f4..4bb6f7a90e 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -206,7 +206,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but doesn't have isVarArg set.
- const FunctionType *FTy = Fn.getFunctionType();
+ FunctionType *FTy = Fn.getFunctionType();
std::vector<Type*> Params(FTy->param_begin(), FTy->param_end());
FunctionType *NFTy = FunctionType::get(FTy->getReturnType(),
@@ -344,7 +344,7 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
static unsigned NumRetVals(const Function *F) {
if (F->getReturnType()->isVoidTy())
return 0;
- else if (const StructType *STy = dyn_cast<StructType>(F->getReturnType()))
+ else if (StructType *STy = dyn_cast<StructType>(F->getReturnType()))
return STy->getNumElements();
else
return 1;
@@ -491,7 +491,7 @@ void DAE::SurveyFunction(const Function &F) {
// Keep track of the number of live retvals, so we can skip checks once all
// of them turn out to be live.
unsigned NumLiveRetVals = 0;
- const Type *STy = dyn_cast<StructType>(F.getReturnType());
+ Type *STy = dyn_cast<StructType>(F.getReturnType());
// Loop all uses of the function.
for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
I != E; ++I) {
@@ -646,7 +646,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but has fewer arguments and a different return type.
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
std::vector<Type*> Params;
// Set up to build a new list of parameter attributes.
@@ -660,7 +660,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Find out the new return value.
Type *RetTy = FTy->getReturnType();
- const Type *NRetTy = NULL;
+ Type *NRetTy = NULL;
unsigned RetCount = NumRetVals(F);
// -1 means unused, other numbers are the new index
@@ -669,7 +669,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
if (RetTy->isVoidTy()) {
NRetTy = RetTy;
} else {
- const StructType *STy = dyn_cast<StructType>(RetTy);
+ StructType *STy = dyn_cast<StructType>(RetTy);
if (STy)
// Look at each of the original return values individually.
for (unsigned i = 0; i != RetCount; ++i) {
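
The return-slot counting used throughout the pass above, standalone: void returns contribute zero slots, struct returns one per element, everything else exactly one.

#include "llvm/Function.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static unsigned numRetVals(const Function *F) {
  if (F->getReturnType()->isVoidTy())
    return 0;
  if (StructType *STy = dyn_cast<StructType>(F->getReturnType()))
    return STy->getNumElements();
  return 1;
}
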
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 4ac721dd06..25eed51fd5 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -281,18 +281,18 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
} else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
} else if (isa<ConstantAggregateZero>(Agg)) {
- if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
if (IdxV < STy->getNumElements())
return Constant::getNullValue(STy->getElementType(IdxV));
- } else if (const SequentialType *STy =
+ } else if (SequentialType *STy =
dyn_cast<SequentialType>(Agg->getType())) {
return Constant::getNullValue(STy->getElementType());
}
} else if (isa<UndefValue>(Agg)) {
- if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
if (IdxV < STy->getNumElements())
return UndefValue::get(STy->getElementType(IdxV));
- } else if (const SequentialType *STy =
+ } else if (SequentialType *STy =
dyn_cast<SequentialType>(Agg->getType())) {
return UndefValue::get(STy->getElementType());
}
@@ -430,7 +430,7 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
++GEPI; // Skip over the pointer index.
// If this is a use of an array allocation, do a bit more checking for sanity.
- if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
uint64_t NumElements = AT->getNumElements();
ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
@@ -451,9 +451,9 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
GEPI != E;
++GEPI) {
uint64_t NumElements;
- if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
+ if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
NumElements = SubArrayTy->getNumElements();
- else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
+ else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
NumElements = SubVectorTy->getNumElements();
else {
assert((*GEPI)->isStructTy() &&
@@ -498,7 +498,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
assert(GV->hasLocalLinkage() && !GV->isConstant());
Constant *Init = GV->getInitializer();
- const Type *Ty = Init->getType();
+ Type *Ty = Init->getType();
std::vector<GlobalVariable*> NewGlobals;
Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
@@ -508,7 +508,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
if (StartAlignment == 0)
StartAlignment = TD.getABITypeAlignment(GV->getType());
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
NewGlobals.reserve(STy->getNumElements());
const StructLayout &Layout = *TD.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -531,9 +531,9 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
NGV->setAlignment(NewAlign);
}
- } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
+ } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
unsigned NumElements = 0;
- if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
NumElements = ATy->getNumElements();
else
NumElements = cast<VectorType>(STy)->getNumElements();
@@ -846,12 +846,12 @@ static void ConstantPropUsersOf(Value *V) {
/// malloc into a global, and any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
- const Type *AllocTy,
+ Type *AllocTy,
ConstantInt *NElements,
TargetData* TD) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
- const Type *GlobalType;
+ Type *GlobalType;
if (NElements->getZExtValue() == 1)
GlobalType = AllocTy;
else
@@ -1192,7 +1192,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
// PN's type is pointer to struct. Make a new PHI of pointer to struct
// field.
- const StructType *ST =
+ StructType *ST =
cast<StructType>(cast<PointerType>(PN->getType())->getElementType());
PHINode *NewPN =
@@ -1298,8 +1298,8 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
Value* NElems, TargetData *TD) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
- const Type* MAT = getMallocAllocatedType(CI);
- const StructType *STy = cast<StructType>(MAT);
+ Type* MAT = getMallocAllocatedType(CI);
+ StructType *STy = cast<StructType>(MAT);
// There is guaranteed to be at least one use of the malloc (storing
// it into GV). If there are other uses, change them to be uses of
@@ -1313,8 +1313,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
std::vector<Value*> FieldMallocs;
for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
- const Type *FieldTy = STy->getElementType(FieldNo);
- const PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
+ Type *FieldTy = STy->getElementType(FieldNo);
+ PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
GlobalVariable *NGV =
new GlobalVariable(*GV->getParent(),
@@ -1325,9 +1325,9 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
FieldGlobals.push_back(NGV);
unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
- if (const StructType *ST = dyn_cast<StructType>(FieldTy))
+ if (StructType *ST = dyn_cast<StructType>(FieldTy))
TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
- const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, 0,
@@ -1428,7 +1428,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Insert a store of null into each global.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
- const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
+ PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
Constant *Null = Constant::getNullValue(PT->getElementType());
new StoreInst(Null, FieldGlobals[i], SI);
}
@@ -1485,7 +1485,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
/// cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CallInst *CI,
- const Type *AllocTy,
+ Type *AllocTy,
Module::global_iterator &GVI,
TargetData *TD) {
if (!TD)
@@ -1538,10 +1538,10 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
- if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
+ if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
AllocTy = AT->getElementType();
- const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
+ StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
if (!AllocSTy)
return false;
@@ -1552,8 +1552,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
- if (const ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
- const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+ if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
+ Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
@@ -1596,7 +1596,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
- const Type* MallocType = getMallocAllocatedType(CI);
+ Type* MallocType = getMallocAllocatedType(CI);
if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
GVI, TD))
return true;
@@ -1611,7 +1611,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
/// can shrink the global into a boolean and select between the two values
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
- const Type *GVElType = GV->getType()->getElementType();
+ Type *GVElType = GV->getType()->getElementType();
// If GVElType is already i1, it is already shrunk. If the type of the GV is
// an FP value, pointer or vector, don't do this optimization because a select
@@ -1761,7 +1761,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
->getEntryBlock().begin());
- const Type* ElemTy = GV->getType()->getElementType();
+ Type* ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
@@ -2003,7 +2003,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
CSVals[1] = 0;
- const StructType *StructTy =
+ StructType *StructTy =
cast <StructType>(
cast<ArrayType>(GCL->getType()->getElementType())->getElementType());
@@ -2013,9 +2013,9 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
if (Ctors[i]) {
CSVals[1] = Ctors[i];
} else {
- const Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
+ Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
false);
- const PointerType *PFTy = PointerType::getUnqual(FTy);
+ PointerType *PFTy = PointerType::getUnqual(FTy);
CSVals[1] = Constant::getNullValue(PFTy);
CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
0x7fffffff);
@@ -2196,7 +2196,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
}
std::vector<Constant*> Elts;
- if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
// Break up the constant into its elements.
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
@@ -2224,10 +2224,10 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
}
ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
- const SequentialType *InitTy = cast<SequentialType>(Init->getType());
+ SequentialType *InitTy = cast<SequentialType>(Init->getType());
uint64_t NumElts;
- if (const ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
NumElts = ATy->getNumElements();
else
NumElts = cast<VectorType>(InitTy)->getNumElements();
@@ -2358,7 +2358,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
// stored value.
Ptr = CE->getOperand(0);
- const Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
+ Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
// In order to push the bitcast onto the stored value, a bitcast
// from NewTy to Val's type must be legal. If it's not, we can try
@@ -2367,10 +2367,10 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
// If NewTy is a struct, we can convert the pointer to the struct
// into a pointer to its first member.
// FIXME: This could be extended to support arrays as well.
- if (const StructType *STy = dyn_cast<StructType>(NewTy)) {
+ if (StructType *STy = dyn_cast<StructType>(NewTy)) {
NewTy = STy->getTypeAtIndex(0U);
- const IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
+ IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
Constant * const IdxList[] = {IdxZero, IdxZero};
@@ -2421,7 +2421,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (InstResult == 0) return false; // Could not evaluate load.
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
- const Type *Ty = AI->getType()->getElementType();
+ Type *Ty = AI->getType()->getElementType();
AllocaTmps.push_back(new GlobalVariable(Ty, false,
GlobalValue::InternalLinkage,
UndefValue::get(Ty),
@@ -2711,7 +2711,7 @@ static Function *FindCXAAtExit(Module &M) {
if (!Fn)
return 0;
- const FunctionType *FTy = Fn->getFunctionType();
+ FunctionType *FTy = Fn->getFunctionType();
// Checking that the function has the right return type, the right number of
// parameters and that they all have pointer types should be enough.
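Note: every hunk in GlobalOpt.cpp above follows one mechanical pattern — const is dropped from Type and its subclasses (StructType, ArrayType, PointerType, FunctionType) while the surrounding logic is untouched. A minimal sketch of the resulting idiom, on a hypothetical helper (not code from this commit) that walks an aggregate type the way EvaluateStoreInto does:

#include "llvm/DerivedTypes.h"
using namespace llvm;

// Hypothetical helper, illustration only: count the scalar leaves of an
// aggregate type. Each local below carried 'const' before this change;
// dyn_cast<> now hands back mutable pointers throughout.
static uint64_t countLeafElements(Type *Ty) {
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    uint64_t N = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      N += countLeafElements(STy->getElementType(i));
    return N;
  }
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))
    return ATy->getNumElements() * countLeafElements(ATy->getElementType());
  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    return VTy->getNumElements() * countLeafElements(VTy->getElementType());
  return 1; // integer, FP, or pointer leaf
}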
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index 25c0134664..d757e1fdb1 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -167,7 +167,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// Check to see if this function returns a constant.
SmallVector<Value *,4> RetVals;
- const StructType *STy = dyn_cast<StructType>(F.getReturnType());
+ StructType *STy = dyn_cast<StructType>(F.getReturnType());
if (STy)
for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i)
RetVals.push_back(UndefValue::get(STy->getElementType(i)));
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 57f3e772b5..f00935b088 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -62,7 +62,7 @@ void Inliner::getAnalysisUsage(AnalysisUsage &Info) const {
}
-typedef DenseMap<const ArrayType*, std::vector<AllocaInst*> >
+typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;
/// InlineCallIfPossible - If it is possible to inline the specified call site,
@@ -139,7 +139,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// Don't bother trying to merge array allocations (they will usually be
// canonicalized to be an allocation *of* an array), or allocations whose
// type is not itself an array (because we're afraid of pessimizing SRoA).
- const ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
+ ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
if (ATy == 0 || AI->isArrayAllocation())
continue;
diff --git a/lib/Transforms/IPO/LowerSetJmp.cpp b/lib/Transforms/IPO/LowerSetJmp.cpp
index 659476b139..494cee20f2 100644
--- a/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -199,8 +199,8 @@ bool LowerSetJmp::runOnModule(Module& M) {
// This function is always successful, unless it isn't.
bool LowerSetJmp::doInitialization(Module& M)
{
- const Type *SBPTy = Type::getInt8PtrTy(M.getContext());
- const Type *SBPPTy = PointerType::getUnqual(SBPTy);
+ Type *SBPTy = Type::getInt8PtrTy(M.getContext());
+ Type *SBPPTy = PointerType::getUnqual(SBPTy);
// N.B. See llvm/runtime/GCCLibraries/libexception/SJLJ-Exception.h for
// a description of the following library functions.
@@ -258,7 +258,7 @@ bool LowerSetJmp::IsTransformableFunction(StringRef Name) {
// throwing the exception for us.
void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
{
- const Type* SBPTy = Type::getInt8PtrTy(Inst->getContext());
+ Type* SBPTy = Type::getInt8PtrTy(Inst->getContext());
// Create the call to "__llvm_sjljeh_throw_longjmp". This takes the
// same parameters as "longjmp", except that the buffer is cast to a
@@ -308,7 +308,7 @@ AllocaInst* LowerSetJmp::GetSetJmpMap(Function* Func)
assert(Inst && "Couldn't find even ONE instruction in entry block!");
// Fill in the alloca and call to initialize the SJ map.
- const Type *SBPTy =
+ Type *SBPTy =
Type::getInt8PtrTy(Func->getContext());
AllocaInst* Map = new AllocaInst(SBPTy, 0, "SJMap", Inst);
CallInst::Create(InitSJMap, Map, "", Inst);
@@ -378,7 +378,7 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
Function* Func = ABlock->getParent();
// Add this setjmp to the setjmp map.
- const Type* SBPTy =
+ Type* SBPTy =
Type::getInt8PtrTy(Inst->getContext());
CastInst* BufPtr =
new BitCastInst(Inst->getArgOperand(0), SBPTy, "SBJmpBuf", Inst);
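Note: the LowerSetJmp hunks show the change on the type-factory side. Type::getInt8PtrTy and PointerType::getUnqual already return non-const pointers, so the old const bindings only added a qualifier each consumer had to shed again. A sketch of the post-patch usage (standalone illustration, not commit code):

#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Illustration only: the i8* / i8** pair doInitialization builds, plus the
// map slot GetSetJmpMap allocates. Every factory result binds mutably.
static AllocaInst *makeMapSlot(LLVMContext &C, Instruction *InsertBefore) {
  Type *SBPTy  = Type::getInt8PtrTy(C);         // i8*, as in the pass
  Type *SBPPTy = PointerType::getUnqual(SBPTy); // i8**, used for externals
  (void)SBPPTy; // mirrors doInitialization; unused in this sketch
  return new AllocaInst(SBPTy, 0, "SJMap", InsertBefore);
}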
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 7796d05b7b..bba3067dc4 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -76,7 +76,7 @@ STATISTIC(NumDoubleWeak, "Number of new functions created");
/// functions that will compare equal, without looking at the instructions
/// inside the function.
static unsigned profileFunction(const Function *F) {
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
FoldingSetNodeID ID;
ID.AddInteger(F->size());
@@ -185,7 +185,7 @@ private:
}
/// Compare two Types, treating all pointer types as equal.
- bool isEquivalentType(const Type *Ty1, const Type *Ty2) const;
+ bool isEquivalentType(Type *Ty1, Type *Ty2) const;
// The two functions undergoing comparison.
const Function *F1, *F2;
@@ -200,8 +200,8 @@ private:
// Any two pointers in the same address space are equivalent, intptr_t and
// pointers are equivalent. Otherwise, standard type equivalence rules apply.
-bool FunctionComparator::isEquivalentType(const Type *Ty1,
- const Type *Ty2) const {
+bool FunctionComparator::isEquivalentType(Type *Ty1,
+ Type *Ty2) const {
if (Ty1 == Ty2)
return true;
if (Ty1->getTypeID() != Ty2->getTypeID()) {
@@ -233,14 +233,14 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1,
return true;
case Type::PointerTyID: {
- const PointerType *PTy1 = cast<PointerType>(Ty1);
- const PointerType *PTy2 = cast<PointerType>(Ty2);
+ PointerType *PTy1 = cast<PointerType>(Ty1);
+ PointerType *PTy2 = cast<PointerType>(Ty2);
return PTy1->getAddressSpace() == PTy2->getAddressSpace();
}
case Type::StructTyID: {
- const StructType *STy1 = cast<StructType>(Ty1);
- const StructType *STy2 = cast<StructType>(Ty2);
+ StructType *STy1 = cast<StructType>(Ty1);
+ StructType *STy2 = cast<StructType>(Ty2);
if (STy1->getNumElements() != STy2->getNumElements())
return false;
@@ -255,8 +255,8 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1,
}
case Type::FunctionTyID: {
- const FunctionType *FTy1 = cast<FunctionType>(Ty1);
- const FunctionType *FTy2 = cast<FunctionType>(Ty2);
+ FunctionType *FTy1 = cast<FunctionType>(Ty1);
+ FunctionType *FTy2 = cast<FunctionType>(Ty2);
if (FTy1->getNumParams() != FTy2->getNumParams() ||
FTy1->isVarArg() != FTy2->isVarArg())
return false;
@@ -272,8 +272,8 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1,
}
case Type::ArrayTyID: {
- const ArrayType *ATy1 = cast<ArrayType>(Ty1);
- const ArrayType *ATy2 = cast<ArrayType>(Ty2);
+ ArrayType *ATy1 = cast<ArrayType>(Ty1);
+ ArrayType *ATy2 = cast<ArrayType>(Ty2);
return ATy1->getNumElements() == ATy2->getNumElements() &&
isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
}
@@ -725,7 +725,7 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
SmallVector<Value *, 16> Args;
unsigned i = 0;
- const FunctionType *FFTy = F->getFunctionType();
+ FunctionType *FFTy = F->getFunctionType();
for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
AI != AE; ++AI) {
Args.push_back(Builder.CreateBitCast(AI, FFTy->getParamType(i)));
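Note: the signature change to isEquivalentType ripples into every recursive call above. For contrast, a strict pointer-identity check over signatures looks like the sketch below — deliberately stricter than the pass, which treats pointers in the same address space as interchangeable. Assumed helper, not part of the commit:

#include "llvm/DerivedTypes.h"
using namespace llvm;

// Sketch: exact signature comparison. FunctionComparator's real check
// recurses through isEquivalentType instead of using pointer equality.
static bool sameSignature(FunctionType *FTy1, FunctionType *FTy2) {
  if (FTy1->isVarArg() != FTy2->isVarArg() ||
      FTy1->getNumParams() != FTy2->getNumParams() ||
      FTy1->getReturnType() != FTy2->getReturnType())
    return false;
  for (unsigned i = 0, e = FTy1->getNumParams(); i != e; ++i)
    if (FTy1->getParamType(i) != FTy2->getParamType(i))
      return false;
  return true;
}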
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 8257d6b89d..c6bdb08998 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -103,7 +103,7 @@ public:
//
Instruction *visitAdd(BinaryOperator &I);
Instruction *visitFAdd(BinaryOperator &I);
- Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
+ Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
Instruction *visitSub(BinaryOperator &I);
Instruction *visitFSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
@@ -197,10 +197,10 @@ public:
Instruction *visitInstruction(Instruction &I) { return 0; }
private:
- bool ShouldChangeType(const Type *From, const Type *To) const;
+ bool ShouldChangeType(Type *From, Type *To) const;
Value *dyn_castNegVal(Value *V) const;
Value *dyn_castFNegVal(Value *V) const;
- const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
+ Type *FindElementAtOffset(Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices);
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
@@ -209,7 +209,7 @@ private:
/// the cast can be eliminated by some other simple transformation, we prefer
/// to do the simplification first.
bool ShouldOptimizeCast(Instruction::CastOps opcode,const Value *V,
- const Type *Ty);
+ Type *Ty);
Instruction *visitCallSite(CallSite CS);
Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD);
@@ -357,7 +357,7 @@ private:
Instruction *SimplifyMemSet(MemSetInst *MI);
- Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
+ Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
};
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c36a9552e7..d10046c10b 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -188,7 +188,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateMul(LHS, AddOne(C2));
// A+B --> A|B iff A and B have no bits set in common.
- if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
+ if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
@@ -401,7 +401,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
Value *InstCombiner::EmitGEPOffset(User *GEP) {
TargetData &TD = *getTargetData();
gep_type_iterator GTI = gep_type_begin(GEP);
- const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
@@ -420,7 +420,7 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
if (OpC->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
if (Size)
@@ -460,7 +460,7 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
- const Type *Ty) {
+ Type *Ty) {
assert(TD && "Must have target data info for this");
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
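Note: EmitGEPOffset above mixes struct-field offsets (from StructLayout) with scaled sequential indices. A reduced, constant-only version of that accumulation — a sketch assuming every index is a ConstantInt; the real code emits IR for the variable case:

#include "llvm/Constants.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
using namespace llvm;

// Sketch: byte offset of a GEP whose indices are all constant.
static int64_t constantGEPOffset(User *GEP, TargetData &TD) {
  int64_t Offset = 0;
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I); // assumption: constant indices
    if (OpC->isZero()) continue;
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      // A struct index selects a field; add its fixed offset.
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      // A sequential index scales by the element's allocation size.
      Offset += (int64_t)TD.getTypeAllocSize(GTI.getIndexedType()) *
                OpC->getSExtValue();
    }
  }
  return Offset;
}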
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 64ea36fb1e..32920fabc3 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1224,7 +1224,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// fold (and (cast A), (cast B)) -> (cast (and A, B))
if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
SrcTy == Op1C->getOperand(0)->getType() &&
SrcTy->isIntOrIntVectorTy()) {
@@ -2008,7 +2008,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
CastInst *Op1C = dyn_cast<CastInst>(Op1);
if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (SrcTy == Op1C->getOperand(0)->getType() &&
SrcTy->isIntOrIntVectorTy()) {
Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
@@ -2288,7 +2288,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
// Only do this if the casts both really cause code to be generated.
ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 537f2b318a..12096476ff 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -22,8 +22,8 @@ using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// though a va_arg area.
-static const Type *getPromotedType(const Type *Ty) {
- if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
+static Type *getPromotedType(Type *Ty) {
+ if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
if (ITy->getBitWidth() < 32)
return Type::getInt32Ty(Ty->getContext());
}
@@ -64,7 +64,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned DstAddrSp =
cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
- const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
+ IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
@@ -76,18 +76,18 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
if (StrippedDest != MI->getArgOperand(0)) {
- const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
+ Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
while (!SrcETy->isSingleValueType()) {
- if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
+ if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
if (STy->getNumElements() == 1)
SrcETy = STy->getElementType(0);
else
break;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
if (ATy->getNumElements() == 1)
SrcETy = ATy->getElementType();
else
@@ -142,7 +142,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
- const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
+ Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
Value *Dest = MI->getDest();
unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
@@ -250,7 +250,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// We need target data for just about everything so depend on it.
if (!TD) break;
- const Type *ReturnTy = CI.getType();
+ Type *ReturnTy = CI.getType();
uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
// Get to the real allocated thing and offset as fast as possible.
@@ -300,7 +300,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
} else if (CallInst *MI = extractMallocCall(Op1)) {
// Get allocation size.
- const Type* MallocType = getMallocAllocatedType(MI);
+ Type* MallocType = getMallocAllocatedType(MI);
if (MallocType && MallocType->isSized())
if (Value *NElems = getMallocArraySize(MI, TD, true))
if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
@@ -355,7 +355,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::cttz: {
// If all bits below the first known one are known zero,
// this value is constant.
- const IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
// FIXME: Try to simplify vectors of integers.
if (!IT) break;
uint32_t BitWidth = IT->getBitWidth();
@@ -374,7 +374,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ctlz: {
// If all bits above the first known one are known zero,
// this value is constant.
- const IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
// FIXME: Try to simplify vectors of integers.
if (!IT) break;
uint32_t BitWidth = IT->getBitWidth();
@@ -392,7 +392,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::uadd_with_overflow: {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
- const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
@@ -416,7 +416,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
UndefValue::get(LHS->getType()),
ConstantInt::getTrue(II->getContext())
};
- const StructType *ST = cast<StructType>(II->getType());
+ StructType *ST = cast<StructType>(II->getType());
Constant *Struct = ConstantStruct::get(ST, V);
return InsertValueInst::Create(Struct, Add, 0);
}
@@ -430,7 +430,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
UndefValue::get(LHS->getType()),
ConstantInt::getFalse(II->getContext())
};
- const StructType *ST = cast<StructType>(II->getType());
+ StructType *ST = cast<StructType>(II->getType());
Constant *Struct = ConstantStruct::get(ST, V);
return InsertValueInst::Create(Struct, Add, 0);
}
@@ -559,7 +559,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
- const Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
@@ -570,7 +570,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
- const Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
return new StoreInst(II->getArgOperand(1), Ptr);
@@ -765,9 +765,9 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
if (!CS.paramHasAttr(ix, Attribute::ByVal))
return true;
- const Type* SrcTy =
+ Type* SrcTy =
cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
- const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
+ Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
@@ -884,8 +884,8 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (In->getIntrinsicID() == Intrinsic::init_trampoline)
return transformCallThroughTrampoline(CS);
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
if (FTy->isVarArg()) {
int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
// See if we can optimize any arguments passed through the varargs area of
@@ -934,9 +934,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// would cause a type conversion of one of our arguments, change this call to
// be a direct call with arguments casted to the appropriate types.
//
- const FunctionType *FT = Callee->getFunctionType();
- const Type *OldRetTy = Caller->getType();
- const Type *NewRetTy = FT->getReturnType();
+ FunctionType *FT = Callee->getFunctionType();
+ Type *OldRetTy = Caller->getType();
+ Type *NewRetTy = FT->getReturnType();
if (NewRetTy->isStructTy())
return false; // TODO: Handle multiple return values.
@@ -982,8 +982,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallSite::arg_iterator AI = CS.arg_begin();
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
- const Type *ActTy = (*AI)->getType();
+ Type *ParamTy = FT->getParamType(i);
+ Type *ActTy = (*AI)->getType();
if (!CastInst::isCastable(ActTy, ParamTy))
return false; // Cannot transform this parameter value.
@@ -995,11 +995,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
- const PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
+ PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
return false;
- const Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
+ Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
if (TD->getTypeAllocSize(CurElTy) !=
TD->getTypeAllocSize(ParamPTy->getElementType()))
return false;
@@ -1023,7 +1023,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
- const PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
+ PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
}
@@ -1062,7 +1062,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
AI = CS.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
+ Type *ParamTy = FT->getParamType(i);
if ((*AI)->getType() == ParamTy) {
Args.push_back(*AI);
} else {
@@ -1089,7 +1089,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
} else {
// Add all of the arguments in their promoted form to the arg list.
for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
- const Type *PTy = getPromotedType((*AI)->getType());
+ Type *PTy = getPromotedType((*AI)->getType());
if (PTy != (*AI)->getType()) {
// Must promote to pass through va_arg area!
Instruction::CastOps opcode =
@@ -1168,8 +1168,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
Value *Callee = CS.getCalledValue();
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
const AttrListPtr &Attrs = CS.getAttributes();
// If the call already has the 'nest' attribute somewhere then give up -
@@ -1181,8 +1181,8 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
- const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
- const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
+ PointerType *NestFPTy = cast<PointerType>(NestF->getType());
+ FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
const AttrListPtr &NestAttrs = NestF->getAttributes();
if (!NestAttrs.isEmpty()) {
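Note: the overflow-intrinsic hunks above are one place the const apparently had to go rather than being cosmetically dropped — presumably ConstantStruct::get now takes the StructType* unqualified, so cast<StructType>(II->getType()) must bind mutably. A sketch of the {iN, i1} result construction, with assumed surrounding context:

#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
using namespace llvm;

// Sketch (assumed helper): llvm.uadd.with.overflow yields a {iN, i1}
// struct; when the add provably cannot wrap, the overflow bit is a
// constant false and only the sum needs inserting.
static Value *noOverflowResult(IntrinsicInst *II, Value *Sum) {
  StructType *ST = cast<StructType>(II->getType()); // non-const post-patch
  Constant *Fields[] = {
    UndefValue::get(Sum->getType()),        // sum slot, filled below
    ConstantInt::getFalse(II->getContext()) // overflow = false
  };
  Constant *Struct = ConstantStruct::get(ST, Fields);
  return InsertValueInst::Create(Struct, Sum, 0);
}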
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 82c734e0b8..f99e457482 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -79,14 +79,14 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// This requires TargetData to get the alloca alignment and size information.
if (!TD) return 0;
- const PointerType *PTy = cast<PointerType>(CI.getType());
+ PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder);
AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
// Get the type really allocated and the type casted to.
- const Type *AllocElTy = AI.getAllocatedType();
- const Type *CastElTy = PTy->getElementType();
+ Type *AllocElTy = AI.getAllocatedType();
+ Type *CastElTy = PTy->getElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
@@ -151,7 +151,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
/// EvaluateInDifferentType - Given an expression that
/// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
/// insert the code to evaluate the expression.
-Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
+Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
@@ -229,12 +229,12 @@ static Instruction::CastOps
isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
- const Type *DstTy, ///< The target type for the second cast instruction
+ Type *DstTy, ///< The target type for the second cast instruction
TargetData *TD ///< The target data for pointer size
) {
- const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
- const Type *MidTy = CI->getType(); // B from above
+ Type *SrcTy = CI->getOperand(0)->getType(); // A from above
+ Type *MidTy = CI->getType(); // B from above
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
@@ -260,7 +260,7 @@ isEliminableCastPair(
/// the cast can be eliminated by some other simple transformation, we prefer
/// to do the simplification first.
bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
- const Type *Ty) {
+ Type *Ty) {
// Noop casts and casts of constants should be eliminated trivially.
if (V->getType() == Ty || isa<Constant>(V)) return false;
@@ -324,7 +324,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
///
/// This function works on both vectors and scalars.
///
-static bool CanEvaluateTruncated(Value *V, const Type *Ty) {
+static bool CanEvaluateTruncated(Value *V, Type *Ty) {
// We can always evaluate constants in another type.
if (isa<Constant>(V))
return true;
@@ -332,7 +332,7 @@ static bool CanEvaluateTruncated(Value *V, const Type *Ty) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
- const Type *OrigTy = V->getType();
+ Type *OrigTy = V->getType();
// If this is an extension from the dest type, we can eliminate it, even if it
// has multiple uses.
@@ -435,7 +435,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *DestTy = CI.getType(), *SrcTy = Src->getType();
+ Type *DestTy = CI.getType(), *SrcTy = Src->getType();
// Attempt to truncate the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -586,7 +586,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// It is also profitable to transform icmp eq into not(xor(A, B)) because that
// may lead to additional simplifications.
if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
- if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
+ if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
uint32_t BitWidth = ITy->getBitWidth();
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
@@ -644,7 +644,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
-static bool CanEvaluateZExtd(Value *V, const Type *Ty, unsigned &BitsToClear) {
+static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
BitsToClear = 0;
if (isa<Constant>(V))
return true;
@@ -758,7 +758,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
+ Type *SrcTy = Src->getType(), *DestTy = CI.getType();
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -965,10 +965,10 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
}
// vector (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed.
- if (const VectorType *VTy = dyn_cast<VectorType>(CI.getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(CI.getType())) {
if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_Zero()) &&
Op0->getType() == CI.getType()) {
- const Type *EltTy = VTy->getElementType();
+ Type *EltTy = VTy->getElementType();
// splat the shift constant to a constant vector.
Constant *VSh = ConstantInt::get(VTy, EltTy->getScalarSizeInBits()-1);
@@ -988,7 +988,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
///
/// This function works on both vectors and scalars.
///
-static bool CanEvaluateSExtd(Value *V, const Type *Ty) {
+static bool CanEvaluateSExtd(Value *V, Type *Ty) {
assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
"Can't sign extend type to a smaller type");
// If this is a constant, it can be trivially promoted.
@@ -1063,7 +1063,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
+ Type *SrcTy = Src->getType(), *DestTy = CI.getType();
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -1192,7 +1192,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
case Instruction::FMul:
case Instruction::FDiv:
case Instruction::FRem:
- const Type *SrcTy = OpI->getType();
+ Type *SrcTy = OpI->getType();
Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
if (LHSTrunc->getType() != SrcTy &&
@@ -1351,7 +1351,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
- const Type *GEPIdxTy =
+ Type *GEPIdxTy =
cast<PointerType>(OrigBase->getType())->getElementType();
SmallVector<Value*, 8> NewIndices;
if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
@@ -1402,12 +1402,12 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
/// replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
-static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
+static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
InstCombiner &IC) {
// We can only do this optimization if the output is a multiple of the input
// element size, or the input is a multiple of the output element size.
// Convert the input type to have the same element type as the output.
- const VectorType *SrcTy = cast<VectorType>(InVal->getType());
+ VectorType *SrcTy = cast<VectorType>(InVal->getType());
if (SrcTy->getElementType() != DestTy->getElementType()) {
// The input types don't need to be identical, but for now they must be the
@@ -1427,7 +1427,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
// size of the input.
SmallVector<Constant*, 16> ShuffleMask;
Value *V2;
- const IntegerType *Int32Ty = Type::getInt32Ty(SrcTy->getContext());
+ IntegerType *Int32Ty = Type::getInt32Ty(SrcTy->getContext());
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
// If we're shrinking the number of elements, just shuffle in the low
@@ -1453,11 +1453,11 @@ static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
return new ShuffleVectorInst(InVal, V2, ConstantVector::get(ShuffleMask));
}
-static bool isMultipleOfTypeSize(unsigned Value, const Type *Ty) {
+static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
return Value % Ty->getPrimitiveSizeInBits() == 0;
}
-static unsigned getTypeSizeIndex(unsigned Value, const Type *Ty) {
+static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
return Value / Ty->getPrimitiveSizeInBits();
}
@@ -1471,7 +1471,7 @@ static unsigned getTypeSizeIndex(unsigned Value, const Type *Ty) {
/// filling in Elements with the elements found here.
static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
SmallVectorImpl<Value*> &Elements,
- const Type *VecEltTy) {
+ Type *VecEltTy) {
// Undef values never contribute useful bits to the result.
if (isa<UndefValue>(V)) return true;
@@ -1508,7 +1508,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
C->getType()->getPrimitiveSizeInBits()));
unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
- const Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
+ Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
@@ -1572,7 +1572,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
InstCombiner &IC) {
- const VectorType *DestVecTy = cast<VectorType>(CI.getType());
+ VectorType *DestVecTy = cast<VectorType>(CI.getType());
Value *IntInput = CI.getOperand(0);
SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
@@ -1599,7 +1599,7 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
/// bitcast. The various long double bitcasts can't get in here.
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
Value *Src = CI.getOperand(0);
- const Type *DestTy = CI.getType();
+ Type *DestTy = CI.getType();
// If this is a bitcast from int to float, check to see if the int is an
// extraction from a vector.
@@ -1607,7 +1607,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
// bitcast(trunc(bitcast(somevector)))
if (match(Src, m_Trunc(m_BitCast(m_Value(VecInput)))) &&
isa<VectorType>(VecInput->getType())) {
- const VectorType *VecTy = cast<VectorType>(VecInput->getType());
+ VectorType *VecTy = cast<VectorType>(VecInput->getType());
unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
if (VecTy->getPrimitiveSizeInBits() % DestWidth == 0) {
@@ -1628,7 +1628,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
m_ConstantInt(ShAmt)))) &&
isa<VectorType>(VecInput->getType())) {
- const VectorType *VecTy = cast<VectorType>(VecInput->getType());
+ VectorType *VecTy = cast<VectorType>(VecInput->getType());
unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
if (VecTy->getPrimitiveSizeInBits() % DestWidth == 0 &&
ShAmt->getZExtValue() % DestWidth == 0) {
@@ -1651,18 +1651,18 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If the operands are integer typed then apply the integer transforms,
// otherwise just apply the common ones.
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType();
- const Type *DestTy = CI.getType();
+ Type *SrcTy = Src->getType();
+ Type *DestTy = CI.getType();
// Get rid of casts from one type to the same type. These are useless and can
// be replaced by the operand.
if (DestTy == Src->getType())
return ReplaceInstUsesWith(CI, Src);
- if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
- const PointerType *SrcPTy = cast<PointerType>(SrcTy);
- const Type *DstElTy = DstPTy->getElementType();
- const Type *SrcElTy = SrcPTy->getElementType();
+ if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
+ PointerType *SrcPTy = cast<PointerType>(SrcTy);
+ Type *DstElTy = DstPTy->getElementType();
+ Type *SrcElTy = SrcPTy->getElementType();
// If the address spaces don't match, don't eliminate the bitcast, which is
// required for changing types.
@@ -1702,7 +1702,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
return I;
- if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
+ if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
@@ -1731,7 +1731,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
}
- if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
Value *Elem =
Builder->CreateExtractElement(Src,
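Note: OptimizeVectorResize builds its shuffle masks out of i32 constants, and with this change Int32Ty is an ordinary IntegerType*. A reduced sketch of mask construction (illustration only — the real function also handles growing, shrinking, and a second input vector):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

// Sketch: an identity shuffle mask <0, 1, ..., NumElts-1> as a constant
// vector of i32, the form ShuffleVectorInst expects.
static Constant *identityMask(LLVMContext &C, unsigned NumElts) {
  IntegerType *Int32Ty = Type::getInt32Ty(C);
  SmallVector<Constant*, 16> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(ConstantInt::get(Int32Ty, i));
  return ConstantVector::get(Mask);
}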
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index c78760b206..b8ce4b7eb9 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -56,7 +56,7 @@ static bool AddWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, bool IsSigned = false) {
Result = ConstantExpr::getAdd(In1, In2);
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
if (HasAddOverflow(ExtractElement(Result, Idx),
@@ -91,7 +91,7 @@ static bool SubWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, bool IsSigned = false) {
Result = ConstantExpr::getSub(In1, In2);
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
if (HasSubOverflow(ExtractElement(Result, Idx),
@@ -220,7 +220,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// structs.
SmallVector<unsigned, 4> LaterIndices;
- const Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
+ Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (Idx == 0) return 0; // Variable index.
@@ -228,9 +228,9 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
uint64_t IdxVal = Idx->getZExtValue();
if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
- if (const StructType *STy = dyn_cast<StructType>(EltTy))
+ if (StructType *STy = dyn_cast<StructType>(EltTy))
EltTy = STy->getElementType(IdxVal);
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
+ else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
if (IdxVal >= ATy->getNumElements()) return 0;
EltTy = ATy->getElementType();
} else {
@@ -441,7 +441,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// ((magic_cst >> i) & 1) != 0
if (Init->getNumOperands() <= 32 ||
(TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
- const Type *Ty;
+ Type *Ty;
if (Init->getNumOperands() <= 32)
Ty = Type::getInt32Ty(Init->getContext());
else
@@ -483,7 +483,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -513,7 +513,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -530,7 +530,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// we don't need to bother extending: the extension won't affect where the
// computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
}
return VariableIdx;
@@ -552,7 +552,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
return 0;
// Okay, we can do this evaluation. Start by converting the index to intptr.
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
if (VariableIdx->getType() != IntPtrTy)
VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
true /*Signed*/);
@@ -1098,7 +1098,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// If the LHS is an AND of a zext, and we have an equality compare, we can
// shrink the and/compare to the smaller type, eliminating the cast.
if (ZExtInst *Cast = dyn_cast<ZExtInst>(LHSI->getOperand(0))) {
- const IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
+ IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
// Make sure we don't compare the upper bits, SimplifyDemandedBits
// should fold the icmp to true/false in that case.
if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
@@ -1121,8 +1121,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
ConstantInt *ShAmt;
ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
- const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
- const Type *AndTy = AndCST->getType(); // Type of the and.
+ Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
+ Type *AndTy = AndCST->getType(); // Type of the and.
// We can fold this as long as we can't shift unknown bits
// into the mask. This can only happen with signed shift
@@ -1517,8 +1517,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
Value *LHSCIOp = LHSCI->getOperand(0);
- const Type *SrcTy = LHSCIOp->getType();
- const Type *DestTy = LHSCI->getType();
+ Type *SrcTy = LHSCIOp->getType();
+ Type *DestTy = LHSCI->getType();
Value *RHSCIOp;
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
@@ -1786,7 +1786,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
- const Type *Ty = Op0->getType();
+ Type *Ty = Op0->getType();
// icmp's with boolean values can always be turned into bitwise operations
if (Ty->isIntegerTy(1)) {
@@ -2637,7 +2637,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
}
- const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
+ IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
// Now we know that the APFloat is a normal number, zero or inf.
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index f499290fe8..bdd2edb991 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -26,7 +26,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
if (TD) {
- const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
@@ -38,7 +38,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
- const Type *NewTy =
+ Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
@@ -92,22 +92,22 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
- const PointerType *DestTy = cast<PointerType>(CI->getType());
- const Type *DestPTy = DestTy->getElementType();
- if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
+ PointerType *DestTy = cast<PointerType>(CI->getType());
+ Type *DestPTy = DestTy->getElementType();
+ if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
// If the address spaces don't match, don't eliminate the cast.
if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
return 0;
- const Type *SrcPTy = SrcTy->getElementType();
+ Type *SrcPTy = SrcTy->getElementType();
if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
DestPTy->isVectorTy()) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
- if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
+ if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
Value *Idxs[2];
@@ -256,11 +256,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
User *CI = cast<User>(SI.getOperand(1));
Value *CastOp = CI->getOperand(0);
- const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
- const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
+ Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
+ PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
if (SrcTy == 0) return 0;
- const Type *SrcPTy = SrcTy->getElementType();
+ Type *SrcPTy = SrcTy->getElementType();
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
return 0;
@@ -280,12 +280,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
NewGEPIndices.push_back(Zero);
while (1) {
- if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
+ if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
if (!STy->getNumElements()) /* Struct can be empty {} */
break;
NewGEPIndices.push_back(Zero);
SrcPTy = STy->getElementType(0);
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
NewGEPIndices.push_back(Zero);
SrcPTy = ATy->getElementType();
} else {
@@ -314,8 +314,8 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Value *NewCast;
Value *SIOp0 = SI.getOperand(0);
Instruction::CastOps opcode = Instruction::BitCast;
- const Type* CastSrcTy = SIOp0->getType();
- const Type* CastDstTy = SrcPTy;
+ Type* CastSrcTy = SIOp0->getType();
+ Type* CastDstTy = SrcPTy;
if (CastDstTy->isPointerTy()) {
if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
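Note: the while(1) loop in InstCombineStoreToCast above peels leading struct fields and array elements, collecting zero indices so the store can go through a "gep P, 0, 0, ..." instead of a bitcast. Factored out as a standalone sketch (hypothetical helper, same logic):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

// Sketch: descend through leading aggregate levels, pushing an i32 0 index
// per level, and return the first non-aggregate (or empty-struct) type.
static Type *stripFirstElements(Type *Ty, SmallVectorImpl<Value*> &Idxs) {
  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(Ty->getContext()));
  while (true) {
    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      if (!STy->getNumElements()) break; // struct can be empty: {}
      Idxs.push_back(Zero);
      Ty = STy->getElementType(0);
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
      Idxs.push_back(Zero);
      Ty = ATy->getElementType();
    } else {
      break;
    }
  }
  return Ty;
}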
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 630a6fee39..53341ccbfc 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -421,7 +421,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
/// dyn_castZExtVal - Checks if V is a zext or constant that can
/// be truncated to Ty without losing bits.
-static Value *dyn_castZExtVal(Value *V, const Type *Ty) {
+static Value *dyn_castZExtVal(Value *V, Type *Ty) {
if (ZExtInst *Z = dyn_cast<ZExtInst>(V)) {
if (Z->getSrcTy() == Ty)
return Z->getOperand(0);
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 3777340349..bf1049d152 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -28,8 +28,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
Value *LHSVal = FirstInst->getOperand(0);
Value *RHSVal = FirstInst->getOperand(1);
- const Type *LHSType = LHSVal->getType();
- const Type *RHSType = RHSVal->getType();
+ Type *LHSType = LHSVal->getType();
+ Type *RHSType = RHSVal->getType();
bool isNUW = false, isNSW = false, isExact = false;
if (OverflowingBinaryOperator *BO =
@@ -397,7 +397,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// the same type or "+42") we can pull the operation through the PHI, reducing
// code size and simplifying code.
Constant *ConstantOp = 0;
- const Type *CastSrcTy = 0;
+ Type *CastSrcTy = 0;
bool isNUW = false, isNSW = false, isExact = false;
if (isa<CastInst>(FirstInst)) {
@@ -572,7 +572,7 @@ struct LoweredPHIRecord {
unsigned Shift; // The amount shifted.
unsigned Width; // The width extracted.
- LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
+ LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
: PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
// Ctor form used by DenseMap.
@@ -701,7 +701,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
unsigned PHIId = PHIUsers[UserI].PHIId;
PHINode *PN = PHIsToSlice[PHIId];
unsigned Offset = PHIUsers[UserI].Shift;
- const Type *Ty = PHIUsers[UserI].Inst->getType();
+ Type *Ty = PHIUsers[UserI].Inst->getType();
PHINode *EltPHI;
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 5733c20828..eb463902d6 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -363,7 +363,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT: {
// These transformations only work for selects over integers.
- const IntegerType *SelectTy = dyn_cast<IntegerType>(SI.getType());
+ IntegerType *SelectTy = dyn_cast<IntegerType>(SI.getType());
if (!SelectTy)
break;
@@ -443,7 +443,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// FIXME: Type and constness constraints could be lifted, but we have to
// watch code size carefully. We should consider xor instead of
// sub/add when we decide to do that.
- if (const IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
+ if (IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
if (TrueVal->getType() == Ty) {
if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
ConstantInt *C1 = NULL, *C2 = NULL;
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 811f94976f..65d1a66f71 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -528,7 +528,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
- const IntegerType *Ty = cast<IntegerType>(I.getType());
+ IntegerType *Ty = cast<IntegerType>(I.getType());
// Check for (X << c1) << c2 and (X >> c1) >> c2
if (I.getOpcode() == ShiftOp->getOpcode()) {
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 8fea8eb7ef..66f39be17b 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -103,7 +103,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert(V != 0 && "Null pointer of Value???");
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
- const Type *VTy = V->getType();
+ Type *VTy = V->getType();
assert((TD || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
@@ -404,8 +404,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
return 0; // vector->int or fp->int?
- if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
- if (const VectorType *SrcVTy =
+ if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
+ if (VectorType *SrcVTy =
dyn_cast<VectorType>(I->getOperand(0)->getType())) {
if (DstVTy->getNumElements() != SrcVTy->getNumElements())
// Don't touch a bitcast between vectors of different element counts.
@@ -826,7 +826,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
UndefElts = 0;
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
@@ -855,7 +855,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (DemandedElts.isAllOnesValue())
return 0;
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Zero = Constant::getNullValue(EltTy);
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
@@ -992,7 +992,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
case Instruction::BitCast: {
// Vector->vector casts only.
- const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
+ VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
if (!VTy) break;
unsigned InVWidth = VTy->getNumElements();
APInt InputDemandedElts(InVWidth, 0);
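Note: the ConstantVector case of SimplifyDemandedVectorElts above rewrites every lane the caller does not demand to undef, so later folds can ignore it. Just that case, as a self-contained sketch (assumed out-of-tree helper):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/APInt.h"
#include <vector>
using namespace llvm;

// Sketch: replace undemanded lanes of a constant vector with undef.
// Demanded carries one bit per lane, as in SimplifyDemandedVectorElts.
static Constant *undefUndemandedLanes(ConstantVector *CV,
                                      const APInt &Demanded) {
  Type *EltTy = cast<VectorType>(CV->getType())->getElementType();
  Constant *Undef = UndefValue::get(EltTy);
  std::vector<Constant*> Elts;
  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i)
    Elts.push_back(Demanded[i] ? cast<Constant>(CV->getOperand(i)) : Undef);
  return ConstantVector::get(Elts);
}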
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index ad6a8d054e..154267c034 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -77,7 +77,7 @@ static std::vector<int> getShuffleMask(const ShuffleVectorInst *SVI) {
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
assert(V->getType()->isVectorTy() && "Not looking at a vector?");
- const VectorType *PTy = cast<VectorType>(V->getType());
+ VectorType *PTy = cast<VectorType>(V->getType());
unsigned Width = PTy->getNumElements();
if (EltNo >= Width) // Out of range access.
return UndefValue::get(PTy->getElementType());
@@ -175,7 +175,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// the same number of elements, see if we can find the source element from
// it. In this case, we will end up needing to bitcast the scalars.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
- if (const VectorType *VT =
+ if (VectorType *VT =
dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
if (VT->getNumElements() == VectorWidth)
if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
@@ -225,7 +225,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
SrcIdx -= LHSWidth;
Src = SVI->getOperand(1);
}
- const Type *Int32Ty = Type::getInt32Ty(EI.getContext());
+ Type *Int32Ty = Type::getInt32Ty(EI.getContext());
return ExtractElementInst::Create(Src,
ConstantInt::get(Int32Ty,
SrcIdx, false));
@@ -555,7 +555,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// shuffle mask, do the replacement.
if (isSplat || NewMask == LHSMask || NewMask == Mask) {
std::vector<Constant*> Elts;
- const Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
+ Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
if (NewMask[i] < 0) {
Elts.push_back(UndefValue::get(Int32Ty));
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index ab98ef9fcc..5828ec2ee9 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -83,7 +83,7 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
-bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
+bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
// If we don't have TD, we don't know if the source/dest are legal.
@@ -516,8 +516,8 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
// If it's a bitcast involving vectors, make sure it has the same number of
// elements on both sides.
if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
- const VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
- const VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
+ VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
+ VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
// Verify that either both or neither are vectors.
if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
@@ -654,7 +654,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
}
} else {
CastInst *CI = cast<CastInst>(&I);
- const Type *RetTy = CI->getType();
+ Type *RetTy = CI->getType();
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InV;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
@@ -680,7 +680,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
-const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
+Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
if (!TD) return 0;
if (!Ty->isSized()) return 0;
@@ -688,7 +688,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
int64_t FirstIdx = 0;
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
@@ -711,7 +711,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
return 0;
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TD->getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
@@ -722,7 +722,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
- } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
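// ---- illustrative sketch, not part of the patch above ----
// FindElementAtOffset peels one aggregate level per iteration, converting a
// byte offset into successive GEP indices. Its struct step, reduced to a
// hypothetical helper (stepIntoStruct is not LLVM API; Offset is assumed
// non-negative and within the struct, as the assert above requires):
#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Returns the field containing Offset and rebases Offset into that field.
static Type *stepIntoStruct(const TargetData &TD, StructType *STy,
                            int64_t &Offset) {
  const StructLayout *SL = TD.getStructLayout(STy);
  unsigned Elt = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(Elt);
  return STy->getElementType(Elt);
}
// ----------------------------------------------------------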
@@ -751,13 +751,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// by multiples of a zero size type with zero.
if (TD) {
bool MadeChange = false;
- const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
I != E; ++I, ++GTI) {
// Skip indices into struct types.
- const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
+ SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
if (!SeqTy) continue;
// If the element type has zero size then any index over it is equivalent
@@ -859,7 +859,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
Value *StrippedPtr = PtrOp->stripPointerCasts();
- const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
+ PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
if (StrippedPtr != PtrOp &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
@@ -875,8 +875,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
//
// This occurs when the program declares an array extern like "int X[];"
if (HasZeroPointerIndex) {
- const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
- if (const ArrayType *CATy =
+ PointerType *CPTy = cast<PointerType>(PtrOp->getType());
+ if (ArrayType *CATy =
dyn_cast<ArrayType>(CPTy->getElementType())) {
// GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
@@ -889,7 +889,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return Res;
}
- if (const ArrayType *XATy =
+ if (ArrayType *XATy =
dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
// GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
if (CATy->getElementType() == XATy->getElementType()) {
@@ -907,8 +907,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Transform things like:
// %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
- const Type *SrcElTy = StrippedPtrTy->getElementType();
- const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
+ Type *SrcElTy = StrippedPtrTy->getElementType();
+ Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
if (TD && SrcElTy->isArrayTy() &&
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeAllocSize(ResElTy)) {
@@ -1023,7 +1023,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// field at Offset in 'A's type. If so, we can pull the cast through the
// GEP.
SmallVector<Value*, 8> NewIndices;
- const Type *InTy =
+ Type *InTy =
cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
if (FindElementAtOffset(InTy, Offset, NewIndices)) {
Value *NGEP = GEP.isInBounds() ?
diff --git a/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/lib/Transforms/Instrumentation/EdgeProfiling.cpp
index 1d31fcc4df..e8ef2654d2 100644
--- a/lib/Transforms/Instrumentation/EdgeProfiling.cpp
+++ b/lib/Transforms/Instrumentation/EdgeProfiling.cpp
@@ -74,7 +74,7 @@ bool EdgeProfiler::runOnModule(Module &M) {
}
}
- const Type *ATy = ArrayType::get(Type::getInt32Ty(M.getContext()), NumEdges);
+ Type *ATy = ArrayType::get(Type::getInt32Ty(M.getContext()), NumEdges);
GlobalVariable *Counters =
new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
Constant::getNullValue(ATy), "EdgeProfCounters");
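// ---- illustrative sketch, not part of the patch above ----
// The counter array created above is the usual zero-initialized internal
// global; reduced to a hypothetical helper (makeCounters is not LLVM API):
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Module.h"
using namespace llvm;
// Creates [NumEdges x i32] zeroinitializer with internal linkage.
static GlobalVariable *makeCounters(Module &M, unsigned NumEdges) {
  Type *ATy = ArrayType::get(Type::getInt32Ty(M.getContext()), NumEdges);
  return new GlobalVariable(M, ATy, /*isConstant=*/false,
                            GlobalValue::InternalLinkage,
                            Constant::getNullValue(ATy), "EdgeProfCounters");
}
// ----------------------------------------------------------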
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 3f2c412388..bd1b46307e 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -444,7 +444,7 @@ bool GCOVProfiler::emitProfileArcs(DebugInfoFinder &DIF) {
Edges += TI->getNumSuccessors();
}
- const ArrayType *CounterTy =
+ ArrayType *CounterTy =
ArrayType::get(Type::getInt64Ty(*Ctx), Edges);
GlobalVariable *Counters =
new GlobalVariable(*M, CounterTy, false,
@@ -499,7 +499,7 @@ bool GCOVProfiler::emitProfileArcs(DebugInfoFinder &DIF) {
ComplexEdgePreds, ComplexEdgeSuccs);
GlobalVariable *EdgeState = getEdgeStateValue();
- const Type *Int32Ty = Type::getInt32Ty(*Ctx);
+ Type *Int32Ty = Type::getInt32Ty(*Ctx);
for (int i = 0, e = ComplexEdgePreds.size(); i != e; ++i) {
IRBuilder<> Builder(ComplexEdgePreds[i+1]->getTerminator());
Builder.CreateStore(ConstantInt::get(Int32Ty, i), EdgeState);
@@ -535,8 +535,8 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
// read it. Threads and invoke make this untrue.
// emit [(succs * preds) x i64*], logically [succ x [pred x i64*]].
- const Type *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
- const ArrayType *EdgeTableTy = ArrayType::get(
+ Type *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
+ ArrayType *EdgeTableTy = ArrayType::get(
Int64PtrTy, Succs.size() * Preds.size());
Constant **EdgeTable = new Constant*[Succs.size() * Preds.size()];
@@ -572,7 +572,7 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
}
Constant *GCOVProfiler::getStartFileFunc() {
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
Type::getInt8PtrTy(*Ctx), false);
return M->getOrInsertFunction("llvm_gcda_start_file", FTy);
}
@@ -582,7 +582,7 @@ Constant *GCOVProfiler::getIncrementIndirectCounterFunc() {
Type::getInt32PtrTy(*Ctx), // uint32_t *predecessor
Type::getInt64PtrTy(*Ctx)->getPointerTo(), // uint64_t **state_table_row
};
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
Args, false);
return M->getOrInsertFunction("llvm_gcda_increment_indirect_counter", FTy);
}
@@ -592,7 +592,7 @@ Constant *GCOVProfiler::getEmitFunctionFunc() {
Type::getInt32Ty(*Ctx), // uint32_t ident
Type::getInt8PtrTy(*Ctx), // const char *function_name
};
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_function", FTy);
}
@@ -602,13 +602,13 @@ Constant *GCOVProfiler::getEmitArcsFunc() {
Type::getInt32Ty(*Ctx), // uint32_t num_counters
Type::getInt64PtrTy(*Ctx), // uint64_t *counters
};
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_arcs", FTy);
}
Constant *GCOVProfiler::getEndFileFunc() {
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
return M->getOrInsertFunction("llvm_gcda_end_file", FTy);
}
@@ -628,7 +628,7 @@ GlobalVariable *GCOVProfiler::getEdgeStateValue() {
void GCOVProfiler::insertCounterWriteout(
DebugInfoFinder &DIF,
SmallVector<std::pair<GlobalVariable *, MDNode *>, 8> &CountersBySP) {
- const FunctionType *WriteoutFTy =
+ FunctionType *WriteoutFTy =
FunctionType::get(Type::getVoidTy(*Ctx), false);
Function *WriteoutF = Function::Create(WriteoutFTy,
GlobalValue::InternalLinkage,
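// ---- illustrative sketch, not part of the patch above ----
// Each get*Func above declares a void runtime hook by pairing
// FunctionType::get with Module::getOrInsertFunction; reduced form below
// ("hook" and getHook are made-up names):
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
using namespace llvm;
// Declares void hook(i8*) if it is not already present in the module.
static Constant *getHook(Module *M, LLVMContext &Ctx) {
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                        Type::getInt8PtrTy(Ctx), false);
  return M->getOrInsertFunction("hook", FTy);
}
// ----------------------------------------------------------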
diff --git a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index e09f882aa3..62c21b8e9c 100644
--- a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -112,8 +112,8 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
// be calculated from other edge counters on reading the profile info back
// in.
- const Type *Int32 = Type::getInt32Ty(M.getContext());
- const ArrayType *ATy = ArrayType::get(Int32, NumEdges);
+ Type *Int32 = Type::getInt32Ty(M.getContext());
+ ArrayType *ATy = ArrayType::get(Int32, NumEdges);
GlobalVariable *Counters =
new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
Constant::getNullValue(ATy), "OptEdgeProfCounters");
diff --git a/lib/Transforms/Instrumentation/PathProfiling.cpp b/lib/Transforms/Instrumentation/PathProfiling.cpp
index 75416637db..c6147fa18f 100644
--- a/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -374,7 +374,7 @@ namespace llvm {
template<bool xcompile> class TypeBuilder<PathProfilingFunctionTable,
xcompile> {
public:
- static const StructType *get(LLVMContext& C) {
+ static StructType *get(LLVMContext& C) {
return( StructType::get(
TypeBuilder<types::i<32>, xcompile>::get(C), // type
TypeBuilder<types::i<32>, xcompile>::get(C), // array size
@@ -1289,7 +1289,7 @@ void PathProfiler::runOnFunction(std::vector<Constant*> &ftInit,
// Should we store the information in an array or hash
if( dag.getNumberOfPaths() <= HASH_THRESHHOLD ) {
- const Type* t = ArrayType::get(Type::getInt32Ty(*Context),
+ Type* t = ArrayType::get(Type::getInt32Ty(*Context),
dag.getNumberOfPaths());
dag.setCounterArray(new GlobalVariable(M, t, false,
@@ -1301,7 +1301,7 @@ void PathProfiler::runOnFunction(std::vector<Constant*> &ftInit,
// Add to global function reference table
unsigned type;
- const Type* voidPtr = TypeBuilder<types::i<8>*, true>::get(*Context);
+ Type* voidPtr = TypeBuilder<types::i<8>*, true>::get(*Context);
if( dag.getNumberOfPaths() <= HASH_THRESHHOLD )
type = ProfilingArray;
@@ -1315,7 +1315,7 @@ void PathProfiler::runOnFunction(std::vector<Constant*> &ftInit,
ConstantExpr::getBitCast(dag.getCounterArray(), voidPtr) :
Constant::getNullValue(voidPtr);
- const StructType* at = ftEntryTypeBuilder::get(*Context);
+ StructType* at = ftEntryTypeBuilder::get(*Context);
ConstantStruct* functionEntry =
(ConstantStruct*)ConstantStruct::get(at, entryArray);
ftInit.push_back(functionEntry);
@@ -1379,8 +1379,8 @@ bool PathProfiler::runOnModule(Module &M) {
runOnFunction(ftInit, *F, M);
}
- const Type *t = ftEntryTypeBuilder::get(*Context);
- const ArrayType* ftArrayType = ArrayType::get(t, ftInit.size());
+ Type *t = ftEntryTypeBuilder::get(*Context);
+ ArrayType* ftArrayType = ArrayType::get(t, ftInit.size());
Constant* ftInitConstant = ConstantArray::get(ftArrayType, ftInit);
DEBUG(dbgs() << " ftArrayType:" << *ftArrayType << "\n");
@@ -1388,7 +1388,7 @@ bool PathProfiler::runOnModule(Module &M) {
GlobalVariable* functionTable =
new GlobalVariable(M, ftArrayType, false, GlobalValue::InternalLinkage,
ftInitConstant, "functionPathTable");
- const Type *eltType = ftArrayType->getTypeAtIndex((unsigned)0);
+ Type *eltType = ftArrayType->getTypeAtIndex((unsigned)0);
InsertProfilingInitCall(Main, "llvm_start_path_profiling", functionTable,
PointerType::getUnqual(eltType));
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 445a5b6f60..0ebab33f5c 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -25,9 +25,9 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
GlobalValue *Array,
PointerType *arrayType) {
LLVMContext &Context = MainFn->getContext();
- const Type *ArgVTy =
+ Type *ArgVTy =
PointerType::getUnqual(Type::getInt8PtrTy(Context));
- const PointerType *UIntPtr = arrayType ? arrayType :
+ PointerType *UIntPtr = arrayType ? arrayType :
Type::getInt32PtrTy(Context);
Module &M = *MainFn->getParent();
Constant *InitFn = M.getOrInsertFunction(FnName, Type::getInt32Ty(Context),
@@ -137,7 +137,7 @@ void llvm::InsertProfilingShutdownCall(Function *Callee, Module *Mod) {
Type::getInt32Ty(Mod->getContext()),
FunctionType::get(Type::getVoidTy(Mod->getContext()), false)->getPointerTo()
};
- const StructType *GlobalDtorElemTy =
+ StructType *GlobalDtorElemTy =
StructType::get(Mod->getContext(), GlobalDtorElems, false);
// Construct the new element we'll be adding.
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0af14ed17b..17beeb5aa8 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -104,7 +104,7 @@ namespace {
void EliminateMostlyEmptyBlock(BasicBlock *BB);
bool OptimizeBlock(BasicBlock &BB);
bool OptimizeInst(Instruction *I);
- bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy);
+ bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
bool OptimizeInlineAsmInst(CallInst *CS);
bool OptimizeCallInst(CallInst *CI);
bool MoveExtToFormExtLoad(Instruction *I);
@@ -528,7 +528,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
- const Type *ReturnTy = CI->getType();
+ Type *ReturnTy = CI->getType();
Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
// Substituting this can cause recursive simplifications, which can
@@ -724,7 +724,7 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
- const Type *AccessTy) {
+ Type *AccessTy) {
Value *Repl = Addr;
// Try to collapse single-value PHI nodes. This is necessary to undo
@@ -837,7 +837,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
} else {
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
- const Type *IntPtrTy =
+ Type *IntPtrTy =
TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
Value *Result = 0;
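// ---- illustrative sketch, not part of the patch above ----
// Sinking a nonlocal addressing mode rebuilds the address as pointer-width
// integer math in IntPtrTy, as the hunk above shows. A hypothetical reduction
// for "Base + Scale*Index" (emitAddr is not LLVM API):
#include "llvm/Constants.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Emits (intptr)Base + Scale*(intptr)Index, then casts back to i8*.
static Value *emitAddr(IRBuilder<> &B, const TargetData *TD,
                       Value *Base, Value *Index, int64_t Scale) {
  Type *IntPtrTy = TD->getIntPtrType(B.getContext());
  Value *V = B.CreatePtrToInt(Base, IntPtrTy);
  Value *Idx = B.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
  V = B.CreateAdd(V, B.CreateMul(Idx, ConstantInt::get(IntPtrTy, Scale,
                                                       /*isSigned=*/true)));
  return B.CreateIntToPtr(V, Type::getInt8PtrTy(B.getContext()));
}
// ----------------------------------------------------------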
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index cb9b5bebc5..e6089a9a43 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -264,7 +264,7 @@ static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
}
assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
- const PointerType *PT = cast<PointerType>(V->getType());
+ PointerType *PT = cast<PointerType>(V->getType());
return TD->getTypeAllocSize(PT->getElementType());
}
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 87b7317ad2..b4d5667bfa 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -63,7 +63,7 @@ static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
namespace {
struct Expression {
uint32_t opcode;
- const Type *type;
+ Type *type;
SmallVector<uint32_t, 4> varargs;
Expression(uint32_t o = ~2U) : opcode(o) { }
@@ -655,7 +655,7 @@ SpeculationFailure:
/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
- const Type *LoadTy,
+ Type *LoadTy,
const TargetData &TD) {
// If the loaded or stored value is an first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
@@ -680,14 +680,14 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
- const Type *LoadedTy,
+ Type *LoadedTy,
Instruction *InsertPt,
const TargetData &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
// If this is already the right type, just return it.
- const Type *StoredValTy = StoredVal->getType();
+ Type *StoredValTy = StoredVal->getType();
uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
uint64_t LoadSize = TD.getTypeStoreSizeInBits(LoadedTy);
@@ -704,7 +704,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
- const Type *TypeToCastTo = LoadedTy;
+ Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->isPointerTy())
TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
@@ -743,7 +743,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
}
// Truncate the integer to the right size now.
- const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
+ Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
if (LoadedTy == NewIntTy)
@@ -765,7 +765,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
-static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
+static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
const TargetData &TD) {
@@ -839,7 +839,7 @@ static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
-static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
+static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
const TargetData &TD) {
// Cannot handle reading from store of first-class aggregate yet.
@@ -856,7 +856,7 @@ static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
-static int AnalyzeLoadFromClobberingLoad(const Type *LoadTy, Value *LoadPtr,
+static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
LoadInst *DepLI, const TargetData &TD){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
@@ -883,7 +883,7 @@ static int AnalyzeLoadFromClobberingLoad(const Type *LoadTy, Value *LoadPtr,
-static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
+static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
const TargetData &TD) {
// If the mem operation is a non-constant size, we can't handle it.
@@ -934,7 +934,7 @@ static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
/// mustalias. Check this case to see if there is anything more we can do
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
- const Type *LoadTy,
+ Type *LoadTy,
Instruction *InsertPt, const TargetData &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
@@ -974,7 +974,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
- const Type *LoadTy, Instruction *InsertPt,
+ Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
const TargetData &TD = *gvn.getTargetData();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
@@ -996,7 +996,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
// memdep queries will find the new load. We can't easily remove the old
// load completely because it is already in the value numbering table.
IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
- const Type *DestPTy =
+ Type *DestPTy =
IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
DestPTy = PointerType::get(DestPTy,
cast<PointerType>(PtrVal->getType())->getAddressSpace());
@@ -1034,7 +1034,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
- const Type *LoadTy, Instruction *InsertPt,
+ Type *LoadTy, Instruction *InsertPt,
const TargetData &TD){
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
@@ -1154,7 +1154,7 @@ struct AvailableValueInBlock {
/// MaterializeAdjustedValue - Emit code into this block to adjust the value
/// defined here to the specified type. This handles various coercion cases.
- Value *MaterializeAdjustedValue(const Type *LoadTy, GVN &gvn) const {
+ Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
Value *Res;
if (isSimpleValue()) {
Res = getSimpleValue();
@@ -1213,7 +1213,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
- const Type *LoadTy = LI->getType();
+ Type *LoadTy = LI->getType();
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
const AvailableValueInBlock &AV = ValuesPerBlock[i];
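// ---- illustrative sketch, not part of the patch above ----
// CoerceAvailableValueToLoadType reuses a stored value for a load of another
// type by moving bits rather than re-reading memory; when the two non-pointer
// types already store the same number of bits, that is one bitcast. A
// hypothetical trimmed version (coerceSameSize is not LLVM API):
#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Bitcasts StoredVal to LoadTy when the store sizes match exactly.
static Value *coerceSameSize(Value *StoredVal, Type *LoadTy,
                             const TargetData &TD, IRBuilder<> &B) {
  // Assumes neither type is a pointer; GVN's full version inserts
  // ptrtoint/inttoptr and trunc for the remaining cases.
  if (TD.getTypeStoreSizeInBits(StoredVal->getType()) !=
      TD.getTypeStoreSizeInBits(LoadTy))
    return 0;
  return B.CreateBitCast(StoredVal, LoadTy);
}
// ----------------------------------------------------------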
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index dee3d38d72..cf75448aa2 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -390,7 +390,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
return;
}
- const IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());
+ IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());
// Insert new integer induction variable.
PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN);
@@ -665,7 +665,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// of different sizes.
for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
Value *Op = UI->getOperandValToReplace();
- const Type *UseTy = Op->getType();
+ Type *UseTy = Op->getType();
Instruction *User = UI->getUser();
// Compute the final addrec to expand into code.
@@ -747,7 +747,7 @@ namespace {
// extend operations. This information is recorded by CollectExtend and
// provides the input to WidenIV.
struct WideIVInfo {
- const Type *WidestNativeType; // Widest integer type created [sz]ext
+ Type *WidestNativeType; // Widest integer type created [sz]ext
bool IsSigned; // Was an sext user seen before a zext?
WideIVInfo() : WidestNativeType(0), IsSigned(false) {}
@@ -759,7 +759,7 @@ namespace {
/// the final width of the IV before actually widening it.
static void CollectExtend(CastInst *Cast, bool IsSigned, WideIVInfo &WI,
ScalarEvolution *SE, const TargetData *TD) {
- const Type *Ty = Cast->getType();
+ Type *Ty = Cast->getType();
uint64_t Width = SE->getTypeSizeInBits(Ty);
if (TD && !TD->isLegalInteger(Width))
return;
@@ -787,7 +787,7 @@ namespace {
class WidenIV {
// Parameters
PHINode *OrigPhi;
- const Type *WideType;
+ Type *WideType;
bool IsSigned;
// Context
@@ -839,7 +839,7 @@ protected:
};
} // anonymous namespace
-static Value *getExtend( Value *NarrowOper, const Type *WideType,
+static Value *getExtend( Value *NarrowOper, Type *WideType,
bool IsSigned, IRBuilder<> &Builder) {
return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
Builder.CreateZExt(NarrowOper, WideType);
@@ -1489,7 +1489,7 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
/// through Truncs.
///
/// TODO: Unnecessary if LFTR does not force a canonical IV.
-static const Type *getBackedgeIVType(Loop *L) {
+static Type *getBackedgeIVType(Loop *L) {
if (!L->getExitingBlock())
return 0;
@@ -1502,7 +1502,7 @@ static const Type *getBackedgeIVType(Loop *L) {
if (!Cond)
return 0;
- const Type *Ty = 0;
+ Type *Ty = 0;
for(User::op_iterator OI = Cond->op_begin(), OE = Cond->op_end();
OI != OE; ++OI) {
assert((!Ty || Ty == (*OI)->getType()) && "bad icmp operand types");
@@ -1748,7 +1748,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Compute the type of the largest recurrence expression, and decide whether
// a canonical induction variable should be inserted.
- const Type *LargestType = 0;
+ Type *LargestType = 0;
bool NeedCannIV = false;
bool ExpandBECount = canExpandBackedgeTakenCount(L, SE);
if (ExpandBECount) {
@@ -1756,7 +1756,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// rewriting the loop exit test condition below, which requires a
// canonical induction variable.
NeedCannIV = true;
- const Type *Ty = BackedgeTakenCount->getType();
+ Type *Ty = BackedgeTakenCount->getType();
if (DisableIVRewrite) {
// In this mode, SimplifyIVUsers may have already widened the IV used by
// the backedge test and inserted a Trunc on the compare's operand. Get
@@ -1772,7 +1772,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!DisableIVRewrite) {
for (IVUsers::const_iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
NeedCannIV = true;
- const Type *Ty =
+ Type *Ty =
SE->getEffectiveSCEVType(I->getOperandValToReplace()->getType());
if (!LargestType ||
SE->getTypeSizeInBits(Ty) >
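// ---- illustrative sketch, not part of the patch above ----
// CollectExtend records the widest target-legal integer type seen across the
// IV's sext/zext users so the IV is widened once. The bookkeeping, as a
// hypothetical helper (noteExtendedUse is not LLVM API):
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Grows WidestTy to Ty when Ty is a wider, target-legal integer type.
static void noteExtendedUse(Type *Ty, Type *&WidestTy,
                            ScalarEvolution *SE, const TargetData *TD) {
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (TD && !TD->isLegalInteger(Width))
    return; // never widen to a type the target cannot hold in a register
  if (!WidestTy || Width > SE->getTypeSizeInBits(WidestTy))
    WidestTy = SE->getEffectiveSCEVType(Ty);
}
// ----------------------------------------------------------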
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index a0e41d9a97..ea4c515f41 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -498,7 +498,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- const Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
+ Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -604,7 +604,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- const Type *IntPtr = TD->getIntPtrType(SI->getContext());
+ Type *IntPtr = TD->getIntPtrType(SI->getContext());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
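// ---- illustrative sketch, not part of the patch above ----
// Both idiom paths above form bytes = (BECount + 1) * StoreSize as a SCEV at
// pointer width before expanding it to IR. A hypothetical reduction
// (numBytes is not LLVM API; the in-tree code also tags no-wrap flags):
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Builds the byte-count SCEV for a loop storing StoreSize bytes per trip.
static const SCEV *numBytes(const SCEV *BECount, unsigned StoreSize,
                            ScalarEvolution *SE, const TargetData *TD,
                            LLVMContext &Ctx) {
  Type *IntPtr = TD->getIntPtrType(Ctx);
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  const SCEV *TripCount = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1));
  return SE->getMulExpr(TripCount, SE->getConstant(IntPtr, StoreSize));
}
// ----------------------------------------------------------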
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 509d0264f1..e90b5bcacd 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -219,7 +219,7 @@ struct Formula {
void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
unsigned getNumRegs() const;
- const Type *getType() const;
+ Type *getType() const;
void DeleteBaseReg(const SCEV *&S);
@@ -319,7 +319,7 @@ unsigned Formula::getNumRegs() const {
/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
-const Type *Formula::getType() const {
+Type *Formula::getType() const {
return !BaseRegs.empty() ? BaseRegs.front()->getType() :
ScaledReg ? ScaledReg->getType() :
AM.BaseGV ? AM.BaseGV->getType() :
@@ -397,7 +397,7 @@ void Formula::dump() const {
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
- const Type *WideTy =
+ Type *WideTy =
IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}
@@ -405,7 +405,7 @@ static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
- const Type *WideTy =
+ Type *WideTy =
IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}
@@ -413,7 +413,7 @@ static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
- const Type *WideTy =
+ Type *WideTy =
IntegerType::get(SE.getContext(),
SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
@@ -594,8 +594,8 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
}
/// getAccessType - Return the type of the memory being accessed.
-static const Type *getAccessType(const Instruction *Inst) {
- const Type *AccessTy = Inst->getType();
+static Type *getAccessType(const Instruction *Inst) {
+ Type *AccessTy = Inst->getType();
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
AccessTy = SI->getOperand(0)->getType();
else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -614,7 +614,7 @@ static const Type *getAccessType(const Instruction *Inst) {
// All pointers have the same requirements, so canonicalize them to an
// arbitrary pointer type to minimize variation.
- if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
+ if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
PTy->getAddressSpace());
@@ -980,7 +980,7 @@ public:
};
KindType Kind;
- const Type *AccessTy;
+ Type *AccessTy;
SmallVector<int64_t, 8> Offsets;
int64_t MinOffset;
@@ -995,7 +995,7 @@ public:
/// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
/// max fixup widths to be equivalent, because the narrower one may be relying
/// on the implicit truncation to truncate away bogus bits.
- const Type *WidestFixupType;
+ Type *WidestFixupType;
/// Formulae - A list of ways to build a value that can satisfy this user.
/// After the list is populated, one of these is selected heuristically and
@@ -1005,7 +1005,7 @@ public:
/// Regs - The set of register candidates used by all formulae in this LSRUse.
SmallPtrSet<const SCEV *, 4> Regs;
- LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
+ LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
MinOffset(INT64_MAX),
MaxOffset(INT64_MIN),
AllFixupsOutsideLoop(true),
@@ -1127,7 +1127,7 @@ void LSRUse::dump() const {
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
- LSRUse::KindType Kind, const Type *AccessTy,
+ LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI) {
switch (Kind) {
case LSRUse::Address:
@@ -1176,7 +1176,7 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
static bool isLegalUse(TargetLowering::AddrMode AM,
int64_t MinOffset, int64_t MaxOffset,
- LSRUse::KindType Kind, const Type *AccessTy,
+ LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI) {
// Check for overflow.
if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
@@ -1198,7 +1198,7 @@ static bool isLegalUse(TargetLowering::AddrMode AM,
static bool isAlwaysFoldable(int64_t BaseOffs,
GlobalValue *BaseGV,
bool HasBaseReg,
- LSRUse::KindType Kind, const Type *AccessTy,
+ LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI) {
// Fast-path: zero is always foldable.
if (BaseOffs == 0 && !BaseGV) return true;
@@ -1224,7 +1224,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
static bool isAlwaysFoldable(const SCEV *S,
int64_t MinOffset, int64_t MaxOffset,
bool HasBaseReg,
- LSRUse::KindType Kind, const Type *AccessTy,
+ LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI,
ScalarEvolution &SE) {
// Fast-path: zero is always foldable.
@@ -1299,7 +1299,7 @@ class LSRInstance {
SmallSetVector<int64_t, 8> Factors;
/// Types - Interesting use types, to facilitate truncation reuse.
- SmallSetVector<const Type *, 4> Types;
+ SmallSetVector<Type *, 4> Types;
/// Fixups - The list of operands which are to be replaced.
SmallVector<LSRFixup, 16> Fixups;
@@ -1330,11 +1330,11 @@ class LSRInstance {
UseMapTy UseMap;
bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
- LSRUse::KindType Kind, const Type *AccessTy);
+ LSRUse::KindType Kind, Type *AccessTy);
std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
LSRUse::KindType Kind,
- const Type *AccessTy);
+ Type *AccessTy);
void DeleteUse(LSRUse &LU, size_t LUIdx);
@@ -1426,7 +1426,7 @@ void LSRInstance::OptimizeShadowIV() {
IVUsers::const_iterator CandidateUI = UI;
++UI;
Instruction *ShadowUse = CandidateUI->getUser();
- const Type *DestTy = NULL;
+ Type *DestTy = NULL;
/* If shadow use is a int->float cast then insert a second IV
to eliminate this cast.
@@ -1457,7 +1457,7 @@ void LSRInstance::OptimizeShadowIV() {
if (!PH) continue;
if (PH->getNumIncomingValues() != 2) continue;
- const Type *SrcTy = PH->getType();
+ Type *SrcTy = PH->getType();
int Mantissa = DestTy->getFPMantissaWidth();
if (Mantissa == -1) continue;
if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
@@ -1776,7 +1776,7 @@ LSRInstance::OptimizeLoopTermCond() {
if (!TLI)
goto decline_post_inc;
// Check for possible scaled-address reuse.
- const Type *AccessTy = getAccessType(UI->getUser());
+ Type *AccessTy = getAccessType(UI->getUser());
TargetLowering::AddrMode AM;
AM.Scale = C->getSExtValue();
if (TLI->isLegalAddressingMode(AM, AccessTy))
@@ -1840,10 +1840,10 @@ LSRInstance::OptimizeLoopTermCond() {
/// return true.
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
- LSRUse::KindType Kind, const Type *AccessTy) {
+ LSRUse::KindType Kind, Type *AccessTy) {
int64_t NewMinOffset = LU.MinOffset;
int64_t NewMaxOffset = LU.MaxOffset;
- const Type *NewAccessTy = AccessTy;
+ Type *NewAccessTy = AccessTy;
// Check for a mismatched kind. It's tempting to collapse mismatched kinds to
// something conservative, however this can pessimize in the case that one of
@@ -1882,7 +1882,7 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
- LSRUse::KindType Kind, const Type *AccessTy) {
+ LSRUse::KindType Kind, Type *AccessTy) {
const SCEV *Copy = Expr;
int64_t Offset = ExtractImmediate(Expr, SE);
@@ -2044,7 +2044,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LF.PostIncLoops = UI->getPostIncLoops();
LSRUse::KindType Kind = LSRUse::Basic;
- const Type *AccessTy = 0;
+ Type *AccessTy = 0;
if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
Kind = LSRUse::Address;
AccessTy = getAccessType(LF.UserInst);
@@ -2464,7 +2464,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
if (LU.Kind != LSRUse::ICmpZero) return;
// Determine the integer type for the base formula.
- const Type *IntTy = Base.getType();
+ Type *IntTy = Base.getType();
if (!IntTy) return;
if (SE.getTypeSizeInBits(IntTy) > 64) return;
@@ -2538,7 +2538,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
/// scaled-offset address modes, for example.
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
// Determine the integer type for the base formula.
- const Type *IntTy = Base.getType();
+ Type *IntTy = Base.getType();
if (!IntTy) return;
// If this Formula already has a scaled register, we can't add another one.
@@ -2598,13 +2598,13 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
if (Base.AM.BaseGV) return;
// Determine the integer type for the base formula.
- const Type *DstTy = Base.getType();
+ Type *DstTy = Base.getType();
if (!DstTy) return;
DstTy = SE.getEffectiveSCEVType(DstTy);
- for (SmallSetVector<const Type *, 4>::const_iterator
+ for (SmallSetVector<Type *, 4>::const_iterator
I = Types.begin(), E = Types.end(); I != E; ++I) {
- const Type *SrcTy = *I;
+ Type *SrcTy = *I;
if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
Formula F = Base;
@@ -2741,7 +2741,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
int64_t Imm = WI.Imm;
const SCEV *OrigReg = WI.OrigReg;
- const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
+ Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
@@ -3440,9 +3440,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
Rewriter.setPostInc(LF.PostIncLoops);
// This is the type that the user actually needs.
- const Type *OpTy = LF.OperandValToReplace->getType();
+ Type *OpTy = LF.OperandValToReplace->getType();
// This will be the type that we'll initially expand to.
- const Type *Ty = F.getType();
+ Type *Ty = F.getType();
if (!Ty)
// No type known; just expand directly to the ultimate type.
Ty = OpTy;
@@ -3450,7 +3450,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
// Expand directly to the ultimate type if it's the right size.
Ty = OpTy;
// This is the type to do integer arithmetic in.
- const Type *IntTy = SE.getEffectiveSCEVType(Ty);
+ Type *IntTy = SE.getEffectiveSCEVType(Ty);
// Build up a list of operands to add together to form the full base.
SmallVector<const SCEV *, 8> Ops;
@@ -3637,7 +3637,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);
// If this is reuse-by-noop-cast, insert the noop cast.
- const Type *OpTy = LF.OperandValToReplace->getType();
+ Type *OpTy = LF.OperandValToReplace->getType();
if (FullV->getType() != OpTy)
FullV =
CastInst::Create(CastInst::getCastOpcode(FullV, false,
@@ -3667,7 +3667,7 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);
// If this is reuse-by-noop-cast, insert the noop cast.
- const Type *OpTy = LF.OperandValToReplace->getType();
+ Type *OpTy = LF.OperandValToReplace->getType();
if (FullV->getType() != OpTy) {
Instruction *Cast =
CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
@@ -3793,7 +3793,7 @@ void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
OS << '*' << *I;
}
- for (SmallSetVector<const Type *, 4>::const_iterator
+ for (SmallSetVector<Type *, 4>::const_iterator
I = Types.begin(), E = Types.end(); I != E; ++I) {
if (!First) OS << ", ";
First = false;
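// ---- illustrative sketch, not part of the patch above ----
// The isAddRecSExtable family tests "sign extension preserves the value" by
// extending into a type one bit wider and checking that the expression keeps
// its shape; if the sext folds back into an addrec, no bits were at risk.
// Restated for the addrec case (sextPreservesValue is a made-up name):
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;
// True if AR can be sign-extended without changing its value.
static bool sextPreservesValue(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy = IntegerType::get(SE.getContext(),
                                  SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}
// ----------------------------------------------------------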
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 7ed3db6cc1..ba5ee68ebb 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -54,7 +54,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
if (OpC->isZero()) continue; // No offset.
// Handle struct indices, which add their field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
@@ -448,7 +448,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Determine alignment
unsigned Alignment = Range.Alignment;
if (Alignment == 0) {
- const Type *EltType =
+ Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
Alignment = TD->getABITypeAlignment(EltType);
}
@@ -616,7 +616,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (!A->hasStructRetAttr())
return false;
- const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
+ Type *StructTy = cast<PointerType>(A->getType())->getElementType();
uint64_t destSize = TD->getTypeAllocSize(StructTy);
if (destSize < srcSize)
@@ -860,7 +860,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
- const Type *ByValTy =cast<PointerType>(ByValArg->getType())->getElementType();
+ Type *ByValTy =cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
MemDepResult DepInfo =
MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
diff --git a/lib/Transforms/Scalar/ObjCARC.cpp b/lib/Transforms/Scalar/ObjCARC.cpp
index ee132d3be4..f2e5ff9ac9 100644
--- a/lib/Transforms/Scalar/ObjCARC.cpp
+++ b/lib/Transforms/Scalar/ObjCARC.cpp
@@ -180,7 +180,7 @@ static bool IsPotentialUse(const Value *Op) {
Arg->hasStructRetAttr())
return false;
// Only consider values with pointer types, and not function pointers.
- const PointerType *Ty = dyn_cast<PointerType>(Op->getType());
+ PointerType *Ty = dyn_cast<PointerType>(Op->getType());
if (!Ty || isa<FunctionType>(Ty->getElementType()))
return false;
// Conservatively assume anything else is a potential use.
@@ -213,8 +213,8 @@ static InstructionClass GetFunctionClass(const Function *F) {
const Argument *A0 = AI++;
if (AI == AE)
// Argument is a pointer.
- if (const PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
- const Type *ETy = PTy->getElementType();
+ if (PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
+ Type *ETy = PTy->getElementType();
// Argument is i8*.
if (ETy->isIntegerTy(8))
return StringSwitch<InstructionClass>(F->getName())
@@ -234,7 +234,7 @@ static InstructionClass GetFunctionClass(const Function *F) {
.Default(IC_CallOrUser);
// Argument is i8**
- if (const PointerType *Pte = dyn_cast<PointerType>(ETy))
+ if (PointerType *Pte = dyn_cast<PointerType>(ETy))
if (Pte->getElementType()->isIntegerTy(8))
return StringSwitch<InstructionClass>(F->getName())
.Case("objc_loadWeakRetained", IC_LoadWeakRetained)
@@ -246,11 +246,11 @@ static InstructionClass GetFunctionClass(const Function *F) {
// Two arguments, first is i8**.
const Argument *A1 = AI++;
if (AI == AE)
- if (const PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
- if (const PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
+ if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
+ if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
if (Pte->getElementType()->isIntegerTy(8))
- if (const PointerType *PTy1 = dyn_cast<PointerType>(A1->getType())) {
- const Type *ETy1 = PTy1->getElementType();
+ if (PointerType *PTy1 = dyn_cast<PointerType>(A1->getType())) {
+ Type *ETy1 = PTy1->getElementType();
// Second argument is i8*
if (ETy1->isIntegerTy(8))
return StringSwitch<InstructionClass>(F->getName())
@@ -258,7 +258,7 @@ static InstructionClass GetFunctionClass(const Function *F) {
.Case("objc_initWeak", IC_InitWeak)
.Default(IC_CallOrUser);
// Second argument is i8**.
- if (const PointerType *Pte1 = dyn_cast<PointerType>(ETy1))
+ if (PointerType *Pte1 = dyn_cast<PointerType>(ETy1))
if (Pte1->getElementType()->isIntegerTy(8))
return StringSwitch<InstructionClass>(F->getName())
.Case("objc_moveWeak", IC_MoveWeak)
@@ -1501,7 +1501,7 @@ Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
std::vector<Type *> Params;
Params.push_back(I8X);
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(I8X, Params, /*isVarArg=*/false);
AttrListPtr Attributes;
Attributes.addAttr(~0u, Attribute::NoUnwind);
@@ -1518,7 +1518,7 @@ Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
std::vector<Type *> Params;
Params.push_back(I8X);
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(I8X, Params, /*isVarArg=*/false);
AttrListPtr Attributes;
Attributes.addAttr(~0u, Attribute::NoUnwind);
@@ -1953,7 +1953,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
case IC_DestroyWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (isNullOrUndef(CI->getArgOperand(0))) {
- const Type *Ty = CI->getArgOperand(0)->getType();
+ Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
CI);
@@ -1968,7 +1968,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
CallInst *CI = cast<CallInst>(Inst);
if (isNullOrUndef(CI->getArgOperand(0)) ||
isNullOrUndef(CI->getArgOperand(1))) {
- const Type *Ty = CI->getArgOperand(0)->getType();
+ Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
CI);
@@ -2090,7 +2090,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
++NumPartialNoops;
// Clone the call into each predecessor that has a non-null value.
CallInst *CInst = cast<CallInst>(Inst);
- const Type *ParamTy = CInst->getArgOperand(0)->getType();
+ Type *ParamTy = CInst->getArgOperand(0)->getType();
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming =
StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
@@ -2566,8 +2566,8 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
SmallVectorImpl<Instruction *> &DeadInsts) {
- const Type *ArgTy = Arg->getType();
- const Type *ParamTy =
+ Type *ArgTy = Arg->getType();
+ Type *ParamTy =
(RetainRVFunc ? RetainRVFunc :
RetainFunc ? RetainFunc :
RetainBlockFunc)->arg_begin()->getType();
@@ -3294,7 +3294,7 @@ Constant *ObjCARCContract::getRetainAutoreleaseCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
std::vector<Type *> Params;
Params.push_back(I8X);
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(I8X, Params, /*isVarArg=*/false);
AttrListPtr Attributes;
Attributes.addAttr(~0u, Attribute::NoUnwind);
@@ -3310,7 +3310,7 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
std::vector<Type *> Params;
Params.push_back(I8X);
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(I8X, Params, /*isVarArg=*/false);
AttrListPtr Attributes;
Attributes.addAttr(~0u, Attribute::NoUnwind);
@@ -3411,8 +3411,8 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
++NumStoreStrongs;
LLVMContext &C = Release->getContext();
- const Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- const Type *I8XX = PointerType::getUnqual(I8X);
+ Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
+ Type *I8XX = PointerType::getUnqual(I8X);
Value *Args[] = { Load->getPointerOperand(), New };
if (Args[0]->getType() != I8XX)
@@ -3548,7 +3548,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
if (Inst != UserInst && DT->dominates(Inst, UserInst)) {
Changed = true;
Instruction *Replacement = Inst;
- const Type *UseTy = U.get()->getType();
+ Type *UseTy = U.get()->getType();
if (PHINode *PHI = dyn_cast<PHINode>(UserInst)) {
// For PHI nodes, insert the bitcast in the predecessor block.
unsigned ValNo =
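// ---- illustrative sketch, not part of the patch above ----
// GetFunctionClass classifies runtime entry points by argument shape, peeling
// pointer types to distinguish i8* from i8**. The core test as a hypothetical
// predicate (isI8PtrChain is not LLVM API):
#include "llvm/DerivedTypes.h"
using namespace llvm;
// True if Ty is i8* for Depth==1, i8** for Depth==2, and so on.
static bool isI8PtrChain(Type *Ty, unsigned Depth) {
  for (unsigned i = 0; i != Depth; ++i) {
    PointerType *PTy = dyn_cast<PointerType>(Ty);
    if (!PTy)
      return false;
    Ty = PTy->getElementType();
  }
  return Ty->isIntegerTy(8);
}
// ----------------------------------------------------------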
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 083412ed94..3d116417f5 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -241,7 +241,7 @@ public:
/// this method must be called.
void AddTrackedFunction(Function *F) {
// Add an entry, F -> undef.
- if (const StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
+ if (StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
MRVFunctionsTracked.insert(F);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i),
@@ -302,7 +302,7 @@ public:
/// markAnythingOverdefined - Mark the specified value overdefined. This
/// works with both scalars and structs.
void markAnythingOverdefined(Value *V) {
- if (const StructType *STy = dyn_cast<StructType>(V->getType()))
+ if (StructType *STy = dyn_cast<StructType>(V->getType()))
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
markOverdefined(getStructValueState(V, i), V);
else
@@ -417,7 +417,7 @@ private:
else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C))
LV.markConstant(CS->getOperand(i)); // Constants are constant.
else if (isa<ConstantAggregateZero>(C)) {
- const Type *FieldTy = cast<StructType>(V->getType())->getElementType(i);
+ Type *FieldTy = cast<StructType>(V->getType())->getElementType(i);
LV.markConstant(Constant::getNullValue(FieldTy));
} else
LV.markOverdefined(); // Unknown sort of constant.
@@ -772,7 +772,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) {
// Handle functions that return multiple values.
if (!TrackedMultipleRetVals.empty()) {
- if (const StructType *STy = dyn_cast<StructType>(ResultOp->getType()))
+ if (StructType *STy = dyn_cast<StructType>(ResultOp->getType()))
if (MRVFunctionsTracked.count(F))
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
@@ -825,7 +825,7 @@ void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
}
void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
- const StructType *STy = dyn_cast<StructType>(IVI.getType());
+ StructType *STy = dyn_cast<StructType>(IVI.getType());
if (STy == 0)
return markOverdefined(&IVI);
@@ -925,7 +925,7 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
// Could annihilate value.
if (I.getOpcode() == Instruction::And)
markConstant(IV, &I, Constant::getNullValue(I.getType()));
- else if (const VectorType *PT = dyn_cast<VectorType>(I.getType()))
+ else if (VectorType *PT = dyn_cast<VectorType>(I.getType()))
markConstant(IV, &I, Constant::getAllOnesValue(PT));
else
markConstant(IV, &I,
@@ -1303,7 +1303,7 @@ CallOverdefined:
continue;
}
- if (const StructType *STy = dyn_cast<StructType>(AI->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(AI->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
LatticeVal CallArg = getStructValueState(*CAI, i);
mergeInValue(getStructValueState(AI, i), AI, CallArg);
@@ -1315,7 +1315,7 @@ CallOverdefined:
}
// If this is a single/zero retval case, see if we're tracking the function.
- if (const StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
+ if (StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
if (!MRVFunctionsTracked.count(F))
goto CallOverdefined; // Not tracking this callee.
@@ -1419,7 +1419,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// Look for instructions which produce undef values.
if (I->getType()->isVoidTy()) continue;
- if (const StructType *STy = dyn_cast<StructType>(I->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(I->getType())) {
// Only a few things that can be structs matter for undef. Just send
// all their results to overdefined. We could be more precise than this
// but it isn't worth bothering.
@@ -1457,7 +1457,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// If this is an instructions whose result is defined even if the input is
// not fully defined, propagate the information.
- const Type *ITy = I->getType();
+ Type *ITy = I->getType();
switch (I->getOpcode()) {
default: break; // Leave the instruction as an undef.
case Instruction::ZExt:
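// ---- illustrative sketch, not part of the patch above ----
// SCCP keeps one lattice value per struct element, so operations like
// markAnythingOverdefined walk the elements of struct-typed values and treat
// everything else as a single slot. The shape of that walk, with a
// hypothetical callback (forEachLatticeSlot is not LLVM API):
#include "llvm/DerivedTypes.h"
using namespace llvm;
// Invokes Visit once per lattice slot of a value of type Ty.
template <typename Fn>
static void forEachLatticeSlot(Type *Ty, Fn Visit) {
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      Visit(i); // one LatticeVal per element
  } else {
    Visit(0); // scalars use a single LatticeVal
  }
}
// ----------------------------------------------------------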
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 7d6349cf4e..9b704f6e72 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -129,11 +129,11 @@ namespace {
AllocaInfo &Info);
void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
- const Type *MemOpType, bool isStore, AllocaInfo &Info,
+ Type *MemOpType, bool isStore, AllocaInfo &Info,
Instruction *TheAccess, bool AllowWholeAccess);
- bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
- uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset,
- const Type *&IdxTy);
+ bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size);
+ uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset,
+ Type *&IdxTy);
void DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList);
@@ -253,7 +253,7 @@ class ConvertToScalarInfo {
/// VectorTy - This tracks the vector type that we should promote the alloca to
/// if that is possible. It starts out null, and if it isn't possible to turn
/// the alloca into a vector type, it gets set to VoidTy.
- const VectorType *VectorTy;
+ VectorType *VectorTy;
/// HadNonMemTransferAccess - True if there is at least one access to the
/// alloca that is not a MemTransferInst. We don't want to turn structs into
@@ -269,11 +269,11 @@ public:
private:
bool CanConvertToScalar(Value *V, uint64_t Offset);
- void MergeInTypeForLoadOrStore(const Type *In, uint64_t Offset);
- bool MergeInVectorType(const VectorType *VInTy, uint64_t Offset);
+ void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset);
+ bool MergeInVectorType(VectorType *VInTy, uint64_t Offset);
void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
- Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
+ Value *ConvertScalar_ExtractValue(Value *NV, Type *ToType,
uint64_t Offset, IRBuilder<> &Builder);
Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
uint64_t Offset, IRBuilder<> &Builder);
@@ -306,7 +306,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// random stuff that doesn't use vectors (e.g. <9 x double>) because then
// we just get a lot of insert/extracts. If at least one vector is
// involved, then we probably really do have a union of vector/array.
- const Type *NewTy;
+ Type *NewTy;
if (ScalarKind == Vector) {
assert(VectorTy && "Missing type for vector scalar.");
DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = "
@@ -344,7 +344,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
/// large) integer type with extract and insert operations where the loads
/// and stores would mutate the memory. We mark this by setting VectorTy
/// to VoidTy.
-void ConvertToScalarInfo::MergeInTypeForLoadOrStore(const Type *In,
+void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
uint64_t Offset) {
// If we already decided to turn this into a blob of integer memory, there is
// nothing to be done.
@@ -355,7 +355,7 @@ void ConvertToScalarInfo::MergeInTypeForLoadOrStore(const Type *In,
// If the In type is a vector that is the same size as the alloca, see if it
// matches the existing VecTy.
- if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
+ if (VectorType *VInTy = dyn_cast<VectorType>(In)) {
if (MergeInVectorType(VInTy, Offset))
return;
} else if (In->isFloatTy() || In->isDoubleTy() ||
@@ -395,7 +395,7 @@ void ConvertToScalarInfo::MergeInTypeForLoadOrStore(const Type *In,
/// MergeInVectorType - Handles the vector case of MergeInTypeForLoadOrStore,
/// returning true if the type was successfully merged and false otherwise.
-bool ConvertToScalarInfo::MergeInVectorType(const VectorType *VInTy,
+bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
uint64_t Offset) {
// TODO: Support nonzero offsets?
if (Offset != 0)
@@ -422,8 +422,8 @@ bool ConvertToScalarInfo::MergeInVectorType(const VectorType *VInTy,
return true;
}
- const Type *ElementTy = VectorTy->getElementType();
- const Type *InElementTy = VInTy->getElementType();
+ Type *ElementTy = VectorTy->getElementType();
+ Type *InElementTy = VInTy->getElementType();
// Do not allow mixed integer and floating-point accesses from vectors of
// different sizes.
@@ -668,8 +668,8 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
Value *SrcPtr = MTI->getSource();
- const PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
- const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
+ PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
+ PointerType* AIPTy = cast<PointerType>(NewAI->getType());
if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
AIPTy = PointerType::get(AIPTy->getElementType(),
SPTy->getAddressSpace());
@@ -685,8 +685,8 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
- const PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
- const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
+ PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
+ PointerType* AIPTy = cast<PointerType>(NewAI->getType());
if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
AIPTy = PointerType::get(AIPTy->getElementType(),
DPTy->getAddressSpace());
@@ -711,7 +711,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
/// access of an alloca. The input types must be integer or floating-point
/// scalar or vector types, and the resulting type is an integer, float or
/// double.
-static const Type *getScaledElementType(const Type *Ty1, const Type *Ty2,
+static Type *getScaledElementType(Type *Ty1, Type *Ty2,
unsigned NewBitWidth) {
bool IsFP1 = Ty1->isFloatingPointTy() ||
(Ty1->isVectorTy() &&
@@ -737,11 +737,11 @@ static const Type *getScaledElementType(const Type *Ty1, const Type *Ty2,
/// CreateShuffleVectorCast - Creates a shuffle vector to convert one vector
/// to another vector of the same element type which has the same allocation
/// size but different primitive sizes (e.g. <3 x i32> and <4 x i32>).
-static Value *CreateShuffleVectorCast(Value *FromVal, const Type *ToType,
+static Value *CreateShuffleVectorCast(Value *FromVal, Type *ToType,
IRBuilder<> &Builder) {
- const Type *FromType = FromVal->getType();
- const VectorType *FromVTy = cast<VectorType>(FromType);
- const VectorType *ToVTy = cast<VectorType>(ToType);
+ Type *FromType = FromVal->getType();
+ VectorType *FromVTy = cast<VectorType>(FromType);
+ VectorType *ToVTy = cast<VectorType>(ToType);
assert((ToVTy->getElementType() == FromVTy->getElementType()) &&
"Vectors must have the same element type");
Value *UnV = UndefValue::get(FromType);
@@ -749,7 +749,7 @@ static Value *CreateShuffleVectorCast(Value *FromVal, const Type *ToType,
unsigned numEltsTo = ToVTy->getNumElements();
SmallVector<Constant*, 3> Args;
- const Type* Int32Ty = Builder.getInt32Ty();
+ Type* Int32Ty = Builder.getInt32Ty();
unsigned minNumElts = std::min(numEltsFrom, numEltsTo);
unsigned i;
for (i=0; i != minNumElts; ++i)
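The loop here fills the shuffle mask with the common lane numbers; any extra destination lanes read from the undef second operand. A hedged, self-contained sketch of the same resize idiom (the helper name is ours, and it assumes the ArrayRef-based ConstantVector::get of this period):

    #include "llvm/Constants.h"
    #include "llvm/Support/IRBuilder.h"
    #include "llvm/ADT/SmallVector.h"
    #include <algorithm>
    using namespace llvm;

    // Resize e.g. <3 x i32> into <4 x i32>: copy lanes 0..2, leave lane 3 undef.
    static Value *resizeVector(Value *From, VectorType *ToVTy, IRBuilder<> &B) {
      VectorType *FromVTy = cast<VectorType>(From->getType());
      Type *Int32Ty = B.getInt32Ty();
      unsigned NumFrom = FromVTy->getNumElements(), NumTo = ToVTy->getNumElements();
      SmallVector<Constant*, 8> Mask;
      for (unsigned i = 0, e = std::min(NumFrom, NumTo); i != e; ++i)
        Mask.push_back(ConstantInt::get(Int32Ty, i));      // copied lanes
      for (unsigned i = std::min(NumFrom, NumTo); i != NumTo; ++i)
        Mask.push_back(UndefValue::get(Int32Ty));          // don't-care lanes
      return B.CreateShuffleVector(From, UndefValue::get(FromVTy),
                                   ConstantVector::get(Mask), "resize");
    }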
@@ -775,16 +775,16 @@ static Value *CreateShuffleVectorCast(Value *FromVal, const Type *ToType,
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *ConvertToScalarInfo::
-ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
+ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
uint64_t Offset, IRBuilder<> &Builder) {
// If the load is of the whole new alloca, no conversion is needed.
- const Type *FromType = FromVal->getType();
+ Type *FromType = FromVal->getType();
if (FromType == ToType && Offset == 0)
return FromVal;
// If the result alloca is a vector type, this is either an element
// access or a bitcast to another vector type of the same size.
- if (const VectorType *VTy = dyn_cast<VectorType>(FromType)) {
+ if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
if (FromTypeSize == ToTypeSize) {
@@ -803,12 +803,12 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
assert(!(ToType->isVectorTy() && Offset != 0) && "Can't extract a value "
"of a smaller vector type at a nonzero offset.");
- const Type *CastElementTy = getScaledElementType(FromType, ToType,
+ Type *CastElementTy = getScaledElementType(FromType, ToType,
ToTypeSize * 8);
unsigned NumCastVectorElements = FromTypeSize / ToTypeSize;
LLVMContext &Context = FromVal->getContext();
- const Type *CastTy = VectorType::get(CastElementTy,
+ Type *CastTy = VectorType::get(CastElementTy,
NumCastVectorElements);
Value *Cast = Builder.CreateBitCast(FromVal, CastTy, "tmp");
@@ -837,7 +837,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
// If ToType is a first class aggregate, extract out each of the pieces and
// use insertvalue's to form the FCA.
- if (const StructType *ST = dyn_cast<StructType>(ToType)) {
+ if (StructType *ST = dyn_cast<StructType>(ToType)) {
const StructLayout &Layout = *TD.getStructLayout(ST);
Value *Res = UndefValue::get(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
@@ -849,7 +849,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
return Res;
}
- if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
@@ -861,7 +861,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
}
// Otherwise, this must be a union that was converted to an integer value.
- const IntegerType *NTy = cast<IntegerType>(FromVal->getType());
+ IntegerType *NTy = cast<IntegerType>(FromVal->getType());
// If this is a big-endian system and the load is narrower than the
// full alloca type, we need to do a shift to get the right bits.
@@ -927,10 +927,10 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
uint64_t Offset, IRBuilder<> &Builder) {
// Convert the stored type to the actual type, shift it left to insert
// then 'or' into place.
- const Type *AllocaType = Old->getType();
+ Type *AllocaType = Old->getType();
LLVMContext &Context = Old->getContext();
- if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
+ if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
@@ -952,12 +952,12 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
assert(!(SV->getType()->isVectorTy() && Offset != 0) && "Can't insert a "
"value of a smaller vector type at a nonzero offset.");
- const Type *CastElementTy = getScaledElementType(VTy, SV->getType(),
+ Type *CastElementTy = getScaledElementType(VTy, SV->getType(),
ValSize);
unsigned NumCastVectorElements = VecSize / ValSize;
LLVMContext &Context = SV->getContext();
- const Type *OldCastTy = VectorType::get(CastElementTy,
+ Type *OldCastTy = VectorType::get(CastElementTy,
NumCastVectorElements);
Value *OldCast = Builder.CreateBitCast(Old, OldCastTy, "tmp");
@@ -982,7 +982,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
}
// If SV is a first-class aggregate value, insert each value recursively.
- if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
+ if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
const StructLayout &Layout = *TD.getStructLayout(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
@@ -993,7 +993,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
return Old;
}
- if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
@@ -1393,7 +1393,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
continue;
}
- const Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
+ Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
PN->getName()+".ld", PN);
@@ -1483,13 +1483,13 @@ bool SROA::performPromotion(Function &F) {
/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA. It must be a struct or array type with a small number of elements.
static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
- const Type *T = AI->getAllocatedType();
+ Type *T = AI->getAllocatedType();
// Do not promote any struct into more than 32 separate vars.
- if (const StructType *ST = dyn_cast<StructType>(T))
+ if (StructType *ST = dyn_cast<StructType>(T))
return ST->getNumElements() <= 32;
// Arrays are much less likely to be safe for SROA; only consider
// them if they are very small.
- if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ if (ArrayType *AT = dyn_cast<ArrayType>(T))
return AT->getNumElements() <= 8;
return false;
}
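As a worked check of those thresholds (a hedged driver of ours; the parentless instructions exist only for illustration and are freed by hand):

    #include "llvm/Instructions.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static void sroaThresholdDemo(LLVMContext &Ctx) {
      Type *Fields[] = { Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx) };
      AllocaInst *A = new AllocaInst(StructType::get(Ctx, Fields), "a");
      AllocaInst *B = new AllocaInst(ArrayType::get(Type::getInt8Ty(Ctx), 100), "b");
      // ShouldAttemptScalarRepl(A) -> true  ({i32, i32}: 2 fields   <= 32)
      // ShouldAttemptScalarRepl(B) -> false ([100 x i8]: 100 elements > 8)
      delete B; delete A;
    }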
@@ -1594,7 +1594,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList) {
DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
SmallVector<AllocaInst*, 32> ElementAllocas;
- if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
+ if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
@@ -1604,9 +1604,9 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
WorkList.push_back(NA); // Add to worklist for recursive processing
}
} else {
- const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
+ ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
ElementAllocas.reserve(AT->getNumElements());
- const Type *ElTy = AT->getElementType();
+ Type *ElTy = AT->getElementType();
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
@@ -1672,7 +1672,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
if (LI->isVolatile())
return MarkUnsafe(Info, User);
- const Type *LIType = LI->getType();
+ Type *LIType = LI->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
LIType, false, Info, LI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1682,7 +1682,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
if (SI->isVolatile() || SI->getOperand(0) == I)
return MarkUnsafe(Info, User);
- const Type *SIType = SI->getOperand(0)->getType();
+ Type *SIType = SI->getOperand(0)->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
SIType, true, Info, SI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1727,7 +1727,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
if (LI->isVolatile())
return MarkUnsafe(Info, User);
- const Type *LIType = LI->getType();
+ Type *LIType = LI->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
LIType, false, Info, LI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1737,7 +1737,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (SI->isVolatile() || SI->getOperand(0) == I)
return MarkUnsafe(Info, User);
- const Type *SIType = SI->getOperand(0)->getType();
+ Type *SIType = SI->getOperand(0)->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
SIType, true, Info, SI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1786,14 +1786,14 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
/// elements of the same type (which is always true for arrays). If so,
/// return true with NumElts and EltTy set to the number of elements and the
/// element type, respectively.
-static bool isHomogeneousAggregate(const Type *T, unsigned &NumElts,
- const Type *&EltTy) {
- if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
+static bool isHomogeneousAggregate(Type *T, unsigned &NumElts,
+ Type *&EltTy) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
NumElts = AT->getNumElements();
EltTy = (NumElts == 0 ? 0 : AT->getElementType());
return true;
}
- if (const StructType *ST = dyn_cast<StructType>(T)) {
+ if (StructType *ST = dyn_cast<StructType>(T)) {
NumElts = ST->getNumContainedTypes();
EltTy = (NumElts == 0 ? 0 : ST->getContainedType(0));
for (unsigned n = 1; n < NumElts; ++n) {
@@ -1807,12 +1807,12 @@ static bool isHomogeneousAggregate(const Type *T, unsigned &NumElts,
/// isCompatibleAggregate - Check if T1 and T2 are either the same type or are
/// "homogeneous" aggregates with the same element type and number of elements.
-static bool isCompatibleAggregate(const Type *T1, const Type *T2) {
+static bool isCompatibleAggregate(Type *T1, Type *T2) {
if (T1 == T2)
return true;
unsigned NumElts1, NumElts2;
- const Type *EltTy1, *EltTy2;
+ Type *EltTy1, *EltTy2;
if (isHomogeneousAggregate(T1, NumElts1, EltTy1) &&
isHomogeneousAggregate(T2, NumElts2, EltTy2) &&
NumElts1 == NumElts2 &&
@@ -1830,7 +1830,7 @@ static bool isCompatibleAggregate(const Type *T1, const Type *T2) {
/// If AllowWholeAccess is true, then this allows uses of the entire alloca as a
/// unit. If false, it only allows accesses known to be in a single element.
void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
- const Type *MemOpType, bool isStore,
+ Type *MemOpType, bool isStore,
AllocaInfo &Info, Instruction *TheAccess,
bool AllowWholeAccess) {
// Check if this is a load/store of the entire alloca.
@@ -1857,7 +1857,7 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
}
}
// Check if the offset/size correspond to a component within the alloca type.
- const Type *T = Info.AI->getAllocatedType();
+ Type *T = Info.AI->getAllocatedType();
if (TypeHasComponent(T, Offset, MemSize)) {
Info.hasSubelementAccess = true;
return;
@@ -1868,16 +1868,16 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size. If Size is zero, do not check the size.
-bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
- const Type *EltTy;
+bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
+ Type *EltTy;
uint64_t EltSize;
- if (const StructType *ST = dyn_cast<StructType>(T)) {
+ if (StructType *ST = dyn_cast<StructType>(T)) {
const StructLayout *Layout = TD->getStructLayout(ST);
unsigned EltIdx = Layout->getElementContainingOffset(Offset);
EltTy = ST->getContainedType(EltIdx);
EltSize = TD->getTypeAllocSize(EltTy);
Offset -= Layout->getElementOffset(EltIdx);
- } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
+ } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
EltTy = AT->getElementType();
EltSize = TD->getTypeAllocSize(EltTy);
if (Offset >= AT->getNumElements() * EltSize)
@@ -1926,7 +1926,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
- const Type *LIType = LI->getType();
+ Type *LIType = LI->getType();
if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
// Replace:
@@ -1956,7 +1956,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
Value *Val = SI->getOperand(0);
- const Type *SIType = Val->getType();
+ Type *SIType = Val->getType();
if (isCompatibleAggregate(SIType, AI->getAllocatedType())) {
// Replace:
// store { i32, i32 } %val, { i32, i32 }* %alloc
@@ -2026,10 +2026,10 @@ void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
/// Sets T to the type of the element and Offset to the offset within that
/// element. IdxTy is set to the type of the index result to be used in a
/// GEP instruction.
-uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset,
- const Type *&IdxTy) {
+uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
+ Type *&IdxTy) {
uint64_t Idx = 0;
- if (const StructType *ST = dyn_cast<StructType>(T)) {
+ if (StructType *ST = dyn_cast<StructType>(T)) {
const StructLayout *Layout = TD->getStructLayout(ST);
Idx = Layout->getElementContainingOffset(Offset);
T = ST->getContainedType(Idx);
@@ -2037,7 +2037,7 @@ uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset,
IdxTy = Type::getInt32Ty(T->getContext());
return Idx;
}
- const ArrayType *AT = cast<ArrayType>(T);
+ ArrayType *AT = cast<ArrayType>(T);
T = AT->getElementType();
uint64_t EltSize = TD->getTypeAllocSize(T);
Idx = Offset / EltSize;
@@ -2058,8 +2058,8 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
- const Type *T = AI->getAllocatedType();
- const Type *IdxTy;
+ Type *T = AI->getAllocatedType();
+ Type *IdxTy;
uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
if (GEPI->getOperand(0) == AI)
OldIdx = ~0ULL; // Force the GEP to be rewritten.
@@ -2073,7 +2073,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
if (Idx == OldIdx)
return;
- const Type *i32Ty = Type::getInt32Ty(AI->getContext());
+ Type *i32Ty = Type::getInt32Ty(AI->getContext());
SmallVector<Value*, 8> NewArgs;
NewArgs.push_back(Constant::getNullValue(i32Ty));
while (EltOffset != 0) {
@@ -2139,7 +2139,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If the pointer is not the right type, insert a bitcast to the right
// type.
- const Type *NewTy =
+ Type *NewTy =
PointerType::get(AI->getType()->getElementType(), AddrSpace);
if (OtherPtr->getType() != NewTy)
@@ -2163,12 +2163,12 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
OtherPtr->getName()+"."+Twine(i),
MI);
uint64_t EltOffset;
- const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
- const Type *OtherTy = OtherPtrTy->getElementType();
- if (const StructType *ST = dyn_cast<StructType>(OtherTy)) {
+ PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
+ Type *OtherTy = OtherPtrTy->getElementType();
+ if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
} else {
- const Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
+ Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
EltOffset = TD->getTypeAllocSize(EltTy)*i;
}
@@ -2181,7 +2181,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
}
Value *EltPtr = NewElts[i];
- const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
+ Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
// If we got down to a scalar, insert a load or store as appropriate.
if (EltTy->isSingleValueType()) {
@@ -2207,7 +2207,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
// If EltTy is a vector type, get the element type.
- const Type *ValTy = EltTy->getScalarType();
+ Type *ValTy = EltTy->getScalarType();
// Construct an integer with the right value.
unsigned EltSize = TD->getTypeSizeInBits(ValTy);
@@ -2271,7 +2271,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
- const Type *AllocaEltTy = AI->getAllocatedType();
+ Type *AllocaEltTy = AI->getAllocatedType();
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
IRBuilder<> Builder(SI);
@@ -2286,12 +2286,12 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
- if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
+ if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
const StructLayout *Layout = TD->getStructLayout(EltSTy);
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Get the number of bits to shift SrcVal to get the value.
- const Type *FieldTy = EltSTy->getElementType(i);
+ Type *FieldTy = EltSTy->getElementType(i);
uint64_t Shift = Layout->getElementOffsetInBits(i);
if (TD->isBigEndian())
@@ -2327,8 +2327,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
} else {
- const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
- const Type *ArrayEltTy = ATy->getElementType();
+ ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
+ Type *ArrayEltTy = ATy->getElementType();
uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
@@ -2384,7 +2384,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
- const Type *AllocaEltTy = AI->getAllocatedType();
+ Type *AllocaEltTy = AI->getAllocatedType();
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
@@ -2394,10 +2394,10 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// have different ways to compute the element offset.
const StructLayout *Layout = 0;
uint64_t ArrayEltBitOffset = 0;
- if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
+ if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
Layout = TD->getStructLayout(EltSTy);
} else {
- const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
+ Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
}
@@ -2408,14 +2408,14 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// Load the value from the alloca. If the NewElt is an aggregate, cast
// the pointer to an integer of the same size before doing the load.
Value *SrcField = NewElts[i];
- const Type *FieldTy =
+ Type *FieldTy =
cast<PointerType>(SrcField->getType())->getElementType();
uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
- const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
+ IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
FieldSizeBits);
if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
!FieldTy->isVectorTy())
@@ -2468,14 +2468,14 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
-static bool HasPadding(const Type *Ty, const TargetData &TD) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+static bool HasPadding(Type *Ty, const TargetData &TD) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Ty = ATy->getElementType();
return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
// SROA currently handles only Arrays and Structs.
- const StructType *STy = cast<StructType>(Ty);
+ StructType *STy = cast<StructType>(Ty);
const StructLayout *SL = TD.getStructLayout(STy);
unsigned PrevFieldBitOffset = 0;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -2530,7 +2530,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// and fusion code.
if (!Info.hasSubelementAccess && Info.hasALoadOrStore) {
// If the struct/array just has one element, use basic SRoA.
- if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
+ if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
if (ST->getNumElements() > 1) return false;
} else {
if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1)
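To make the HasPadding test above concrete (a hedged example; the TargetData layout string is our assumption, not the patch's): in {i8, i32} on a typical 32-bit target, field 0 ends at bit 8 while field 1 starts at bit 32, so SROA sees 24 bits of padding between them.

    #include "llvm/Target/TargetData.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static bool paddingDemo(LLVMContext &Ctx) {
      TargetData TD("e-p:32:32:32-i8:8:8-i32:32:32");
      Type *Fields[] = { Type::getInt8Ty(Ctx), Type::getInt32Ty(Ctx) };
      StructType *STy = StructType::get(Ctx, Fields);      // {i8, i32}
      const StructLayout *SL = TD.getStructLayout(STy);
      // Field 1 begins at bit 32, not bit 8, so HasPadding would reject this.
      return SL->getElementOffsetInBits(1) != 8;
    }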
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 7c415e5150..ad52417f7f 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -134,7 +134,7 @@ namespace {
struct StrCatOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strcat" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
@@ -184,7 +184,7 @@ struct StrCatOpt : public LibCallOptimization {
struct StrNCatOpt : public StrCatOpt {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strncat" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
@@ -232,7 +232,7 @@ struct StrNCatOpt : public StrCatOpt {
struct StrChrOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strchr" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
@@ -282,7 +282,7 @@ struct StrChrOpt : public LibCallOptimization {
struct StrRChrOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strrchr" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
@@ -323,7 +323,7 @@ struct StrRChrOpt : public LibCallOptimization {
struct StrCmpOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strcmp" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
@@ -371,7 +371,7 @@ struct StrCmpOpt : public LibCallOptimization {
struct StrNCmpOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strncmp" function prototype.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
@@ -426,7 +426,7 @@ struct StrCpyOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strcpy" function prototype.
unsigned NumParams = OptChkCall ? 3 : 2;
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != NumParams ||
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
@@ -462,7 +462,7 @@ struct StrCpyOpt : public LibCallOptimization {
struct StrNCpyOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy() ||
@@ -511,7 +511,7 @@ struct StrNCpyOpt : public LibCallOptimization {
struct StrLenOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getReturnType()->isIntegerTy())
@@ -537,7 +537,7 @@ struct StrLenOpt : public LibCallOptimization {
struct StrPBrkOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
@@ -575,7 +575,7 @@ struct StrPBrkOpt : public LibCallOptimization {
struct StrToOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy())
@@ -597,7 +597,7 @@ struct StrToOpt : public LibCallOptimization {
struct StrSpnOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
@@ -626,7 +626,7 @@ struct StrSpnOpt : public LibCallOptimization {
struct StrCSpnOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
@@ -658,7 +658,7 @@ struct StrCSpnOpt : public LibCallOptimization {
struct StrStrOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
@@ -722,7 +722,7 @@ struct StrStrOpt : public LibCallOptimization {
struct MemCmpOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy(32))
@@ -773,7 +773,7 @@ struct MemCpyOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
@@ -795,7 +795,7 @@ struct MemMoveOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
@@ -817,7 +817,7 @@ struct MemSetOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
@@ -840,7 +840,7 @@ struct MemSetOpt : public LibCallOptimization {
struct PowOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
@@ -895,7 +895,7 @@ struct PowOpt : public LibCallOptimization {
struct Exp2Opt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 1 argument of FP type, which matches the
// result type.
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
@@ -946,7 +946,7 @@ struct Exp2Opt : public LibCallOptimization {
struct UnaryDoubleFPOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() ||
!FT->getParamType(0)->isDoubleTy())
return 0;
@@ -973,7 +973,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
struct FFSOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// Just make sure this has one integer argument and an integer result.
if (FT->getNumParams() != 1 ||
@@ -1009,7 +1009,7 @@ struct FFSOpt : public LibCallOptimization {
struct IsDigitOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
@@ -1028,7 +1028,7 @@ struct IsDigitOpt : public LibCallOptimization {
struct IsAsciiOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
@@ -1046,7 +1046,7 @@ struct IsAsciiOpt : public LibCallOptimization {
struct AbsOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// We require integer(integer) where the types agree.
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
FT->getParamType(0) != FT->getReturnType())
@@ -1067,7 +1067,7 @@ struct AbsOpt : public LibCallOptimization {
struct ToAsciiOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isIntegerTy(32))
@@ -1147,7 +1147,7 @@ struct PrintFOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Require one fixed pointer argument and an integer/void result.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
@@ -1241,7 +1241,7 @@ struct SPrintFOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Require two fixed pointer arguments and an integer result.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
@@ -1272,7 +1272,7 @@ struct SPrintFOpt : public LibCallOptimization {
struct FWriteOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Require a pointer, an integer, an integer, a pointer, returning integer.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 4 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
!FT->getParamType(2)->isIntegerTy() ||
@@ -1310,7 +1310,7 @@ struct FPutsOpt : public LibCallOptimization {
if (!TD) return 0;
// Require two pointers. Also, we can't optimize if the return value is used.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!CI->use_empty())
@@ -1379,7 +1379,7 @@ struct FPrintFOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Require two fixed parameters as pointers and an integer result.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
@@ -1410,7 +1410,7 @@ struct FPrintFOpt : public LibCallOptimization {
struct PutsOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Require one fixed pointer argument and an integer/void result.
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
@@ -1685,7 +1685,7 @@ void SimplifyLibCalls::setDoesNotAlias(Function &F, unsigned n) {
void SimplifyLibCalls::inferPrototypeAttributes(Function &F) {
- const FunctionType *FTy = F.getFunctionType();
+ FunctionType *FTy = F.getFunctionType();
StringRef Name = F.getName();
switch (Name[0]) {
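Every CallOptimizer above opens with the same guard before rewriting anything: check the callee's FunctionType against the expected C prototype. Distilled for a unary i8* -> integer call such as strlen (the helper name is ours; the body mirrors the StrLenOpt check above):

    static bool looksLikeStrLen(Function *Callee, IRBuilder<> &B) {
      FunctionType *FT = Callee->getFunctionType();
      return FT->getNumParams() == 1 &&
             FT->getParamType(0) == B.getInt8PtrTy() &&  // const char *
             FT->getReturnType()->isIntegerTy();         // size_t-ish result
    }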
diff --git a/lib/Transforms/Utils/AddrModeMatcher.cpp b/lib/Transforms/Utils/AddrModeMatcher.cpp
index be7bed1cec..8e5a1eb2c8 100644
--- a/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -222,7 +222,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
const TargetData *TD = TLI.getTargetData();
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD->getStructLayout(STy);
unsigned Idx =
cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
@@ -557,7 +557,7 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
Value *Address = User->getOperand(OpNo);
if (!Address->getType()->isPointerTy())
return false;
- const Type *AddressAccessTy =
+ Type *AddressAccessTy =
cast<PointerType>(Address->getType())->getElementType();
// Do a match against the root of this address, ignoring profitability. This
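The struct branch above adds the field's StructLayout offset; for array (and other sequential) indices the same loop scales the index by the element's allocation size. A hedged distillation for all-constant indices (the helper name is ours; it walks the same gep_type_iterator):

    #include "llvm/Constants.h"
    #include "llvm/Support/GetElementPtrTypeIterator.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    static int64_t constantGEPOffset(const TargetData &TD, User *GEP) {
      int64_t Off = 0;
      gep_type_iterator GTI = gep_type_begin(GEP);
      for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
        ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(i)); // constants only
        if (StructType *STy = dyn_cast<StructType>(*GTI))
          Off += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
        else
          Off += TD.getTypeAllocSize(GTI.getIndexedType()) * CI->getSExtValue();
      }
      return Off;
    }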
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 14bb17fbda..4b5f45b31f 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -58,8 +58,8 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
AttributeWithIndex AWI =
AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
- const Type *I8Ptr = B.getInt8PtrTy();
- const Type *I32Ty = B.getInt32Ty();
+ Type *I8Ptr = B.getInt8PtrTy();
+ Type *I32Ty = B.getInt32Ty();
Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(&AWI, 1),
I8Ptr, I8Ptr, I32Ty, NULL);
CallInst *CI = B.CreateCall2(StrChr, CastToCStr(Ptr, B),
@@ -102,7 +102,7 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
- const Type *I8Ptr = B.getInt8PtrTy();
+ Type *I8Ptr = B.getInt8PtrTy();
Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
I8Ptr, I8Ptr, I8Ptr, NULL);
CallInst *CI = B.CreateCall2(StrCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
@@ -120,7 +120,7 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
- const Type *I8Ptr = B.getInt8PtrTy();
+ Type *I8Ptr = B.getInt8PtrTy();
Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
I8Ptr, I8Ptr, I8Ptr,
Len->getType(), NULL);
@@ -361,7 +361,7 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
this->CI = CI;
Function *Callee = CI->getCalledFunction();
StringRef Name = Callee->getName();
- const FunctionType *FT = Callee->getFunctionType();
+ FunctionType *FT = Callee->getFunctionType();
LLVMContext &Context = CI->getParent()->getContext();
IRBuilder<> B(CI);
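A hedged usage sketch for the emitters in this file (we assume the trailing const TargetData* parameter that the truncated signatures above cut off): emit a call that finds 'a' in a string at the builder's current insertion point.

    #include "llvm/Transforms/Utils/BuildLibCalls.h"
    using namespace llvm;

    static Value *emitFindA(Value *Str, IRBuilder<> &B, const TargetData *TD) {
      // EmitStrChr declares strchr in the module on first use, then emits the call.
      return EmitStrChr(Str, 'a', B, TD);
    }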
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index 081352358b..8f8e3dc0b0 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -50,7 +50,7 @@ namespace {
DominatorTree* DT;
bool AggregateArgs;
unsigned NumExitBlocks;
- const Type *RetTy;
+ Type *RetTy;
public:
CodeExtractor(DominatorTree* dt = 0, bool AggArgs = false)
: DT(dt), AggregateArgs(AggArgs||AggregateArgsOpt), NumExitBlocks(~0U) {}
@@ -290,7 +290,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
paramTy.clear();
paramTy.push_back(StructPtr);
}
- const FunctionType *funcType =
+ FunctionType *funcType =
FunctionType::get(RetTy, paramTy, false);
// Create the new function
@@ -580,7 +580,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
}
// Now that we've done the deed, simplify the switch instruction.
- const Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
+ Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
switch (NumExitBlocks) {
case 0:
// There are no successors (the block containing the switch itself), which
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index d5b382e55e..6d8a319635 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -636,7 +636,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
const Function *CalledFunc,
InlineFunctionInfo &IFI,
unsigned ByValAlignment) {
- const Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
+ Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
// If the called function is readonly, then it could not mutate the caller's
// copy of the byval'd memory. In this case, it is safe to elide the copy and
@@ -726,7 +726,7 @@ static bool isUsedByLifetimeMarker(Value *V) {
// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
- const Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
+ Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
if (AI->getType() == Int8PtrTy)
return isUsedByLifetimeMarker(AI);
@@ -1090,7 +1090,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Handle all of the return instructions that we just cloned in, and eliminate
// any users of the original call/invoke instruction.
- const Type *RTy = CalledFunc->getReturnType();
+ Type *RTy = CalledFunc->getReturnType();
PHINode *PHI = 0;
if (Returns.size() > 1) {
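hasLifetimeMarkers above defers to isUsedByLifetimeMarker for the actual user scan; a hedged, simplified stand-in for that scan (the real helper in this file also considers i8* bitcasts of the alloca):

    #include "llvm/IntrinsicInst.h"
    using namespace llvm;

    static bool touchedByLifetimeIntrinsic(Value *V) {
      for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI)
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI))
          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
              II->getIntrinsicID() == Intrinsic::lifetime_end)
            return true;
      return false;
    }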
diff --git a/lib/Transforms/Utils/LowerExpectIntrinsic.cpp b/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
index c1213fac7b..61ab3f6533 100644
--- a/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
+++ b/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
@@ -58,7 +58,7 @@ bool LowerExpectIntrinsic::HandleSwitchExpect(SwitchInst *SI) {
return false;
LLVMContext &Context = CI->getContext();
- const Type *Int32Ty = Type::getInt32Ty(Context);
+ Type *Int32Ty = Type::getInt32Ty(Context);
unsigned caseNo = SI->findCaseValue(ExpectedValue);
std::vector<Value *> Vec;
@@ -105,7 +105,7 @@ bool LowerExpectIntrinsic::HandleIfExpect(BranchInst *BI) {
return false;
LLVMContext &Context = CI->getContext();
- const Type *Int32Ty = Type::getInt32Ty(Context);
+ Type *Int32Ty = Type::getInt32Ty(Context);
bool Likely = ExpectedValue->isOne();
// If the expect value is equal to 1, it means that we are more likely to take the
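Both handlers funnel into the same result: a "branch_weights" prof node attached to the branch or switch so later passes prefer the expected edge. A hedged sketch of that attachment (the weight values are illustrative; we assume the Value-based MDNode::get and MD_prof kind id of this period):

    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/Metadata.h"
    using namespace llvm;

    static void markLikelyTrue(BranchInst *BI) {
      LLVMContext &Context = BI->getContext();
      Type *Int32Ty = Type::getInt32Ty(Context);
      Value *Weights[] = { MDString::get(Context, "branch_weights"),
                           ConstantInt::get(Int32Ty, 64),   // expected edge
                           ConstantInt::get(Int32Ty, 4) };  // unexpected edge
      BI->setMetadata(LLVMContext::MD_prof, MDNode::get(Context, Weights));
    }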
diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp
index f77d19de90..8b5891f329 100644
--- a/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/lib/Transforms/Utils/LowerInvoke.cpp
@@ -120,7 +120,7 @@ FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
- const Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
+ Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
if (useExpensiveEHSupport) {
// Insert a type for the linked list of jump buffers.
unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
@@ -131,7 +131,7 @@ bool LowerInvoke::doInitialization(Module &M) {
Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
JBLinkTy->setBody(Elts);
- const Type *PtrJBList = PointerType::getUnqual(JBLinkTy);
+ Type *PtrJBList = PointerType::getUnqual(JBLinkTy);
// Now that we've done that, insert the jmpbuf list head global, unless it
// already exists.
@@ -305,7 +305,7 @@ splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- const Type *Ty = AI->getType();
+ Type *Ty = AI->getType();
// Aggregate types can't be cast, but are legal argument types, so we have
// to handle them differently. We use an extract/insert pair as a
// lightweight method to achieve the same goal.
diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp
index d2ff8aea38..fa8061c2b4 100644
--- a/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/lib/Transforms/Utils/SSAUpdater.cpp
@@ -44,7 +44,7 @@ SSAUpdater::~SSAUpdater() {
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates with type 'Ty'. PHI nodes get a name based on 'Name'.
-void SSAUpdater::Initialize(const Type *Ty, StringRef Name) {
+void SSAUpdater::Initialize(Type *Ty, StringRef Name) {
if (AV == 0)
AV = new AvailableValsTy();
else
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 9d9c324b84..52145b9c97 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -322,7 +322,7 @@ static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
// This is some kind of pointer constant. Turn it into a pointer-sized
// ConstantInt if possible.
- const IntegerType *PtrTy = TD->getIntPtrType(V->getContext());
+ IntegerType *PtrTy = TD->getIntPtrType(V->getContext());
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
if (isa<ConstantPointerNull>(V))
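A hedged sketch of where this goes (reconstructed to mirror what GetConstantInt does next, not copied from the patch): null folds to 0, and an inttoptr of a constant integer gives that integer back.

    static ConstantInt *pointerConstToInt(Constant *C, const TargetData *TD) {
      IntegerType *PtrTy = TD->getIntPtrType(C->getContext());
      if (isa<ConstantPointerNull>(C))
        return ConstantInt::get(PtrTy, 0);                    // null -> 0
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
        if (CE->getOpcode() == Instruction::IntToPtr)
          return dyn_cast<ConstantInt>(CE->getOperand(0));    // inttoptr i -> i
      return 0;
    }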
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index 94794c35fe..1ce0447d80 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -1457,7 +1457,7 @@ void AssemblyWriter::printFunction(const Function *F) {
default: Out << "cc" << F->getCallingConv() << " "; break;
}
- const FunctionType *FT = F->getFunctionType();
+ FunctionType *FT = F->getFunctionType();
const AttrListPtr &Attrs = F->getAttributes();
Attributes RetAttrs = Attrs.getRetAttributes();
if (RetAttrs != Attribute::None)
diff --git a/lib/VMCore/Attributes.cpp b/lib/VMCore/Attributes.cpp
index bf6efa1645..b728b9284b 100644
--- a/lib/VMCore/Attributes.cpp
+++ b/lib/VMCore/Attributes.cpp
@@ -92,7 +92,7 @@ std::string Attribute::getAsString(Attributes Attrs) {
return Result;
}
-Attributes Attribute::typeIncompatible(const Type *Ty) {
+Attributes Attribute::typeIncompatible(Type *Ty) {
Attributes Incompatible = None;
if (!Ty->isIntegerTy())
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 9e93ff370e..d98728069b 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -34,7 +34,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
return false;
Name = Name.substr(5); // Strip off "llvm."
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
Module *M = F->getParent();
switch (Name[0]) {
@@ -139,8 +139,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
F->getName() == "llvm.x86.sse2.loadu.dq" ||
F->getName() == "llvm.x86.sse2.loadu.pd") {
// Convert to a native, unaligned load.
- const Type *VecTy = CI->getType();
- const Type *IntTy = IntegerType::get(C, 128);
+ Type *VecTy = CI->getType();
+ Type *IntTy = IntegerType::get(C, 128);
IRBuilder<> Builder(C);
Builder.SetInsertPoint(CI->getParent(), CI);
@@ -192,7 +192,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::prefetch: {
IRBuilder<> Builder(C);
Builder.SetInsertPoint(CI->getParent(), CI);
- const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CI->getContext());
+ llvm::Type *I32Ty = llvm::Type::getInt32Ty(CI->getContext());
// Add the extra "data cache" argument
Value *Operands[4] = { CI->getArgOperand(0), CI->getArgOperand(1),
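The loadu upgrade earlier in this file is a two-step rewrite; a hedged sketch of its core (names ours, following the VecTy local shown above): bitcast the pointer operand to a pointer to the vector type, then load with alignment 1 so no alignment is assumed.

    static void upgradeUnalignedLoad(CallInst *CI, Type *VecTy,
                                     IRBuilder<> &Builder) {
      Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
                                        PointerType::getUnqual(VecTy), "cast");
      LoadInst *LI = Builder.CreateLoad(BC, "loadu");
      LI->setAlignment(1);              // explicitly unaligned
      CI->replaceAllUsesWith(LI);
    }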
diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp
index 323e2a2809..85badc8b29 100644
--- a/lib/VMCore/ConstantFold.cpp
+++ b/lib/VMCore/ConstantFold.cpp
@@ -42,7 +42,7 @@ using namespace llvm;
/// specified vector type. At this point, we know that the elements of the
/// input vector constant are all simple integer or FP values.
static Constant *BitCastConstantVector(ConstantVector *CV,
- const VectorType *DstTy) {
+ VectorType *DstTy) {
if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
if (CV->isNullValue()) return Constant::getNullValue(DstTy);
@@ -63,7 +63,7 @@ static Constant *BitCastConstantVector(ConstantVector *CV,
// Bitcast each element now.
std::vector<Constant*> Result;
- const Type *DstEltTy = DstTy->getElementType();
+ Type *DstEltTy = DstTy->getElementType();
for (unsigned i = 0; i != NumElts; ++i)
Result.push_back(ConstantExpr::getBitCast(CV->getOperand(i),
DstEltTy));
@@ -78,15 +78,15 @@ static unsigned
foldConstantCastPair(
unsigned opc, ///< opcode of the second cast constant expression
ConstantExpr *Op, ///< the first cast constant expression
- const Type *DstTy ///< desintation type of the first cast
+ Type *DstTy ///< destination type of the first cast
) {
assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!");
assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
assert(CastInst::isCast(opc) && "Invalid cast opcode");
// The types and opcodes for the two cast constant expressions
- const Type *SrcTy = Op->getOperand(0)->getType();
- const Type *MidTy = Op->getType();
+ Type *SrcTy = Op->getOperand(0)->getType();
+ Type *MidTy = Op->getType();
Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opc);
@@ -95,27 +95,27 @@ foldConstantCastPair(
Type::getInt64Ty(DstTy->getContext()));
}
-static Constant *FoldBitCast(Constant *V, const Type *DestTy) {
- const Type *SrcTy = V->getType();
+static Constant *FoldBitCast(Constant *V, Type *DestTy) {
+ Type *SrcTy = V->getType();
if (SrcTy == DestTy)
return V; // no-op cast
// Check to see if we are casting a pointer to an aggregate to a pointer to
// the first element. If so, return the appropriate GEP instruction.
- if (const PointerType *PTy = dyn_cast<PointerType>(V->getType()))
- if (const PointerType *DPTy = dyn_cast<PointerType>(DestTy))
+ if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
+ if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
if (PTy->getAddressSpace() == DPTy->getAddressSpace()) {
SmallVector<Value*, 8> IdxList;
Value *Zero =
Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
IdxList.push_back(Zero);
- const Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getElementType();
while (ElTy != DPTy->getElementType()) {
- if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ if (StructType *STy = dyn_cast<StructType>(ElTy)) {
if (STy->getNumElements() == 0) break;
ElTy = STy->getElementType(0);
IdxList.push_back(Zero);
- } else if (const SequentialType *STy =
+ } else if (SequentialType *STy =
dyn_cast<SequentialType>(ElTy)) {
if (ElTy->isPointerTy()) break; // Can't index into pointers!
ElTy = STy->getElementType();
@@ -133,8 +133,8 @@ static Constant *FoldBitCast(Constant *V, const Type *DestTy) {
// Handle casts from one vector constant to another. We know that the src
// and dest type have the same size (otherwise it's an illegal cast).
- if (const VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
- if (const VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
+ if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
+ if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() &&
"Not cast between same sized vectors!");
SrcTy = NULL;
@@ -332,15 +332,15 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
/// return null if no factoring was possible, to avoid endlessly
/// bouncing an unfoldable expression back into the top-level folder.
///
-static Constant *getFoldedSizeOf(const Type *Ty, const Type *DestTy,
+static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy,
bool Folded) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
return ConstantExpr::getNUWMul(E, N);
}
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked()) {
unsigned NumElems = STy->getNumElements();
// An empty struct has size zero.
@@ -364,7 +364,7 @@ static Constant *getFoldedSizeOf(const Type *Ty, const Type *DestTy,
// Pointer size doesn't depend on the pointee type, so canonicalize them
// to an arbitrary pointee.
- if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
if (!PTy->getElementType()->isIntegerTy(1))
return
getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1),
@@ -389,11 +389,11 @@ static Constant *getFoldedSizeOf(const Type *Ty, const Type *DestTy,
/// return null if no factoring was possible, to avoid endlessly
/// bouncing an unfoldable expression back into the top-level folder.
///
-static Constant *getFoldedAlignOf(const Type *Ty, const Type *DestTy,
+static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy,
bool Folded) {
// The alignment of an array is equal to the alignment of the
// array element. Note that this is not always true for vectors.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
DestTy,
@@ -402,7 +402,7 @@ static Constant *getFoldedAlignOf(const Type *Ty, const Type *DestTy,
return C;
}
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
// Packed structs always have an alignment of 1.
if (STy->isPacked())
return ConstantInt::get(DestTy, 1);
@@ -429,7 +429,7 @@ static Constant *getFoldedAlignOf(const Type *Ty, const Type *DestTy,
// Pointer alignment doesn't depend on the pointee type, so canonicalize them
// to an arbitrary pointee.
- if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
if (!PTy->getElementType()->isIntegerTy(1))
return
getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(),
@@ -455,10 +455,10 @@ static Constant *getFoldedAlignOf(const Type *Ty, const Type *DestTy,
/// return null if no factoring was possible, to avoid endlessly
/// bouncing an unfoldable expression back into the top-level folder.
///
-static Constant *getFoldedOffsetOf(const Type *Ty, Constant *FieldNo,
- const Type *DestTy,
+static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo,
+ Type *DestTy,
bool Folded) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
DestTy, false),
FieldNo, DestTy);
@@ -466,7 +466,7 @@ static Constant *getFoldedOffsetOf(const Type *Ty, Constant *FieldNo,
return ConstantExpr::getNUWMul(E, N);
}
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked()) {
unsigned NumElems = STy->getNumElements();
// An empty struct has no members.
@@ -506,7 +506,7 @@ static Constant *getFoldedOffsetOf(const Type *Ty, Constant *FieldNo,
}
Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
- const Type *DestTy) {
+ Type *DestTy) {
if (isa<UndefValue>(V)) {
// zext(undef) = 0, because the top bits will be zero.
// sext(undef) = 0, because the top bits will all be the same.
@@ -554,8 +554,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
cast<VectorType>(DestTy)->getNumElements() ==
CV->getType()->getNumElements()) {
std::vector<Constant*> res;
- const VectorType *DestVecTy = cast<VectorType>(DestTy);
- const Type *DstEltTy = DestVecTy->getElementType();
+ VectorType *DestVecTy = cast<VectorType>(DestTy);
+ Type *DstEltTy = DestVecTy->getElementType();
for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
res.push_back(ConstantExpr::getCast(opc,
CV->getOperand(i), DstEltTy));
@@ -608,7 +608,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::GetElementPtr &&
CE->getOperand(0)->isNullValue()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
if (CE->getNumOperands() == 2) {
// Handle a sizeof-like expression.
@@ -623,7 +623,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
} else if (CE->getNumOperands() == 3 &&
CE->getOperand(1)->isNullValue()) {
// Handle an alignof-like expression.
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked()) {
ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
if (CI->isOne() &&
@@ -701,7 +701,7 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
if (CondV->isAllOnesValue()) return V1;
- const VectorType *VTy = cast<VectorType>(V1->getType());
+ VectorType *VTy = cast<VectorType>(V1->getType());
ConstantVector *CP1 = dyn_cast<ConstantVector>(V1);
ConstantVector *CP2 = dyn_cast<ConstantVector>(V2);
@@ -709,7 +709,7 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
(CP2 || isa<ConstantAggregateZero>(V2))) {
// Find the element type of the returned vector
- const Type *EltTy = VTy->getElementType();
+ Type *EltTy = VTy->getElementType();
unsigned NumElem = VTy->getNumElements();
std::vector<Constant*> Res(NumElem);
@@ -834,7 +834,7 @@ static Constant *GetVectorElement(Constant *C, unsigned EltNo) {
if (ConstantVector *CV = dyn_cast<ConstantVector>(C))
return CV->getOperand(EltNo);
- const Type *EltTy = cast<VectorType>(C->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(C->getType())->getElementType();
if (isa<ConstantAggregateZero>(C))
return Constant::getNullValue(EltTy);
if (isa<UndefValue>(C))
@@ -850,7 +850,7 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
unsigned MaskNumElts = cast<VectorType>(Mask->getType())->getNumElements();
unsigned SrcNumElts = cast<VectorType>(V1->getType())->getNumElements();
- const Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
// Loop over the shuffle mask, evaluating each element.
SmallVector<Constant*, 32> Result;
@@ -922,16 +922,16 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
// Otherwise break the aggregate undef into multiple undefs and do
// the insertion.
- const CompositeType *AggTy = cast<CompositeType>(Agg->getType());
+ CompositeType *AggTy = cast<CompositeType>(Agg->getType());
unsigned numOps;
- if (const ArrayType *AR = dyn_cast<ArrayType>(AggTy))
+ if (ArrayType *AR = dyn_cast<ArrayType>(AggTy))
numOps = AR->getNumElements();
else
numOps = cast<StructType>(AggTy)->getNumElements();
std::vector<Constant*> Ops(numOps);
for (unsigned i = 0; i < numOps; ++i) {
- const Type *MemberTy = AggTy->getTypeAtIndex(i);
+ Type *MemberTy = AggTy->getTypeAtIndex(i);
Constant *Op =
(Idxs[0] == i) ?
ConstantFoldInsertValueInstruction(UndefValue::get(MemberTy),
@@ -940,7 +940,7 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
Ops[i] = Op;
}
- if (const StructType* ST = dyn_cast<StructType>(AggTy))
+ if (StructType* ST = dyn_cast<StructType>(AggTy))
return ConstantStruct::get(ST, Ops);
return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
}
@@ -953,16 +953,16 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
// Otherwise break the aggregate zero into multiple zeros and do
// the insertion.
- const CompositeType *AggTy = cast<CompositeType>(Agg->getType());
+ CompositeType *AggTy = cast<CompositeType>(Agg->getType());
unsigned numOps;
- if (const ArrayType *AR = dyn_cast<ArrayType>(AggTy))
+ if (ArrayType *AR = dyn_cast<ArrayType>(AggTy))
numOps = AR->getNumElements();
else
numOps = cast<StructType>(AggTy)->getNumElements();
std::vector<Constant*> Ops(numOps);
for (unsigned i = 0; i < numOps; ++i) {
- const Type *MemberTy = AggTy->getTypeAtIndex(i);
+ Type *MemberTy = AggTy->getTypeAtIndex(i);
Constant *Op =
(Idxs[0] == i) ?
ConstantFoldInsertValueInstruction(Constant::getNullValue(MemberTy),
@@ -971,7 +971,7 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
Ops[i] = Op;
}
- if (const StructType *ST = dyn_cast<StructType>(AggTy))
+ if (StructType *ST = dyn_cast<StructType>(AggTy))
return ConstantStruct::get(ST, Ops);
return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
}
@@ -986,7 +986,7 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
Ops[i] = Op;
}
- if (const StructType* ST = dyn_cast<StructType>(Agg->getType()))
+ if (StructType* ST = dyn_cast<StructType>(Agg->getType()))
return ConstantStruct::get(ST, Ops);
return ConstantArray::get(cast<ArrayType>(Agg->getType()), Ops);
}
@@ -1265,13 +1265,13 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
return ConstantFP::get(C1->getContext(), C3V);
}
}
- } else if (const VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
+ } else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
ConstantVector *CP1 = dyn_cast<ConstantVector>(C1);
ConstantVector *CP2 = dyn_cast<ConstantVector>(C2);
if ((CP1 != NULL || isa<ConstantAggregateZero>(C1)) &&
(CP2 != NULL || isa<ConstantAggregateZero>(C2))) {
std::vector<Constant*> Res;
- const Type* EltTy = VTy->getElementType();
+ Type* EltTy = VTy->getElementType();
Constant *C1 = 0;
Constant *C2 = 0;
switch (Opcode) {
@@ -1461,8 +1461,8 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
/// isMaybeZeroSizedType - This type may be zero sized if it's an array or structure of
/// zero sized types. The only leaf zero sized type is an empty structure.
-static bool isMaybeZeroSizedType(const Type *Ty) {
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+static bool isMaybeZeroSizedType(Type *Ty) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
if (STy->isOpaque()) return true; // Can't say.
// If all of elements have zero size, this does too.
@@ -1470,7 +1470,7 @@ static bool isMaybeZeroSizedType(const Type *Ty) {
if (!isMaybeZeroSizedType(STy->getElementType(i))) return false;
return true;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
return isMaybeZeroSizedType(ATy->getElementType());
}
return false;
@@ -1483,7 +1483,7 @@ static bool isMaybeZeroSizedType(const Type *Ty) {
/// first is less than the second, return -1; if the second is less than the
/// first, return 1. If the constants are not integral, return -2.
///
-static int IdxCompare(Constant *C1, Constant *C2, const Type *ElTy) {
+static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) {
if (C1 == C2) return 0;
// Ok, we found a different index. If they are not ConstantInt, we can't do
@@ -1832,8 +1832,8 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
Constant *C1, Constant *C2) {
- const Type *ResultTy;
- if (const VectorType *VT = dyn_cast<VectorType>(C1->getType()))
+ Type *ResultTy;
+ if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()),
VT->getNumElements());
else
@@ -2174,8 +2174,8 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
return C;
if (isa<UndefValue>(C)) {
- const PointerType *Ptr = cast<PointerType>(C->getType());
- const Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs, Idxs+NumIdx);
+ PointerType *Ptr = cast<PointerType>(C->getType());
+ Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs, Idxs+NumIdx);
assert(Ty != 0 && "Invalid indices for GEP!");
return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace()));
}
@@ -2188,8 +2188,8 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
break;
}
if (isNull) {
- const PointerType *Ptr = cast<PointerType>(C->getType());
- const Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs,
+ PointerType *Ptr = cast<PointerType>(C->getType());
+ Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs,
Idxs+NumIdx);
assert(Ty != 0 && "Invalid indices for GEP!");
return ConstantPointerNull::get(PointerType::get(Ty,
@@ -2203,7 +2203,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
// getelementptr instructions into a single instruction.
//
if (CE->getOpcode() == Instruction::GetElementPtr) {
- const Type *LastTy = 0;
+ Type *LastTy = 0;
for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
I != E; ++I)
LastTy = *I;
@@ -2219,9 +2219,9 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
Constant *Combined = CE->getOperand(CE->getNumOperands()-1);
// Otherwise it must be an array.
if (!Idx0->isNullValue()) {
- const Type *IdxTy = Combined->getType();
+ Type *IdxTy = Combined->getType();
if (IdxTy != Idx0->getType()) {
- const Type *Int64Ty = Type::getInt64Ty(IdxTy->getContext());
+ Type *Int64Ty = Type::getInt64Ty(IdxTy->getContext());
Constant *C1 = ConstantExpr::getSExtOrBitCast(Idx0, Int64Ty);
Constant *C2 = ConstantExpr::getSExtOrBitCast(Combined, Int64Ty);
Combined = ConstantExpr::get(Instruction::Add, C1, C2);
@@ -2249,10 +2249,10 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
// To: i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
//
if (CE->isCast() && NumIdx > 1 && Idx0->isNullValue()) {
- if (const PointerType *SPT =
+ if (PointerType *SPT =
dyn_cast<PointerType>(CE->getOperand(0)->getType()))
- if (const ArrayType *SAT = dyn_cast<ArrayType>(SPT->getElementType()))
- if (const ArrayType *CAT =
+ if (ArrayType *SAT = dyn_cast<ArrayType>(SPT->getElementType()))
+ if (ArrayType *CAT =
dyn_cast<ArrayType>(cast<PointerType>(C->getType())->getElementType()))
if (CAT->getElementType() == SAT->getElementType())
return inBounds ?
@@ -2268,12 +2268,12 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
// out into preceding dimensions.
bool Unknown = false;
SmallVector<Constant *, 8> NewIdxs;
- const Type *Ty = C->getType();
- const Type *Prev = 0;
+ Type *Ty = C->getType();
+ Type *Prev = 0;
for (unsigned i = 0; i != NumIdx;
Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))
if (ATy->getNumElements() <= INT64_MAX &&
ATy->getNumElements() != 0 &&
CI->getSExtValue() >= (int64_t)ATy->getNumElements()) {
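Every hunk in ConstantFold.cpp above is the same mechanical edit: Type is no longer passed around const-qualified, so parameters and the locals bound through cast/dyn_cast drop their const too. A hypothetical before/after pair showing the pattern (the helper is illustrative, not from the patch):

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Before: static Constant *eltSizeOf(const Type *Ty);
    // After: the const qualifier disappears at the parameter and at
    // every dyn_cast result that flows from it.
    static Constant *eltSizeOf(Type *Ty) {
      if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))  // was: const ArrayType *
        return ConstantExpr::getSizeOf(ATy->getElementType());
      return ConstantExpr::getSizeOf(Ty);
    }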
diff --git a/lib/VMCore/ConstantFold.h b/lib/VMCore/ConstantFold.h
index 653a1c3f37..aa1af8f7cf 100644
--- a/lib/VMCore/ConstantFold.h
+++ b/lib/VMCore/ConstantFold.h
@@ -30,7 +30,7 @@ namespace llvm {
Constant *ConstantFoldCastInstruction(
unsigned opcode, ///< The opcode of the cast
Constant *V, ///< The source constant
- const Type *DestTy ///< The destination type
+ Type *DestTy ///< The destination type
);
Constant *ConstantFoldSelectInstruction(Constant *Cond,
Constant *V1, Constant *V2);
diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp
index 316c8846f9..e9c049e1ce 100644
--- a/lib/VMCore/Constants.cpp
+++ b/lib/VMCore/Constants.cpp
@@ -63,7 +63,7 @@ bool Constant::isNullValue() const {
}
// Constructor to create a '0' constant of arbitrary type...
-Constant *Constant::getNullValue(const Type *Ty) {
+Constant *Constant::getNullValue(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
return ConstantInt::get(Ty, 0);
@@ -95,25 +95,25 @@ Constant *Constant::getNullValue(const Type *Ty) {
}
}
-Constant *Constant::getIntegerValue(const Type *Ty, const APInt &V) {
- const Type *ScalarTy = Ty->getScalarType();
+Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) {
+ Type *ScalarTy = Ty->getScalarType();
// Create the base integer constant.
Constant *C = ConstantInt::get(Ty->getContext(), V);
// Convert an integer to a pointer, if necessary.
- if (const PointerType *PTy = dyn_cast<PointerType>(ScalarTy))
+ if (PointerType *PTy = dyn_cast<PointerType>(ScalarTy))
C = ConstantExpr::getIntToPtr(C, PTy);
// Broadcast a scalar to a vector, if necessary.
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
C = ConstantVector::get(std::vector<Constant *>(VTy->getNumElements(), C));
return C;
}
-Constant *Constant::getAllOnesValue(const Type *Ty) {
- if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+Constant *Constant::getAllOnesValue(Type *Ty) {
+ if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
return ConstantInt::get(Ty->getContext(),
APInt::getAllOnesValue(ITy->getBitWidth()));
@@ -124,7 +124,7 @@ Constant *Constant::getAllOnesValue(const Type *Ty) {
}
SmallVector<Constant*, 16> Elts;
- const VectorType *VTy = cast<VectorType>(Ty);
+ VectorType *VTy = cast<VectorType>(Ty);
Elts.resize(VTy->getNumElements(), getAllOnesValue(VTy->getElementType()));
assert(Elts[0] && "Not a vector integer type!");
return cast<ConstantVector>(ConstantVector::get(Elts));
@@ -269,7 +269,7 @@ void Constant::getVectorElements(SmallVectorImpl<Constant*> &Elts) const {
return;
}
- const VectorType *VT = cast<VectorType>(getType());
+ VectorType *VT = cast<VectorType>(getType());
if (isa<ConstantAggregateZero>(this)) {
Elts.assign(VT->getNumElements(),
Constant::getNullValue(VT->getElementType()));
@@ -343,7 +343,7 @@ void Constant::removeDeadConstantUsers() const {
// ConstantInt
//===----------------------------------------------------------------------===//
-ConstantInt::ConstantInt(const IntegerType *Ty, const APInt& V)
+ConstantInt::ConstantInt(IntegerType *Ty, const APInt& V)
: Constant(Ty, ConstantIntVal, 0, 0), Val(V) {
assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
}
@@ -362,8 +362,8 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
return pImpl->TheFalseVal;
}
-Constant *ConstantInt::getTrue(const Type *Ty) {
- const VectorType *VTy = dyn_cast<VectorType>(Ty);
+Constant *ConstantInt::getTrue(Type *Ty) {
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
if (!VTy) {
assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1.");
return ConstantInt::getTrue(Ty->getContext());
@@ -375,8 +375,8 @@ Constant *ConstantInt::getTrue(const Type *Ty) {
return ConstantVector::get(Splat);
}
-Constant *ConstantInt::getFalse(const Type *Ty) {
- const VectorType *VTy = dyn_cast<VectorType>(Ty);
+Constant *ConstantInt::getFalse(Type *Ty) {
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
if (!VTy) {
assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1.");
return ConstantInt::getFalse(Ty->getContext());
@@ -396,7 +396,7 @@ Constant *ConstantInt::getFalse(const Type *Ty) {
// invariant which generates an assertion.
ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) {
// Get the corresponding integer type for the bit width of the value.
- const IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
+ IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
// get an existing value or the insertion position
DenseMapAPIntKeyInfo::KeyTy Key(V, ITy);
ConstantInt *&Slot = Context.pImpl->IntConstants[Key];
@@ -404,44 +404,44 @@ ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) {
return Slot;
}
-Constant *ConstantInt::get(const Type *Ty, uint64_t V, bool isSigned) {
+Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
Constant *C = get(cast<IntegerType>(Ty->getScalarType()), V, isSigned);
// For vectors, broadcast the value.
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(SmallVector<Constant*,
16>(VTy->getNumElements(), C));
return C;
}
-ConstantInt* ConstantInt::get(const IntegerType* Ty, uint64_t V,
+ConstantInt* ConstantInt::get(IntegerType* Ty, uint64_t V,
bool isSigned) {
return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
}
-ConstantInt* ConstantInt::getSigned(const IntegerType* Ty, int64_t V) {
+ConstantInt* ConstantInt::getSigned(IntegerType* Ty, int64_t V) {
return get(Ty, V, true);
}
-Constant *ConstantInt::getSigned(const Type *Ty, int64_t V) {
+Constant *ConstantInt::getSigned(Type *Ty, int64_t V) {
return get(Ty, V, true);
}
-Constant *ConstantInt::get(const Type* Ty, const APInt& V) {
+Constant *ConstantInt::get(Type* Ty, const APInt& V) {
ConstantInt *C = get(Ty->getContext(), V);
assert(C->getType() == Ty->getScalarType() &&
"ConstantInt type doesn't match the type implied by its value!");
// For vectors, broadcast the value.
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
SmallVector<Constant *, 16>(VTy->getNumElements(), C));
return C;
}
-ConstantInt* ConstantInt::get(const IntegerType* Ty, StringRef Str,
+ConstantInt* ConstantInt::get(IntegerType* Ty, StringRef Str,
uint8_t radix) {
return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix));
}
@@ -450,7 +450,7 @@ ConstantInt* ConstantInt::get(const IntegerType* Ty, StringRef Str,
// ConstantFP
//===----------------------------------------------------------------------===//
-static const fltSemantics *TypeToFloatSemantics(const Type *Ty) {
+static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
if (Ty->isFloatTy())
return &APFloat::IEEEsingle;
if (Ty->isDoubleTy())
@@ -467,7 +467,7 @@ static const fltSemantics *TypeToFloatSemantics(const Type *Ty) {
/// get() - This returns a constant fp for the specified value in the
/// specified type. This should only be used for simple constant values like
/// 2.0/1.0 etc, that are known-valid both as double and as the target format.
-Constant *ConstantFP::get(const Type* Ty, double V) {
+Constant *ConstantFP::get(Type* Ty, double V) {
LLVMContext &Context = Ty->getContext();
APFloat FV(V);
@@ -477,7 +477,7 @@ Constant *ConstantFP::get(const Type* Ty, double V) {
Constant *C = get(Context, FV);
// For vectors, broadcast the value.
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
SmallVector<Constant *, 16>(VTy->getNumElements(), C));
@@ -485,14 +485,14 @@ Constant *ConstantFP::get(const Type* Ty, double V) {
}
-Constant *ConstantFP::get(const Type* Ty, StringRef Str) {
+Constant *ConstantFP::get(Type* Ty, StringRef Str) {
LLVMContext &Context = Ty->getContext();
APFloat FV(*TypeToFloatSemantics(Ty->getScalarType()), Str);
Constant *C = get(Context, FV);
// For vectors, broadcast the value.
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
SmallVector<Constant *, 16>(VTy->getNumElements(), C));
@@ -500,7 +500,7 @@ Constant *ConstantFP::get(const Type* Ty, StringRef Str) {
}
-ConstantFP* ConstantFP::getNegativeZero(const Type* Ty) {
+ConstantFP* ConstantFP::getNegativeZero(Type* Ty) {
LLVMContext &Context = Ty->getContext();
APFloat apf = cast <ConstantFP>(Constant::getNullValue(Ty))->getValueAPF();
apf.changeSign();
@@ -508,8 +508,8 @@ ConstantFP* ConstantFP::getNegativeZero(const Type* Ty) {
}
-Constant *ConstantFP::getZeroValueForNegation(const Type* Ty) {
- if (const VectorType *PTy = dyn_cast<VectorType>(Ty))
+Constant *ConstantFP::getZeroValueForNegation(Type* Ty) {
+ if (VectorType *PTy = dyn_cast<VectorType>(Ty))
if (PTy->getElementType()->isFloatingPointTy()) {
SmallVector<Constant*, 16> zeros(PTy->getNumElements(),
getNegativeZero(PTy->getElementType()));
@@ -532,7 +532,7 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
ConstantFP *&Slot = pImpl->FPConstants[Key];
if (!Slot) {
- const Type *Ty;
+ Type *Ty;
if (&V.getSemantics() == &APFloat::IEEEsingle)
Ty = Type::getFloatTy(Context);
else if (&V.getSemantics() == &APFloat::IEEEdouble)
@@ -552,13 +552,13 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
return Slot;
}
-ConstantFP *ConstantFP::getInfinity(const Type *Ty, bool Negative) {
+ConstantFP *ConstantFP::getInfinity(Type *Ty, bool Negative) {
const fltSemantics &Semantics = *TypeToFloatSemantics(Ty);
return ConstantFP::get(Ty->getContext(),
APFloat::getInf(Semantics, Negative));
}
-ConstantFP::ConstantFP(const Type *Ty, const APFloat& V)
+ConstantFP::ConstantFP(Type *Ty, const APFloat& V)
: Constant(Ty, ConstantFPVal, 0, 0), Val(V) {
assert(&V.getSemantics() == TypeToFloatSemantics(Ty) &&
"FP type Mismatch");
@@ -573,7 +573,7 @@ bool ConstantFP::isExactlyValue(const APFloat &V) const {
//===----------------------------------------------------------------------===//
-ConstantArray::ConstantArray(const ArrayType *T,
+ConstantArray::ConstantArray(ArrayType *T,
const std::vector<Constant*> &V)
: Constant(T, ConstantArrayVal,
OperandTraits<ConstantArray>::op_end(this) - V.size(),
@@ -590,7 +590,7 @@ ConstantArray::ConstantArray(const ArrayType *T,
}
}
-Constant *ConstantArray::get(const ArrayType *Ty, ArrayRef<Constant*> V) {
+Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
for (unsigned i = 0, e = V.size(); i != e; ++i) {
assert(V[i]->getType() == Ty->getElementType() &&
"Wrong type in array element initializer");
@@ -653,7 +653,7 @@ StructType *ConstantStruct::getTypeForElements(ArrayRef<Constant*> V,
}
-ConstantStruct::ConstantStruct(const StructType *T,
+ConstantStruct::ConstantStruct(StructType *T,
const std::vector<Constant*> &V)
: Constant(T, ConstantStructVal,
OperandTraits<ConstantStruct>::op_end(this) - V.size(),
@@ -671,7 +671,7 @@ ConstantStruct::ConstantStruct(const StructType *T,
}
// ConstantStruct accessors.
-Constant *ConstantStruct::get(const StructType *ST, ArrayRef<Constant*> V) {
+Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
// Create a ConstantAggregateZero value if all elements are zeros.
for (unsigned i = 0, e = V.size(); i != e; ++i)
if (!V[i]->isNullValue())
@@ -682,7 +682,7 @@ Constant *ConstantStruct::get(const StructType *ST, ArrayRef<Constant*> V) {
return ConstantAggregateZero::get(ST);
}
-Constant* ConstantStruct::get(const StructType *T, ...) {
+Constant* ConstantStruct::get(StructType *T, ...) {
va_list ap;
SmallVector<Constant*, 8> Values;
va_start(ap, T);
@@ -692,7 +692,7 @@ Constant* ConstantStruct::get(const StructType *T, ...) {
return get(T, Values);
}
-ConstantVector::ConstantVector(const VectorType *T,
+ConstantVector::ConstantVector(VectorType *T,
const std::vector<Constant*> &V)
: Constant(T, ConstantVectorVal,
OperandTraits<ConstantVector>::op_end(this) - V.size(),
@@ -710,7 +710,7 @@ ConstantVector::ConstantVector(const VectorType *T,
// ConstantVector accessors.
Constant *ConstantVector::get(ArrayRef<Constant*> V) {
assert(!V.empty() && "Vectors can't be empty");
- const VectorType *T = VectorType::get(V.front()->getType(), V.size());
+ VectorType *T = VectorType::get(V.front()->getType(), V.size());
LLVMContextImpl *pImpl = T->getContext().pImpl;
// If this is an all-undef or all-zero vector, return a
@@ -761,7 +761,7 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
for (; GEPI != E; ++GEPI, ++OI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
if (!CI) return false;
- if (const ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
if (CI->getValue().getActiveBits() > 64 ||
CI->getZExtValue() >= ATy->getNumElements())
return false;
@@ -859,7 +859,7 @@ ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
/// operands replaced with the specified values. The specified array must
/// have the same number of operands as our current one.
Constant *ConstantExpr::
-getWithOperands(ArrayRef<Constant*> Ops, const Type *Ty) const {
+getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const {
assert(Ops.size() == getNumOperands() && "Operand count mismatch!");
bool AnyChange = Ty != getType();
for (unsigned i = 0; i != Ops.size(); ++i)
@@ -907,7 +907,7 @@ getWithOperands(ArrayRef<Constant*> Ops, const Type *Ty) const {
//===----------------------------------------------------------------------===//
// isValueValidForType implementations
-bool ConstantInt::isValueValidForType(const Type *Ty, uint64_t Val) {
+bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth(); // assert okay
if (Ty == Type::getInt1Ty(Ty->getContext()))
return Val == 0 || Val == 1;
@@ -917,7 +917,7 @@ bool ConstantInt::isValueValidForType(const Type *Ty, uint64_t Val) {
return Val <= Max;
}
-bool ConstantInt::isValueValidForType(const Type *Ty, int64_t Val) {
+bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) {
unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth(); // assert okay
if (Ty == Type::getInt1Ty(Ty->getContext()))
return Val == 0 || Val == 1 || Val == -1;
@@ -928,7 +928,7 @@ bool ConstantInt::isValueValidForType(const Type *Ty, int64_t Val) {
return (Val >= Min && Val <= Max);
}
-bool ConstantFP::isValueValidForType(const Type *Ty, const APFloat& Val) {
+bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
// convert modifies in place, so make a copy.
APFloat Val2 = APFloat(Val);
bool losesInfo;
@@ -968,7 +968,7 @@ bool ConstantFP::isValueValidForType(const Type *Ty, const APFloat& Val) {
//===----------------------------------------------------------------------===//
// Factory Function Implementation
-ConstantAggregateZero* ConstantAggregateZero::get(const Type* Ty) {
+ConstantAggregateZero* ConstantAggregateZero::get(Type* Ty) {
assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
"Cannot create an aggregate zero of non-aggregate type!");
@@ -1103,7 +1103,7 @@ Constant *ConstantVector::getSplatValue() const {
//---- ConstantPointerNull::get() implementation.
//
-ConstantPointerNull *ConstantPointerNull::get(const PointerType *Ty) {
+ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
return Ty->getContext().pImpl->NullPtrConstants.getOrCreate(Ty, 0);
}
@@ -1118,7 +1118,7 @@ void ConstantPointerNull::destroyConstant() {
//---- UndefValue::get() implementation.
//
-UndefValue *UndefValue::get(const Type *Ty) {
+UndefValue *UndefValue::get(Type *Ty) {
return Ty->getContext().pImpl->UndefValueConstants.getOrCreate(Ty, 0);
}
@@ -1209,7 +1209,7 @@ void BlockAddress::replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) {
/// This is a utility function to handle folding of casts and lookup of the
/// cast in the ExprConstants map. It is used by the various get* methods below.
static inline Constant *getFoldedCast(
- Instruction::CastOps opc, Constant *C, const Type *Ty) {
+ Instruction::CastOps opc, Constant *C, Type *Ty) {
assert(Ty->isFirstClassType() && "Cannot cast to an aggregate type!");
// Fold a few common cases
if (Constant *FC = ConstantFoldCastInstruction(opc, C, Ty))
@@ -1224,7 +1224,7 @@ static inline Constant *getFoldedCast(
return pImpl->ExprConstants.getOrCreate(Ty, Key);
}
-Constant *ConstantExpr::getCast(unsigned oc, Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) {
Instruction::CastOps opc = Instruction::CastOps(oc);
assert(Instruction::isCast(opc) && "opcode out of range");
assert(C && Ty && "Null arguments to getCast");
@@ -1250,25 +1250,25 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, const Type *Ty) {
return 0;
}
-Constant *ConstantExpr::getZExtOrBitCast(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return getBitCast(C, Ty);
return getZExt(C, Ty);
}
-Constant *ConstantExpr::getSExtOrBitCast(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getSExtOrBitCast(Constant *C, Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return getBitCast(C, Ty);
return getSExt(C, Ty);
}
-Constant *ConstantExpr::getTruncOrBitCast(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getTruncOrBitCast(Constant *C, Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return getBitCast(C, Ty);
return getTrunc(C, Ty);
}
-Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
+Constant *ConstantExpr::getPointerCast(Constant *S, Type *Ty) {
assert(S->getType()->isPointerTy() && "Invalid cast");
assert((Ty->isIntegerTy() || Ty->isPointerTy()) && "Invalid cast");
@@ -1277,7 +1277,7 @@ Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
return getBitCast(S, Ty);
}
-Constant *ConstantExpr::getIntegerCast(Constant *C, const Type *Ty,
+Constant *ConstantExpr::getIntegerCast(Constant *C, Type *Ty,
bool isSigned) {
assert(C->getType()->isIntOrIntVectorTy() &&
Ty->isIntOrIntVectorTy() && "Invalid cast");
@@ -1290,7 +1290,7 @@ Constant *ConstantExpr::getIntegerCast(Constant *C, const Type *Ty,
return getCast(opcode, C, Ty);
}
-Constant *ConstantExpr::getFPCast(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getFPCast(Constant *C, Type *Ty) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
"Invalid cast");
unsigned SrcBits = C->getType()->getScalarSizeInBits();
@@ -1302,7 +1302,7 @@ Constant *ConstantExpr::getFPCast(Constant *C, const Type *Ty) {
return getCast(opcode, C, Ty);
}
-Constant *ConstantExpr::getTrunc(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1316,7 +1316,7 @@ Constant *ConstantExpr::getTrunc(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::Trunc, C, Ty);
}
-Constant *ConstantExpr::getSExt(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getSExt(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1330,7 +1330,7 @@ Constant *ConstantExpr::getSExt(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::SExt, C, Ty);
}
-Constant *ConstantExpr::getZExt(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getZExt(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1344,7 +1344,7 @@ Constant *ConstantExpr::getZExt(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::ZExt, C, Ty);
}
-Constant *ConstantExpr::getFPTrunc(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getFPTrunc(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1356,7 +1356,7 @@ Constant *ConstantExpr::getFPTrunc(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::FPTrunc, C, Ty);
}
-Constant *ConstantExpr::getFPExtend(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getFPExtend(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1368,7 +1368,7 @@ Constant *ConstantExpr::getFPExtend(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::FPExt, C, Ty);
}
-Constant *ConstantExpr::getUIToFP(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getUIToFP(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1379,7 +1379,7 @@ Constant *ConstantExpr::getUIToFP(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::UIToFP, C, Ty);
}
-Constant *ConstantExpr::getSIToFP(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getSIToFP(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1390,7 +1390,7 @@ Constant *ConstantExpr::getSIToFP(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::SIToFP, C, Ty);
}
-Constant *ConstantExpr::getFPToUI(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getFPToUI(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1401,7 +1401,7 @@ Constant *ConstantExpr::getFPToUI(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::FPToUI, C, Ty);
}
-Constant *ConstantExpr::getFPToSI(Constant *C, const Type *Ty) {
+Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty) {
#ifndef NDEBUG
bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
bool toVec = Ty->getTypeID() == Type::VectorTyID;
@@ -1412,19 +1412,19 @@ Constant *ConstantExpr::getFPToSI(Constant *C, const Type *Ty) {
return getFoldedCast(Instruction::FPToSI, C, Ty);
}
-Constant *ConstantExpr::getPtrToInt(Constant *C, const Type *DstTy) {
+Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy) {
assert(C->getType()->isPointerTy() && "PtrToInt source must be pointer");
assert(DstTy->isIntegerTy() && "PtrToInt destination must be integral");
return getFoldedCast(Instruction::PtrToInt, C, DstTy);
}
-Constant *ConstantExpr::getIntToPtr(Constant *C, const Type *DstTy) {
+Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy) {
assert(C->getType()->isIntegerTy() && "IntToPtr source must be integral");
assert(DstTy->isPointerTy() && "IntToPtr destination must be a pointer");
return getFoldedCast(Instruction::IntToPtr, C, DstTy);
}
-Constant *ConstantExpr::getBitCast(Constant *C, const Type *DstTy) {
+Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy) {
assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) &&
"Invalid constantexpr bitcast!");
@@ -1513,7 +1513,7 @@ Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
return pImpl->ExprConstants.getOrCreate(C1->getType(), Key);
}
-Constant *ConstantExpr::getSizeOf(const Type* Ty) {
+Constant *ConstantExpr::getSizeOf(Type* Ty) {
// sizeof is implemented as: (i64) gep (Ty*)null, 1
// Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
@@ -1523,10 +1523,10 @@ Constant *ConstantExpr::getSizeOf(const Type* Ty) {
Type::getInt64Ty(Ty->getContext()));
}
-Constant *ConstantExpr::getAlignOf(const Type* Ty) {
+Constant *ConstantExpr::getAlignOf(Type* Ty) {
// alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
// Note that a non-inbounds gep is used, as null isn't within any object.
- const Type *AligningTy =
+ Type *AligningTy =
StructType::get(Type::getInt1Ty(Ty->getContext()), Ty, NULL);
Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo());
Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
@@ -1537,12 +1537,12 @@ Constant *ConstantExpr::getAlignOf(const Type* Ty) {
Type::getInt64Ty(Ty->getContext()));
}
-Constant *ConstantExpr::getOffsetOf(const StructType* STy, unsigned FieldNo) {
+Constant *ConstantExpr::getOffsetOf(StructType* STy, unsigned FieldNo) {
return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()),
FieldNo));
}
-Constant *ConstantExpr::getOffsetOf(const Type* Ty, Constant *FieldNo) {
+Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
// offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo
// Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx[] = {
@@ -1598,7 +1598,7 @@ Constant *ConstantExpr::getGetElementPtr(Constant *C, Value* const *Idxs,
return FC; // Fold a few common cases.
// Get the result type of the getelementptr!
- const Type *Ty =
+ Type *Ty =
GetElementPtrInst::getIndexedType(C->getType(), Idxs, Idxs+NumIdx);
assert(Ty && "GEP indices invalid!");
unsigned AS = cast<PointerType>(C->getType())->getAddressSpace();
@@ -1635,8 +1635,8 @@ ConstantExpr::getICmp(unsigned short pred, Constant *LHS, Constant *RHS) {
// Get the key type with both the opcode and predicate
const ExprMapKeyType Key(Instruction::ICmp, ArgVec, pred);
- const Type *ResultTy = Type::getInt1Ty(LHS->getContext());
- if (const VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
ResultTy = VectorType::get(ResultTy, VT->getNumElements());
LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
@@ -1658,8 +1658,8 @@ ConstantExpr::getFCmp(unsigned short pred, Constant *LHS, Constant *RHS) {
// Get the key type with both the opcode and predicate
const ExprMapKeyType Key(Instruction::FCmp, ArgVec, pred);
- const Type *ResultTy = Type::getInt1Ty(LHS->getContext());
- if (const VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
ResultTy = VectorType::get(ResultTy, VT->getNumElements());
LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
@@ -1715,8 +1715,8 @@ Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
return FC; // Fold a few common cases.
unsigned NElts = cast<VectorType>(Mask->getType())->getNumElements();
- const Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
- const Type *ShufTy = VectorType::get(EltTy, NElts);
+ Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
+ Type *ShufTy = VectorType::get(EltTy, NElts);
// Look up the constant in the table first to ensure uniqueness
std::vector<Constant*> ArgVec(1, V1);
@@ -1745,7 +1745,7 @@ Constant *ConstantExpr::getExtractValue(Constant *Agg,
assert(Agg->getType()->isFirstClassType() &&
"Tried to create extractelement operation on non-first-class type!");
- const Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs);
+ Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs);
(void)ReqTy;
assert(ReqTy && "extractvalue indices invalid!");
@@ -1878,7 +1878,7 @@ const char *ConstantExpr::getOpcodeName() const {
GetElementPtrConstantExpr::
GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
- const Type *DestTy)
+ Type *DestTy)
: ConstantExpr(DestTy, Instruction::GetElementPtr,
OperandTraits<GetElementPtrConstantExpr>::op_end(this)
- (IdxList.size()+1), IdxList.size()+1) {
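As the comments in the getSizeOf/getAlignOf/getOffsetOf hunks above note, these target-dependent quantities are encoded as constant GEPs off a null pointer, leaving the backend to lower them with real target data. A sketch re-deriving the sizeof case by hand, assuming the same-era ConstantExpr API:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // sizeof(Ty) == (i64) gep (Ty*)null, 1 -- the address of element 1
    // of a Ty array based at null is exactly one element's size.
    static Constant *sizeOfByHand(Type *Ty) {
      LLVMContext &C = Ty->getContext();
      Constant *One = ConstantInt::get(Type::getInt32Ty(C), 1);
      Constant *NullPtr = Constant::getNullValue(Ty->getPointerTo());
      Constant *GEP = ConstantExpr::getGetElementPtr(NullPtr, &One, 1);
      return ConstantExpr::getPtrToInt(GEP, Type::getInt64Ty(C));
    }

The alignof variant instead uses gep ({i1,Ty}*)null, 0, 1: the offset of a Ty field placed right after a single i1 is exactly Ty's alignment.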
diff --git a/lib/VMCore/ConstantsContext.h b/lib/VMCore/ConstantsContext.h
index bd134d9b89..1077004d7c 100644
--- a/lib/VMCore/ConstantsContext.h
+++ b/lib/VMCore/ConstantsContext.h
@@ -36,7 +36,7 @@ public:
void *operator new(size_t s) {
return User::operator new(s, 1);
}
- UnaryConstantExpr(unsigned Opcode, Constant *C, const Type *Ty)
+ UnaryConstantExpr(unsigned Opcode, Constant *C, Type *Ty)
: ConstantExpr(Ty, Opcode, &Op<0>(), 1) {
Op<0>() = C;
}
@@ -159,7 +159,7 @@ public:
}
ExtractValueConstantExpr(Constant *Agg,
const SmallVector<unsigned, 4> &IdxList,
- const Type *DestTy)
+ Type *DestTy)
: ConstantExpr(DestTy, Instruction::ExtractValue, &Op<0>(), 1),
Indices(IdxList) {
Op<0>() = Agg;
@@ -184,7 +184,7 @@ public:
}
InsertValueConstantExpr(Constant *Agg, Constant *Val,
const SmallVector<unsigned, 4> &IdxList,
- const Type *DestTy)
+ Type *DestTy)
: ConstantExpr(DestTy, Instruction::InsertValue, &Op<0>(), 2),
Indices(IdxList) {
Op<0>() = Agg;
@@ -203,11 +203,11 @@ public:
/// used behind the scenes to implement getelementptr constant exprs.
class GetElementPtrConstantExpr : public ConstantExpr {
GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
- const Type *DestTy);
+ Type *DestTy);
public:
static GetElementPtrConstantExpr *Create(Constant *C,
const std::vector<Constant*>&IdxList,
- const Type *DestTy,
+ Type *DestTy,
unsigned Flags) {
GetElementPtrConstantExpr *Result =
new(IdxList.size() + 1) GetElementPtrConstantExpr(C, IdxList, DestTy);
@@ -228,7 +228,7 @@ struct CompareConstantExpr : public ConstantExpr {
return User::operator new(s, 2);
}
unsigned short predicate;
- CompareConstantExpr(const Type *ty, Instruction::OtherOps opc,
+ CompareConstantExpr(Type *ty, Instruction::OtherOps opc,
unsigned short pred, Constant* LHS, Constant* RHS)
: ConstantExpr(ty, opc, &Op<0>(), 2), predicate(pred) {
Op<0>() = LHS;
@@ -392,7 +392,7 @@ struct ConstantTraits<Constant *> {
template<class ConstantClass, class TypeClass, class ValType>
struct ConstantCreator {
- static ConstantClass *create(const TypeClass *Ty, const ValType &V) {
+ static ConstantClass *create(TypeClass *Ty, const ValType &V) {
return new(ConstantTraits<ValType>::uses(V)) ConstantClass(Ty, V);
}
};
@@ -407,7 +407,7 @@ struct ConstantKeyData {
template<>
struct ConstantCreator<ConstantExpr, Type, ExprMapKeyType> {
- static ConstantExpr *create(const Type *Ty, const ExprMapKeyType &V,
+ static ConstantExpr *create(Type *Ty, const ExprMapKeyType &V,
unsigned short pred = 0) {
if (Instruction::isCast(V.opcode))
return new UnaryConstantExpr(V.opcode, V.operands[0], Ty);
@@ -470,7 +470,7 @@ struct ConstantKeyData<ConstantExpr> {
// ConstantAggregateZero does not take extra "value" argument...
template<class ValType>
struct ConstantCreator<ConstantAggregateZero, Type, ValType> {
- static ConstantAggregateZero *create(const Type *Ty, const ValType &V){
+ static ConstantAggregateZero *create(Type *Ty, const ValType &V){
return new ConstantAggregateZero(Ty);
}
};
@@ -522,7 +522,7 @@ struct ConstantKeyData<ConstantStruct> {
// ConstantPointerNull does not take extra "value" argument...
template<class ValType>
struct ConstantCreator<ConstantPointerNull, PointerType, ValType> {
- static ConstantPointerNull *create(const PointerType *Ty, const ValType &V){
+ static ConstantPointerNull *create(PointerType *Ty, const ValType &V){
return new ConstantPointerNull(Ty);
}
};
@@ -538,7 +538,7 @@ struct ConstantKeyData<ConstantPointerNull> {
// UndefValue does not take extra "value" argument...
template<class ValType>
struct ConstantCreator<UndefValue, Type, ValType> {
- static UndefValue *create(const Type *Ty, const ValType &V) {
+ static UndefValue *create(Type *Ty, const ValType &V) {
return new UndefValue(Ty);
}
};
@@ -553,7 +553,7 @@ struct ConstantKeyData<UndefValue> {
template<>
struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType> {
- static InlineAsm *create(const PointerType *Ty, const InlineAsmKeyType &Key) {
+ static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) {
return new InlineAsm(Ty, Key.asm_string, Key.constraints,
Key.has_side_effects, Key.is_align_stack);
}
@@ -572,7 +572,7 @@ template<class ValType, class ValRefType, class TypeClass, class ConstantClass,
bool HasLargeKey = false /*true for arrays and structs*/ >
class ConstantUniqueMap {
public:
- typedef std::pair<const TypeClass*, ValType> MapKey;
+ typedef std::pair<TypeClass*, ValType> MapKey;
typedef std::map<MapKey, ConstantClass *> MapTy;
typedef std::map<ConstantClass *, typename MapTy::iterator> InverseMapTy;
private:
@@ -623,7 +623,7 @@ private:
}
typename MapTy::iterator I =
- Map.find(MapKey(static_cast<const TypeClass*>(CP->getType()),
+ Map.find(MapKey(static_cast<TypeClass*>(CP->getType()),
ConstantKeyData<ConstantClass>::getValType(CP)));
if (I == Map.end() || I->second != CP) {
// FIXME: This should not use a linear scan. If this gets to be a
@@ -634,7 +634,7 @@ private:
return I;
}
- ConstantClass *Create(const TypeClass *Ty, ValRefType V,
+ ConstantClass *Create(TypeClass *Ty, ValRefType V,
typename MapTy::iterator I) {
ConstantClass* Result =
ConstantCreator<ConstantClass,TypeClass,ValType>::create(Ty, V);
@@ -651,7 +651,7 @@ public:
/// getOrCreate - Return the specified constant from the map, creating it if
/// necessary.
- ConstantClass *getOrCreate(const TypeClass *Ty, ValRefType V) {
+ ConstantClass *getOrCreate(TypeClass *Ty, ValRefType V) {
MapKey Lookup(Ty, V);
ConstantClass* Result = 0;
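ConstantUniqueMap above is a hash-consing table: each constant is interned under a (type, value-payload) key, so structurally identical constants are a single object and pointer equality suffices. A toy version of the core idea, independent of the LLVM classes:

    #include <map>
    #include <utility>

    // One canonical object per (type, value) key; creation happens at
    // most once per key, mirroring getOrCreate above.
    template <typename TypeClass, typename ValType, typename ConstantClass>
    class ToyUniqueMap {
      typedef std::pair<TypeClass *, ValType> MapKey;
      std::map<MapKey, ConstantClass *> Map;
    public:
      ConstantClass *getOrCreate(TypeClass *Ty, const ValType &V) {
        ConstantClass *&Slot = Map[MapKey(Ty, V)];
        if (!Slot)
          Slot = new ConstantClass(Ty, V);   // first use creates
        return Slot;
      }
    };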
diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp
index 2a816e123a..c39375db1b 100644
--- a/lib/VMCore/Core.cpp
+++ b/lib/VMCore/Core.cpp
@@ -600,7 +600,7 @@ LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy,
LLVMValueRef *ConstantVals,
unsigned Count) {
Constant **Elements = unwrap<Constant>(ConstantVals, Count);
- const StructType *Ty = cast<StructType>(unwrap(StructTy));
+ StructType *Ty = cast<StructType>(unwrap(StructTy));
return wrap(ConstantStruct::get(Ty, ArrayRef<Constant*>(Elements, Count)));
}
@@ -1861,7 +1861,7 @@ LLVMValueRef LLVMBuildNot(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
const char *Name) {
- const Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+ Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
@@ -1872,7 +1872,7 @@ LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef Val, const char *Name) {
- const Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+ Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
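Both C-API builders above materialize the allocation size as a constant sizeof expression truncated to i32 before delegating to CallInst::CreateMalloc, which emits the call to malloc plus a bitcast of the i8* result. From the caller's side that is simply (names illustrative):

    #include "llvm-c/Core.h"

    // Allocate one object of type Ty. LLVMBuildMalloc internally emits
    // roughly: trunc(sizeof(Ty)) to i32, call i8* @malloc(i32), then a
    // bitcast of the result to Ty*.
    LLVMValueRef allocOne(LLVMBuilderRef B, LLVMTypeRef Ty) {
      return LLVMBuildMalloc(B, Ty, "obj");
    }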
diff --git a/lib/VMCore/DebugLoc.cpp b/lib/VMCore/DebugLoc.cpp
index 4ff6b2cd80..b9c245d951 100644
--- a/lib/VMCore/DebugLoc.cpp
+++ b/lib/VMCore/DebugLoc.cpp
@@ -104,7 +104,7 @@ MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const {
assert(Scope && "If scope is null, this should be isUnknown()");
LLVMContext &Ctx2 = Scope->getContext();
- const Type *Int32 = Type::getInt32Ty(Ctx2);
+ Type *Int32 = Type::getInt32Ty(Ctx2);
Value *Elts[] = {
ConstantInt::get(Int32, getLine()), ConstantInt::get(Int32, getCol()),
Scope, IA
diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp
index 6536bcd0e2..1f59bf9716 100644
--- a/lib/VMCore/Function.cpp
+++ b/lib/VMCore/Function.cpp
@@ -38,7 +38,7 @@ template class llvm::SymbolTableListTraits<BasicBlock, Function>;
// Argument Implementation
//===----------------------------------------------------------------------===//
-Argument::Argument(const Type *Ty, const Twine &Name, Function *Par)
+Argument::Argument(Type *Ty, const Twine &Name, Function *Par)
: Value(Ty, Value::ArgumentVal) {
Parent = 0;
@@ -158,7 +158,7 @@ void Function::eraseFromParent() {
// Function Implementation
//===----------------------------------------------------------------------===//
-Function::Function(const FunctionType *Ty, LinkageTypes Linkage,
+Function::Function(FunctionType *Ty, LinkageTypes Linkage,
const Twine &name, Module *ParentModule)
: GlobalValue(PointerType::getUnqual(Ty),
Value::FunctionVal, 0, 0, Linkage, name) {
@@ -195,7 +195,7 @@ Function::~Function() {
void Function::BuildLazyArguments() const {
// Create the arguments vector; all arguments start out unnamed.
- const FunctionType *FT = getFunctionType();
+ FunctionType *FT = getFunctionType();
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
assert(!FT->getParamType(i)->isVoidTy() &&
"Cannot have void typed arguments!");
@@ -345,7 +345,7 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
return Table[id];
std::string Result(Table[id]);
for (unsigned i = 0; i < Tys.size(); ++i) {
- if (const PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) {
+ if (PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) {
Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) +
EVT::getEVT(PTyp->getElementType()).getEVTString();
}
@@ -355,9 +355,9 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
return Result;
}
-const FunctionType *Intrinsic::getType(LLVMContext &Context,
+FunctionType *Intrinsic::getType(LLVMContext &Context,
ID id, ArrayRef<Type*> Tys) {
- const Type *ResultTy = NULL;
+ Type *ResultTy = NULL;
std::vector<Type*> ArgTys;
bool IsVarArg = false;
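Intrinsic::getName above builds the overloaded symbol by appending one suffix per type parameter, with pointer types spelled .p<address space><pointee EVT>. A sketch of how that plays out for the usual memcpy specialization:

    #include "llvm/Intrinsics.h"
    #include "llvm/DerivedTypes.h"
    #include <string>
    using namespace llvm;

    // Suffixes: i8* in AS0 -> ".p0i8" (twice), i64 -> ".i64",
    // giving "llvm.memcpy.p0i8.p0i8.i64".
    std::string memcpyName(LLVMContext &C) {
      Type *Tys[] = { Type::getInt8PtrTy(C), Type::getInt8PtrTy(C),
                      Type::getInt64Ty(C) };
      return Intrinsic::getName(Intrinsic::memcpy, Tys);
    }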
diff --git a/lib/VMCore/Globals.cpp b/lib/VMCore/Globals.cpp
index db008e09d1..b8acc45d18 100644
--- a/lib/VMCore/Globals.cpp
+++ b/lib/VMCore/Globals.cpp
@@ -80,7 +80,7 @@ bool GlobalValue::isDeclaration() const {
// GlobalVariable Implementation
//===----------------------------------------------------------------------===//
-GlobalVariable::GlobalVariable(const Type *Ty, bool constant, LinkageTypes Link,
+GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
Constant *InitVal, const Twine &Name,
bool ThreadLocal, unsigned AddressSpace)
: GlobalValue(PointerType::get(Ty, AddressSpace),
@@ -97,7 +97,7 @@ GlobalVariable::GlobalVariable(const Type *Ty, bool constant, LinkageTypes Link,
LeakDetector::addGarbageObject(this);
}
-GlobalVariable::GlobalVariable(Module &M, const Type *Ty, bool constant,
+GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
LinkageTypes Link, Constant *InitVal,
const Twine &Name,
GlobalVariable *Before, bool ThreadLocal,
@@ -186,7 +186,7 @@ void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) {
// GlobalAlias Implementation
//===----------------------------------------------------------------------===//
-GlobalAlias::GlobalAlias(const Type *Ty, LinkageTypes Link,
+GlobalAlias::GlobalAlias(Type *Ty, LinkageTypes Link,
const Twine &Name, Constant* aliasee,
Module *ParentModule)
: GlobalValue(Ty, Value::GlobalAliasVal, &Op<0>(), 1, Link, Name) {
diff --git a/lib/VMCore/IRBuilder.cpp b/lib/VMCore/IRBuilder.cpp
index ffe961fee7..5114e2d498 100644
--- a/lib/VMCore/IRBuilder.cpp
+++ b/lib/VMCore/IRBuilder.cpp
@@ -40,7 +40,7 @@ Type *IRBuilderBase::getCurrentFunctionReturnType() const {
}
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
- const PointerType *PT = cast<PointerType>(Ptr->getType());
+ PointerType *PT = cast<PointerType>(Ptr->getType());
if (PT->getElementType()->isIntegerTy(8))
return Ptr;
diff --git a/lib/VMCore/InlineAsm.cpp b/lib/VMCore/InlineAsm.cpp
index 4a03b395e9..736e370a6d 100644
--- a/lib/VMCore/InlineAsm.cpp
+++ b/lib/VMCore/InlineAsm.cpp
@@ -25,7 +25,7 @@ InlineAsm::~InlineAsm() {
}
-InlineAsm *InlineAsm::get(const FunctionType *Ty, StringRef AsmString,
+InlineAsm *InlineAsm::get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
bool isAlignStack) {
InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack);
@@ -33,7 +33,7 @@ InlineAsm *InlineAsm::get(const FunctionType *Ty, StringRef AsmString,
return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(Ty), Key);
}
-InlineAsm::InlineAsm(const PointerType *Ty, const std::string &asmString,
+InlineAsm::InlineAsm(PointerType *Ty, const std::string &asmString,
const std::string &constraints, bool hasSideEffects,
bool isAlignStack)
: Value(Ty, Value::InlineAsmVal),
@@ -242,7 +242,7 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
/// Verify - Verify that the specified constraint string is reasonable for the
/// specified function type, and otherwise validate the constraint string.
-bool InlineAsm::Verify(const FunctionType *Ty, StringRef ConstStr) {
+bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
if (Ty->isVarArg()) return false;
ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
@@ -282,7 +282,7 @@ bool InlineAsm::Verify(const FunctionType *Ty, StringRef ConstStr) {
if (Ty->getReturnType()->isStructTy()) return false;
break;
default:
- const StructType *STy = dyn_cast<StructType>(Ty->getReturnType());
+ StructType *STy = dyn_cast<StructType>(Ty->getReturnType());
if (STy == 0 || STy->getNumElements() != NumOutputs)
return false;
break;
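
Caller-side sketch for the updated InlineAsm::get, matching the five-parameter signature above (hypothetical helper; not from the patch). An empty constraint string fits the nullary void signature:

    #include "llvm/InlineAsm.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Illustrative sketch only.
    InlineAsm *makeNop(LLVMContext &Ctx) {
      FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
      return InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true,
                            /*isAlignStack=*/false);
    }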
diff --git a/lib/VMCore/Instruction.cpp b/lib/VMCore/Instruction.cpp
index 02c0757439..4627e7182b 100644
--- a/lib/VMCore/Instruction.cpp
+++ b/lib/VMCore/Instruction.cpp
@@ -20,7 +20,7 @@
#include "llvm/Support/LeakDetector.h"
using namespace llvm;
-Instruction::Instruction(const Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
Instruction *InsertBefore)
: User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
// Make sure that we get added to a basicblock
@@ -34,7 +34,7 @@ Instruction::Instruction(const Type *ty, unsigned it, Use *Ops, unsigned NumOps,
}
}
-Instruction::Instruction(const Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd)
: User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
// Make sure that we get added to a basicblock
diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp
index 9baad09cb2..df4fc16b3d 100644
--- a/lib/VMCore/Instructions.cpp
+++ b/lib/VMCore/Instructions.cpp
@@ -62,11 +62,11 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
if (Op1->getType() != Op2->getType())
return "both values to select must have same type";
- if (const VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
+ if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
// Vector select.
if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
return "vector select condition element type must be i1";
- const VectorType *ET = dyn_cast<VectorType>(Op1->getType());
+ VectorType *ET = dyn_cast<VectorType>(Op1->getType());
if (ET == 0)
return "selected values for vector select must be vectors";
if (ET->getNumElements() != VT->getNumElements())
@@ -179,7 +179,7 @@ void CallInst::init(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr) {
Op<-1>() = Func;
#ifndef NDEBUG
- const FunctionType *FTy =
+ FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert((Args.size() == FTy->getNumParams() ||
@@ -201,7 +201,7 @@ void CallInst::init(Value *Func, const Twine &NameStr) {
Op<-1>() = Func;
#ifndef NDEBUG
- const FunctionType *FTy =
+ FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
@@ -269,8 +269,8 @@ static bool IsConstantOne(Value *val) {
}
static Instruction *createMalloc(Instruction *InsertBefore,
- BasicBlock *InsertAtEnd, const Type *IntPtrTy,
- const Type *AllocTy, Value *AllocSize,
+ BasicBlock *InsertAtEnd, Type *IntPtrTy,
+ Type *AllocTy, Value *AllocSize,
Value *ArraySize, Function *MallocF,
const Twine &Name) {
assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
@@ -319,7 +319,7 @@ static Instruction *createMalloc(Instruction *InsertBefore,
if (!MallocFunc)
// prototype malloc as "void *malloc(size_t)"
MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, NULL);
- const PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
+ PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
CallInst *MCall = NULL;
Instruction *Result = NULL;
if (InsertBefore) {
@@ -354,7 +354,7 @@ static Instruction *createMalloc(Instruction *InsertBefore,
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
- const Type *IntPtrTy, const Type *AllocTy,
+ Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize,
Function * MallocF,
const Twine &Name) {
@@ -371,7 +371,7 @@ Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
- const Type *IntPtrTy, const Type *AllocTy,
+ Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize,
Function *MallocF, const Twine &Name) {
return createMalloc(NULL, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
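
How a caller reaches the CreateMalloc overload above, sketched against the LLVM 3.0-era API (hypothetical helper; the 64-bit IntPtrTy is an assumption, real code would query TargetData):

    #include "llvm/Instructions.h"
    #include "llvm/Constants.h"
    using namespace llvm;

    // Illustrative sketch; assumes a 64-bit pointer-sized integer.
    Instruction *mallocOneBefore(Instruction *InsertPt, Type *AllocTy) {
      Type *IntPtrTy = Type::getInt64Ty(InsertPt->getContext());
      Constant *Size = ConstantExpr::getSizeOf(AllocTy);
      Size = ConstantExpr::getTruncOrBitCast(Size, IntPtrTy);
      return CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy, Size,
                                    /*ArraySize=*/0, /*MallocF=*/0,
                                    "malloccall");
    }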
@@ -388,8 +388,8 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
Module* M = BB->getParent()->getParent();
- const Type *VoidTy = Type::getVoidTy(M->getContext());
- const Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
+ Type *VoidTy = Type::getVoidTy(M->getContext());
+ Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
// prototype free as "void free(void*)"
Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, NULL);
CallInst* Result = NULL;
@@ -436,7 +436,7 @@ void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
Op<-1>() = IfException;
#ifndef NDEBUG
- const FunctionType *FTy =
+ FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType());
assert(((Args.size() == FTy->getNumParams()) ||
@@ -692,7 +692,7 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
return Amt;
}
-AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
const Twine &Name, Instruction *InsertBefore)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertBefore) {
@@ -701,7 +701,7 @@ AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
setName(Name);
}
-AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
@@ -710,7 +710,7 @@ AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
setName(Name);
}
-AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
+AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
Instruction *InsertBefore)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), 0), InsertBefore) {
@@ -719,7 +719,7 @@ AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
setName(Name);
}
-AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
+AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), 0), InsertAtEnd) {
@@ -728,7 +728,7 @@ AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
setName(Name);
}
-AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
const Twine &Name, Instruction *InsertBefore)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertBefore) {
@@ -737,7 +737,7 @@ AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
setName(Name);
}
-AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
const Twine &Name, BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
@@ -1067,9 +1067,9 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr, Value *Idx,
/// pointer type.
///
template <typename IndexTy>
-static Type *getIndexedTypeInternal(const Type *Ptr, IndexTy const *Idxs,
+static Type *getIndexedTypeInternal(Type *Ptr, IndexTy const *Idxs,
unsigned NumIdx) {
- const PointerType *PTy = dyn_cast<PointerType>(Ptr);
+ PointerType *PTy = dyn_cast<PointerType>(Ptr);
if (!PTy) return 0; // Type isn't a pointer type!
Type *Agg = PTy->getElementType();
@@ -1093,25 +1093,25 @@ static Type *getIndexedTypeInternal(const Type *Ptr, IndexTy const *Idxs,
return CurIdx == NumIdx ? Agg : 0;
}
-Type *GetElementPtrInst::getIndexedType(const Type *Ptr, Value* const *Idxs,
+Type *GetElementPtrInst::getIndexedType(Type *Ptr, Value* const *Idxs,
unsigned NumIdx) {
return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
}
-Type *GetElementPtrInst::getIndexedType(const Type *Ptr,
+Type *GetElementPtrInst::getIndexedType(Type *Ptr,
Constant* const *Idxs,
unsigned NumIdx) {
return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
}
-Type *GetElementPtrInst::getIndexedType(const Type *Ptr,
+Type *GetElementPtrInst::getIndexedType(Type *Ptr,
uint64_t const *Idxs,
unsigned NumIdx) {
return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
}
-Type *GetElementPtrInst::getIndexedType(const Type *Ptr, Value *Idx) {
- const PointerType *PTy = dyn_cast<PointerType>(Ptr);
+Type *GetElementPtrInst::getIndexedType(Type *Ptr, Value *Idx) {
+ PointerType *PTy = dyn_cast<PointerType>(Ptr);
if (!PTy) return 0; // Type isn't a pointer type!
// Check the pointer index.
@@ -1286,13 +1286,13 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
return false;
- const VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
+ VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
return false;
// Check to see if Mask is valid.
if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
- const VectorType *VTy = cast<VectorType>(V1->getType());
+ VectorType *VTy = cast<VectorType>(V1->getType());
for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
if (CI->uge(VTy->getNumElements()*2))
@@ -1382,7 +1382,7 @@ ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
-Type *ExtractValueInst::getIndexedType(const Type *Agg,
+Type *ExtractValueInst::getIndexedType(Type *Agg,
ArrayRef<unsigned> Idxs) {
for (unsigned CurIdx = 0; CurIdx != Idxs.size(); ++CurIdx) {
unsigned Index = Idxs[CurIdx];
@@ -1392,10 +1392,10 @@ Type *ExtractValueInst::getIndexedType(const Type *Agg,
// insertvalue we need to check array indexing manually.
// Since the only other types we can index into are struct types it's just
// as easy to check those manually as well.
- if (const ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
if (Index >= AT->getNumElements())
return 0;
- } else if (const StructType *ST = dyn_cast<StructType>(Agg)) {
+ } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
if (Index >= ST->getNumElements())
return 0;
} else {
@@ -1413,7 +1413,7 @@ Type *ExtractValueInst::getIndexedType(const Type *Agg,
//===----------------------------------------------------------------------===//
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
- const Type *Ty, const Twine &Name,
+ Type *Ty, const Twine &Name,
Instruction *InsertBefore)
: Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
@@ -1426,7 +1426,7 @@ BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
}
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
- const Type *Ty, const Twine &Name,
+ Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
@@ -1589,7 +1589,7 @@ BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Instruction *InsertBefore) {
Constant *C;
- if (const VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
+ if (VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
C = Constant::getAllOnesValue(PTy->getElementType());
C = ConstantVector::get(
std::vector<Constant*>(PTy->getNumElements(), C));
@@ -1604,7 +1604,7 @@ BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd) {
Constant *AllOnes;
- if (const VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
+ if (VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
// Create a vector of all ones values.
Constant *Elt = Constant::getAllOnesValue(PTy->getElementType());
AllOnes = ConstantVector::get(
@@ -1743,8 +1743,8 @@ bool CastInst::isLosslessCast() const {
return false;
// Identity cast is always lossless
- const Type* SrcTy = getOperand(0)->getType();
- const Type* DstTy = getType();
+ Type* SrcTy = getOperand(0)->getType();
+ Type* DstTy = getType();
if (SrcTy == DstTy)
return true;
@@ -1763,9 +1763,9 @@ bool CastInst::isLosslessCast() const {
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// @brief Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
- const Type *SrcTy,
- const Type *DestTy,
- const Type *IntPtrTy) {
+ Type *SrcTy,
+ Type *DestTy,
+ Type *IntPtrTy) {
switch (Opcode) {
default:
assert(!"Invalid CastOp");
@@ -1791,7 +1791,7 @@ bool CastInst::isNoopCast(Instruction::CastOps Opcode,
}
/// @brief Determine if a cast is a no-op.
-bool CastInst::isNoopCast(const Type *IntPtrTy) const {
+bool CastInst::isNoopCast(Type *IntPtrTy) const {
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
@@ -1805,7 +1805,7 @@ bool CastInst::isNoopCast(const Type *IntPtrTy) const {
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
Instruction::CastOps firstOp, Instruction::CastOps secondOp,
- const Type *SrcTy, const Type *MidTy, const Type *DstTy, const Type *IntPtrTy)
+ Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy)
{
// Define the 144 possibilities for these two cast instructions. The values
// in this matrix determine what to do in a given situation and select the
@@ -1967,7 +1967,7 @@ unsigned CastInst::isEliminableCastPair(
return 0;
}
-CastInst *CastInst::Create(Instruction::CastOps op, Value *S, const Type *Ty,
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
const Twine &Name, Instruction *InsertBefore) {
assert(castIsValid(op, S, Ty) && "Invalid cast!");
// Construct and return the appropriate CastInst subclass
@@ -1990,7 +1990,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, const Type *Ty,
return 0;
}
-CastInst *CastInst::Create(Instruction::CastOps op, Value *S, const Type *Ty,
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd) {
assert(castIsValid(op, S, Ty) && "Invalid cast!");
// Construct and return the appropriate CastInst subclass
@@ -2013,7 +2013,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, const Type *Ty,
return 0;
}
-CastInst *CastInst::CreateZExtOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2021,7 +2021,7 @@ CastInst *CastInst::CreateZExtOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateZExtOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2029,7 +2029,7 @@ CastInst *CastInst::CreateZExtOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreateSExtOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2037,7 +2037,7 @@ CastInst *CastInst::CreateSExtOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateSExtOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2045,7 +2045,7 @@ CastInst *CastInst::CreateSExtOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreateTruncOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2053,7 +2053,7 @@ CastInst *CastInst::CreateTruncOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateTruncOrBitCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -2061,7 +2061,7 @@ CastInst *CastInst::CreateTruncOrBitCast(Value *S, const Type *Ty,
return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreatePointerCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(S->getType()->isPointerTy() && "Invalid cast");
@@ -2074,7 +2074,7 @@ CastInst *CastInst::CreatePointerCast(Value *S, const Type *Ty,
}
/// @brief Create a BitCast or a PtrToInt cast instruction
-CastInst *CastInst::CreatePointerCast(Value *S, const Type *Ty,
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
assert(S->getType()->isPointerTy() && "Invalid cast");
@@ -2086,7 +2086,7 @@ CastInst *CastInst::CreatePointerCast(Value *S, const Type *Ty,
return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateIntegerCast(Value *C, const Type *Ty,
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
@@ -2100,7 +2100,7 @@ CastInst *CastInst::CreateIntegerCast(Value *C, const Type *Ty,
return Create(opcode, C, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateIntegerCast(Value *C, const Type *Ty,
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
@@ -2114,7 +2114,7 @@ CastInst *CastInst::CreateIntegerCast(Value *C, const Type *Ty,
return Create(opcode, C, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreateFPCast(Value *C, const Type *Ty,
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
@@ -2127,7 +2127,7 @@ CastInst *CastInst::CreateFPCast(Value *C, const Type *Ty,
return Create(opcode, C, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateFPCast(Value *C, const Type *Ty,
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
@@ -2142,15 +2142,15 @@ CastInst *CastInst::CreateFPCast(Value *C, const Type *Ty,
// Check whether it is valid to call getCastOpcode for these types.
// This routine must be kept in sync with getCastOpcode.
-bool CastInst::isCastable(const Type *SrcTy, const Type *DestTy) {
+bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
return false;
if (SrcTy == DestTy)
return true;
- if (const VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
- if (const VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
// An element by element cast. Valid if casting the elements is valid.
SrcTy = SrcVecTy->getElementType();
@@ -2212,8 +2212,8 @@ bool CastInst::isCastable(const Type *SrcTy, const Type *DestTy) {
// This routine must be kept in sync with isCastable.
Instruction::CastOps
CastInst::getCastOpcode(
- const Value *Src, bool SrcIsSigned, const Type *DestTy, bool DestIsSigned) {
- const Type *SrcTy = Src->getType();
+ const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
+ Type *SrcTy = Src->getType();
assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
"Only first class types are castable!");
@@ -2221,8 +2221,8 @@ CastInst::getCastOpcode(
if (SrcTy == DestTy)
return BitCast;
- if (const VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
- if (const VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
// An element by element cast. Find the appropriate opcode based on the
// element types.
@@ -2320,10 +2320,10 @@ CastInst::getCastOpcode(
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
-CastInst::castIsValid(Instruction::CastOps op, Value *S, const Type *DstTy) {
+CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
// Check for type sanity on the arguments
- const Type *SrcTy = S->getType();
+ Type *SrcTy = S->getType();
if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
SrcTy->isAggregateType() || DstTy->isAggregateType())
return false;
@@ -2384,144 +2384,144 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, const Type *DstTy) {
}
TruncInst::TruncInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}
TruncInst::TruncInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}
ZExtInst::ZExtInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
ZExtInst::ZExtInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
SExtInst::SExtInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
SExtInst::SExtInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
FPTruncInst::FPTruncInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPTruncInst::FPTruncInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPExtInst::FPExtInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
FPExtInst::FPExtInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
UIToFPInst::UIToFPInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
UIToFPInst::UIToFPInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
SIToFPInst::SIToFPInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
SIToFPInst::SIToFPInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
FPToUIInst::FPToUIInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToUIInst::FPToUIInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToSIInst::FPToSIInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
FPToSIInst::FPToSIInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
PtrToIntInst::PtrToIntInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
PtrToIntInst::PtrToIntInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
IntToPtrInst::IntToPtrInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
IntToPtrInst::IntToPtrInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
BitCastInst::BitCastInst(
- Value *S, const Type *Ty, const Twine &Name, Instruction *InsertBefore
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
BitCastInst::BitCastInst(
- Value *S, const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
@@ -2532,7 +2532,7 @@ BitCastInst::BitCastInst(
void CmpInst::Anchor() const {}
-CmpInst::CmpInst(const Type *ty, OtherOps op, unsigned short predicate,
+CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate,
Value *LHS, Value *RHS, const Twine &Name,
Instruction *InsertBefore)
: Instruction(ty, op,
@@ -2545,7 +2545,7 @@ CmpInst::CmpInst(const Type *ty, OtherOps op, unsigned short predicate,
setName(Name);
}
-CmpInst::CmpInst(const Type *ty, OtherOps op, unsigned short predicate,
+CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate,
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd)
: Instruction(ty, op,
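
Sketch of the de-constified cast factories in use (hypothetical helper; illustrative only). CreateIntegerCast picks SExt, Trunc, or BitCast from the operand widths:

    #include "llvm/Instructions.h"
    using namespace llvm;

    // Illustrative sketch; V must have an integer (or integer vector)
    // type, which CreateIntegerCast asserts.
    Value *widenToI64(Value *V, Instruction *InsertPt) {
      Type *I64Ty = Type::getInt64Ty(V->getContext());
      return CastInst::CreateIntegerCast(V, I64Ty, /*isSigned=*/true,
                                         V->getName() + ".i64", InsertPt);
    }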
diff --git a/lib/VMCore/LLVMContextImpl.h b/lib/VMCore/LLVMContextImpl.h
index 06a6f2a25a..a3f68fecbb 100644
--- a/lib/VMCore/LLVMContextImpl.h
+++ b/lib/VMCore/LLVMContextImpl.h
@@ -42,8 +42,8 @@ class Value;
struct DenseMapAPIntKeyInfo {
struct KeyTy {
APInt val;
- const Type* type;
- KeyTy(const APInt& V, const Type* Ty) : val(V), type(Ty) {}
+ Type* type;
+ KeyTy(const APInt& V, Type* Ty) : val(V), type(Ty) {}
KeyTy(const KeyTy& that) : val(that.val), type(that.type) {}
bool operator==(const KeyTy& that) const {
return type == that.type && this->val == that.val;
diff --git a/lib/VMCore/Module.cpp b/lib/VMCore/Module.cpp
index be2fcb8ac6..25d5391b9e 100644
--- a/lib/VMCore/Module.cpp
+++ b/lib/VMCore/Module.cpp
@@ -149,7 +149,7 @@ void Module::getMDKindNames(SmallVectorImpl<StringRef> &Result) const {
// the symbol table directly for this common task.
//
Constant *Module::getOrInsertFunction(StringRef Name,
- const FunctionType *Ty,
+ FunctionType *Ty,
AttrListPtr AttributeList) {
// See if we have a definition for the specified function already.
GlobalValue *F = getNamedValue(Name);
@@ -182,7 +182,7 @@ Constant *Module::getOrInsertFunction(StringRef Name,
}
Constant *Module::getOrInsertTargetIntrinsic(StringRef Name,
- const FunctionType *Ty,
+ FunctionType *Ty,
AttrListPtr AttributeList) {
// See if we have a definition for the specified function already.
GlobalValue *F = getNamedValue(Name);
@@ -199,7 +199,7 @@ Constant *Module::getOrInsertTargetIntrinsic(StringRef Name,
}
Constant *Module::getOrInsertFunction(StringRef Name,
- const FunctionType *Ty) {
+ FunctionType *Ty) {
AttrListPtr AttributeList = AttrListPtr::get((AttributeWithIndex *)0, 0);
return getOrInsertFunction(Name, Ty, AttributeList);
}
@@ -211,7 +211,7 @@ Constant *Module::getOrInsertFunction(StringRef Name,
//
Constant *Module::getOrInsertFunction(StringRef Name,
AttrListPtr AttributeList,
- const Type *RetTy, ...) {
+ Type *RetTy, ...) {
va_list Args;
va_start(Args, RetTy);
@@ -229,7 +229,7 @@ Constant *Module::getOrInsertFunction(StringRef Name,
}
Constant *Module::getOrInsertFunction(StringRef Name,
- const Type *RetTy, ...) {
+ Type *RetTy, ...) {
va_list Args;
va_start(Args, RetTy);
@@ -279,7 +279,7 @@ GlobalVariable *Module::getGlobalVariable(StringRef Name,
/// with a constantexpr cast to the right type.
/// 3. Finally, if the existing global is the correct declaration, return the
/// existing global.
-Constant *Module::getOrInsertGlobal(StringRef Name, const Type *Ty) {
+Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
// See if we have a definition for the specified global already.
GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
if (GV == 0) {
@@ -436,7 +436,7 @@ namespace {
// To avoid walking constant expressions multiple times and other IR
// objects, we keep several helper maps.
DenseSet<const Value*> VisitedConstants;
- DenseSet<const Type*> VisitedTypes;
+ DenseSet<Type*> VisitedTypes;
std::vector<StructType*> &StructTypes;
public:
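
Caller-side sketch for the updated Module::getOrInsertFunction (hypothetical helper, LLVM 3.0-era API; the declarePuts name is invented for illustration):

    #include "llvm/Module.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Illustrative sketch.
    Constant *declarePuts(Module &M) {
      LLVMContext &Ctx = M.getContext();
      Type *Int8PtrTy = Type::getInt8PtrTy(Ctx);
      FunctionType *FTy =
          FunctionType::get(Type::getInt32Ty(Ctx), Int8PtrTy, false);
      // Returns the Function, or a constantexpr cast of an existing
      // declaration whose type does not match.
      return M.getOrInsertFunction("puts", FTy);
    }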
diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp
index f874d1b283..bf8af07030 100644
--- a/lib/VMCore/Type.cpp
+++ b/lib/VMCore/Type.cpp
@@ -40,8 +40,8 @@ Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return this.
-const Type *Type::getScalarType() const {
- if (const VectorType *VTy = dyn_cast<VectorType>(this))
+Type *Type::getScalarType() {
+ if (VectorType *VTy = dyn_cast<VectorType>(this))
return VTy->getElementType();
return this;
}
@@ -77,7 +77,7 @@ bool Type::isFPOrFPVectorTy() const {
// canLosslesslyBitCastTo - Return true if this type can be converted to
// 'Ty' without any reinterpretation of bits. For example, i8* to i32*.
//
-bool Type::canLosslesslyBitCastTo(const Type *Ty) const {
+bool Type::canLosslesslyBitCastTo(Type *Ty) const {
// Identity cast means no change so return true
if (this == Ty)
return true;
@@ -146,7 +146,7 @@ unsigned Type::getPrimitiveSizeInBits() const {
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
-unsigned Type::getScalarSizeInBits() const {
+unsigned Type::getScalarSizeInBits() {
return getScalarType()->getPrimitiveSizeInBits();
}
@@ -306,7 +306,7 @@ APInt IntegerType::getMask() const {
// FunctionType Implementation
//===----------------------------------------------------------------------===//
-FunctionType::FunctionType(const Type *Result, ArrayRef<Type*> Params,
+FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params,
bool IsVarArgs)
: Type(Result->getContext(), FunctionTyID) {
Type **SubTys = reinterpret_cast<Type**>(this+1);
@@ -326,7 +326,7 @@ FunctionType::FunctionType(const Type *Result, ArrayRef<Type*> Params,
}
// FunctionType::get - The factory function for the FunctionType class.
-FunctionType *FunctionType::get(const Type *ReturnType,
+FunctionType *FunctionType::get(Type *ReturnType,
ArrayRef<Type*> Params, bool isVarArg) {
// TODO: This is brutally slow.
std::vector<Type*> Key;
@@ -351,21 +351,21 @@ FunctionType *FunctionType::get(const Type *ReturnType,
}
-FunctionType *FunctionType::get(const Type *Result, bool isVarArg) {
+FunctionType *FunctionType::get(Type *Result, bool isVarArg) {
return get(Result, ArrayRef<Type *>(), isVarArg);
}
/// isValidReturnType - Return true if the specified type is valid as a return
/// type.
-bool FunctionType::isValidReturnType(const Type *RetTy) {
+bool FunctionType::isValidReturnType(Type *RetTy) {
return !RetTy->isFunctionTy() && !RetTy->isLabelTy() &&
!RetTy->isMetadataTy();
}
/// isValidArgumentType - Return true if the specified type is valid as an
/// argument type.
-bool FunctionType::isValidArgumentType(const Type *ArgTy) {
+bool FunctionType::isValidArgumentType(Type *ArgTy) {
return ArgTy->isFirstClassType();
}
@@ -524,14 +524,14 @@ void StructType::setBody(Type *type, ...) {
setBody(StructFields);
}
-bool StructType::isValidElementType(const Type *ElemTy) {
+bool StructType::isValidElementType(Type *ElemTy) {
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
!ElemTy->isMetadataTy() && !ElemTy->isFunctionTy();
}
/// isLayoutIdentical - Return true if this is layout identical to the
/// specified struct.
-bool StructType::isLayoutIdentical(const StructType *Other) const {
+bool StructType::isLayoutIdentical(StructType *Other) const {
if (this == Other) return true;
if (isPacked() != Other->isPacked() ||
@@ -557,8 +557,8 @@ StructType *Module::getTypeByName(StringRef Name) const {
// CompositeType Implementation
//===----------------------------------------------------------------------===//
-Type *CompositeType::getTypeAtIndex(const Value *V) const {
- if (const StructType *STy = dyn_cast<StructType>(this)) {
+Type *CompositeType::getTypeAtIndex(const Value *V) {
+ if (StructType *STy = dyn_cast<StructType>(this)) {
unsigned Idx = (unsigned)cast<ConstantInt>(V)->getZExtValue();
assert(indexValid(Idx) && "Invalid structure index!");
return STy->getElementType(Idx);
@@ -566,8 +566,8 @@ Type *CompositeType::getTypeAtIndex(const Value *V) const {
return cast<SequentialType>(this)->getElementType();
}
-Type *CompositeType::getTypeAtIndex(unsigned Idx) const {
- if (const StructType *STy = dyn_cast<StructType>(this)) {
+Type *CompositeType::getTypeAtIndex(unsigned Idx) {
+ if (StructType *STy = dyn_cast<StructType>(this)) {
assert(indexValid(Idx) && "Invalid structure index!");
return STy->getElementType(Idx);
}
@@ -605,7 +605,7 @@ ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
}
-ArrayType *ArrayType::get(const Type *elementType, uint64_t NumElements) {
+ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) {
Type *ElementType = const_cast<Type*>(elementType);
assert(isValidElementType(ElementType) && "Invalid type for array element!");
@@ -618,7 +618,7 @@ ArrayType *ArrayType::get(const Type *elementType, uint64_t NumElements) {
return Entry;
}
-bool ArrayType::isValidElementType(const Type *ElemTy) {
+bool ArrayType::isValidElementType(Type *ElemTy) {
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
!ElemTy->isMetadataTy() && !ElemTy->isFunctionTy();
}
@@ -632,7 +632,7 @@ VectorType::VectorType(Type *ElType, unsigned NumEl)
NumElements = NumEl;
}
-VectorType *VectorType::get(const Type *elementType, unsigned NumElements) {
+VectorType *VectorType::get(Type *elementType, unsigned NumElements) {
Type *ElementType = const_cast<Type*>(elementType);
assert(NumElements > 0 && "#Elements of a VectorType must be greater than 0");
assert(isValidElementType(ElementType) &&
@@ -647,7 +647,7 @@ VectorType *VectorType::get(const Type *elementType, unsigned NumElements) {
return Entry;
}
-bool VectorType::isValidElementType(const Type *ElemTy) {
+bool VectorType::isValidElementType(Type *ElemTy) {
return ElemTy->isIntegerTy() || ElemTy->isFloatingPointTy();
}
@@ -655,8 +655,7 @@ bool VectorType::isValidElementType(const Type *ElemTy) {
// PointerType Implementation
//===----------------------------------------------------------------------===//
-PointerType *PointerType::get(const Type *eltTy, unsigned AddressSpace) {
- Type *EltTy = const_cast<Type*>(eltTy);
+PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
assert(EltTy && "Can't get a pointer to <null> type!");
assert(isValidElementType(EltTy) && "Invalid type for pointer element!");
@@ -677,11 +676,11 @@ PointerType::PointerType(Type *E, unsigned AddrSpace)
setSubclassData(AddrSpace);
}
-PointerType *Type::getPointerTo(unsigned addrs) const {
+PointerType *Type::getPointerTo(unsigned addrs) {
return PointerType::get(this, addrs);
}
-bool PointerType::isValidElementType(const Type *ElemTy) {
+bool PointerType::isValidElementType(Type *ElemTy) {
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
!ElemTy->isMetadataTy();
}
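
The now non-const Type::getScalarType and Type::getPointerTo compose without const_casts; a minimal sketch (hypothetical helper; illustrative only):

    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Illustrative sketch.
    PointerType *pointerToScalar(Type *Ty) {
      Type *Scalar = Ty->getScalarType(); // element type of a vector, else Ty
      return Scalar->getPointerTo();      // address space 0 by default
    }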
diff --git a/lib/VMCore/Value.cpp b/lib/VMCore/Value.cpp
index f1815e377e..2fa5f08a3e 100644
--- a/lib/VMCore/Value.cpp
+++ b/lib/VMCore/Value.cpp
@@ -35,12 +35,12 @@ using namespace llvm;
// Value Class
//===----------------------------------------------------------------------===//
-static inline Type *checkType(const Type *Ty) {
+static inline Type *checkType(Type *Ty) {
assert(Ty && "Value defined with a null type: Error!");
return const_cast<Type*>(Ty);
}
-Value::Value(const Type *ty, unsigned scid)
+Value::Value(Type *ty, unsigned scid)
: SubclassID(scid), HasValueHandle(0),
SubclassOptionalData(0), SubclassData(0), VTy((Type*)checkType(ty)),
UseList(0), Name(0) {
@@ -369,7 +369,7 @@ bool Value::isDereferenceablePointer() const {
for (User::const_op_iterator I = GEP->op_begin()+1,
E = GEP->op_end(); I != E; ++I) {
Value *Index = *I;
- const Type *Ty = *GTI++;
+ Type *Ty = *GTI++;
// Struct indices can't be out of bounds.
if (isa<StructType>(Ty))
continue;
@@ -380,7 +380,7 @@ bool Value::isDereferenceablePointer() const {
if (CI->isZero())
continue;
// Check to see that it's within the bounds of an array.
- const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (!ATy)
return false;
if (CI->getValue().getActiveBits() > 64)
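
The gep_type_iterator walk above generalizes; a standalone sketch (hypothetical helper, LLVM 3.0-era API; illustrative only):

    #include "llvm/Instructions.h"
    #include "llvm/Support/GetElementPtrTypeIterator.h"
    using namespace llvm;

    // Illustrative sketch: count how many GEP steps index into a struct.
    unsigned countStructSteps(const GetElementPtrInst *GEP) {
      unsigned N = 0;
      for (gep_type_iterator GTI = gep_type_begin(GEP),
                             E = gep_type_end(GEP); GTI != E; ++GTI)
        if (isa<StructType>(*GTI)) // *GTI now yields a plain Type*
          ++N;
      return N;
    }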
diff --git a/lib/VMCore/ValueTypes.cpp b/lib/VMCore/ValueTypes.cpp
index 21a1f03444..525228bcd8 100644
--- a/lib/VMCore/ValueTypes.cpp
+++ b/lib/VMCore/ValueTypes.cpp
@@ -77,9 +77,9 @@ unsigned EVT::getExtendedVectorNumElements() const {
unsigned EVT::getExtendedSizeInBits() const {
assert(isExtended() && "Type is not extended!");
- if (const IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy))
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy))
return ITy->getBitWidth();
- if (const VectorType *VTy = dyn_cast<VectorType>(LLVMTy))
+ if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy))
return VTy->getBitWidth();
assert(false && "Unrecognized extended type!");
return 0; // Suppress warnings.
@@ -140,7 +140,7 @@ std::string EVT::getEVTString() const {
/// getTypeForEVT - This method returns an LLVM type corresponding to the
/// specified EVT. For integer types, this returns an unsigned type. Note
/// that this will abort for types that cannot be represented.
-const Type *EVT::getTypeForEVT(LLVMContext &Context) const {
+Type *EVT::getTypeForEVT(LLVMContext &Context) const {
switch (V.SimpleTy) {
default:
assert(isExtended() && "Type is not extended!");
@@ -186,7 +186,7 @@ const Type *EVT::getTypeForEVT(LLVMContext &Context) const {
/// getEVT - Return the value type corresponding to the specified type. This
/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types
/// are returned as Other, otherwise they are invalid.
-EVT EVT::getEVT(const Type *Ty, bool HandleUnknown){
+EVT EVT::getEVT(Type *Ty, bool HandleUnknown){
switch (Ty->getTypeID()) {
default:
if (HandleUnknown) return MVT(MVT::Other);
@@ -204,7 +204,7 @@ EVT EVT::getEVT(const Type *Ty, bool HandleUnknown){
case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
case Type::PointerTyID: return MVT(MVT::iPTR);
case Type::VectorTyID: {
- const VectorType *VTy = cast<VectorType>(Ty);
+ VectorType *VTy = cast<VectorType>(Ty);
return getVectorVT(Ty->getContext(), getEVT(VTy->getElementType(), false),
VTy->getNumElements());
}
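
Caller-side sketch for the de-constified EVT bridge (hypothetical helper; EltVT must map to an integer or floating-point IR type, or getTypeForEVT asserts):

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Illustrative sketch.
    VectorType *fourLanes(LLVMContext &Ctx, EVT EltVT) {
      Type *EltTy = EltVT.getTypeForEVT(Ctx); // now returns a mutable Type*
      return VectorType::get(EltTy, 4);
    }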
diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp
index b146b896cb..4594916d94 100644
--- a/lib/VMCore/Verifier.cpp
+++ b/lib/VMCore/Verifier.cpp
@@ -283,13 +283,13 @@ namespace {
void visitInsertValueInst(InsertValueInst &IVI);
void VerifyCallSite(CallSite CS);
- bool PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
+ bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
int VT, unsigned ArgNo, std::string &Suffix);
void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
unsigned RetNum, unsigned ParamNum, ...);
- void VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
+ void VerifyParameterAttrs(Attributes Attrs, Type *Ty,
bool isReturnValue, const Value *V);
- void VerifyFunctionAttrs(const FunctionType *FT, const AttrListPtr &Attrs,
+ void VerifyFunctionAttrs(FunctionType *FT, const AttrListPtr &Attrs,
const Value *V);
void WriteValue(const Value *V) {
@@ -302,7 +302,7 @@ namespace {
}
}
- void WriteType(const Type *T) {
+ void WriteType(Type *T) {
if (!T) return;
MessagesStr << ' ' << *T;
}
@@ -323,7 +323,7 @@ namespace {
}
void CheckFailed(const Twine &Message, const Value *V1,
- const Type *T2, const Value *V3 = 0) {
+ Type *T2, const Value *V3 = 0) {
MessagesStr << Message.str() << "\n";
WriteValue(V1);
WriteType(T2);
@@ -331,8 +331,8 @@ namespace {
Broken = true;
}
- void CheckFailed(const Twine &Message, const Type *T1,
- const Type *T2 = 0, const Type *T3 = 0) {
+ void CheckFailed(const Twine &Message, Type *T1,
+ Type *T2 = 0, Type *T3 = 0) {
MessagesStr << Message.str() << "\n";
WriteType(T1);
WriteType(T2);
@@ -421,9 +421,9 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
"invalid linkage for intrinsic global variable", &GV);
// Don't worry about emitting an error for it not being an array,
// visitGlobalValue will complain on appending non-array.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(GV.getType())) {
- const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
- const PointerType *FuncPtrTy =
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getType())) {
+ StructType *STy = dyn_cast<StructType>(ATy->getElementType());
+ PointerType *FuncPtrTy =
FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo();
Assert1(STy && STy->getNumElements() == 2 &&
STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
@@ -514,7 +514,7 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) {
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
-void Verifier::VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
+void Verifier::VerifyParameterAttrs(Attributes Attrs, Type *Ty,
bool isReturnValue, const Value *V) {
if (Attrs == Attribute::None)
return;
@@ -541,7 +541,7 @@ void Verifier::VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
Attribute::getAsString(TypeI), V);
Attributes ByValI = Attrs & Attribute::ByVal;
- if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
Assert1(!ByValI || PTy->getElementType()->isSized(),
"Attribute " + Attribute::getAsString(ByValI) +
" does not support unsized types!", V);
@@ -554,7 +554,7 @@ void Verifier::VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
// VerifyFunctionAttrs - Check parameter attributes against a function type.
// The value V is printed in error messages.
-void Verifier::VerifyFunctionAttrs(const FunctionType *FT,
+void Verifier::VerifyFunctionAttrs(FunctionType *FT,
const AttrListPtr &Attrs,
const Value *V) {
if (Attrs.isEmpty())
@@ -565,7 +565,7 @@ void Verifier::VerifyFunctionAttrs(const FunctionType *FT,
for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
const AttributeWithIndex &Attr = Attrs.getSlot(i);
- const Type *Ty;
+ Type *Ty;
if (Attr.Index == 0)
Ty = FT->getReturnType();
else if (Attr.Index-1 < FT->getNumParams())
@@ -615,7 +615,7 @@ static bool VerifyAttributeCount(const AttrListPtr &Attrs, unsigned Params) {
//
void Verifier::visitFunction(Function &F) {
// Check function arguments.
- const FunctionType *FT = F.getFunctionType();
+ FunctionType *FT = F.getFunctionType();
unsigned NumArgs = F.arg_size();
Assert1(Context == &F.getContext(),
@@ -795,7 +795,7 @@ void Verifier::visitReturnInst(ReturnInst &RI) {
void Verifier::visitSwitchInst(SwitchInst &SI) {
// Check to make sure that all of the constants in the switch instruction
// have the same type as the switched-on value.
- const Type *SwitchTy = SI.getCondition()->getType();
+ Type *SwitchTy = SI.getCondition()->getType();
SmallPtrSet<ConstantInt*, 32> Constants;
for (unsigned i = 1, e = SI.getNumCases(); i != e; ++i) {
Assert1(SI.getCaseValue(i)->getType() == SwitchTy,
@@ -836,8 +836,8 @@ void Verifier::visitUserOp1(Instruction &I) {
void Verifier::visitTruncInst(TruncInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
@@ -854,8 +854,8 @@ void Verifier::visitTruncInst(TruncInst &I) {
void Verifier::visitZExtInst(ZExtInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
Assert1(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
@@ -872,8 +872,8 @@ void Verifier::visitZExtInst(ZExtInst &I) {
void Verifier::visitSExtInst(SExtInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
@@ -890,8 +890,8 @@ void Verifier::visitSExtInst(SExtInst &I) {
void Verifier::visitFPTruncInst(FPTruncInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
unsigned DestBitSize = DestTy->getScalarSizeInBits();
@@ -907,8 +907,8 @@ void Verifier::visitFPTruncInst(FPTruncInst &I) {
void Verifier::visitFPExtInst(FPExtInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
@@ -925,8 +925,8 @@ void Verifier::visitFPExtInst(FPExtInst &I) {
void Verifier::visitUIToFPInst(UIToFPInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
bool SrcVec = SrcTy->isVectorTy();
bool DstVec = DestTy->isVectorTy();
@@ -948,8 +948,8 @@ void Verifier::visitUIToFPInst(UIToFPInst &I) {
void Verifier::visitSIToFPInst(SIToFPInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
bool SrcVec = SrcTy->isVectorTy();
bool DstVec = DestTy->isVectorTy();
@@ -971,8 +971,8 @@ void Verifier::visitSIToFPInst(SIToFPInst &I) {
void Verifier::visitFPToUIInst(FPToUIInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
bool SrcVec = SrcTy->isVectorTy();
bool DstVec = DestTy->isVectorTy();
@@ -994,8 +994,8 @@ void Verifier::visitFPToUIInst(FPToUIInst &I) {
void Verifier::visitFPToSIInst(FPToSIInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
bool SrcVec = SrcTy->isVectorTy();
bool DstVec = DestTy->isVectorTy();
@@ -1017,8 +1017,8 @@ void Verifier::visitFPToSIInst(FPToSIInst &I) {
void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
Assert1(SrcTy->isPointerTy(), "PtrToInt source must be pointer", &I);
Assert1(DestTy->isIntegerTy(), "PtrToInt result must be integral", &I);
@@ -1028,8 +1028,8 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
Assert1(SrcTy->isIntegerTy(), "IntToPtr source must be an integral", &I);
Assert1(DestTy->isPointerTy(), "IntToPtr result must be a pointer",&I);
@@ -1039,8 +1039,8 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
void Verifier::visitBitCastInst(BitCastInst &I) {
// Get the source and destination types
- const Type *SrcTy = I.getOperand(0)->getType();
- const Type *DestTy = I.getType();
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
// Get the size of the types in bits, we'll need this later
unsigned SrcBitSize = SrcTy->getPrimitiveSizeInBits();
@@ -1090,11 +1090,11 @@ void Verifier::VerifyCallSite(CallSite CS) {
Assert1(CS.getCalledValue()->getType()->isPointerTy(),
"Called function must be a pointer!", I);
- const PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+ PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
Assert1(FPTy->getElementType()->isFunctionTy(),
"Called function is not pointer to function type!", I);
- const FunctionType *FTy = cast<FunctionType>(FPTy->getElementType());
+ FunctionType *FTy = cast<FunctionType>(FPTy->getElementType());
// Verify that the correct number of arguments are being passed
if (FTy->isVarArg())
@@ -1219,8 +1219,8 @@ void Verifier::visitBinaryOperator(BinaryOperator &B) {
void Verifier::visitICmpInst(ICmpInst &IC) {
// Check that the operands are the same type
- const Type *Op0Ty = IC.getOperand(0)->getType();
- const Type *Op1Ty = IC.getOperand(1)->getType();
+ Type *Op0Ty = IC.getOperand(0)->getType();
+ Type *Op1Ty = IC.getOperand(1)->getType();
Assert1(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
@@ -1236,8 +1236,8 @@ void Verifier::visitICmpInst(ICmpInst &IC) {
void Verifier::visitFCmpInst(FCmpInst &FC) {
// Check that the operands are the same type
- const Type *Op0Ty = FC.getOperand(0)->getType();
- const Type *Op1Ty = FC.getOperand(1)->getType();
+ Type *Op0Ty = FC.getOperand(0)->getType();
+ Type *Op1Ty = FC.getOperand(1)->getType();
Assert1(Op0Ty == Op1Ty,
"Both operands to FCmp instruction are not of the same type!", &FC);
// Check that the operands are the right type
@@ -1275,7 +1275,7 @@ void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
- const Type *ElTy =
+ Type *ElTy =
GetElementPtrInst::getIndexedType(GEP.getOperand(0)->getType(),
Idxs.begin(), Idxs.end());
Assert1(ElTy, "Invalid indices for GEP pointer type!", &GEP);
@@ -1286,18 +1286,18 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
void Verifier::visitLoadInst(LoadInst &LI) {
- const PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
+ PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
Assert1(PTy, "Load operand must be a pointer.", &LI);
- const Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getElementType();
Assert2(ElTy == LI.getType(),
"Load result type does not match pointer operand type!", &LI, ElTy);
visitInstruction(LI);
}
void Verifier::visitStoreInst(StoreInst &SI) {
- const PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
+ PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
Assert1(PTy, "Store operand must be a pointer.", &SI);
- const Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getElementType();
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
&SI, ElTy);
@@ -1305,7 +1305,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
}
void Verifier::visitAllocaInst(AllocaInst &AI) {
- const PointerType *PTy = AI.getType();
+ PointerType *PTy = AI.getType();
Assert1(PTy->getAddressSpace() == 0,
"Allocation instruction pointer not in the generic address space!",
&AI);
@@ -1588,20 +1588,20 @@ static std::string IntrinsicParam(unsigned ArgNo, unsigned NumRets) {
return "Intrinsic result type #" + utostr(ArgNo);
}
-bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
+bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
int VT, unsigned ArgNo, std::string &Suffix) {
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();
unsigned NumElts = 0;
- const Type *EltTy = Ty;
- const VectorType *VTy = dyn_cast<VectorType>(Ty);
+ Type *EltTy = Ty;
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
if (VTy) {
EltTy = VTy->getElementType();
NumElts = VTy->getNumElements();
}
- const Type *RetTy = FTy->getReturnType();
- const StructType *ST = dyn_cast<StructType>(RetTy);
+ Type *RetTy = FTy->getReturnType();
+ StructType *ST = dyn_cast<StructType>(RetTy);
unsigned NumRetVals;
if (RetTy->isVoidTy())
NumRetVals = 0;
@@ -1618,7 +1618,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
// type.
if ((Match & (ExtendedElementVectorType |
TruncatedElementVectorType)) != 0) {
- const IntegerType *IEltTy = dyn_cast<IntegerType>(EltTy);
+ IntegerType *IEltTy = dyn_cast<IntegerType>(EltTy);
if (!VTy || !IEltTy) {
CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
"an integral vector type.", F);
@@ -1709,7 +1709,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
// Outside of TableGen, we don't distinguish iPTRAny (to any address space)
// and iPTR. In the verifier, we can not distinguish which case we have so
// allow either case to be legal.
- if (const PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
+ if (PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
EVT PointeeVT = EVT::getEVT(PTyp->getElementType(), true);
if (PointeeVT == MVT::Other) {
CheckFailed("Intrinsic has pointer to complex type.");
@@ -1757,7 +1757,7 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
unsigned NumParams, ...) {
va_list VA;
va_start(VA, NumParams);
- const FunctionType *FTy = F->getFunctionType();
+ FunctionType *FTy = F->getFunctionType();

// For overloaded intrinsics, the Suffix of the function name must match the
// types of the arguments. This variable keeps track of the expected
@@ -1769,8 +1769,8 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
return;
}

- const Type *Ty = FTy->getReturnType();
- const StructType *ST = dyn_cast<StructType>(Ty);
+ Type *Ty = FTy->getReturnType();
+ StructType *ST = dyn_cast<StructType>(Ty);

if (NumRetVals == 0 && !Ty->isVoidTy()) {
CheckFailed("Intrinsic should return void", F);
diff --git a/tools/bugpoint/ExtractFunction.cpp b/tools/bugpoint/ExtractFunction.cpp
index 9941add52e..73b65ca94f 100644
--- a/tools/bugpoint/ExtractFunction.cpp
+++ b/tools/bugpoint/ExtractFunction.cpp
@@ -175,7 +175,7 @@ static Constant *GetTorInit(std::vector<std::pair<Function*, int> > &TorList) {
std::vector<Constant*> ArrayElts;
Type *Int32Ty = Type::getInt32Ty(TorList[0].first->getContext());

- const StructType *STy =
+ StructType *STy =
StructType::get(Int32Ty, TorList[0].first->getType(), NULL);
for (unsigned i = 0, e = TorList.size(); i != e; ++i) {
Constant *Elts[] = {
diff --git a/tools/bugpoint/Miscompilation.cpp b/tools/bugpoint/Miscompilation.cpp
index d645dfb898..4da444bca6 100644
--- a/tools/bugpoint/Miscompilation.cpp
+++ b/tools/bugpoint/Miscompilation.cpp
@@ -384,7 +384,7 @@ static bool ExtractLoops(BugDriver &BD,

outs() << "*** Loop extraction successful!\n";

- std::vector<std::pair<std::string, const FunctionType*> > MisCompFunctions;
+ std::vector<std::pair<std::string, FunctionType*> > MisCompFunctions;
for (Module::iterator I = ToOptimizeLoopExtracted->begin(),
E = ToOptimizeLoopExtracted->end(); I != E; ++I)
if (!I->isDeclaration())
@@ -569,7 +569,7 @@ static bool ExtractBlocks(BugDriver &BD,
// together.
delete ToExtract;

- std::vector<std::pair<std::string, const FunctionType*> > MisCompFunctions;
+ std::vector<std::pair<std::string, FunctionType*> > MisCompFunctions;
for (Module::iterator I = Extracted->begin(), E = Extracted->end();
I != E; ++I)
if (!I->isDeclaration())
@@ -850,7 +850,7 @@ static void CleanupAndPrepareModules(BugDriver &BD, Module *&Test,
NullPtr,F->getName()+".fpcache");

// Construct a new stub function that will re-route calls to F
- const FunctionType *FuncTy = F->getFunctionType();
+ FunctionType *FuncTy = F->getFunctionType();
Function *FuncWrapper = Function::Create(FuncTy,
GlobalValue::InternalLinkage,
F->getName() + "_wrapper",
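The two MisCompFunctions hunks above show the de-constification reaching template arguments, not just plain declarations. A standalone sketch of the element type before and after (FnList is a hypothetical alias, not from the patch); the "> >" spacing is kept because the pre-C++11 compilers of this era would otherwise lex ">>" as a right-shift token:

    #include <string>
    #include <utility>
    #include <vector>
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Before: std::vector<std::pair<std::string, const FunctionType*> >
    // After: the const on the pair's second member is dropped.
    typedef std::vector<std::pair<std::string, FunctionType*> > FnList;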
diff --git a/tools/lto/LTOCodeGenerator.cpp b/tools/lto/LTOCodeGenerator.cpp
index 14594cf553..68cd2bb939 100644
--- a/tools/lto/LTOCodeGenerator.cpp
+++ b/tools/lto/LTOCodeGenerator.cpp
@@ -329,7 +329,7 @@ void LTOCodeGenerator::applyScopeRestrictions() {
if (LLVMCompilerUsed)
LLVMCompilerUsed->eraseFromParent();

- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(_context);
+ llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(_context);
std::vector<Constant*> asmUsed2;
for (SmallPtrSet<GlobalValue*, 16>::const_iterator i = asmUsed.begin(),
e = asmUsed.end(); i !=e; ++i) {
diff --git a/unittests/ADT/SmallVectorTest.cpp b/unittests/ADT/SmallVectorTest.cpp
index 0d3535d6db..d5bfe76800 100644
--- a/unittests/ADT/SmallVectorTest.cpp
+++ b/unittests/ADT/SmallVectorTest.cpp
@@ -383,7 +383,7 @@ TEST_F(SmallVectorTest, ComparisonTest) {

// Constant vector tests.
TEST_F(SmallVectorTest, ConstVectorTest) {
- const VectorType constVector;
+ VectorType constVector;

EXPECT_EQ(0u, constVector.size());
EXPECT_TRUE(constVector.empty());
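This hunk differs in kind from the rest: VectorType here appears to be SmallVectorTest's own typedef for a SmallVector of test elements rather than llvm::VectorType, and the const is removed from an object, not from a pointee. A standalone sketch of what top-level const on such an object means, with int elements standing in for the test's element type:

    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    void constObjects() {
      const SmallVector<int, 4> CV; // const object: only const members,
      (void)CV.empty();             // such as size(), empty(), begin()
      SmallVector<int, 4> V;        // non-const object: mutators allowed
      V.push_back(1);
    }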
diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp
index 39ced2a16e..a09cb1c95e 100644
--- a/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -22,13 +22,13 @@ TEST(ScalarEvolutionsTest, SCEVUnknownRAUW) {
LLVMContext Context;
Module M("world", Context);

- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
std::vector<Type *>(), false);
Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
ReturnInst::Create(Context, 0, BB);

- const Type *Ty = Type::getInt1Ty(Context);
+ Type *Ty = Type::getInt1Ty(Context);
Constant *Init = Constant::getNullValue(Ty);
Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V0");
Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V1");
diff --git a/unittests/ExecutionEngine/ExecutionEngineTest.cpp b/unittests/ExecutionEngine/ExecutionEngineTest.cpp
index 904ee2b6c4..4dcef20c6e 100644
--- a/unittests/ExecutionEngine/ExecutionEngineTest.cpp
+++ b/unittests/ExecutionEngine/ExecutionEngineTest.cpp
@@ -30,7 +30,7 @@ protected:
ASSERT_TRUE(Engine.get() != NULL);
}

- GlobalVariable *NewExtGlobal(const Type *T, const Twine &Name) {
+ GlobalVariable *NewExtGlobal(Type *T, const Twine &Name) {
return new GlobalVariable(*M, T, false, // Not constant.
GlobalValue::ExternalLinkage, NULL, Name);
}
diff --git a/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp b/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
index 039b5e00f2..be5d152c1c 100644
--- a/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
+++ b/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
@@ -22,7 +22,7 @@ namespace {

Function *makeFakeFunction() {
std::vector<Type*> params;
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(Type::getVoidTy(getGlobalContext()), params, false);
return Function::Create(FTy, GlobalValue::ExternalLinkage);
}
diff --git a/unittests/ExecutionEngine/JIT/JITTest.cpp b/unittests/ExecutionEngine/JIT/JITTest.cpp
index 9c001c423f..3cac282022 100644
--- a/unittests/ExecutionEngine/JIT/JITTest.cpp
+++ b/unittests/ExecutionEngine/JIT/JITTest.cpp
@@ -38,13 +38,13 @@ namespace {

Function *makeReturnGlobal(std::string Name, GlobalVariable *G, Module *M) {
std::vector<Type*> params;
- const FunctionType *FTy = FunctionType::get(G->getType()->getElementType(),
+ FunctionType *FTy = FunctionType::get(G->getType()->getElementType(),
params, false);
Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage, Name, M);
BasicBlock *Entry = BasicBlock::Create(M->getContext(), "entry", F);
IRBuilder<> builder(Entry);
Value *Load = builder.CreateLoad(G);
- const Type *GTy = G->getType()->getElementType();
+ Type *GTy = G->getType()->getElementType();
Value *Add = builder.CreateAdd(Load, ConstantInt::get(GTy, 1LL));
builder.CreateStore(Add, G);
builder.CreateRet(Add);
@@ -236,7 +236,7 @@ TEST(JIT, GlobalInFunction) {
ASSERT_EQ(Error, "");

// Create a global variable.
- const Type *GTy = Type::getInt32Ty(context);
+ Type *GTy = Type::getInt32Ty(context);
GlobalVariable *G = new GlobalVariable(
*M,
GTy,
@@ -320,11 +320,11 @@ TEST_F(JITTest, FarCallToKnownFunction) {
TEST_F(JITTest, NonLazyCompilationStillNeedsStubs) {
TheJIT->DisableLazyCompilation(true);

- const FunctionType *Func1Ty =
+ FunctionType *Func1Ty =
cast<FunctionType>(TypeBuilder<void(void), false>::get(Context));
std::vector<Type*> arg_types;
arg_types.push_back(Type::getInt1Ty(Context));
- const FunctionType *FuncTy = FunctionType::get(
+ FunctionType *FuncTy = FunctionType::get(
Type::getVoidTy(Context), arg_types, false);
Function *Func1 = Function::Create(Func1Ty, Function::ExternalLinkage,
"func1", M);
@@ -377,7 +377,7 @@ TEST_F(JITTest, NonLazyLeaksNoStubs) {
TheJIT->DisableLazyCompilation(true);

// Create two functions with a single basic block each.
- const FunctionType *FuncTy =
+ FunctionType *FuncTy =
cast<FunctionType>(TypeBuilder<int(), false>::get(Context));
Function *Func1 = Function::Create(FuncTy, Function::ExternalLinkage,
"func1", M);
diff --git a/unittests/Support/TypeBuilderTest.cpp b/unittests/Support/TypeBuilderTest.cpp
index 06091784cf..6e3fbc231f 100644
--- a/unittests/Support/TypeBuilderTest.cpp
+++ b/unittests/Support/TypeBuilderTest.cpp
@@ -184,14 +184,14 @@ class MyPortableType {
namespace llvm {
template<bool cross> class TypeBuilder<MyType, cross> {
public:
- static const StructType *get(LLVMContext &Context) {
+ static StructType *get(LLVMContext &Context) {
// Using the static result variable ensures that the type is
// only looked up once.
std::vector<Type*> st;
st.push_back(TypeBuilder<int, cross>::get(Context));
st.push_back(TypeBuilder<int*, cross>::get(Context));
st.push_back(TypeBuilder<void*[], cross>::get(Context));
- static const StructType *const result = StructType::get(Context, st);
+ static StructType *const result = StructType::get(Context, st);
return result;
}

@@ -207,14 +207,14 @@

template<bool cross> class TypeBuilder<MyPortableType, cross> {
public:
- static const StructType *get(LLVMContext &Context) {
+ static StructType *get(LLVMContext &Context) {
// Using the static result variable ensures that the type is
// only looked up once.
std::vector<Type*> st;
st.push_back(TypeBuilder<types::i<32>, cross>::get(Context));
st.push_back(TypeBuilder<types::i<32>*, cross>::get(Context));
st.push_back(TypeBuilder<types::i<8>*[], cross>::get(Context));
- static const StructType *const result = StructType::get(Context, st);
+ static StructType *const result = StructType::get(Context, st);
return result;
}
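For client code, the visible API change in these TypeBuilder hunks is the return type: get() now hands back StructType * instead of const StructType *, while the statically cached pointer itself remains const. A minimal sketch of a specialization written against the new signature (MyPair is a hypothetical client type, not part of the patch):

    #include <vector>
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Support/TypeBuilder.h"

    struct MyPair { int First; int Second; };

    namespace llvm {
    template<bool cross> class TypeBuilder<MyPair, cross> {
    public:
      static StructType *get(LLVMContext &Context) {  // non-const result
        std::vector<Type*> Fields;
        Fields.push_back(TypeBuilder<int, cross>::get(Context));
        Fields.push_back(TypeBuilder<int, cross>::get(Context));
        // Only the pointee type changed; the cached pointer stays const.
        static StructType *const result = StructType::get(Context, Fields);
        return result;
      }
    };
    } // namespace llvm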
diff --git a/unittests/VMCore/ConstantsTest.cpp b/unittests/VMCore/ConstantsTest.cpp
index 8277584ba2..623ea0d102 100644
--- a/unittests/VMCore/ConstantsTest.cpp
+++ b/unittests/VMCore/ConstantsTest.cpp
@@ -16,7 +16,7 @@ namespace llvm {
namespace {

TEST(ConstantsTest, Integer_i1) {
- const IntegerType* Int1 = IntegerType::get(getGlobalContext(), 1);
+ IntegerType* Int1 = IntegerType::get(getGlobalContext(), 1);
Constant* One = ConstantInt::get(Int1, 1, true);
Constant* Zero = ConstantInt::get(Int1, 0);
Constant* NegOne = ConstantInt::get(Int1, static_cast<uint64_t>(-1), true);
@@ -97,7 +97,7 @@ TEST(ConstantsTest, Integer_i1) {
}

TEST(ConstantsTest, IntSigns) {
- const IntegerType* Int8Ty = Type::getInt8Ty(getGlobalContext());
+ IntegerType* Int8Ty = Type::getInt8Ty(getGlobalContext());
EXPECT_EQ(100, ConstantInt::get(Int8Ty, 100, false)->getSExtValue());
EXPECT_EQ(100, ConstantInt::get(Int8Ty, 100, true)->getSExtValue());
EXPECT_EQ(100, ConstantInt::getSigned(Int8Ty, 100)->getSExtValue());
@@ -110,9 +110,9 @@ TEST(ConstantsTest, IntSigns) {
}

TEST(ConstantsTest, FP128Test) {
- const Type *FP128Ty = Type::getFP128Ty(getGlobalContext());
+ Type *FP128Ty = Type::getFP128Ty(getGlobalContext());

- const IntegerType *Int128Ty = Type::getIntNTy(getGlobalContext(), 128);
+ IntegerType *Int128Ty = Type::getIntNTy(getGlobalContext(), 128);
Constant *Zero128 = Constant::getNullValue(Int128Ty);
Constant *X = ConstantExpr::getUIToFP(Zero128, FP128Ty);
EXPECT_TRUE(isa<ConstantFP>(X));
diff --git a/unittests/VMCore/InstructionsTest.cpp b/unittests/VMCore/InstructionsTest.cpp
index 9624b816a8..0a01538899 100644
--- a/unittests/VMCore/InstructionsTest.cpp
+++ b/unittests/VMCore/InstructionsTest.cpp
@@ -26,7 +26,7 @@ TEST(InstructionsTest, ReturnInst) {
EXPECT_EQ(r0->getNumOperands(), 0U);
EXPECT_EQ(r0->op_begin(), r0->op_end());

- const IntegerType* Int1 = IntegerType::get(C, 1);
+ IntegerType* Int1 = IntegerType::get(C, 1);
Constant* One = ConstantInt::get(Int1, 1, true);
const ReturnInst* r1 = ReturnInst::Create(C, One);
EXPECT_EQ(r1->getNumOperands(), 1U);
@@ -64,7 +64,7 @@ TEST(InstructionsTest, BranchInst) {

EXPECT_EQ(llvm::next(b0->op_begin()), b0->op_end());

- const IntegerType* Int1 = IntegerType::get(C, 1);
+ IntegerType* Int1 = IntegerType::get(C, 1);
Constant* One = ConstantInt::get(Int1, 1, true);

// Conditional BranchInst
@@ -111,11 +111,11 @@ TEST(InstructionsTest, BranchInst) {
TEST(InstructionsTest, CastInst) {
LLVMContext &C(getGlobalContext());

- const Type* Int8Ty = Type::getInt8Ty(C);
- const Type* Int64Ty = Type::getInt64Ty(C);
- const Type* V8x8Ty = VectorType::get(Int8Ty, 8);
- const Type* V8x64Ty = VectorType::get(Int64Ty, 8);
- const Type* X86MMXTy = Type::getX86_MMXTy(C);
+ Type* Int8Ty = Type::getInt8Ty(C);
+ Type* Int64Ty = Type::getInt64Ty(C);
+ Type* V8x8Ty = VectorType::get(Int8Ty, 8);
+ Type* V8x64Ty = VectorType::get(Int64Ty, 8);
+ Type* X86MMXTy = Type::getX86_MMXTy(C);

const Constant* c8 = Constant::getNullValue(V8x8Ty);
const Constant* c64 = Constant::getNullValue(V8x64Ty);
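One detail worth noticing in the CastInst hunk: only Type and its subclasses are de-constified. The const Constant* locals just below the change keep their qualifier, since constness of a Value is independent of the constness of its Type. A standalone sketch of the resulting mix:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    void mixedConstness(LLVMContext &C) {
      Type *Int8Ty = Type::getInt8Ty(C);          // Type: now non-const
      Type *V8x8Ty = VectorType::get(Int8Ty, 8);  // Type: now non-const
      const Constant *C8 = Constant::getNullValue(V8x8Ty); // Value: still const
      (void)C8;
    }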
diff --git a/unittests/VMCore/VerifierTest.cpp b/unittests/VMCore/VerifierTest.cpp
index 1924661200..324b4e193b 100644
--- a/unittests/VMCore/VerifierTest.cpp
+++ b/unittests/VMCore/VerifierTest.cpp
@@ -47,7 +47,7 @@ TEST(VerifierTest, Branch_i1) {
TEST(VerifierTest, AliasUnnamedAddr) {
LLVMContext &C = getGlobalContext();
Module M("M", C);
- const Type *Ty = Type::getInt8Ty(C);
+ Type *Ty = Type::getInt8Ty(C);
Constant *Init = Constant::getNullValue(Ty);
GlobalVariable *Aliasee = new GlobalVariable(M, Ty, true,
GlobalValue::ExternalLinkage,