author | Alexander Kornienko <alexfh@google.com> | 2013-03-14 10:51:38 +0000 |
---|---|---|
committer | Alexander Kornienko <alexfh@google.com> | 2013-03-14 10:51:38 +0000 |
commit | 647735c781c5b37061ee03d6e9e6c7dda92218e2 (patch) | |
tree | 5a5e56606d41060263048b5a5586b3d2380898ba /lib/IR | |
parent | 6aed25d93d1cfcde5809a73ffa7dc1b0d6396f66 (diff) | |
parent | f635ef401786c84df32090251a8cf45981ecca33 (diff) | |
Updating branches/google/stable to r176857
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/google/stable@177040 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/IR')
47 files changed, 31324 insertions, 0 deletions
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
new file mode 100644
index 0000000000..fb591a891d
--- /dev/null
+++ b/lib/IR/AsmWriter.cpp
@@ -0,0 +1,2236 @@
+//===-- AsmWriter.cpp - Printing LLVM as an assembly file -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This library implements the functionality defined in llvm/Assembly/Writer.h
+//
+// Note that these routines must be extremely tolerant of various errors in the
+// LLVM code, because it can be used for debugging transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Assembly/Writer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Assembly/AssemblyAnnotationWriter.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cctype>
+using namespace llvm;
+
+// Make virtual table appear in this compilation unit.
+AssemblyAnnotationWriter::~AssemblyAnnotationWriter() {}
+
+//===----------------------------------------------------------------------===//
+// Helper Functions
+//===----------------------------------------------------------------------===//
+
+static const Module *getModuleFromVal(const Value *V) {
+  if (const Argument *MA = dyn_cast<Argument>(V))
+    return MA->getParent() ? MA->getParent()->getParent() : 0;
+
+  if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+    return BB->getParent() ? BB->getParent()->getParent() : 0;
+
+  if (const Instruction *I = dyn_cast<Instruction>(V)) {
+    const Function *M = I->getParent() ? I->getParent()->getParent() : 0;
+    return M ? M->getParent() : 0;
+  }
+
+  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+    return GV->getParent();
+  return 0;
+}
+
+static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
+  switch (cc) {
+  default: Out << "cc" << cc; break;
+  case CallingConv::Fast: Out << "fastcc"; break;
+  case CallingConv::Cold: Out << "coldcc"; break;
+  case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
+  case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
+  case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
+  case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break;
+  case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
+  case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
+  case CallingConv::ARM_AAPCS_VFP: Out << "arm_aapcs_vfpcc"; break;
+  case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break;
+  case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break;
+  case CallingConv::PTX_Device: Out << "ptx_device"; break;
+  }
+}
+
+// PrintEscapedString - Print each character of the specified string, escaping
+// it if it is not printable or if it is an escape char.
+static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
+  for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+    unsigned char C = Name[i];
+    if (isprint(C) && C != '\\' && C != '"')
+      Out << C;
+    else
+      Out << '\\' << hexdigit(C >> 4) << hexdigit(C & 0x0F);
+  }
+}
+
+enum PrefixType {
+  GlobalPrefix,
+  LabelPrefix,
+  LocalPrefix,
+  NoPrefix
+};
+
+/// PrintLLVMName - Turn the specified name into an 'LLVM name', which is either
+/// prefixed with % (if the string only contains simple characters) or is
+/// surrounded with ""'s (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
+  assert(!Name.empty() && "Cannot get empty name!");
+  switch (Prefix) {
+  case NoPrefix: break;
+  case GlobalPrefix: OS << '@'; break;
+  case LabelPrefix: break;
+  case LocalPrefix: OS << '%'; break;
+  }
+
+  // Scan the name to see if it needs quotes first.
+  bool NeedsQuotes = isdigit(static_cast<unsigned char>(Name[0]));
+  if (!NeedsQuotes) {
+    for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+      // By making this unsigned, the value passed in to isalnum will always be
+      // in the range 0-255. This is important when building with MSVC because
+      // its implementation will assert. This situation can arise when dealing
+      // with UTF-8 multibyte characters.
+      unsigned char C = Name[i];
+      if (!isalnum(static_cast<unsigned char>(C)) && C != '-' && C != '.' &&
+          C != '_') {
+        NeedsQuotes = true;
+        break;
+      }
+    }
+  }
+
+  // If we didn't need any quotes, just write out the name in one blast.
+  if (!NeedsQuotes) {
+    OS << Name;
+    return;
+  }
+
+  // Okay, we need quotes. Output the quotes and escape any scary characters as
+  // needed.
+  OS << '"';
+  PrintEscapedString(Name, OS);
+  OS << '"';
+}
+
+/// PrintLLVMName - Turn the specified name into an 'LLVM name', which is either
+/// prefixed with % (if the string only contains simple characters) or is
+/// surrounded with ""'s (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, const Value *V) {
+  PrintLLVMName(OS, V->getName(),
+                isa<GlobalValue>(V) ? GlobalPrefix : LocalPrefix);
+}
+
+//===----------------------------------------------------------------------===//
+// TypePrinting Class: Type printing machinery
+//===----------------------------------------------------------------------===//
+
+/// TypePrinting - Type printing machinery.
+namespace { +class TypePrinting { + TypePrinting(const TypePrinting &) LLVM_DELETED_FUNCTION; + void operator=(const TypePrinting&) LLVM_DELETED_FUNCTION; +public: + + /// NamedTypes - The named types that are used by the current module. + TypeFinder NamedTypes; + + /// NumberedTypes - The numbered types, along with their value. + DenseMap<StructType*, unsigned> NumberedTypes; + + + TypePrinting() {} + ~TypePrinting() {} + + void incorporateTypes(const Module &M); + + void print(Type *Ty, raw_ostream &OS); + + void printStructBody(StructType *Ty, raw_ostream &OS); +}; +} // end anonymous namespace. + + +void TypePrinting::incorporateTypes(const Module &M) { + NamedTypes.run(M, false); + + // The list of struct types we got back includes all the struct types, split + // the unnamed ones out to a numbering and remove the anonymous structs. + unsigned NextNumber = 0; + + std::vector<StructType*>::iterator NextToUse = NamedTypes.begin(), I, E; + for (I = NamedTypes.begin(), E = NamedTypes.end(); I != E; ++I) { + StructType *STy = *I; + + // Ignore anonymous types. + if (STy->isLiteral()) + continue; + + if (STy->getName().empty()) + NumberedTypes[STy] = NextNumber++; + else + *NextToUse++ = STy; + } + + NamedTypes.erase(NextToUse, NamedTypes.end()); +} + + +/// CalcTypeName - Write the specified type to the specified raw_ostream, making +/// use of type names or up references to shorten the type name where possible. +void TypePrinting::print(Type *Ty, raw_ostream &OS) { + switch (Ty->getTypeID()) { + case Type::VoidTyID: OS << "void"; break; + case Type::HalfTyID: OS << "half"; break; + case Type::FloatTyID: OS << "float"; break; + case Type::DoubleTyID: OS << "double"; break; + case Type::X86_FP80TyID: OS << "x86_fp80"; break; + case Type::FP128TyID: OS << "fp128"; break; + case Type::PPC_FP128TyID: OS << "ppc_fp128"; break; + case Type::LabelTyID: OS << "label"; break; + case Type::MetadataTyID: OS << "metadata"; break; + case Type::X86_MMXTyID: OS << "x86_mmx"; break; + case Type::IntegerTyID: + OS << 'i' << cast<IntegerType>(Ty)->getBitWidth(); + return; + + case Type::FunctionTyID: { + FunctionType *FTy = cast<FunctionType>(Ty); + print(FTy->getReturnType(), OS); + OS << " ("; + for (FunctionType::param_iterator I = FTy->param_begin(), + E = FTy->param_end(); I != E; ++I) { + if (I != FTy->param_begin()) + OS << ", "; + print(*I, OS); + } + if (FTy->isVarArg()) { + if (FTy->getNumParams()) OS << ", "; + OS << "..."; + } + OS << ')'; + return; + } + case Type::StructTyID: { + StructType *STy = cast<StructType>(Ty); + + if (STy->isLiteral()) + return printStructBody(STy, OS); + + if (!STy->getName().empty()) + return PrintLLVMName(OS, STy->getName(), LocalPrefix); + + DenseMap<StructType*, unsigned>::iterator I = NumberedTypes.find(STy); + if (I != NumberedTypes.end()) + OS << '%' << I->second; + else // Not enumerated, print the hex address. 
+ OS << "%\"type " << STy << '\"'; + return; + } + case Type::PointerTyID: { + PointerType *PTy = cast<PointerType>(Ty); + print(PTy->getElementType(), OS); + if (unsigned AddressSpace = PTy->getAddressSpace()) + OS << " addrspace(" << AddressSpace << ')'; + OS << '*'; + return; + } + case Type::ArrayTyID: { + ArrayType *ATy = cast<ArrayType>(Ty); + OS << '[' << ATy->getNumElements() << " x "; + print(ATy->getElementType(), OS); + OS << ']'; + return; + } + case Type::VectorTyID: { + VectorType *PTy = cast<VectorType>(Ty); + OS << "<" << PTy->getNumElements() << " x "; + print(PTy->getElementType(), OS); + OS << '>'; + return; + } + default: + OS << "<unrecognized-type>"; + return; + } +} + +void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) { + if (STy->isOpaque()) { + OS << "opaque"; + return; + } + + if (STy->isPacked()) + OS << '<'; + + if (STy->getNumElements() == 0) { + OS << "{}"; + } else { + StructType::element_iterator I = STy->element_begin(); + OS << "{ "; + print(*I++, OS); + for (StructType::element_iterator E = STy->element_end(); I != E; ++I) { + OS << ", "; + print(*I, OS); + } + + OS << " }"; + } + if (STy->isPacked()) + OS << '>'; +} + + + +//===----------------------------------------------------------------------===// +// SlotTracker Class: Enumerate slot numbers for unnamed values +//===----------------------------------------------------------------------===// + +namespace { + +/// This class provides computation of slot numbers for LLVM Assembly writing. +/// +class SlotTracker { +public: + /// ValueMap - A mapping of Values to slot numbers. + typedef DenseMap<const Value*, unsigned> ValueMap; + +private: + /// TheModule - The module for which we are holding slot numbers. + const Module* TheModule; + + /// TheFunction - The function for which we are holding slot numbers. + const Function* TheFunction; + bool FunctionProcessed; + + /// mMap - The slot map for the module level data. + ValueMap mMap; + unsigned mNext; + + /// fMap - The slot map for the function level data. + ValueMap fMap; + unsigned fNext; + + /// mdnMap - Map for MDNodes. + DenseMap<const MDNode*, unsigned> mdnMap; + unsigned mdnNext; + + /// asMap - The slot map for attribute sets. + DenseMap<AttributeSet, unsigned> asMap; + unsigned asNext; +public: + /// Construct from a module + explicit SlotTracker(const Module *M); + /// Construct from a function, starting out in incorp state. + explicit SlotTracker(const Function *F); + + /// Return the slot number of the specified value in it's type + /// plane. If something is not in the SlotTracker, return -1. + int getLocalSlot(const Value *V); + int getGlobalSlot(const GlobalValue *V); + int getMetadataSlot(const MDNode *N); + int getAttributeGroupSlot(AttributeSet AS); + + /// If you'd like to deal with a function instead of just a module, use + /// this method to get its data into the SlotTracker. + void incorporateFunction(const Function *F) { + TheFunction = F; + FunctionProcessed = false; + } + + /// After calling incorporateFunction, use this method to remove the + /// most recently incorporated function from the SlotTracker. This + /// will reset the state of the machine back to just the module contents. + void purgeFunction(); + + /// MDNode map iterators. 
+ typedef DenseMap<const MDNode*, unsigned>::iterator mdn_iterator; + mdn_iterator mdn_begin() { return mdnMap.begin(); } + mdn_iterator mdn_end() { return mdnMap.end(); } + unsigned mdn_size() const { return mdnMap.size(); } + bool mdn_empty() const { return mdnMap.empty(); } + + /// AttributeSet map iterators. + typedef DenseMap<AttributeSet, unsigned>::iterator as_iterator; + as_iterator as_begin() { return asMap.begin(); } + as_iterator as_end() { return asMap.end(); } + unsigned as_size() const { return asMap.size(); } + bool as_empty() const { return asMap.empty(); } + + /// This function does the actual initialization. + inline void initialize(); + + // Implementation Details +private: + /// CreateModuleSlot - Insert the specified GlobalValue* into the slot table. + void CreateModuleSlot(const GlobalValue *V); + + /// CreateMetadataSlot - Insert the specified MDNode* into the slot table. + void CreateMetadataSlot(const MDNode *N); + + /// CreateFunctionSlot - Insert the specified Value* into the slot table. + void CreateFunctionSlot(const Value *V); + + /// \brief Insert the specified AttributeSet into the slot table. + void CreateAttributeSetSlot(AttributeSet AS); + + /// Add all of the module level global variables (and their initializers) + /// and function declarations, but not the contents of those functions. + void processModule(); + + /// Add all of the functions arguments, basic blocks, and instructions. + void processFunction(); + + SlotTracker(const SlotTracker &) LLVM_DELETED_FUNCTION; + void operator=(const SlotTracker &) LLVM_DELETED_FUNCTION; +}; + +} // end anonymous namespace + + +static SlotTracker *createSlotTracker(const Value *V) { + if (const Argument *FA = dyn_cast<Argument>(V)) + return new SlotTracker(FA->getParent()); + + if (const Instruction *I = dyn_cast<Instruction>(V)) + if (I->getParent()) + return new SlotTracker(I->getParent()->getParent()); + + if (const BasicBlock *BB = dyn_cast<BasicBlock>(V)) + return new SlotTracker(BB->getParent()); + + if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) + return new SlotTracker(GV->getParent()); + + if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) + return new SlotTracker(GA->getParent()); + + if (const Function *Func = dyn_cast<Function>(V)) + return new SlotTracker(Func); + + if (const MDNode *MD = dyn_cast<MDNode>(V)) { + if (!MD->isFunctionLocal()) + return new SlotTracker(MD->getFunction()); + + return new SlotTracker((Function *)0); + } + + return 0; +} + +#if 0 +#define ST_DEBUG(X) dbgs() << X +#else +#define ST_DEBUG(X) +#endif + +// Module level constructor. Causes the contents of the Module (sans functions) +// to be added to the slot table. +SlotTracker::SlotTracker(const Module *M) + : TheModule(M), TheFunction(0), FunctionProcessed(false), + mNext(0), fNext(0), mdnNext(0), asNext(0) { +} + +// Function level constructor. Causes the contents of the Module and the one +// function provided to be added to the slot table. +SlotTracker::SlotTracker(const Function *F) + : TheModule(F ? F->getParent() : 0), TheFunction(F), FunctionProcessed(false), + mNext(0), fNext(0), mdnNext(0), asNext(0) { +} + +inline void SlotTracker::initialize() { + if (TheModule) { + processModule(); + TheModule = 0; ///< Prevent re-processing next time we're called. + } + + if (TheFunction && !FunctionProcessed) + processFunction(); +} + +// Iterate through all the global variables, functions, and global +// variable initializers and create slots for them. 
+void SlotTracker::processModule() { + ST_DEBUG("begin processModule!\n"); + + // Add all of the unnamed global variables to the value table. + for (Module::const_global_iterator I = TheModule->global_begin(), + E = TheModule->global_end(); I != E; ++I) { + if (!I->hasName()) + CreateModuleSlot(I); + } + + // Add metadata used by named metadata. + for (Module::const_named_metadata_iterator + I = TheModule->named_metadata_begin(), + E = TheModule->named_metadata_end(); I != E; ++I) { + const NamedMDNode *NMD = I; + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) + CreateMetadataSlot(NMD->getOperand(i)); + } + + for (Module::const_iterator I = TheModule->begin(), E = TheModule->end(); + I != E; ++I) { + if (!I->hasName()) + // Add all the unnamed functions to the table. + CreateModuleSlot(I); + + // Add all the function attributes to the table. + // FIXME: Add attributes of other objects? + AttributeSet FnAttrs = I->getAttributes().getFnAttributes(); + if (FnAttrs.hasAttributes(AttributeSet::FunctionIndex)) + CreateAttributeSetSlot(FnAttrs); + } + + ST_DEBUG("end processModule!\n"); +} + +// Process the arguments, basic blocks, and instructions of a function. +void SlotTracker::processFunction() { + ST_DEBUG("begin processFunction!\n"); + fNext = 0; + + // Add all the function arguments with no names. + for(Function::const_arg_iterator AI = TheFunction->arg_begin(), + AE = TheFunction->arg_end(); AI != AE; ++AI) + if (!AI->hasName()) + CreateFunctionSlot(AI); + + ST_DEBUG("Inserting Instructions:\n"); + + SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst; + + // Add all of the basic blocks and instructions with no names. + for (Function::const_iterator BB = TheFunction->begin(), + E = TheFunction->end(); BB != E; ++BB) { + if (!BB->hasName()) + CreateFunctionSlot(BB); + + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; + ++I) { + if (!I->getType()->isVoidTy() && !I->hasName()) + CreateFunctionSlot(I); + + // Intrinsics can directly use metadata. We allow direct calls to any + // llvm.foo function here, because the target may not be linked into the + // optimizer. + if (const CallInst *CI = dyn_cast<CallInst>(I)) { + if (Function *F = CI->getCalledFunction()) + if (F->getName().startswith("llvm.")) + for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) + if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i))) + CreateMetadataSlot(N); + + // Add all the call attributes to the table. + AttributeSet Attrs = CI->getAttributes().getFnAttributes(); + if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) + CreateAttributeSetSlot(Attrs); + } else if (const InvokeInst *II = dyn_cast<InvokeInst>(I)) { + // Add all the call attributes to the table. + AttributeSet Attrs = II->getAttributes().getFnAttributes(); + if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) + CreateAttributeSetSlot(Attrs); + } + + // Process metadata attached with this instruction. + I->getAllMetadata(MDForInst); + for (unsigned i = 0, e = MDForInst.size(); i != e; ++i) + CreateMetadataSlot(MDForInst[i].second); + MDForInst.clear(); + } + } + + FunctionProcessed = true; + + ST_DEBUG("end processFunction!\n"); +} + +/// Clean up after incorporating a function. This is the only way to get out of +/// the function incorporation state that affects get*Slot/Create*Slot. Function +/// incorporation state is indicated by TheFunction != 0. 
+void SlotTracker::purgeFunction() { + ST_DEBUG("begin purgeFunction!\n"); + fMap.clear(); // Simply discard the function level map + TheFunction = 0; + FunctionProcessed = false; + ST_DEBUG("end purgeFunction!\n"); +} + +/// getGlobalSlot - Get the slot number of a global value. +int SlotTracker::getGlobalSlot(const GlobalValue *V) { + // Check for uninitialized state and do lazy initialization. + initialize(); + + // Find the value in the module map + ValueMap::iterator MI = mMap.find(V); + return MI == mMap.end() ? -1 : (int)MI->second; +} + +/// getMetadataSlot - Get the slot number of a MDNode. +int SlotTracker::getMetadataSlot(const MDNode *N) { + // Check for uninitialized state and do lazy initialization. + initialize(); + + // Find the MDNode in the module map + mdn_iterator MI = mdnMap.find(N); + return MI == mdnMap.end() ? -1 : (int)MI->second; +} + + +/// getLocalSlot - Get the slot number for a value that is local to a function. +int SlotTracker::getLocalSlot(const Value *V) { + assert(!isa<Constant>(V) && "Can't get a constant or global slot with this!"); + + // Check for uninitialized state and do lazy initialization. + initialize(); + + ValueMap::iterator FI = fMap.find(V); + return FI == fMap.end() ? -1 : (int)FI->second; +} + +int SlotTracker::getAttributeGroupSlot(AttributeSet AS) { + // Check for uninitialized state and do lazy initialization. + initialize(); + + // Find the AttributeSet in the module map. + as_iterator AI = asMap.find(AS); + return AI == asMap.end() ? -1 : (int)AI->second; +} + +/// CreateModuleSlot - Insert the specified GlobalValue* into the slot table. +void SlotTracker::CreateModuleSlot(const GlobalValue *V) { + assert(V && "Can't insert a null Value into SlotTracker!"); + assert(!V->getType()->isVoidTy() && "Doesn't need a slot!"); + assert(!V->hasName() && "Doesn't need a slot!"); + + unsigned DestSlot = mNext++; + mMap[V] = DestSlot; + + ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" << + DestSlot << " ["); + // G = Global, F = Function, A = Alias, o = other + ST_DEBUG((isa<GlobalVariable>(V) ? 'G' : + (isa<Function>(V) ? 'F' : + (isa<GlobalAlias>(V) ? 'A' : 'o'))) << "]\n"); +} + +/// CreateSlot - Create a new slot for the specified value if it has no name. +void SlotTracker::CreateFunctionSlot(const Value *V) { + assert(!V->getType()->isVoidTy() && !V->hasName() && "Doesn't need a slot!"); + + unsigned DestSlot = fNext++; + fMap[V] = DestSlot; + + // G = Global, F = Function, o = other + ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" << + DestSlot << " [o]\n"); +} + +/// CreateModuleSlot - Insert the specified MDNode* into the slot table. +void SlotTracker::CreateMetadataSlot(const MDNode *N) { + assert(N && "Can't insert a null Value into SlotTracker!"); + + // Don't insert if N is a function-local metadata, these are always printed + // inline. + if (!N->isFunctionLocal()) { + mdn_iterator I = mdnMap.find(N); + if (I != mdnMap.end()) + return; + + unsigned DestSlot = mdnNext++; + mdnMap[N] = DestSlot; + } + + // Recursively add any MDNodes referenced by operands. 
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) + if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i))) + CreateMetadataSlot(Op); +} + +void SlotTracker::CreateAttributeSetSlot(AttributeSet AS) { + assert(AS.hasAttributes(AttributeSet::FunctionIndex) && + "Doesn't need a slot!"); + + as_iterator I = asMap.find(AS); + if (I != asMap.end()) + return; + + unsigned DestSlot = asNext++; + asMap[AS] = DestSlot; +} + +//===----------------------------------------------------------------------===// +// AsmWriter Implementation +//===----------------------------------------------------------------------===// + +static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, + TypePrinting *TypePrinter, + SlotTracker *Machine, + const Module *Context); + + + +static const char *getPredicateText(unsigned predicate) { + const char * pred = "unknown"; + switch (predicate) { + case FCmpInst::FCMP_FALSE: pred = "false"; break; + case FCmpInst::FCMP_OEQ: pred = "oeq"; break; + case FCmpInst::FCMP_OGT: pred = "ogt"; break; + case FCmpInst::FCMP_OGE: pred = "oge"; break; + case FCmpInst::FCMP_OLT: pred = "olt"; break; + case FCmpInst::FCMP_OLE: pred = "ole"; break; + case FCmpInst::FCMP_ONE: pred = "one"; break; + case FCmpInst::FCMP_ORD: pred = "ord"; break; + case FCmpInst::FCMP_UNO: pred = "uno"; break; + case FCmpInst::FCMP_UEQ: pred = "ueq"; break; + case FCmpInst::FCMP_UGT: pred = "ugt"; break; + case FCmpInst::FCMP_UGE: pred = "uge"; break; + case FCmpInst::FCMP_ULT: pred = "ult"; break; + case FCmpInst::FCMP_ULE: pred = "ule"; break; + case FCmpInst::FCMP_UNE: pred = "une"; break; + case FCmpInst::FCMP_TRUE: pred = "true"; break; + case ICmpInst::ICMP_EQ: pred = "eq"; break; + case ICmpInst::ICMP_NE: pred = "ne"; break; + case ICmpInst::ICMP_SGT: pred = "sgt"; break; + case ICmpInst::ICMP_SGE: pred = "sge"; break; + case ICmpInst::ICMP_SLT: pred = "slt"; break; + case ICmpInst::ICMP_SLE: pred = "sle"; break; + case ICmpInst::ICMP_UGT: pred = "ugt"; break; + case ICmpInst::ICMP_UGE: pred = "uge"; break; + case ICmpInst::ICMP_ULT: pred = "ult"; break; + case ICmpInst::ICMP_ULE: pred = "ule"; break; + } + return pred; +} + +static void writeAtomicRMWOperation(raw_ostream &Out, + AtomicRMWInst::BinOp Op) { + switch (Op) { + default: Out << " <unknown operation " << Op << ">"; break; + case AtomicRMWInst::Xchg: Out << " xchg"; break; + case AtomicRMWInst::Add: Out << " add"; break; + case AtomicRMWInst::Sub: Out << " sub"; break; + case AtomicRMWInst::And: Out << " and"; break; + case AtomicRMWInst::Nand: Out << " nand"; break; + case AtomicRMWInst::Or: Out << " or"; break; + case AtomicRMWInst::Xor: Out << " xor"; break; + case AtomicRMWInst::Max: Out << " max"; break; + case AtomicRMWInst::Min: Out << " min"; break; + case AtomicRMWInst::UMax: Out << " umax"; break; + case AtomicRMWInst::UMin: Out << " umin"; break; + } +} + +static void WriteOptimizationInfo(raw_ostream &Out, const User *U) { + if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U)) { + // Unsafe algebra implies all the others, no need to write them all out + if (FPO->hasUnsafeAlgebra()) + Out << " fast"; + else { + if (FPO->hasNoNaNs()) + Out << " nnan"; + if (FPO->hasNoInfs()) + Out << " ninf"; + if (FPO->hasNoSignedZeros()) + Out << " nsz"; + if (FPO->hasAllowReciprocal()) + Out << " arcp"; + } + } + + if (const OverflowingBinaryOperator *OBO = + dyn_cast<OverflowingBinaryOperator>(U)) { + if (OBO->hasNoUnsignedWrap()) + Out << " nuw"; + if (OBO->hasNoSignedWrap()) + Out << " nsw"; + } else 
if (const PossiblyExactOperator *Div = + dyn_cast<PossiblyExactOperator>(U)) { + if (Div->isExact()) + Out << " exact"; + } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { + if (GEP->isInBounds()) + Out << " inbounds"; + } +} + +static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, + TypePrinting &TypePrinter, + SlotTracker *Machine, + const Module *Context) { + if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) { + if (CI->getType()->isIntegerTy(1)) { + Out << (CI->getZExtValue() ? "true" : "false"); + return; + } + Out << CI->getValue(); + return; + } + + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) { + if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle || + &CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble) { + // We would like to output the FP constant value in exponential notation, + // but we cannot do this if doing so will lose precision. Check here to + // make sure that we only output it in exponential format if we can parse + // the value back and get the same value. + // + bool ignored; + bool isHalf = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEhalf; + bool isDouble = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEdouble; + bool isInf = CFP->getValueAPF().isInfinity(); + bool isNaN = CFP->getValueAPF().isNaN(); + if (!isHalf && !isInf && !isNaN) { + double Val = isDouble ? CFP->getValueAPF().convertToDouble() : + CFP->getValueAPF().convertToFloat(); + SmallString<128> StrVal; + raw_svector_ostream(StrVal) << Val; + + // Check to make sure that the stringized number is not some string like + // "Inf" or NaN, that atof will accept, but the lexer will not. Check + // that the string matches the "[-+]?[0-9]" regex. + // + if ((StrVal[0] >= '0' && StrVal[0] <= '9') || + ((StrVal[0] == '-' || StrVal[0] == '+') && + (StrVal[1] >= '0' && StrVal[1] <= '9'))) { + // Reparse stringized version! + if (APFloat(APFloat::IEEEdouble, StrVal).convertToDouble() == Val) { + Out << StrVal.str(); + return; + } + } + } + // Otherwise we could not reparse it to exactly the same value, so we must + // output the string in hexadecimal format! Note that loading and storing + // floating point types changes the bits of NaNs on some hosts, notably + // x86, so we must not use these types. + assert(sizeof(double) == sizeof(uint64_t) && + "assuming that double is 64 bits!"); + char Buffer[40]; + APFloat apf = CFP->getValueAPF(); + // Halves and floats are represented in ASCII IR as double, convert. + if (!isDouble) + apf.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, + &ignored); + Out << "0x" << + utohex_buffer(uint64_t(apf.bitcastToAPInt().getZExtValue()), + Buffer+40); + return; + } + + // Either half, or some form of long double. + // These appear as a magic letter identifying the type, then a + // fixed number of hex digits. + Out << "0x"; + // Bit position, in the current word, of the next nibble to print. 
+ int shiftcount; + + if (&CFP->getValueAPF().getSemantics() == &APFloat::x87DoubleExtended) { + Out << 'K'; + // api needed to prevent premature destruction + APInt api = CFP->getValueAPF().bitcastToAPInt(); + const uint64_t* p = api.getRawData(); + uint64_t word = p[1]; + shiftcount = 12; + int width = api.getBitWidth(); + for (int j=0; j<width; j+=4, shiftcount-=4) { + unsigned int nibble = (word>>shiftcount) & 15; + if (nibble < 10) + Out << (unsigned char)(nibble + '0'); + else + Out << (unsigned char)(nibble - 10 + 'A'); + if (shiftcount == 0 && j+4 < width) { + word = *p; + shiftcount = 64; + if (width-j-4 < 64) + shiftcount = width-j-4; + } + } + return; + } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEquad) { + shiftcount = 60; + Out << 'L'; + } else if (&CFP->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble) { + shiftcount = 60; + Out << 'M'; + } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf) { + shiftcount = 12; + Out << 'H'; + } else + llvm_unreachable("Unsupported floating point type"); + // api needed to prevent premature destruction + APInt api = CFP->getValueAPF().bitcastToAPInt(); + const uint64_t* p = api.getRawData(); + uint64_t word = *p; + int width = api.getBitWidth(); + for (int j=0; j<width; j+=4, shiftcount-=4) { + unsigned int nibble = (word>>shiftcount) & 15; + if (nibble < 10) + Out << (unsigned char)(nibble + '0'); + else + Out << (unsigned char)(nibble - 10 + 'A'); + if (shiftcount == 0 && j+4 < width) { + word = *(++p); + shiftcount = 64; + if (width-j-4 < 64) + shiftcount = width-j-4; + } + } + return; + } + + if (isa<ConstantAggregateZero>(CV)) { + Out << "zeroinitializer"; + return; + } + + if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) { + Out << "blockaddress("; + WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine, + Context); + Out << ", "; + WriteAsOperandInternal(Out, BA->getBasicBlock(), &TypePrinter, Machine, + Context); + Out << ")"; + return; + } + + if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) { + Type *ETy = CA->getType()->getElementType(); + Out << '['; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CA->getOperand(0), + &TypePrinter, Machine, + Context); + for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) { + Out << ", "; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine, + Context); + } + Out << ']'; + return; + } + + if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) { + // As a special case, print the array as a string if it is an array of + // i8 with ConstantInt values. 
+ if (CA->isString()) { + Out << "c\""; + PrintEscapedString(CA->getAsString(), Out); + Out << '"'; + return; + } + + Type *ETy = CA->getType()->getElementType(); + Out << '['; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CA->getElementAsConstant(0), + &TypePrinter, Machine, + Context); + for (unsigned i = 1, e = CA->getNumElements(); i != e; ++i) { + Out << ", "; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CA->getElementAsConstant(i), &TypePrinter, + Machine, Context); + } + Out << ']'; + return; + } + + + if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) { + if (CS->getType()->isPacked()) + Out << '<'; + Out << '{'; + unsigned N = CS->getNumOperands(); + if (N) { + Out << ' '; + TypePrinter.print(CS->getOperand(0)->getType(), Out); + Out << ' '; + + WriteAsOperandInternal(Out, CS->getOperand(0), &TypePrinter, Machine, + Context); + + for (unsigned i = 1; i < N; i++) { + Out << ", "; + TypePrinter.print(CS->getOperand(i)->getType(), Out); + Out << ' '; + + WriteAsOperandInternal(Out, CS->getOperand(i), &TypePrinter, Machine, + Context); + } + Out << ' '; + } + + Out << '}'; + if (CS->getType()->isPacked()) + Out << '>'; + return; + } + + if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) { + Type *ETy = CV->getType()->getVectorElementType(); + Out << '<'; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CV->getAggregateElement(0U), &TypePrinter, + Machine, Context); + for (unsigned i = 1, e = CV->getType()->getVectorNumElements(); i != e;++i){ + Out << ", "; + TypePrinter.print(ETy, Out); + Out << ' '; + WriteAsOperandInternal(Out, CV->getAggregateElement(i), &TypePrinter, + Machine, Context); + } + Out << '>'; + return; + } + + if (isa<ConstantPointerNull>(CV)) { + Out << "null"; + return; + } + + if (isa<UndefValue>(CV)) { + Out << "undef"; + return; + } + + if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) { + Out << CE->getOpcodeName(); + WriteOptimizationInfo(Out, CE); + if (CE->isCompare()) + Out << ' ' << getPredicateText(CE->getPredicate()); + Out << " ("; + + for (User::const_op_iterator OI=CE->op_begin(); OI != CE->op_end(); ++OI) { + TypePrinter.print((*OI)->getType(), Out); + Out << ' '; + WriteAsOperandInternal(Out, *OI, &TypePrinter, Machine, Context); + if (OI+1 != CE->op_end()) + Out << ", "; + } + + if (CE->hasIndices()) { + ArrayRef<unsigned> Indices = CE->getIndices(); + for (unsigned i = 0, e = Indices.size(); i != e; ++i) + Out << ", " << Indices[i]; + } + + if (CE->isCast()) { + Out << " to "; + TypePrinter.print(CE->getType(), Out); + } + + Out << ')'; + return; + } + + Out << "<placeholder or erroneous Constant>"; +} + +static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, + TypePrinting *TypePrinter, + SlotTracker *Machine, + const Module *Context) { + Out << "!{"; + for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) { + const Value *V = Node->getOperand(mi); + if (V == 0) + Out << "null"; + else { + TypePrinter->print(V->getType(), Out); + Out << ' '; + WriteAsOperandInternal(Out, Node->getOperand(mi), + TypePrinter, Machine, Context); + } + if (mi + 1 != me) + Out << ", "; + } + + Out << "}"; +} + + +/// WriteAsOperand - Write the name of the specified value out to the specified +/// ostream. This can be useful when you just want to print int %reg126, not +/// the whole instruction that generated it. 
+/// +static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, + TypePrinting *TypePrinter, + SlotTracker *Machine, + const Module *Context) { + if (V->hasName()) { + PrintLLVMName(Out, V); + return; + } + + const Constant *CV = dyn_cast<Constant>(V); + if (CV && !isa<GlobalValue>(CV)) { + assert(TypePrinter && "Constants require TypePrinting!"); + WriteConstantInternal(Out, CV, *TypePrinter, Machine, Context); + return; + } + + if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) { + Out << "asm "; + if (IA->hasSideEffects()) + Out << "sideeffect "; + if (IA->isAlignStack()) + Out << "alignstack "; + // We don't emit the AD_ATT dialect as it's the assumed default. + if (IA->getDialect() == InlineAsm::AD_Intel) + Out << "inteldialect "; + Out << '"'; + PrintEscapedString(IA->getAsmString(), Out); + Out << "\", \""; + PrintEscapedString(IA->getConstraintString(), Out); + Out << '"'; + return; + } + + if (const MDNode *N = dyn_cast<MDNode>(V)) { + if (N->isFunctionLocal()) { + // Print metadata inline, not via slot reference number. + WriteMDNodeBodyInternal(Out, N, TypePrinter, Machine, Context); + return; + } + + if (!Machine) { + if (N->isFunctionLocal()) + Machine = new SlotTracker(N->getFunction()); + else + Machine = new SlotTracker(Context); + } + int Slot = Machine->getMetadataSlot(N); + if (Slot == -1) + Out << "<badref>"; + else + Out << '!' << Slot; + return; + } + + if (const MDString *MDS = dyn_cast<MDString>(V)) { + Out << "!\""; + PrintEscapedString(MDS->getString(), Out); + Out << '"'; + return; + } + + if (V->getValueID() == Value::PseudoSourceValueVal || + V->getValueID() == Value::FixedStackPseudoSourceValueVal) { + V->print(Out); + return; + } + + char Prefix = '%'; + int Slot; + // If we have a SlotTracker, use it. + if (Machine) { + if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + Slot = Machine->getGlobalSlot(GV); + Prefix = '@'; + } else { + Slot = Machine->getLocalSlot(V); + + // If the local value didn't succeed, then we may be referring to a value + // from a different function. Translate it, as this can happen when using + // address of blocks. + if (Slot == -1) + if ((Machine = createSlotTracker(V))) { + Slot = Machine->getLocalSlot(V); + delete Machine; + } + } + } else if ((Machine = createSlotTracker(V))) { + // Otherwise, create one to get the # and then destroy it. + if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + Slot = Machine->getGlobalSlot(GV); + Prefix = '@'; + } else { + Slot = Machine->getLocalSlot(V); + } + delete Machine; + Machine = 0; + } else { + Slot = -1; + } + + if (Slot != -1) + Out << Prefix << Slot; + else + Out << "<badref>"; +} + +void llvm::WriteAsOperand(raw_ostream &Out, const Value *V, + bool PrintType, const Module *Context) { + + // Fast path: Don't construct and populate a TypePrinting object if we + // won't be needing any types printed. 
+ if (!PrintType && + ((!isa<Constant>(V) && !isa<MDNode>(V)) || + V->hasName() || isa<GlobalValue>(V))) { + WriteAsOperandInternal(Out, V, 0, 0, Context); + return; + } + + if (Context == 0) Context = getModuleFromVal(V); + + TypePrinting TypePrinter; + if (Context) + TypePrinter.incorporateTypes(*Context); + if (PrintType) { + TypePrinter.print(V->getType(), Out); + Out << ' '; + } + + WriteAsOperandInternal(Out, V, &TypePrinter, 0, Context); +} + +namespace { + +class AssemblyWriter { + formatted_raw_ostream &Out; + SlotTracker &Machine; + const Module *TheModule; + TypePrinting TypePrinter; + AssemblyAnnotationWriter *AnnotationWriter; + +public: + inline AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, + const Module *M, + AssemblyAnnotationWriter *AAW) + : Out(o), Machine(Mac), TheModule(M), AnnotationWriter(AAW) { + if (M) + TypePrinter.incorporateTypes(*M); + } + + void printMDNodeBody(const MDNode *MD); + void printNamedMDNode(const NamedMDNode *NMD); + + void printModule(const Module *M); + + void writeOperand(const Value *Op, bool PrintType); + void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx); + void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope); + + void writeAllMDNodes(); + void writeAllAttributeGroups(); + + void printTypeIdentities(); + void printGlobal(const GlobalVariable *GV); + void printAlias(const GlobalAlias *GV); + void printFunction(const Function *F); + void printArgument(const Argument *FA, AttributeSet Attrs, unsigned Idx); + void printBasicBlock(const BasicBlock *BB); + void printInstruction(const Instruction &I); + +private: + // printInfoComment - Print a little comment after the instruction indicating + // which slot it occupies. + void printInfoComment(const Value &V); +}; +} // end of anonymous namespace + +void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { + if (Operand == 0) { + Out << "<null operand!>"; + return; + } + if (PrintType) { + TypePrinter.print(Operand->getType(), Out); + Out << ' '; + } + WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule); +} + +void AssemblyWriter::writeAtomic(AtomicOrdering Ordering, + SynchronizationScope SynchScope) { + if (Ordering == NotAtomic) + return; + + switch (SynchScope) { + case SingleThread: Out << " singlethread"; break; + case CrossThread: break; + } + + switch (Ordering) { + default: Out << " <bad ordering " << int(Ordering) << ">"; break; + case Unordered: Out << " unordered"; break; + case Monotonic: Out << " monotonic"; break; + case Acquire: Out << " acquire"; break; + case Release: Out << " release"; break; + case AcquireRelease: Out << " acq_rel"; break; + case SequentiallyConsistent: Out << " seq_cst"; break; + } +} + +void AssemblyWriter::writeParamOperand(const Value *Operand, + AttributeSet Attrs, unsigned Idx) { + if (Operand == 0) { + Out << "<null operand!>"; + return; + } + + // Print the type + TypePrinter.print(Operand->getType(), Out); + // Print parameter attributes list + if (Attrs.hasAttributes(Idx)) + Out << ' ' << Attrs.getAsString(Idx); + Out << ' '; + // Print the operand + WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule); +} + +void AssemblyWriter::printModule(const Module *M) { + Machine.initialize(); + + if (!M->getModuleIdentifier().empty() && + // Don't print the ID if it will start a new line (which would + // require a comment char before it). 
+ M->getModuleIdentifier().find('\n') == std::string::npos) + Out << "; ModuleID = '" << M->getModuleIdentifier() << "'\n"; + + if (!M->getDataLayout().empty()) + Out << "target datalayout = \"" << M->getDataLayout() << "\"\n"; + if (!M->getTargetTriple().empty()) + Out << "target triple = \"" << M->getTargetTriple() << "\"\n"; + + if (!M->getModuleInlineAsm().empty()) { + // Split the string into lines, to make it easier to read the .ll file. + std::string Asm = M->getModuleInlineAsm(); + size_t CurPos = 0; + size_t NewLine = Asm.find_first_of('\n', CurPos); + Out << '\n'; + while (NewLine != std::string::npos) { + // We found a newline, print the portion of the asm string from the + // last newline up to this newline. + Out << "module asm \""; + PrintEscapedString(std::string(Asm.begin()+CurPos, Asm.begin()+NewLine), + Out); + Out << "\"\n"; + CurPos = NewLine+1; + NewLine = Asm.find_first_of('\n', CurPos); + } + std::string rest(Asm.begin()+CurPos, Asm.end()); + if (!rest.empty()) { + Out << "module asm \""; + PrintEscapedString(rest, Out); + Out << "\"\n"; + } + } + + printTypeIdentities(); + + // Output all globals. + if (!M->global_empty()) Out << '\n'; + for (Module::const_global_iterator I = M->global_begin(), E = M->global_end(); + I != E; ++I) { + printGlobal(I); Out << '\n'; + } + + // Output all aliases. + if (!M->alias_empty()) Out << "\n"; + for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end(); + I != E; ++I) + printAlias(I); + + // Output all of the functions. + for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) + printFunction(I); + + // Output all attribute groups. + if (!Machine.as_empty()) { + Out << '\n'; + writeAllAttributeGroups(); + } + + // Output named metadata. + if (!M->named_metadata_empty()) Out << '\n'; + + for (Module::const_named_metadata_iterator I = M->named_metadata_begin(), + E = M->named_metadata_end(); I != E; ++I) + printNamedMDNode(I); + + // Output metadata. + if (!Machine.mdn_empty()) { + Out << '\n'; + writeAllMDNodes(); + } +} + +void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) { + Out << '!'; + StringRef Name = NMD->getName(); + if (Name.empty()) { + Out << "<empty name> "; + } else { + if (isalpha(static_cast<unsigned char>(Name[0])) || + Name[0] == '-' || Name[0] == '$' || + Name[0] == '.' || Name[0] == '_') + Out << Name[0]; + else + Out << '\\' << hexdigit(Name[0] >> 4) << hexdigit(Name[0] & 0x0F); + for (unsigned i = 1, e = Name.size(); i != e; ++i) { + unsigned char C = Name[i]; + if (isalnum(static_cast<unsigned char>(C)) || C == '-' || C == '$' || + C == '.' || C == '_') + Out << C; + else + Out << '\\' << hexdigit(C >> 4) << hexdigit(C & 0x0F); + } + } + Out << " = !{"; + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + if (i) Out << ", "; + int Slot = Machine.getMetadataSlot(NMD->getOperand(i)); + if (Slot == -1) + Out << "<badref>"; + else + Out << '!' 
<< Slot; + } + Out << "}\n"; +} + + +static void PrintLinkage(GlobalValue::LinkageTypes LT, + formatted_raw_ostream &Out) { + switch (LT) { + case GlobalValue::ExternalLinkage: break; + case GlobalValue::PrivateLinkage: Out << "private "; break; + case GlobalValue::LinkerPrivateLinkage: Out << "linker_private "; break; + case GlobalValue::LinkerPrivateWeakLinkage: + Out << "linker_private_weak "; + break; + case GlobalValue::InternalLinkage: Out << "internal "; break; + case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break; + case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break; + case GlobalValue::LinkOnceODRAutoHideLinkage: + Out << "linkonce_odr_auto_hide "; + break; + case GlobalValue::WeakAnyLinkage: Out << "weak "; break; + case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break; + case GlobalValue::CommonLinkage: Out << "common "; break; + case GlobalValue::AppendingLinkage: Out << "appending "; break; + case GlobalValue::DLLImportLinkage: Out << "dllimport "; break; + case GlobalValue::DLLExportLinkage: Out << "dllexport "; break; + case GlobalValue::ExternalWeakLinkage: Out << "extern_weak "; break; + case GlobalValue::AvailableExternallyLinkage: + Out << "available_externally "; + break; + } +} + + +static void PrintVisibility(GlobalValue::VisibilityTypes Vis, + formatted_raw_ostream &Out) { + switch (Vis) { + case GlobalValue::DefaultVisibility: break; + case GlobalValue::HiddenVisibility: Out << "hidden "; break; + case GlobalValue::ProtectedVisibility: Out << "protected "; break; + } +} + +static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM, + formatted_raw_ostream &Out) { + switch (TLM) { + case GlobalVariable::NotThreadLocal: + break; + case GlobalVariable::GeneralDynamicTLSModel: + Out << "thread_local "; + break; + case GlobalVariable::LocalDynamicTLSModel: + Out << "thread_local(localdynamic) "; + break; + case GlobalVariable::InitialExecTLSModel: + Out << "thread_local(initialexec) "; + break; + case GlobalVariable::LocalExecTLSModel: + Out << "thread_local(localexec) "; + break; + } +} + +void AssemblyWriter::printGlobal(const GlobalVariable *GV) { + if (GV->isMaterializable()) + Out << "; Materializable\n"; + + WriteAsOperandInternal(Out, GV, &TypePrinter, &Machine, GV->getParent()); + Out << " = "; + + if (!GV->hasInitializer() && GV->hasExternalLinkage()) + Out << "external "; + + PrintLinkage(GV->getLinkage(), Out); + PrintVisibility(GV->getVisibility(), Out); + PrintThreadLocalModel(GV->getThreadLocalMode(), Out); + + if (unsigned AddressSpace = GV->getType()->getAddressSpace()) + Out << "addrspace(" << AddressSpace << ") "; + if (GV->hasUnnamedAddr()) Out << "unnamed_addr "; + if (GV->isExternallyInitialized()) Out << "externally_initialized "; + Out << (GV->isConstant() ? 
"constant " : "global "); + TypePrinter.print(GV->getType()->getElementType(), Out); + + if (GV->hasInitializer()) { + Out << ' '; + writeOperand(GV->getInitializer(), false); + } + + if (GV->hasSection()) { + Out << ", section \""; + PrintEscapedString(GV->getSection(), Out); + Out << '"'; + } + if (GV->getAlignment()) + Out << ", align " << GV->getAlignment(); + + printInfoComment(*GV); +} + +void AssemblyWriter::printAlias(const GlobalAlias *GA) { + if (GA->isMaterializable()) + Out << "; Materializable\n"; + + // Don't crash when dumping partially built GA + if (!GA->hasName()) + Out << "<<nameless>> = "; + else { + PrintLLVMName(Out, GA); + Out << " = "; + } + PrintVisibility(GA->getVisibility(), Out); + + Out << "alias "; + + PrintLinkage(GA->getLinkage(), Out); + + const Constant *Aliasee = GA->getAliasee(); + + if (Aliasee == 0) { + TypePrinter.print(GA->getType(), Out); + Out << " <<NULL ALIASEE>>"; + } else { + writeOperand(Aliasee, !isa<ConstantExpr>(Aliasee)); + } + + printInfoComment(*GA); + Out << '\n'; +} + +void AssemblyWriter::printTypeIdentities() { + if (TypePrinter.NumberedTypes.empty() && + TypePrinter.NamedTypes.empty()) + return; + + Out << '\n'; + + // We know all the numbers that each type is used and we know that it is a + // dense assignment. Convert the map to an index table. + std::vector<StructType*> NumberedTypes(TypePrinter.NumberedTypes.size()); + for (DenseMap<StructType*, unsigned>::iterator I = + TypePrinter.NumberedTypes.begin(), E = TypePrinter.NumberedTypes.end(); + I != E; ++I) { + assert(I->second < NumberedTypes.size() && "Didn't get a dense numbering?"); + NumberedTypes[I->second] = I->first; + } + + // Emit all numbered types. + for (unsigned i = 0, e = NumberedTypes.size(); i != e; ++i) { + Out << '%' << i << " = type "; + + // Make sure we print out at least one level of the type structure, so + // that we do not get %2 = type %2 + TypePrinter.printStructBody(NumberedTypes[i], Out); + Out << '\n'; + } + + for (unsigned i = 0, e = TypePrinter.NamedTypes.size(); i != e; ++i) { + PrintLLVMName(Out, TypePrinter.NamedTypes[i]->getName(), LocalPrefix); + Out << " = type "; + + // Make sure we print out at least one level of the type structure, so + // that we do not get %FILE = type %FILE + TypePrinter.printStructBody(TypePrinter.NamedTypes[i], Out); + Out << '\n'; + } +} + +/// printFunction - Print all aspects of a function. +/// +void AssemblyWriter::printFunction(const Function *F) { + // Print out the return type and name. + Out << '\n'; + + if (AnnotationWriter) AnnotationWriter->emitFunctionAnnot(F, Out); + + if (F->isMaterializable()) + Out << "; Materializable\n"; + + if (F->isDeclaration()) + Out << "declare "; + else + Out << "define "; + + PrintLinkage(F->getLinkage(), Out); + PrintVisibility(F->getVisibility(), Out); + + // Print the calling convention. + if (F->getCallingConv() != CallingConv::C) { + PrintCallingConv(F->getCallingConv(), Out); + Out << " "; + } + + FunctionType *FT = F->getFunctionType(); + const AttributeSet &Attrs = F->getAttributes(); + if (Attrs.hasAttributes(AttributeSet::ReturnIndex)) + Out << Attrs.getAsString(AttributeSet::ReturnIndex) << ' '; + TypePrinter.print(F->getReturnType(), Out); + Out << ' '; + WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent()); + Out << '('; + Machine.incorporateFunction(F); + + // Loop over the arguments, printing them... + + unsigned Idx = 1; + if (!F->isDeclaration()) { + // If this isn't a declaration, print the argument names as well. 
+ for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); + I != E; ++I) { + // Insert commas as we go... the first arg doesn't get a comma + if (I != F->arg_begin()) Out << ", "; + printArgument(I, Attrs, Idx); + Idx++; + } + } else { + // Otherwise, print the types from the function type. + for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { + // Insert commas as we go... the first arg doesn't get a comma + if (i) Out << ", "; + + // Output type... + TypePrinter.print(FT->getParamType(i), Out); + + if (Attrs.hasAttributes(i+1)) + Out << ' ' << Attrs.getAsString(i+1); + } + } + + // Finish printing arguments... + if (FT->isVarArg()) { + if (FT->getNumParams()) Out << ", "; + Out << "..."; // Output varargs portion of signature! + } + Out << ')'; + if (F->hasUnnamedAddr()) + Out << " unnamed_addr"; + if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) + Out << " #" << Machine.getAttributeGroupSlot(Attrs.getFnAttributes()); + if (F->hasSection()) { + Out << " section \""; + PrintEscapedString(F->getSection(), Out); + Out << '"'; + } + if (F->getAlignment()) + Out << " align " << F->getAlignment(); + if (F->hasGC()) + Out << " gc \"" << F->getGC() << '"'; + if (F->isDeclaration()) { + Out << '\n'; + } else { + Out << " {"; + // Output all of the function's basic blocks. + for (Function::const_iterator I = F->begin(), E = F->end(); I != E; ++I) + printBasicBlock(I); + + Out << "}\n"; + } + + Machine.purgeFunction(); +} + +/// printArgument - This member is called for every argument that is passed into +/// the function. Simply print it out +/// +void AssemblyWriter::printArgument(const Argument *Arg, + AttributeSet Attrs, unsigned Idx) { + // Output type... + TypePrinter.print(Arg->getType(), Out); + + // Output parameter attributes list + if (Attrs.hasAttributes(Idx)) + Out << ' ' << Attrs.getAsString(Idx); + + // Output name, if available... + if (Arg->hasName()) { + Out << ' '; + PrintLLVMName(Out, Arg); + } +} + +/// printBasicBlock - This member is called for each basic block in a method. +/// +void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { + if (BB->hasName()) { // Print out the label if it exists... + Out << "\n"; + PrintLLVMName(Out, BB->getName(), LabelPrefix); + Out << ':'; + } else if (!BB->use_empty()) { // Don't print block # of no uses... + Out << "\n; <label>:"; + int Slot = Machine.getLocalSlot(BB); + if (Slot != -1) + Out << Slot; + else + Out << "<badref>"; + } + + if (BB->getParent() == 0) { + Out.PadToColumn(50); + Out << "; Error: Block without parent!"; + } else if (BB != &BB->getParent()->getEntryBlock()) { // Not the entry block? + // Output predecessors for the block. + Out.PadToColumn(50); + Out << ";"; + const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB); + + if (PI == PE) { + Out << " No predecessors!"; + } else { + Out << " preds = "; + writeOperand(*PI, false); + for (++PI; PI != PE; ++PI) { + Out << ", "; + writeOperand(*PI, false); + } + } + } + + Out << "\n"; + + if (AnnotationWriter) AnnotationWriter->emitBasicBlockStartAnnot(BB, Out); + + // Output all of the instructions in the basic block... + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { + printInstruction(*I); + Out << '\n'; + } + + if (AnnotationWriter) AnnotationWriter->emitBasicBlockEndAnnot(BB, Out); +} + +/// printInfoComment - Print a little comment after the instruction indicating +/// which slot it occupies. 
+/// +void AssemblyWriter::printInfoComment(const Value &V) { + if (AnnotationWriter) { + AnnotationWriter->printInfoComment(V, Out); + return; + } +} + +// This member is called for each Instruction in a function.. +void AssemblyWriter::printInstruction(const Instruction &I) { + if (AnnotationWriter) AnnotationWriter->emitInstructionAnnot(&I, Out); + + // Print out indentation for an instruction. + Out << " "; + + // Print out name if it exists... + if (I.hasName()) { + PrintLLVMName(Out, &I); + Out << " = "; + } else if (!I.getType()->isVoidTy()) { + // Print out the def slot taken. + int SlotNum = Machine.getLocalSlot(&I); + if (SlotNum == -1) + Out << "<badref> = "; + else + Out << '%' << SlotNum << " = "; + } + + if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) + Out << "tail "; + + // Print out the opcode... + Out << I.getOpcodeName(); + + // If this is an atomic load or store, print out the atomic marker. + if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) || + (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic())) + Out << " atomic"; + + // If this is a volatile operation, print out the volatile marker. + if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) || + (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) || + (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) || + (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile())) + Out << " volatile"; + + // Print out optimization information. + WriteOptimizationInfo(Out, &I); + + // Print out the compare instruction predicates + if (const CmpInst *CI = dyn_cast<CmpInst>(&I)) + Out << ' ' << getPredicateText(CI->getPredicate()); + + // Print out the atomicrmw operation + if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) + writeAtomicRMWOperation(Out, RMWI->getOperation()); + + // Print out the type of the operands... + const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0; + + // Special case conditional branches to swizzle the condition out to the front + if (isa<BranchInst>(I) && cast<BranchInst>(I).isConditional()) { + const BranchInst &BI(cast<BranchInst>(I)); + Out << ' '; + writeOperand(BI.getCondition(), true); + Out << ", "; + writeOperand(BI.getSuccessor(0), true); + Out << ", "; + writeOperand(BI.getSuccessor(1), true); + + } else if (isa<SwitchInst>(I)) { + const SwitchInst& SI(cast<SwitchInst>(I)); + // Special case switch instruction to get formatting nice and correct. + Out << ' '; + writeOperand(SI.getCondition(), true); + Out << ", "; + writeOperand(SI.getDefaultDest(), true); + Out << " ["; + for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end(); + i != e; ++i) { + Out << "\n "; + writeOperand(i.getCaseValue(), true); + Out << ", "; + writeOperand(i.getCaseSuccessor(), true); + } + Out << "\n ]"; + } else if (isa<IndirectBrInst>(I)) { + // Special case indirectbr instruction to get formatting nice and correct. 
+ Out << ' '; + writeOperand(Operand, true); + Out << ", ["; + + for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { + if (i != 1) + Out << ", "; + writeOperand(I.getOperand(i), true); + } + Out << ']'; + } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) { + Out << ' '; + TypePrinter.print(I.getType(), Out); + Out << ' '; + + for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) { + if (op) Out << ", "; + Out << "[ "; + writeOperand(PN->getIncomingValue(op), false); Out << ", "; + writeOperand(PN->getIncomingBlock(op), false); Out << " ]"; + } + } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) { + Out << ' '; + writeOperand(I.getOperand(0), true); + for (const unsigned *i = EVI->idx_begin(), *e = EVI->idx_end(); i != e; ++i) + Out << ", " << *i; + } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) { + Out << ' '; + writeOperand(I.getOperand(0), true); Out << ", "; + writeOperand(I.getOperand(1), true); + for (const unsigned *i = IVI->idx_begin(), *e = IVI->idx_end(); i != e; ++i) + Out << ", " << *i; + } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) { + Out << ' '; + TypePrinter.print(I.getType(), Out); + Out << " personality "; + writeOperand(I.getOperand(0), true); Out << '\n'; + + if (LPI->isCleanup()) + Out << " cleanup"; + + for (unsigned i = 0, e = LPI->getNumClauses(); i != e; ++i) { + if (i != 0 || LPI->isCleanup()) Out << "\n"; + if (LPI->isCatch(i)) + Out << " catch "; + else + Out << " filter "; + + writeOperand(LPI->getClause(i), true); + } + } else if (isa<ReturnInst>(I) && !Operand) { + Out << " void"; + } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) { + // Print the calling convention being used. + if (CI->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(CI->getCallingConv(), Out); + } + + Operand = CI->getCalledValue(); + PointerType *PTy = cast<PointerType>(Operand->getType()); + FunctionType *FTy = cast<FunctionType>(PTy->getElementType()); + Type *RetTy = FTy->getReturnType(); + const AttributeSet &PAL = CI->getAttributes(); + + if (PAL.hasAttributes(AttributeSet::ReturnIndex)) + Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex); + + // If possible, print out the short form of the call instruction. We can + // only do this if the first argument is a pointer to a nonvararg function, + // and if the return type is not a pointer to a function. + // + Out << ' '; + if (!FTy->isVarArg() && + (!RetTy->isPointerTy() || + !cast<PointerType>(RetTy)->getElementType()->isFunctionTy())) { + TypePrinter.print(RetTy, Out); + Out << ' '; + writeOperand(Operand, false); + } else { + writeOperand(Operand, true); + } + Out << '('; + for (unsigned op = 0, Eop = CI->getNumArgOperands(); op < Eop; ++op) { + if (op > 0) + Out << ", "; + writeParamOperand(CI->getArgOperand(op), PAL, op + 1); + } + Out << ')'; + if (PAL.hasAttributes(AttributeSet::FunctionIndex)) + Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes()); + } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) { + Operand = II->getCalledValue(); + PointerType *PTy = cast<PointerType>(Operand->getType()); + FunctionType *FTy = cast<FunctionType>(PTy->getElementType()); + Type *RetTy = FTy->getReturnType(); + const AttributeSet &PAL = II->getAttributes(); + + // Print the calling convention being used. 
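Likewise, the phi and call cases handled above print lines such as the following (illustrative; the trailing #0 appears only when the call site carries function attributes):

  %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
  %ret = call fastcc i32 @callee(i32 %sum, i8* nocapture %buf) #0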
+ if (II->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(II->getCallingConv(), Out); + } + + if (PAL.hasAttributes(AttributeSet::ReturnIndex)) + Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex); + + // If possible, print out the short form of the invoke instruction. We can + // only do this if the first argument is a pointer to a nonvararg function, + // and if the return type is not a pointer to a function. + // + Out << ' '; + if (!FTy->isVarArg() && + (!RetTy->isPointerTy() || + !cast<PointerType>(RetTy)->getElementType()->isFunctionTy())) { + TypePrinter.print(RetTy, Out); + Out << ' '; + writeOperand(Operand, false); + } else { + writeOperand(Operand, true); + } + Out << '('; + for (unsigned op = 0, Eop = II->getNumArgOperands(); op < Eop; ++op) { + if (op) + Out << ", "; + writeParamOperand(II->getArgOperand(op), PAL, op + 1); + } + + Out << ')'; + if (PAL.hasAttributes(AttributeSet::FunctionIndex)) + Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes()); + + Out << "\n to "; + writeOperand(II->getNormalDest(), true); + Out << " unwind "; + writeOperand(II->getUnwindDest(), true); + + } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { + Out << ' '; + TypePrinter.print(AI->getAllocatedType(), Out); + if (!AI->getArraySize() || AI->isArrayAllocation()) { + Out << ", "; + writeOperand(AI->getArraySize(), true); + } + if (AI->getAlignment()) { + Out << ", align " << AI->getAlignment(); + } + } else if (isa<CastInst>(I)) { + if (Operand) { + Out << ' '; + writeOperand(Operand, true); // Work with broken code + } + Out << " to "; + TypePrinter.print(I.getType(), Out); + } else if (isa<VAArgInst>(I)) { + if (Operand) { + Out << ' '; + writeOperand(Operand, true); // Work with broken code + } + Out << ", "; + TypePrinter.print(I.getType(), Out); + } else if (Operand) { // Print the normal way. + + // PrintAllTypes - Instructions who have operands of all the same type + // omit the type from all but the first operand. If the instruction has + // different type operands (for example br), then they are all printed. + bool PrintAllTypes = false; + Type *TheType = Operand->getType(); + + // Select, Store and ShuffleVector always print all types. + if (isa<SelectInst>(I) || isa<StoreInst>(I) || isa<ShuffleVectorInst>(I) + || isa<ReturnInst>(I)) { + PrintAllTypes = true; + } else { + for (unsigned i = 1, E = I.getNumOperands(); i != E; ++i) { + Operand = I.getOperand(i); + // note that Operand shouldn't be null, but the test helps make dump() + // more tolerant of malformed IR + if (Operand && Operand->getType() != TheType) { + PrintAllTypes = true; // We have differing types! Print them all! 
+ break; + } + } + } + + if (!PrintAllTypes) { + Out << ' '; + TypePrinter.print(TheType, Out); + } + + Out << ' '; + for (unsigned i = 0, E = I.getNumOperands(); i != E; ++i) { + if (i) Out << ", "; + writeOperand(I.getOperand(i), PrintAllTypes); + } + } + + // Print atomic ordering/alignment for memory operations + if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { + if (LI->isAtomic()) + writeAtomic(LI->getOrdering(), LI->getSynchScope()); + if (LI->getAlignment()) + Out << ", align " << LI->getAlignment(); + } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { + if (SI->isAtomic()) + writeAtomic(SI->getOrdering(), SI->getSynchScope()); + if (SI->getAlignment()) + Out << ", align " << SI->getAlignment(); + } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) { + writeAtomic(CXI->getOrdering(), CXI->getSynchScope()); + } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) { + writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope()); + } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) { + writeAtomic(FI->getOrdering(), FI->getSynchScope()); + } + + // Print Metadata info. + SmallVector<std::pair<unsigned, MDNode*>, 4> InstMD; + I.getAllMetadata(InstMD); + if (!InstMD.empty()) { + SmallVector<StringRef, 8> MDNames; + I.getType()->getContext().getMDKindNames(MDNames); + for (unsigned i = 0, e = InstMD.size(); i != e; ++i) { + unsigned Kind = InstMD[i].first; + if (Kind < MDNames.size()) { + Out << ", !" << MDNames[Kind]; + } else { + Out << ", !<unknown kind #" << Kind << ">"; + } + Out << ' '; + WriteAsOperandInternal(Out, InstMD[i].second, &TypePrinter, &Machine, + TheModule); + } + } + printInfoComment(I); +} + +static void WriteMDNodeComment(const MDNode *Node, + formatted_raw_ostream &Out) { + if (Node->getNumOperands() < 1) + return; + + Value *Op = Node->getOperand(0); + if (!Op || !isa<ConstantInt>(Op) || cast<ConstantInt>(Op)->getBitWidth() < 32) + return; + + DIDescriptor Desc(Node); + if (!Desc.Verify()) + return; + + unsigned Tag = Desc.getTag(); + Out.PadToColumn(50); + if (dwarf::TagString(Tag)) { + Out << "; "; + Desc.print(Out); + } else if (Tag == dwarf::DW_TAG_user_base) { + Out << "; [ DW_TAG_user_base ]"; + } +} + +void AssemblyWriter::writeAllMDNodes() { + SmallVector<const MDNode *, 16> Nodes; + Nodes.resize(Machine.mdn_size()); + for (SlotTracker::mdn_iterator I = Machine.mdn_begin(), E = Machine.mdn_end(); + I != E; ++I) + Nodes[I->second] = cast<MDNode>(I->first); + + for (unsigned i = 0, e = Nodes.size(); i != e; ++i) { + Out << '!' 
<< i << " = metadata "; + printMDNodeBody(Nodes[i]); + } +} + +void AssemblyWriter::printMDNodeBody(const MDNode *Node) { + WriteMDNodeBodyInternal(Out, Node, &TypePrinter, &Machine, TheModule); + WriteMDNodeComment(Node, Out); + Out << "\n"; +} + +void AssemblyWriter::writeAllAttributeGroups() { + std::vector<std::pair<AttributeSet, unsigned> > asVec; + asVec.resize(Machine.as_size()); + + for (SlotTracker::as_iterator I = Machine.as_begin(), E = Machine.as_end(); + I != E; ++I) + asVec[I->second] = *I; + + for (std::vector<std::pair<AttributeSet, unsigned> >::iterator + I = asVec.begin(), E = asVec.end(); I != E; ++I) + Out << "attributes #" << I->second << " = { " + << I->first.getAsString(AttributeSet::FunctionIndex, true) << " }\n"; +} + +//===----------------------------------------------------------------------===// +// External Interface declarations +//===----------------------------------------------------------------------===// + +void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const { + SlotTracker SlotTable(this); + formatted_raw_ostream OS(ROS); + AssemblyWriter W(OS, SlotTable, this, AAW); + W.printModule(this); +} + +void NamedMDNode::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const { + SlotTracker SlotTable(getParent()); + formatted_raw_ostream OS(ROS); + AssemblyWriter W(OS, SlotTable, getParent(), AAW); + W.printNamedMDNode(this); +} + +void Type::print(raw_ostream &OS) const { + if (this == 0) { + OS << "<null Type>"; + return; + } + TypePrinting TP; + TP.print(const_cast<Type*>(this), OS); + + // If the type is a named struct type, print the body as well. + if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this))) + if (!STy->isLiteral()) { + OS << " = type "; + TP.printStructBody(STy, OS); + } +} + +void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const { + if (this == 0) { + ROS << "printing a <null> value\n"; + return; + } + formatted_raw_ostream OS(ROS); + if (const Instruction *I = dyn_cast<Instruction>(this)) { + const Function *F = I->getParent() ? I->getParent()->getParent() : 0; + SlotTracker SlotTable(F); + AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), AAW); + W.printInstruction(*I); + } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) { + SlotTracker SlotTable(BB->getParent()); + AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), AAW); + W.printBasicBlock(BB); + } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) { + SlotTracker SlotTable(GV->getParent()); + AssemblyWriter W(OS, SlotTable, GV->getParent(), AAW); + if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV)) + W.printGlobal(V); + else if (const Function *F = dyn_cast<Function>(GV)) + W.printFunction(F); + else + W.printAlias(cast<GlobalAlias>(GV)); + } else if (const MDNode *N = dyn_cast<MDNode>(this)) { + const Function *F = N->getFunction(); + SlotTracker SlotTable(F); + AssemblyWriter W(OS, SlotTable, F ? F->getParent() : 0, AAW); + W.printMDNodeBody(N); + } else if (const Constant *C = dyn_cast<Constant>(this)) { + TypePrinting TypePrinter; + TypePrinter.print(C->getType(), OS); + OS << ' '; + WriteConstantInternal(OS, C, TypePrinter, 0, 0); + } else if (isa<InlineAsm>(this) || isa<MDString>(this) || + isa<Argument>(this)) { + WriteAsOperand(OS, this, true, 0); + } else { + // Otherwise we don't know what it is. Call the virtual function to + // allow a subclass to print itself. + printCustom(OS); + } +} + +// Value::printCustom - subclasses should override this to implement printing. 
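The metadata and attribute-group writers above produce module-level lines such as these (illustrative values; this release still spells metadata operands with the metadata keyword, and the string attribute shown is only an example of the "kind"="value" form):

  !0 = metadata !{i32 42, metadata !"example"}
  attributes #0 = { nounwind readonly "use-soft-float"="false" }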
+void Value::printCustom(raw_ostream &OS) const { + llvm_unreachable("Unknown value to print out!"); +} + +// Value::dump - allow easy printing of Values from the debugger. +void Value::dump() const { print(dbgs()); dbgs() << '\n'; } + +// Type::dump - allow easy printing of Types from the debugger. +void Type::dump() const { print(dbgs()); } + +// Module::dump() - Allow printing of Modules from the debugger. +void Module::dump() const { print(dbgs(), 0); } + +// NamedMDNode::dump() - Allow printing of NamedMDNodes from the debugger. +void NamedMDNode::dump() const { print(dbgs(), 0); } diff --git a/lib/IR/AttributeImpl.h b/lib/IR/AttributeImpl.h new file mode 100644 index 0000000000..ad2670dade --- /dev/null +++ b/lib/IR/AttributeImpl.h @@ -0,0 +1,278 @@ +//===-- AttributeImpl.h - Attribute Internals -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// \brief This file defines various helper methods and classes used by +/// LLVMContextImpl for creating and managing attributes. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ATTRIBUTESIMPL_H +#define LLVM_ATTRIBUTESIMPL_H + +#include "llvm/ADT/FoldingSet.h" +#include "llvm/IR/Attributes.h" +#include <string> + +namespace llvm { + +class Constant; +class LLVMContext; + +//===----------------------------------------------------------------------===// +/// \class +/// \brief A set of classes that contain the kind and (optional) value of the +/// attribute object. There are three main categories: enum attribute entries, +/// represented by Attribute::AttrKind; alignment attribute entries; and string +/// attribute enties, which are for target-dependent attributes. 
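In printed assembly the three categories surface as, for example (illustrative values; inside an attribute group the alignment entries are written with '=' instead, e.g. align=8):

  nounwind                          ; enum attribute entry
  align 8                           ; alignment attribute entry
  "no-frame-pointer-elim"="true"    ; string (target-dependent) attribute entry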
+class AttributeEntry { + unsigned char KindID; +protected: + enum AttrEntryKind { + EnumAttrEntry, + AlignAttrEntry, + StringAttrEntry + }; +public: + AttributeEntry(AttrEntryKind Kind) + : KindID(Kind) {} + virtual ~AttributeEntry() {} + + unsigned getKindID() const { return KindID; } + + static inline bool classof(const AttributeEntry *) { return true; } +}; + +class EnumAttributeEntry : public AttributeEntry { + Attribute::AttrKind Kind; +public: + EnumAttributeEntry(Attribute::AttrKind Kind) + : AttributeEntry(EnumAttrEntry), Kind(Kind) {} + + Attribute::AttrKind getEnumKind() const { return Kind; } + + static inline bool classof(const AttributeEntry *AE) { + return AE->getKindID() == EnumAttrEntry; + } + static inline bool classof(const EnumAttributeEntry *) { return true; } +}; + +class AlignAttributeEntry : public AttributeEntry { + Attribute::AttrKind Kind; + unsigned Align; +public: + AlignAttributeEntry(Attribute::AttrKind Kind, unsigned Align) + : AttributeEntry(AlignAttrEntry), Kind(Kind), Align(Align) {} + + Attribute::AttrKind getEnumKind() const { return Kind; } + unsigned getAlignment() const { return Align; } + + static inline bool classof(const AttributeEntry *AE) { + return AE->getKindID() == AlignAttrEntry; + } + static inline bool classof(const AlignAttributeEntry *) { return true; } +}; + +class StringAttributeEntry : public AttributeEntry { + std::string Kind; + std::string Val; +public: + StringAttributeEntry(StringRef Kind, StringRef Val = StringRef()) + : AttributeEntry(StringAttrEntry), Kind(Kind), Val(Val) {} + + StringRef getStringKind() const { return Kind; } + StringRef getStringValue() const { return Val; } + + static inline bool classof(const AttributeEntry *AE) { + return AE->getKindID() == StringAttrEntry; + } + static inline bool classof(const StringAttributeEntry *) { return true; } +}; + +//===----------------------------------------------------------------------===// +/// \class +/// \brief This class represents a single, uniqued attribute. That attribute +/// could be a single enum, a tuple, or a string. +class AttributeImpl : public FoldingSetNode { + LLVMContext &Context; ///< Global context for uniquing objects + + AttributeEntry *Entry; ///< Holds the kind and value of the attribute + + // AttributesImpl is uniqued, these should not be publicly available. + void operator=(const AttributeImpl &) LLVM_DELETED_FUNCTION; + AttributeImpl(const AttributeImpl &) LLVM_DELETED_FUNCTION; +public: + AttributeImpl(LLVMContext &C, Attribute::AttrKind Kind); + AttributeImpl(LLVMContext &C, Attribute::AttrKind Kind, unsigned Align); + AttributeImpl(LLVMContext &C, StringRef Kind, StringRef Val = StringRef()); + ~AttributeImpl(); + + LLVMContext &getContext() { return Context; } + + bool isEnumAttribute() const; + bool isAlignAttribute() const; + bool isStringAttribute() const; + + bool hasAttribute(Attribute::AttrKind A) const; + bool hasAttribute(StringRef Kind) const; + + Attribute::AttrKind getKindAsEnum() const; + uint64_t getValueAsInt() const; + + StringRef getKindAsString() const; + StringRef getValueAsString() const; + + /// \brief Used when sorting the attributes. 
+ bool operator<(const AttributeImpl &AI) const; + + void Profile(FoldingSetNodeID &ID) const { + if (isEnumAttribute()) + Profile(ID, getKindAsEnum(), 0); + else if (isAlignAttribute()) + Profile(ID, getKindAsEnum(), getValueAsInt()); + else + Profile(ID, getKindAsString(), getValueAsString()); + } + static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind, + uint64_t Val) { + ID.AddInteger(Kind); + if (Val) ID.AddInteger(Val); + } + static void Profile(FoldingSetNodeID &ID, StringRef Kind, StringRef Values) { + ID.AddString(Kind); + if (!Values.empty()) ID.AddString(Values); + } + + // FIXME: Remove this! + static uint64_t getAttrMask(Attribute::AttrKind Val); +}; + +//===----------------------------------------------------------------------===// +/// \class +/// \brief This class represents a group of attributes that apply to one +/// element: function, return type, or parameter. +class AttributeSetNode : public FoldingSetNode { + SmallVector<Attribute, 4> AttrList; + + AttributeSetNode(ArrayRef<Attribute> Attrs) + : AttrList(Attrs.begin(), Attrs.end()) {} + + // AttributesSetNode is uniqued, these should not be publicly available. + void operator=(const AttributeSetNode &) LLVM_DELETED_FUNCTION; + AttributeSetNode(const AttributeSetNode &) LLVM_DELETED_FUNCTION; +public: + static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs); + + bool hasAttribute(Attribute::AttrKind Kind) const; + bool hasAttribute(StringRef Kind) const; + bool hasAttributes() const { return !AttrList.empty(); } + + Attribute getAttribute(Attribute::AttrKind Kind) const; + Attribute getAttribute(StringRef Kind) const; + + unsigned getAlignment() const; + unsigned getStackAlignment() const; + std::string getAsString(bool InAttrGrp) const; + + typedef SmallVectorImpl<Attribute>::iterator iterator; + typedef SmallVectorImpl<Attribute>::const_iterator const_iterator; + + iterator begin() { return AttrList.begin(); } + iterator end() { return AttrList.end(); } + + const_iterator begin() const { return AttrList.begin(); } + const_iterator end() const { return AttrList.end(); } + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, AttrList); + } + static void Profile(FoldingSetNodeID &ID, ArrayRef<Attribute> AttrList) { + for (unsigned I = 0, E = AttrList.size(); I != E; ++I) + AttrList[I].Profile(ID); + } +}; + +//===----------------------------------------------------------------------===// +/// \class +/// \brief This class represents a set of attributes that apply to the function, +/// return type, and parameters. +class AttributeSetImpl : public FoldingSetNode { + friend class AttributeSet; + + LLVMContext &Context; + + typedef std::pair<unsigned, AttributeSetNode*> IndexAttrPair; + SmallVector<IndexAttrPair, 4> AttrNodes; + + // AttributesSet is uniqued, these should not be publicly available. + void operator=(const AttributeSetImpl &) LLVM_DELETED_FUNCTION; + AttributeSetImpl(const AttributeSetImpl &) LLVM_DELETED_FUNCTION; +public: + AttributeSetImpl(LLVMContext &C, + ArrayRef<std::pair<unsigned, AttributeSetNode*> > attrs) + : Context(C), AttrNodes(attrs.begin(), attrs.end()) {} + + /// \brief Get the context that created this AttributeSetImpl. + LLVMContext &getContext() { return Context; } + + /// \brief Return the number of attributes this AttributeSet contains. + unsigned getNumAttributes() const { return AttrNodes.size(); } + + /// \brief Get the index of the given "slot" in the AttrNodes list. 
This index + /// is the index of the return, parameter, or function object that the + /// attributes are applied to, not the index into the AttrNodes list where the + /// attributes reside. + uint64_t getSlotIndex(unsigned Slot) const { + return AttrNodes[Slot].first; + } + + /// \brief Retrieve the attributes for the given "slot" in the AttrNode list. + /// \p Slot is an index into the AttrNodes list, not the index of the return / + /// parameter/ function which the attributes apply to. + AttributeSet getSlotAttributes(unsigned Slot) const { + return AttributeSet::get(Context, AttrNodes[Slot]); + } + + /// \brief Retrieve the attribute set node for the given "slot" in the + /// AttrNode list. + AttributeSetNode *getSlotNode(unsigned Slot) const { + return AttrNodes[Slot].second; + } + + typedef AttributeSetNode::iterator iterator; + typedef AttributeSetNode::const_iterator const_iterator; + + iterator begin(unsigned Idx) + { return AttrNodes[Idx].second->begin(); } + iterator end(unsigned Idx) + { return AttrNodes[Idx].second->end(); } + + const_iterator begin(unsigned Idx) const + { return AttrNodes[Idx].second->begin(); } + const_iterator end(unsigned Idx) const + { return AttrNodes[Idx].second->end(); } + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, AttrNodes); + } + static void Profile(FoldingSetNodeID &ID, + ArrayRef<std::pair<unsigned, AttributeSetNode*> > Nodes) { + for (unsigned i = 0, e = Nodes.size(); i != e; ++i) { + ID.AddInteger(Nodes[i].first); + ID.AddPointer(Nodes[i].second); + } + } + + // FIXME: This atrocity is temporary. + uint64_t Raw(uint64_t Index) const; +}; + +} // end llvm namespace + +#endif diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp new file mode 100644 index 0000000000..ed2bf05e90 --- /dev/null +++ b/lib/IR/Attributes.cpp @@ -0,0 +1,1173 @@ +//===-- Attributes.cpp - Implement AttributesList -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// \file +// \brief This file implements the Attribute, AttributeImpl, AttrBuilder, +// AttributeSetImpl, and AttributeSet classes. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Attributes.h" +#include "AttributeImpl.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Atomic.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/Mutex.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Attribute Construction Methods +//===----------------------------------------------------------------------===// + +Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind, + uint64_t Val) { + LLVMContextImpl *pImpl = Context.pImpl; + FoldingSetNodeID ID; + ID.AddInteger(Kind); + if (Val) ID.AddInteger(Val); + + void *InsertPoint; + AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (!PA) { + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. + PA = !Val ? 
+ new AttributeImpl(Context, Kind) : + new AttributeImpl(Context, Kind, Val); + pImpl->AttrsSet.InsertNode(PA, InsertPoint); + } + + // Return the Attribute that we found or created. + return Attribute(PA); +} + +Attribute Attribute::get(LLVMContext &Context, StringRef Kind, StringRef Val) { + LLVMContextImpl *pImpl = Context.pImpl; + FoldingSetNodeID ID; + ID.AddString(Kind); + if (!Val.empty()) ID.AddString(Val); + + void *InsertPoint; + AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (!PA) { + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. + PA = new AttributeImpl(Context, Kind, Val); + pImpl->AttrsSet.InsertNode(PA, InsertPoint); + } + + // Return the Attribute that we found or created. + return Attribute(PA); +} + +Attribute Attribute::getWithAlignment(LLVMContext &Context, uint64_t Align) { + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x40000000 && "Alignment too large."); + return get(Context, Alignment, Align); +} + +Attribute Attribute::getWithStackAlignment(LLVMContext &Context, + uint64_t Align) { + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x100 && "Alignment too large."); + return get(Context, StackAlignment, Align); +} + +//===----------------------------------------------------------------------===// +// Attribute Accessor Methods +//===----------------------------------------------------------------------===// + +bool Attribute::isEnumAttribute() const { + return pImpl && pImpl->isEnumAttribute(); +} + +bool Attribute::isAlignAttribute() const { + return pImpl && pImpl->isAlignAttribute(); +} + +bool Attribute::isStringAttribute() const { + return pImpl && pImpl->isStringAttribute(); +} + +Attribute::AttrKind Attribute::getKindAsEnum() const { + assert((isEnumAttribute() || isAlignAttribute()) && + "Invalid attribute type to get the kind as an enum!"); + return pImpl ? pImpl->getKindAsEnum() : None; +} + +uint64_t Attribute::getValueAsInt() const { + assert(isAlignAttribute() && + "Expected the attribute to be an alignment attribute!"); + return pImpl ? pImpl->getValueAsInt() : 0; +} + +StringRef Attribute::getKindAsString() const { + assert(isStringAttribute() && + "Invalid attribute type to get the kind as a string!"); + return pImpl ? pImpl->getKindAsString() : StringRef(); +} + +StringRef Attribute::getValueAsString() const { + assert(isStringAttribute() && + "Invalid attribute type to get the value as a string!"); + return pImpl ? pImpl->getValueAsString() : StringRef(); +} + +bool Attribute::hasAttribute(AttrKind Kind) const { + return (pImpl && pImpl->hasAttribute(Kind)) || (!pImpl && Kind == None); +} + +bool Attribute::hasAttribute(StringRef Kind) const { + if (!isStringAttribute()) return false; + return pImpl && pImpl->hasAttribute(Kind); +} + +/// This returns the alignment field of an attribute as a byte alignment value. +unsigned Attribute::getAlignment() const { + assert(hasAttribute(Attribute::Alignment) && + "Trying to get alignment from non-alignment attribute!"); + return pImpl->getValueAsInt(); +} + +/// This returns the stack alignment field of an attribute as a byte alignment +/// value. 
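A minimal usage sketch of the constructors above, assuming Ctx is an existing LLVMContext:

  Attribute A = Attribute::get(Ctx, Attribute::NoUnwind);
  Attribute B = Attribute::getWithAlignment(Ctx, 16);   // Ctx: assumed, not defined here
  assert(A.isEnumAttribute() && B.getAlignment() == 16);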
+unsigned Attribute::getStackAlignment() const { + assert(hasAttribute(Attribute::StackAlignment) && + "Trying to get alignment from non-alignment attribute!"); + return pImpl->getValueAsInt(); +} + +std::string Attribute::getAsString(bool InAttrGrp) const { + if (!pImpl) return ""; + + if (hasAttribute(Attribute::SanitizeAddress)) + return "sanitize_address"; + if (hasAttribute(Attribute::AlwaysInline)) + return "alwaysinline"; + if (hasAttribute(Attribute::ByVal)) + return "byval"; + if (hasAttribute(Attribute::InlineHint)) + return "inlinehint"; + if (hasAttribute(Attribute::InReg)) + return "inreg"; + if (hasAttribute(Attribute::MinSize)) + return "minsize"; + if (hasAttribute(Attribute::Naked)) + return "naked"; + if (hasAttribute(Attribute::Nest)) + return "nest"; + if (hasAttribute(Attribute::NoAlias)) + return "noalias"; + if (hasAttribute(Attribute::NoBuiltin)) + return "nobuiltin"; + if (hasAttribute(Attribute::NoCapture)) + return "nocapture"; + if (hasAttribute(Attribute::NoDuplicate)) + return "noduplicate"; + if (hasAttribute(Attribute::NoImplicitFloat)) + return "noimplicitfloat"; + if (hasAttribute(Attribute::NoInline)) + return "noinline"; + if (hasAttribute(Attribute::NonLazyBind)) + return "nonlazybind"; + if (hasAttribute(Attribute::NoRedZone)) + return "noredzone"; + if (hasAttribute(Attribute::NoReturn)) + return "noreturn"; + if (hasAttribute(Attribute::NoUnwind)) + return "nounwind"; + if (hasAttribute(Attribute::OptimizeForSize)) + return "optsize"; + if (hasAttribute(Attribute::ReadNone)) + return "readnone"; + if (hasAttribute(Attribute::ReadOnly)) + return "readonly"; + if (hasAttribute(Attribute::ReturnsTwice)) + return "returns_twice"; + if (hasAttribute(Attribute::SExt)) + return "signext"; + if (hasAttribute(Attribute::StackProtect)) + return "ssp"; + if (hasAttribute(Attribute::StackProtectReq)) + return "sspreq"; + if (hasAttribute(Attribute::StackProtectStrong)) + return "sspstrong"; + if (hasAttribute(Attribute::StructRet)) + return "sret"; + if (hasAttribute(Attribute::SanitizeThread)) + return "sanitize_thread"; + if (hasAttribute(Attribute::SanitizeMemory)) + return "sanitize_memory"; + if (hasAttribute(Attribute::UWTable)) + return "uwtable"; + if (hasAttribute(Attribute::ZExt)) + return "zeroext"; + + // FIXME: These should be output like this: + // + // align=4 + // alignstack=8 + // + if (hasAttribute(Attribute::Alignment)) { + std::string Result; + Result += "align"; + Result += (InAttrGrp) ? 
"=" : " "; + Result += utostr(getValueAsInt()); + return Result; + } + + if (hasAttribute(Attribute::StackAlignment)) { + std::string Result; + Result += "alignstack"; + if (InAttrGrp) { + Result += "="; + Result += utostr(getValueAsInt()); + } else { + Result += "("; + Result += utostr(getValueAsInt()); + Result += ")"; + } + return Result; + } + + // Convert target-dependent attributes to strings of the form: + // + // "kind" + // "kind" = "value" + // + if (isStringAttribute()) { + std::string Result; + Result += '\"' + getKindAsString().str() + '"'; + + StringRef Val = pImpl->getValueAsString(); + if (Val.empty()) return Result; + + Result += "=\"" + Val.str() + '"'; + return Result; + } + + llvm_unreachable("Unknown attribute"); +} + +bool Attribute::operator<(Attribute A) const { + if (!pImpl && !A.pImpl) return false; + if (!pImpl) return true; + if (!A.pImpl) return false; + return *pImpl < *A.pImpl; +} + +//===----------------------------------------------------------------------===// +// AttributeImpl Definition +//===----------------------------------------------------------------------===// + +AttributeImpl::AttributeImpl(LLVMContext &C, Attribute::AttrKind Kind) + : Context(C), Entry(new EnumAttributeEntry(Kind)) {} + +AttributeImpl::AttributeImpl(LLVMContext &C, Attribute::AttrKind Kind, + unsigned Align) + : Context(C) { + assert((Kind == Attribute::Alignment || Kind == Attribute::StackAlignment) && + "Wrong kind for alignment attribute!"); + Entry = new AlignAttributeEntry(Kind, Align); +} + +AttributeImpl::AttributeImpl(LLVMContext &C, StringRef Kind, StringRef Val) + : Context(C), Entry(new StringAttributeEntry(Kind, Val)) {} + +AttributeImpl::~AttributeImpl() { + delete Entry; +} + +bool AttributeImpl::isEnumAttribute() const { + return isa<EnumAttributeEntry>(Entry); +} + +bool AttributeImpl::isAlignAttribute() const { + return isa<AlignAttributeEntry>(Entry); +} + +bool AttributeImpl::isStringAttribute() const { + return isa<StringAttributeEntry>(Entry); +} + +bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const { + if (isStringAttribute()) return false; + return getKindAsEnum() == A; +} + +bool AttributeImpl::hasAttribute(StringRef Kind) const { + if (!isStringAttribute()) return false; + return getKindAsString() == Kind; +} + +Attribute::AttrKind AttributeImpl::getKindAsEnum() const { + if (EnumAttributeEntry *E = dyn_cast<EnumAttributeEntry>(Entry)) + return E->getEnumKind(); + return cast<AlignAttributeEntry>(Entry)->getEnumKind(); +} + +uint64_t AttributeImpl::getValueAsInt() const { + return cast<AlignAttributeEntry>(Entry)->getAlignment(); +} + +StringRef AttributeImpl::getKindAsString() const { + return cast<StringAttributeEntry>(Entry)->getStringKind(); +} + +StringRef AttributeImpl::getValueAsString() const { + return cast<StringAttributeEntry>(Entry)->getStringValue(); +} + +bool AttributeImpl::operator<(const AttributeImpl &AI) const { + // This sorts the attributes with Attribute::AttrKinds coming first (sorted + // relative to their enum value) and then strings. 
+ if (isEnumAttribute()) { + if (AI.isEnumAttribute()) return getKindAsEnum() < AI.getKindAsEnum(); + if (AI.isAlignAttribute()) return true; + if (AI.isStringAttribute()) return true; + } + + if (isAlignAttribute()) { + if (AI.isEnumAttribute()) return false; + if (AI.isAlignAttribute()) return getValueAsInt() < AI.getValueAsInt(); + if (AI.isStringAttribute()) return true; + } + + if (AI.isEnumAttribute()) return false; + if (AI.isAlignAttribute()) return false; + if (getKindAsString() == AI.getKindAsString()) + return getValueAsString() < AI.getValueAsString(); + return getKindAsString() < AI.getKindAsString(); +} + +uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) { + // FIXME: Remove this. + switch (Val) { + case Attribute::EndAttrKinds: + llvm_unreachable("Synthetic enumerators which should never get here"); + + case Attribute::None: return 0; + case Attribute::ZExt: return 1 << 0; + case Attribute::SExt: return 1 << 1; + case Attribute::NoReturn: return 1 << 2; + case Attribute::InReg: return 1 << 3; + case Attribute::StructRet: return 1 << 4; + case Attribute::NoUnwind: return 1 << 5; + case Attribute::NoAlias: return 1 << 6; + case Attribute::ByVal: return 1 << 7; + case Attribute::Nest: return 1 << 8; + case Attribute::ReadNone: return 1 << 9; + case Attribute::ReadOnly: return 1 << 10; + case Attribute::NoInline: return 1 << 11; + case Attribute::AlwaysInline: return 1 << 12; + case Attribute::OptimizeForSize: return 1 << 13; + case Attribute::StackProtect: return 1 << 14; + case Attribute::StackProtectReq: return 1 << 15; + case Attribute::Alignment: return 31 << 16; + case Attribute::NoCapture: return 1 << 21; + case Attribute::NoRedZone: return 1 << 22; + case Attribute::NoImplicitFloat: return 1 << 23; + case Attribute::Naked: return 1 << 24; + case Attribute::InlineHint: return 1 << 25; + case Attribute::StackAlignment: return 7 << 26; + case Attribute::ReturnsTwice: return 1 << 29; + case Attribute::UWTable: return 1 << 30; + case Attribute::NonLazyBind: return 1U << 31; + case Attribute::SanitizeAddress: return 1ULL << 32; + case Attribute::MinSize: return 1ULL << 33; + case Attribute::NoDuplicate: return 1ULL << 34; + case Attribute::StackProtectStrong: return 1ULL << 35; + case Attribute::SanitizeThread: return 1ULL << 36; + case Attribute::SanitizeMemory: return 1ULL << 37; + case Attribute::NoBuiltin: return 1ULL << 38; + } + llvm_unreachable("Unsupported attribute type"); +} + +//===----------------------------------------------------------------------===// +// AttributeSetNode Definition +//===----------------------------------------------------------------------===// + +AttributeSetNode *AttributeSetNode::get(LLVMContext &C, + ArrayRef<Attribute> Attrs) { + if (Attrs.empty()) + return 0; + + // Otherwise, build a key to look up the existing attributes. + LLVMContextImpl *pImpl = C.pImpl; + FoldingSetNodeID ID; + + SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end()); + array_pod_sort(SortedAttrs.begin(), SortedAttrs.end()); + + for (SmallVectorImpl<Attribute>::iterator I = SortedAttrs.begin(), + E = SortedAttrs.end(); I != E; ++I) + I->Profile(ID); + + void *InsertPoint; + AttributeSetNode *PA = + pImpl->AttrsSetNodes.FindNodeOrInsertPos(ID, InsertPoint); + + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. + if (!PA) { + PA = new AttributeSetNode(SortedAttrs); + pImpl->AttrsSetNodes.InsertNode(PA, InsertPoint); + } + + // Return the AttributesListNode that we found or created. 
+ return PA; +} + +bool AttributeSetNode::hasAttribute(Attribute::AttrKind Kind) const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Kind)) + return true; + return false; +} + +bool AttributeSetNode::hasAttribute(StringRef Kind) const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Kind)) + return true; + return false; +} + +Attribute AttributeSetNode::getAttribute(Attribute::AttrKind Kind) const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Kind)) + return *I; + return Attribute(); +} + +Attribute AttributeSetNode::getAttribute(StringRef Kind) const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Kind)) + return *I; + return Attribute(); +} + +unsigned AttributeSetNode::getAlignment() const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Attribute::Alignment)) + return I->getAlignment(); + return 0; +} + +unsigned AttributeSetNode::getStackAlignment() const { + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ++I) + if (I->hasAttribute(Attribute::StackAlignment)) + return I->getStackAlignment(); + return 0; +} + +std::string AttributeSetNode::getAsString(bool InAttrGrp) const { + std::string Str = ""; + for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), + E = AttrList.end(); I != E; ) { + Str += I->getAsString(InAttrGrp); + if (++I != E) Str += " "; + } + return Str; +} + +//===----------------------------------------------------------------------===// +// AttributeSetImpl Definition +//===----------------------------------------------------------------------===// + +uint64_t AttributeSetImpl::Raw(uint64_t Index) const { + for (unsigned I = 0, E = getNumAttributes(); I != E; ++I) { + if (getSlotIndex(I) != Index) continue; + const AttributeSetNode *ASN = AttrNodes[I].second; + uint64_t Mask = 0; + + for (AttributeSetNode::const_iterator II = ASN->begin(), + IE = ASN->end(); II != IE; ++II) { + Attribute Attr = *II; + + // This cannot handle string attributes. + if (Attr.isStringAttribute()) continue; + + Attribute::AttrKind Kind = Attr.getKindAsEnum(); + + if (Kind == Attribute::Alignment) + Mask |= (Log2_32(ASN->getAlignment()) + 1) << 16; + else if (Kind == Attribute::StackAlignment) + Mask |= (Log2_32(ASN->getStackAlignment()) + 1) << 26; + else + Mask |= AttributeImpl::getAttrMask(Kind); + } + + return Mask; + } + + return 0; +} + +//===----------------------------------------------------------------------===// +// AttributeSet Construction and Mutation Methods +//===----------------------------------------------------------------------===// + +AttributeSet +AttributeSet::getImpl(LLVMContext &C, + ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs) { + LLVMContextImpl *pImpl = C.pImpl; + FoldingSetNodeID ID; + AttributeSetImpl::Profile(ID, Attrs); + + void *InsertPoint; + AttributeSetImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint); + + // If we didn't find any existing attributes of the same shape then + // create a new one and insert it. 
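As a worked reading of the Raw() encoding above, an index carrying nounwind and align 8 packs to:

  nounwind    ->  1 << 5
  align 8     ->  (Log2_32(8) + 1) << 16  ==  4 << 16
  Raw(Index)  ==  (1 << 5) | (4 << 16)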
+ if (!PA) { + PA = new AttributeSetImpl(C, Attrs); + pImpl->AttrsLists.InsertNode(PA, InsertPoint); + } + + // Return the AttributesList that we found or created. + return AttributeSet(PA); +} + +AttributeSet AttributeSet::get(LLVMContext &C, + ArrayRef<std::pair<unsigned, Attribute> > Attrs){ + // If there are no attributes then return a null AttributesList pointer. + if (Attrs.empty()) + return AttributeSet(); + +#ifndef NDEBUG + for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { + assert((!i || Attrs[i-1].first <= Attrs[i].first) && + "Misordered Attributes list!"); + assert(!Attrs[i].second.hasAttribute(Attribute::None) && + "Pointless attribute!"); + } +#endif + + // Create a vector if (unsigned, AttributeSetNode*) pairs from the attributes + // list. + SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrPairVec; + for (ArrayRef<std::pair<unsigned, Attribute> >::iterator I = Attrs.begin(), + E = Attrs.end(); I != E; ) { + unsigned Index = I->first; + SmallVector<Attribute, 4> AttrVec; + while (I != E && I->first == Index) { + AttrVec.push_back(I->second); + ++I; + } + + AttrPairVec.push_back(std::make_pair(Index, + AttributeSetNode::get(C, AttrVec))); + } + + return getImpl(C, AttrPairVec); +} + +AttributeSet AttributeSet::get(LLVMContext &C, + ArrayRef<std::pair<unsigned, + AttributeSetNode*> > Attrs) { + // If there are no attributes then return a null AttributesList pointer. + if (Attrs.empty()) + return AttributeSet(); + + return getImpl(C, Attrs); +} + +AttributeSet AttributeSet::get(LLVMContext &C, unsigned Idx, AttrBuilder &B) { + if (!B.hasAttributes()) + return AttributeSet(); + + // Add target-independent attributes. + SmallVector<std::pair<unsigned, Attribute>, 8> Attrs; + for (Attribute::AttrKind Kind = Attribute::None; + Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) { + if (!B.contains(Kind)) + continue; + + if (Kind == Attribute::Alignment) + Attrs.push_back(std::make_pair(Idx, Attribute:: + getWithAlignment(C, B.getAlignment()))); + else if (Kind == Attribute::StackAlignment) + Attrs.push_back(std::make_pair(Idx, Attribute:: + getWithStackAlignment(C, B.getStackAlignment()))); + else + Attrs.push_back(std::make_pair(Idx, Attribute::get(C, Kind))); + } + + // Add target-dependent (string) attributes. 
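A minimal sketch of driving this overload, assuming Ctx is an existing LLVMContext and using index 1 for the first parameter:

  AttrBuilder B;
  B.addAttribute(Attribute::NoCapture).addAlignmentAttr(8);
  AttributeSet ParamAttrs = AttributeSet::get(Ctx, 1, B);   // Ctx: assumed context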
+ for (AttrBuilder::td_iterator I = B.td_begin(), E = B.td_end(); + I != E; ++I) + Attrs.push_back(std::make_pair(Idx, Attribute::get(C, I->first,I->second))); + + return get(C, Attrs); +} + +AttributeSet AttributeSet::get(LLVMContext &C, unsigned Idx, + ArrayRef<Attribute::AttrKind> Kind) { + SmallVector<std::pair<unsigned, Attribute>, 8> Attrs; + for (ArrayRef<Attribute::AttrKind>::iterator I = Kind.begin(), + E = Kind.end(); I != E; ++I) + Attrs.push_back(std::make_pair(Idx, Attribute::get(C, *I))); + return get(C, Attrs); +} + +AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<AttributeSet> Attrs) { + if (Attrs.empty()) return AttributeSet(); + + SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrNodeVec; + for (unsigned I = 0, E = Attrs.size(); I != E; ++I) { + AttributeSet AS = Attrs[I]; + if (!AS.pImpl) continue; + AttrNodeVec.append(AS.pImpl->AttrNodes.begin(), AS.pImpl->AttrNodes.end()); + } + + return getImpl(C, AttrNodeVec); +} + +AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Idx, + Attribute::AttrKind Attr) const { + if (hasAttribute(Idx, Attr)) return *this; + return addAttributes(C, Idx, AttributeSet::get(C, Idx, Attr)); +} + +AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, + AttributeSet Attrs) const { + if (!pImpl) return Attrs; + if (!Attrs.pImpl) return *this; + +#ifndef NDEBUG + // FIXME it is not obvious how this should work for alignment. For now, say + // we can't change a known alignment. + unsigned OldAlign = getParamAlignment(Idx); + unsigned NewAlign = Attrs.getParamAlignment(Idx); + assert((!OldAlign || !NewAlign || OldAlign == NewAlign) && + "Attempt to change alignment!"); +#endif + + // Add the attribute slots before the one we're trying to add. + SmallVector<AttributeSet, 4> AttrSet; + uint64_t NumAttrs = pImpl->getNumAttributes(); + AttributeSet AS; + uint64_t LastIndex = 0; + for (unsigned I = 0, E = NumAttrs; I != E; ++I) { + if (getSlotIndex(I) >= Idx) { + if (getSlotIndex(I) == Idx) AS = getSlotAttributes(LastIndex++); + break; + } + LastIndex = I + 1; + AttrSet.push_back(getSlotAttributes(I)); + } + + // Now add the attribute into the correct slot. There may already be an + // AttributeSet there. + AttrBuilder B(AS, Idx); + + for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I) + if (Attrs.getSlotIndex(I) == Idx) { + for (AttributeSetImpl::const_iterator II = Attrs.pImpl->begin(I), + IE = Attrs.pImpl->end(I); II != IE; ++II) + B.addAttribute(*II); + break; + } + + AttrSet.push_back(AttributeSet::get(C, Idx, B)); + + // Add the remaining attribute slots. + for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) + AttrSet.push_back(getSlotAttributes(I)); + + return get(C, AttrSet); +} + +AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Idx, + Attribute::AttrKind Attr) const { + if (!hasAttribute(Idx, Attr)) return *this; + return removeAttributes(C, Idx, AttributeSet::get(C, Idx, Attr)); +} + +AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Idx, + AttributeSet Attrs) const { + if (!pImpl) return AttributeSet(); + if (!Attrs.pImpl) return *this; + +#ifndef NDEBUG + // FIXME it is not obvious how this should work for alignment. + // For now, say we can't pass in alignment, which no current use does. + assert(!Attrs.hasAttribute(Idx, Attribute::Alignment) && + "Attempt to change alignment!"); +#endif + + // Add the attribute slots before the one we're trying to add. 
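A hypothetical caller of addAttribute/removeAttribute, with F some existing llvm::Function* and Ctx its context:

  AttributeSet AS = F->getAttributes();                     // F, Ctx: assumed to exist
  AS = AS.addAttribute(Ctx, AttributeSet::FunctionIndex, Attribute::NoUnwind);
  AS = AS.removeAttribute(Ctx, AttributeSet::FunctionIndex, Attribute::NoUnwind);
  F->setAttributes(AS);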
+ SmallVector<AttributeSet, 4> AttrSet; + uint64_t NumAttrs = pImpl->getNumAttributes(); + AttributeSet AS; + uint64_t LastIndex = 0; + for (unsigned I = 0, E = NumAttrs; I != E; ++I) { + if (getSlotIndex(I) >= Idx) { + if (getSlotIndex(I) == Idx) AS = getSlotAttributes(LastIndex++); + break; + } + LastIndex = I + 1; + AttrSet.push_back(getSlotAttributes(I)); + } + + // Now remove the attribute from the correct slot. There may already be an + // AttributeSet there. + AttrBuilder B(AS, Idx); + + for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I) + if (Attrs.getSlotIndex(I) == Idx) { + B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Idx); + break; + } + + AttrSet.push_back(AttributeSet::get(C, Idx, B)); + + // Add the remaining attribute slots. + for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) + AttrSet.push_back(getSlotAttributes(I)); + + return get(C, AttrSet); +} + +//===----------------------------------------------------------------------===// +// AttributeSet Accessor Methods +//===----------------------------------------------------------------------===// + +LLVMContext &AttributeSet::getContext() const { + return pImpl->getContext(); +} + +AttributeSet AttributeSet::getParamAttributes(unsigned Idx) const { + return pImpl && hasAttributes(Idx) ? + AttributeSet::get(pImpl->getContext(), + ArrayRef<std::pair<unsigned, AttributeSetNode*> >( + std::make_pair(Idx, getAttributes(Idx)))) : + AttributeSet(); +} + +AttributeSet AttributeSet::getRetAttributes() const { + return pImpl && hasAttributes(ReturnIndex) ? + AttributeSet::get(pImpl->getContext(), + ArrayRef<std::pair<unsigned, AttributeSetNode*> >( + std::make_pair(ReturnIndex, + getAttributes(ReturnIndex)))) : + AttributeSet(); +} + +AttributeSet AttributeSet::getFnAttributes() const { + return pImpl && hasAttributes(FunctionIndex) ? + AttributeSet::get(pImpl->getContext(), + ArrayRef<std::pair<unsigned, AttributeSetNode*> >( + std::make_pair(FunctionIndex, + getAttributes(FunctionIndex)))) : + AttributeSet(); +} + +bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{ + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->hasAttribute(Kind) : false; +} + +bool AttributeSet::hasAttribute(unsigned Index, StringRef Kind) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->hasAttribute(Kind) : false; +} + +bool AttributeSet::hasAttributes(unsigned Index) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->hasAttributes() : false; +} + +/// \brief Return true if the specified attribute is set for at least one +/// parameter or for the return value. +bool AttributeSet::hasAttrSomewhere(Attribute::AttrKind Attr) const { + if (pImpl == 0) return false; + + for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) + for (AttributeSetImpl::const_iterator II = pImpl->begin(I), + IE = pImpl->end(I); II != IE; ++II) + if (II->hasAttribute(Attr)) + return true; + + return false; +} + +Attribute AttributeSet::getAttribute(unsigned Index, + Attribute::AttrKind Kind) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->getAttribute(Kind) : Attribute(); +} + +Attribute AttributeSet::getAttribute(unsigned Index, + StringRef Kind) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->getAttribute(Kind) : Attribute(); +} + +unsigned AttributeSet::getParamAlignment(unsigned Index) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? 
ASN->getAlignment() : 0; +} + +unsigned AttributeSet::getStackAlignment(unsigned Index) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->getStackAlignment() : 0; +} + +std::string AttributeSet::getAsString(unsigned Index, + bool InAttrGrp) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->getAsString(InAttrGrp) : std::string(""); +} + +/// \brief The attributes for the specified index are returned. +AttributeSetNode *AttributeSet::getAttributes(unsigned Idx) const { + if (!pImpl) return 0; + + // Loop through to find the attribute node we want. + for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) + if (pImpl->getSlotIndex(I) == Idx) + return pImpl->getSlotNode(I); + + return 0; +} + +AttributeSet::iterator AttributeSet::begin(unsigned Idx) const { + if (!pImpl) + return ArrayRef<Attribute>().begin(); + return pImpl->begin(Idx); +} + +AttributeSet::iterator AttributeSet::end(unsigned Idx) const { + if (!pImpl) + return ArrayRef<Attribute>().end(); + return pImpl->end(Idx); +} + +//===----------------------------------------------------------------------===// +// AttributeSet Introspection Methods +//===----------------------------------------------------------------------===// + +/// \brief Return the number of slots used in this attribute list. This is the +/// number of arguments that have an attribute set on them (including the +/// function itself). +unsigned AttributeSet::getNumSlots() const { + return pImpl ? pImpl->getNumAttributes() : 0; +} + +uint64_t AttributeSet::getSlotIndex(unsigned Slot) const { + assert(pImpl && Slot < pImpl->getNumAttributes() && + "Slot # out of range!"); + return pImpl->getSlotIndex(Slot); +} + +AttributeSet AttributeSet::getSlotAttributes(unsigned Slot) const { + assert(pImpl && Slot < pImpl->getNumAttributes() && + "Slot # out of range!"); + return pImpl->getSlotAttributes(Slot); +} + +uint64_t AttributeSet::Raw(unsigned Index) const { + // FIXME: Remove this. + return pImpl ? 
pImpl->Raw(Index) : 0; +} + +void AttributeSet::dump() const { + dbgs() << "PAL[\n"; + + for (unsigned i = 0, e = getNumSlots(); i < e; ++i) { + uint64_t Index = getSlotIndex(i); + dbgs() << " { "; + if (Index == ~0U) + dbgs() << "~0U"; + else + dbgs() << Index; + dbgs() << " => " << getAsString(Index) << " }\n"; + } + + dbgs() << "]\n"; +} + +//===----------------------------------------------------------------------===// +// AttrBuilder Method Implementations +//===----------------------------------------------------------------------===// + +AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Idx) + : Attrs(0), Alignment(0), StackAlignment(0) { + AttributeSetImpl *pImpl = AS.pImpl; + if (!pImpl) return; + + for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) { + if (pImpl->getSlotIndex(I) != Idx) continue; + + for (AttributeSetImpl::const_iterator II = pImpl->begin(I), + IE = pImpl->end(I); II != IE; ++II) + addAttribute(*II); + + break; + } +} + +void AttrBuilder::clear() { + Attrs.reset(); + Alignment = StackAlignment = 0; +} + +AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) { + assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!"); + assert(Val != Attribute::Alignment && Val != Attribute::StackAlignment && + "Adding alignment attribute without adding alignment value!"); + Attrs[Val] = true; + return *this; +} + +AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) { + if (Attr.isStringAttribute()) { + addAttribute(Attr.getKindAsString(), Attr.getValueAsString()); + return *this; + } + + Attribute::AttrKind Kind = Attr.getKindAsEnum(); + Attrs[Kind] = true; + + if (Kind == Attribute::Alignment) + Alignment = Attr.getAlignment(); + else if (Kind == Attribute::StackAlignment) + StackAlignment = Attr.getStackAlignment(); + return *this; +} + +AttrBuilder &AttrBuilder::addAttribute(StringRef A, StringRef V) { + TargetDepAttrs[A] = V; + return *this; +} + +AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) { + assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!"); + Attrs[Val] = false; + + if (Val == Attribute::Alignment) + Alignment = 0; + else if (Val == Attribute::StackAlignment) + StackAlignment = 0; + + return *this; +} + +AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) { + unsigned Idx = ~0U; + for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I) + if (A.getSlotIndex(I) == Index) { + Idx = I; + break; + } + + assert(Idx != ~0U && "Couldn't find index in AttributeSet!"); + + for (AttributeSet::iterator I = A.begin(Idx), E = A.end(Idx); I != E; ++I) { + Attribute Attr = *I; + if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) { + Attribute::AttrKind Kind = I->getKindAsEnum(); + Attrs[Kind] = false; + + if (Kind == Attribute::Alignment) + Alignment = 0; + else if (Kind == Attribute::StackAlignment) + StackAlignment = 0; + } else { + assert(Attr.isStringAttribute() && "Invalid attribute type!"); + std::map<std::string, std::string>::iterator + Iter = TargetDepAttrs.find(Attr.getKindAsString()); + if (Iter != TargetDepAttrs.end()) + TargetDepAttrs.erase(Iter); + } + } + + return *this; +} + +AttrBuilder &AttrBuilder::removeAttribute(StringRef A) { + std::map<std::string, std::string>::iterator I = TargetDepAttrs.find(A); + if (I != TargetDepAttrs.end()) + TargetDepAttrs.erase(I); + return *this; +} + +AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) { + if (Align == 0) return *this; + + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); 
+ assert(Align <= 0x40000000 && "Alignment too large."); + + Attrs[Attribute::Alignment] = true; + Alignment = Align; + return *this; +} + +AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align) { + // Default alignment, allow the target to define how to align it. + if (Align == 0) return *this; + + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x100 && "Alignment too large."); + + Attrs[Attribute::StackAlignment] = true; + StackAlignment = Align; + return *this; +} + +AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) { + // FIXME: What if both have alignments, but they don't match?! + if (!Alignment) + Alignment = B.Alignment; + + if (!StackAlignment) + StackAlignment = B.StackAlignment; + + Attrs |= B.Attrs; + + for (td_const_iterator I = B.TargetDepAttrs.begin(), + E = B.TargetDepAttrs.end(); I != E; ++I) + TargetDepAttrs[I->first] = I->second; + + return *this; +} + +bool AttrBuilder::contains(StringRef A) const { + return TargetDepAttrs.find(A) != TargetDepAttrs.end(); +} + +bool AttrBuilder::hasAttributes() const { + return !Attrs.none() || !TargetDepAttrs.empty(); +} + +bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const { + unsigned Idx = ~0U; + for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I) + if (A.getSlotIndex(I) == Index) { + Idx = I; + break; + } + + assert(Idx != ~0U && "Couldn't find the index!"); + + for (AttributeSet::iterator I = A.begin(Idx), E = A.end(Idx); + I != E; ++I) { + Attribute Attr = *I; + if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) { + if (Attrs[I->getKindAsEnum()]) + return true; + } else { + assert(Attr.isStringAttribute() && "Invalid attribute kind!"); + return TargetDepAttrs.find(Attr.getKindAsString())!=TargetDepAttrs.end(); + } + } + + return false; +} + +bool AttrBuilder::hasAlignmentAttr() const { + return Alignment != 0; +} + +bool AttrBuilder::operator==(const AttrBuilder &B) { + if (Attrs != B.Attrs) + return false; + + for (td_const_iterator I = TargetDepAttrs.begin(), + E = TargetDepAttrs.end(); I != E; ++I) + if (B.TargetDepAttrs.find(I->first) == B.TargetDepAttrs.end()) + return false; + + return Alignment == B.Alignment && StackAlignment == B.StackAlignment; +} + +void AttrBuilder::removeFunctionOnlyAttrs() { + removeAttribute(Attribute::NoReturn) + .removeAttribute(Attribute::NoUnwind) + .removeAttribute(Attribute::ReadNone) + .removeAttribute(Attribute::ReadOnly) + .removeAttribute(Attribute::NoInline) + .removeAttribute(Attribute::AlwaysInline) + .removeAttribute(Attribute::OptimizeForSize) + .removeAttribute(Attribute::StackProtect) + .removeAttribute(Attribute::StackProtectReq) + .removeAttribute(Attribute::StackProtectStrong) + .removeAttribute(Attribute::NoRedZone) + .removeAttribute(Attribute::NoImplicitFloat) + .removeAttribute(Attribute::Naked) + .removeAttribute(Attribute::InlineHint) + .removeAttribute(Attribute::StackAlignment) + .removeAttribute(Attribute::UWTable) + .removeAttribute(Attribute::NonLazyBind) + .removeAttribute(Attribute::ReturnsTwice) + .removeAttribute(Attribute::SanitizeAddress) + .removeAttribute(Attribute::SanitizeThread) + .removeAttribute(Attribute::SanitizeMemory) + .removeAttribute(Attribute::MinSize) + .removeAttribute(Attribute::NoDuplicate) + .removeAttribute(Attribute::NoBuiltin); +} + +AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) { + // FIXME: Remove this in 4.0. 
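For example, a raw value of (1 << 5) | (4 << 16) is decoded by the loop below as nounwind plus align 8:

  (1 << 5)  matches getAttrMask(NoUnwind)   ->  Attrs[NoUnwind] = true
  (4 << 16) matches getAttrMask(Alignment)  ->  Alignment = 1ULL << (4 - 1) == 8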
+ if (!Val) return *this; + + for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds; + I = Attribute::AttrKind(I + 1)) { + if (uint64_t A = (Val & AttributeImpl::getAttrMask(I))) { + Attrs[I] = true; + + if (I == Attribute::Alignment) + Alignment = 1ULL << ((A >> 16) - 1); + else if (I == Attribute::StackAlignment) + StackAlignment = 1ULL << ((A >> 26)-1); + } + } + + return *this; +} + +//===----------------------------------------------------------------------===// +// AttributeFuncs Function Definitions +//===----------------------------------------------------------------------===// + +/// \brief Which attributes cannot be applied to a type. +AttributeSet AttributeFuncs::typeIncompatible(Type *Ty, uint64_t Index) { + AttrBuilder Incompatible; + + if (!Ty->isIntegerTy()) + // Attributes that only apply to integers. + Incompatible.addAttribute(Attribute::SExt) + .addAttribute(Attribute::ZExt); + + if (!Ty->isPointerTy()) + // Attributes that only apply to pointers. + Incompatible.addAttribute(Attribute::ByVal) + .addAttribute(Attribute::Nest) + .addAttribute(Attribute::NoAlias) + .addAttribute(Attribute::NoCapture) + .addAttribute(Attribute::StructRet); + + return AttributeSet::get(Ty->getContext(), Index, Incompatible); +} diff --git a/lib/IR/AutoUpgrade.cpp b/lib/IR/AutoUpgrade.cpp new file mode 100644 index 0000000000..f2375374e3 --- /dev/null +++ b/lib/IR/AutoUpgrade.cpp @@ -0,0 +1,393 @@ +//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the auto-upgrade helper functions +// +//===----------------------------------------------------------------------===// + +#include "llvm/AutoUpgrade.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/ErrorHandling.h" +#include <cstring> +using namespace llvm; + +// Upgrade the declarations of the SSE4.1 functions whose arguments have +// changed their type from v4f32 to v2i64. +static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID, + Function *&NewFn) { + // Check whether this is an old version of the function, which received + // v4f32 arguments. + Type *Arg0Type = F->getFunctionType()->getParamType(0); + if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4)) + return false; + + // Yes, it's old, replace it with new version. + F->setName(F->getName() + ".old"); + NewFn = Intrinsic::getDeclaration(F->getParent(), IID); + return true; +} + +static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { + assert(F && "Illegal to upgrade a non-existent Function."); + + // Quickly eliminate it, if it's not a candidate. + StringRef Name = F->getName(); + if (Name.size() <= 8 || !Name.startswith("llvm.")) + return false; + Name = Name.substr(5); // Strip off "llvm."
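+  // Illustrative example (hypothetical module): a declaration named
+  // "llvm.ctlz.i32" that still takes a single operand reaches this point as
+  // "ctlz.i32" and is redirected below to the current two-operand llvm.ctlz.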
+ + switch (Name[0]) { + default: break; + case 'a': { + if (Name.startswith("arm.neon.vclz")) { + Type* args[2] = { + F->arg_begin()->getType(), + Type::getInt1Ty(F->getContext()) + }; + // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to + // the end of the name. Change name from llvm.arm.neon.vclz.* to + // llvm.ctlz.* + FunctionType* fType = FunctionType::get(F->getReturnType(), args, false); + NewFn = Function::Create(fType, F->getLinkage(), + "llvm.ctlz." + Name.substr(14), F->getParent()); + return true; + } + if (Name.startswith("arm.neon.vcnt")) { + NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, + F->arg_begin()->getType()); + return true; + } + break; + } + case 'c': { + if (Name.startswith("ctlz.") && F->arg_size() == 1) { + F->setName(Name + ".old"); + NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, + F->arg_begin()->getType()); + return true; + } + if (Name.startswith("cttz.") && F->arg_size() == 1) { + F->setName(Name + ".old"); + NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz, + F->arg_begin()->getType()); + return true; + } + break; + } + case 'x': { + if (Name.startswith("x86.sse2.pcmpeq.") || + Name.startswith("x86.sse2.pcmpgt.") || + Name.startswith("x86.avx2.pcmpeq.") || + Name.startswith("x86.avx2.pcmpgt.") || + Name.startswith("x86.avx.vpermil.") || + Name == "x86.avx.movnt.dq.256" || + Name == "x86.avx.movnt.pd.256" || + Name == "x86.avx.movnt.ps.256" || + (Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) { + NewFn = 0; + return true; + } + // SSE4.1 ptest functions may have an old signature. + if (Name.startswith("x86.sse41.ptest")) { + if (Name == "x86.sse41.ptestc") + return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn); + if (Name == "x86.sse41.ptestz") + return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn); + if (Name == "x86.sse41.ptestnzc") + return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn); + } + // frcz.ss/sd may need to have an argument dropped + if (Name.startswith("x86.xop.vfrcz.ss") && F->arg_size() == 2) { + F->setName(Name + ".old"); + NewFn = Intrinsic::getDeclaration(F->getParent(), + Intrinsic::x86_xop_vfrcz_ss); + return true; + } + if (Name.startswith("x86.xop.vfrcz.sd") && F->arg_size() == 2) { + F->setName(Name + ".old"); + NewFn = Intrinsic::getDeclaration(F->getParent(), + Intrinsic::x86_xop_vfrcz_sd); + return true; + } + // Fix the FMA4 intrinsics to remove the 4 + if (Name.startswith("x86.fma4.")) { + F->setName("llvm.x86.fma" + Name.substr(8)); + NewFn = F; + return true; + } + break; + } + } + + // This may not belong here. This function is effectively being overloaded + // to both detect an intrinsic which needs upgrading, and to provide the + // upgraded form of the intrinsic. We should perhaps have two separate + // functions for this. + return false; +} + +bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) { + NewFn = 0; + bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn); + + // Upgrade intrinsic attributes. This does not change the function. + if (NewFn) + F = NewFn; + if (unsigned id = F->getIntrinsicID()) + F->setAttributes(Intrinsic::getAttributes(F->getContext(), + (Intrinsic::ID)id)); + return Upgraded; +} + +bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) { + // Nothing to do yet. + return false; +} + +// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the +// upgraded intrinsic. 
All argument and return casting must be provided in +// order to seamlessly integrate with existing context. +void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { + Function *F = CI->getCalledFunction(); + LLVMContext &C = CI->getContext(); + IRBuilder<> Builder(C); + Builder.SetInsertPoint(CI->getParent(), CI); + + assert(F && "Intrinsic call is not direct?"); + + if (!NewFn) { + // Get the Function's name. + StringRef Name = F->getName(); + + Value *Rep; + // Upgrade packed integer vector compares intrinsics to compare instructions + if (Name.startswith("llvm.x86.sse2.pcmpeq.") || + Name.startswith("llvm.x86.avx2.pcmpeq.")) { + Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1), + "pcmpeq"); + // need to sign extend since icmp returns vector of i1 + Rep = Builder.CreateSExt(Rep, CI->getType(), ""); + } else if (Name.startswith("llvm.x86.sse2.pcmpgt.") || + Name.startswith("llvm.x86.avx2.pcmpgt.")) { + Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1), + "pcmpgt"); + // need to sign extend since icmp returns vector of i1 + Rep = Builder.CreateSExt(Rep, CI->getType(), ""); + } else if (Name == "llvm.x86.avx.movnt.dq.256" || + Name == "llvm.x86.avx.movnt.ps.256" || + Name == "llvm.x86.avx.movnt.pd.256") { + IRBuilder<> Builder(C); + Builder.SetInsertPoint(CI->getParent(), CI); + + Module *M = F->getParent(); + SmallVector<Value *, 1> Elts; + Elts.push_back(ConstantInt::get(Type::getInt32Ty(C), 1)); + MDNode *Node = MDNode::get(C, Elts); + + Value *Arg0 = CI->getArgOperand(0); + Value *Arg1 = CI->getArgOperand(1); + + // Convert the type of the pointer to a pointer to the stored type. + Value *BC = Builder.CreateBitCast(Arg0, + PointerType::getUnqual(Arg1->getType()), + "cast"); + StoreInst *SI = Builder.CreateStore(Arg1, BC); + SI->setMetadata(M->getMDKindID("nontemporal"), Node); + SI->setAlignment(16); + + // Remove intrinsic. 
+ CI->eraseFromParent(); + return; + } else if (Name.startswith("llvm.x86.xop.vpcom")) { + Intrinsic::ID intID; + if (Name.endswith("ub")) + intID = Intrinsic::x86_xop_vpcomub; + else if (Name.endswith("uw")) + intID = Intrinsic::x86_xop_vpcomuw; + else if (Name.endswith("ud")) + intID = Intrinsic::x86_xop_vpcomud; + else if (Name.endswith("uq")) + intID = Intrinsic::x86_xop_vpcomuq; + else if (Name.endswith("b")) + intID = Intrinsic::x86_xop_vpcomb; + else if (Name.endswith("w")) + intID = Intrinsic::x86_xop_vpcomw; + else if (Name.endswith("d")) + intID = Intrinsic::x86_xop_vpcomd; + else if (Name.endswith("q")) + intID = Intrinsic::x86_xop_vpcomq; + else + llvm_unreachable("Unknown suffix"); + + Name = Name.substr(18); // strip off "llvm.x86.xop.vpcom" + unsigned Imm; + if (Name.startswith("lt")) + Imm = 0; + else if (Name.startswith("le")) + Imm = 1; + else if (Name.startswith("gt")) + Imm = 2; + else if (Name.startswith("ge")) + Imm = 3; + else if (Name.startswith("eq")) + Imm = 4; + else if (Name.startswith("ne")) + Imm = 5; + else if (Name.startswith("true")) + Imm = 6; + else if (Name.startswith("false")) + Imm = 7; + else + llvm_unreachable("Unknown condition"); + + Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID); + Rep = Builder.CreateCall3(VPCOM, CI->getArgOperand(0), + CI->getArgOperand(1), Builder.getInt8(Imm)); + } else { + bool PD128 = false, PD256 = false, PS128 = false, PS256 = false; + if (Name == "llvm.x86.avx.vpermil.pd.256") + PD256 = true; + else if (Name == "llvm.x86.avx.vpermil.pd") + PD128 = true; + else if (Name == "llvm.x86.avx.vpermil.ps.256") + PS256 = true; + else if (Name == "llvm.x86.avx.vpermil.ps") + PS128 = true; + + if (PD256 || PD128 || PS256 || PS128) { + Value *Op0 = CI->getArgOperand(0); + unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); + SmallVector<Constant*, 8> Idxs; + + if (PD128) + for (unsigned i = 0; i != 2; ++i) + Idxs.push_back(Builder.getInt32((Imm >> i) & 0x1)); + else if (PD256) + for (unsigned l = 0; l != 4; l+=2) + for (unsigned i = 0; i != 2; ++i) + Idxs.push_back(Builder.getInt32(((Imm >> (l+i)) & 0x1) + l)); + else if (PS128) + for (unsigned i = 0; i != 4; ++i) + Idxs.push_back(Builder.getInt32((Imm >> (2 * i)) & 0x3)); + else if (PS256) + for (unsigned l = 0; l != 8; l+=4) + for (unsigned i = 0; i != 4; ++i) + Idxs.push_back(Builder.getInt32(((Imm >> (2 * i)) & 0x3) + l)); + else + llvm_unreachable("Unexpected function"); + + Rep = Builder.CreateShuffleVector(Op0, Op0, ConstantVector::get(Idxs)); + } else { + llvm_unreachable("Unknown function for CallInst upgrade."); + } + } + + CI->replaceAllUsesWith(Rep); + CI->eraseFromParent(); + return; + } + + std::string Name = CI->getName().str(); + CI->setName(Name + ".old"); + + switch (NewFn->getIntrinsicID()) { + default: + llvm_unreachable("Unknown function for CallInst upgrade."); + + case Intrinsic::ctlz: + case Intrinsic::cttz: + assert(CI->getNumArgOperands() == 1 && + "Mismatch between function args and call args"); + CI->replaceAllUsesWith(Builder.CreateCall2(NewFn, CI->getArgOperand(0), + Builder.getFalse(), Name)); + CI->eraseFromParent(); + return; + + case Intrinsic::arm_neon_vclz: { + // Change name from llvm.arm.neon.vclz.* to llvm.ctlz.* + CI->replaceAllUsesWith(Builder.CreateCall2(NewFn, CI->getArgOperand(0), + Builder.getFalse(), + "llvm.ctlz." 
+ Name.substr(14))); + CI->eraseFromParent(); + return; + } + case Intrinsic::ctpop: { + CI->replaceAllUsesWith(Builder.CreateCall(NewFn, CI->getArgOperand(0))); + CI->eraseFromParent(); + return; + } + + case Intrinsic::x86_xop_vfrcz_ss: + case Intrinsic::x86_xop_vfrcz_sd: + CI->replaceAllUsesWith(Builder.CreateCall(NewFn, CI->getArgOperand(1), + Name)); + CI->eraseFromParent(); + return; + + case Intrinsic::x86_sse41_ptestc: + case Intrinsic::x86_sse41_ptestz: + case Intrinsic::x86_sse41_ptestnzc: { + // The arguments for these intrinsics used to be v4f32, and changed + // to v2i64. This is purely a nop, since those are bitwise intrinsics. + // So, the only thing required is a bitcast for both arguments. + // First, check the arguments have the old type. + Value *Arg0 = CI->getArgOperand(0); + if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) + return; + + // Old intrinsic, add bitcasts + Value *Arg1 = CI->getArgOperand(1); + + Value *BC0 = + Builder.CreateBitCast(Arg0, + VectorType::get(Type::getInt64Ty(C), 2), + "cast"); + Value *BC1 = + Builder.CreateBitCast(Arg1, + VectorType::get(Type::getInt64Ty(C), 2), + "cast"); + + CallInst* NewCall = Builder.CreateCall2(NewFn, BC0, BC1, Name); + CI->replaceAllUsesWith(NewCall); + CI->eraseFromParent(); + return; + } + } +} + +// This tests each Function to determine if it needs upgrading. When we find +// one we are interested in, we then upgrade all calls to reflect the new +// function. +void llvm::UpgradeCallsToIntrinsic(Function* F) { + assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); + + // Upgrade the function and check if it is a totaly new function. + Function *NewFn; + if (UpgradeIntrinsicFunction(F, NewFn)) { + if (NewFn != F) { + // Replace all uses to the old function with the new one if necessary. + for (Value::use_iterator UI = F->use_begin(), UE = F->use_end(); + UI != UE; ) { + if (CallInst *CI = dyn_cast<CallInst>(*UI++)) + UpgradeIntrinsicCall(CI, NewFn); + } + // Remove old function, no longer used, from the module. + F->eraseFromParent(); + } + } +} + diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp new file mode 100644 index 0000000000..41e58ec5da --- /dev/null +++ b/lib/IR/BasicBlock.cpp @@ -0,0 +1,371 @@ +//===-- BasicBlock.cpp - Implement BasicBlock related methods -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the BasicBlock class for the IR library. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/BasicBlock.h" +#include "SymbolTableListTraitsImpl.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/LeakDetector.h" +#include <algorithm> +using namespace llvm; + +ValueSymbolTable *BasicBlock::getValueSymbolTable() { + if (Function *F = getParent()) + return &F->getValueSymbolTable(); + return 0; +} + +LLVMContext &BasicBlock::getContext() const { + return getType()->getContext(); +} + +// Explicit instantiation of SymbolTableListTraits since some of the methods +// are not in the public header file... 
+template class llvm::SymbolTableListTraits<Instruction, BasicBlock>; + + +BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent, + BasicBlock *InsertBefore) + : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(0) { + + // Make sure that we get added to a function + LeakDetector::addGarbageObject(this); + + if (InsertBefore) { + assert(NewParent && + "Cannot insert block before another block with no function!"); + NewParent->getBasicBlockList().insert(InsertBefore, this); + } else if (NewParent) { + NewParent->getBasicBlockList().push_back(this); + } + + setName(Name); +} + + +BasicBlock::~BasicBlock() { + // If the address of the block is taken and it is being deleted (e.g. because + // it is dead), this means that there is either a dangling constant expr + // hanging off the block, or an undefined use of the block (source code + // expecting the address of a label to keep the block alive even though there + // is no indirect branch). Handle these cases by zapping the BlockAddress + // nodes. There are no other possible uses at this point. + if (hasAddressTaken()) { + assert(!use_empty() && "There should be at least one blockaddress!"); + Constant *Replacement = + ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1); + while (!use_empty()) { + BlockAddress *BA = cast<BlockAddress>(use_back()); + BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement, + BA->getType())); + BA->destroyConstant(); + } + } + + assert(getParent() == 0 && "BasicBlock still linked into the program!"); + dropAllReferences(); + InstList.clear(); +} + +void BasicBlock::setParent(Function *parent) { + if (getParent()) + LeakDetector::addGarbageObject(this); + + // Set Parent=parent, updating instruction symtab entries as appropriate. + InstList.setSymTabObject(&Parent, parent); + + if (getParent()) + LeakDetector::removeGarbageObject(this); +} + +void BasicBlock::removeFromParent() { + getParent()->getBasicBlockList().remove(this); +} + +void BasicBlock::eraseFromParent() { + getParent()->getBasicBlockList().erase(this); +} + +/// moveBefore - Unlink this basic block from its current function and +/// insert it into the function that MovePos lives in, right before MovePos. +void BasicBlock::moveBefore(BasicBlock *MovePos) { + MovePos->getParent()->getBasicBlockList().splice(MovePos, + getParent()->getBasicBlockList(), this); +} + +/// moveAfter - Unlink this basic block from its current function and +/// insert it into the function that MovePos lives in, right after MovePos. +void BasicBlock::moveAfter(BasicBlock *MovePos) { + Function::iterator I = MovePos; + MovePos->getParent()->getBasicBlockList().splice(++I, + getParent()->getBasicBlockList(), this); +} + + +TerminatorInst *BasicBlock::getTerminator() { + if (InstList.empty()) return 0; + return dyn_cast<TerminatorInst>(&InstList.back()); +} + +const TerminatorInst *BasicBlock::getTerminator() const { + if (InstList.empty()) return 0; + return dyn_cast<TerminatorInst>(&InstList.back()); +} + +Instruction* BasicBlock::getFirstNonPHI() { + BasicBlock::iterator i = begin(); + // All valid basic blocks should have a terminator, + // which is not a PHINode. If we have an invalid basic + // block we'll get an assertion failure when dereferencing + // a past-the-end iterator. + while (isa<PHINode>(i)) ++i; + return &*i; +} + +Instruction* BasicBlock::getFirstNonPHIOrDbg() { + BasicBlock::iterator i = begin(); + // All valid basic blocks should have a terminator, + // which is not a PHINode. 
If we have an invalid basic + // block we'll get an assertion failure when dereferencing + // a past-the-end iterator. + while (isa<PHINode>(i) || isa<DbgInfoIntrinsic>(i)) ++i; + return &*i; +} + +Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() { + // All valid basic blocks should have a terminator, + // which is not a PHINode. If we have an invalid basic + // block we'll get an assertion failure when dereferencing + // a past-the-end iterator. + BasicBlock::iterator i = begin(); + for (;; ++i) { + if (isa<PHINode>(i) || isa<DbgInfoIntrinsic>(i)) + continue; + + const IntrinsicInst *II = dyn_cast<IntrinsicInst>(i); + if (!II) + break; + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + break; + } + return &*i; +} + +BasicBlock::iterator BasicBlock::getFirstInsertionPt() { + iterator InsertPt = getFirstNonPHI(); + if (isa<LandingPadInst>(InsertPt)) ++InsertPt; + return InsertPt; +} + +void BasicBlock::dropAllReferences() { + for(iterator I = begin(), E = end(); I != E; ++I) + I->dropAllReferences(); +} + +/// getSinglePredecessor - If this basic block has a single predecessor block, +/// return the block, otherwise return a null pointer. +BasicBlock *BasicBlock::getSinglePredecessor() { + pred_iterator PI = pred_begin(this), E = pred_end(this); + if (PI == E) return 0; // No preds. + BasicBlock *ThePred = *PI; + ++PI; + return (PI == E) ? ThePred : 0 /*multiple preds*/; +} + +/// getUniquePredecessor - If this basic block has a unique predecessor block, +/// return the block, otherwise return a null pointer. +/// Note that unique predecessor doesn't mean single edge, there can be +/// multiple edges from the unique predecessor to this block (for example +/// a switch statement with multiple cases having the same destination). +BasicBlock *BasicBlock::getUniquePredecessor() { + pred_iterator PI = pred_begin(this), E = pred_end(this); + if (PI == E) return 0; // No preds. + BasicBlock *PredBB = *PI; + ++PI; + for (;PI != E; ++PI) { + if (*PI != PredBB) + return 0; + // The same predecessor appears multiple times in the predecessor list. + // This is OK. + } + return PredBB; +} + +/// removePredecessor - This method is used to notify a BasicBlock that the +/// specified Predecessor of the block is no longer able to reach it. This is +/// actually not used to update the Predecessor list, but is actually used to +/// update the PHI nodes that reside in the block. Note that this should be +/// called while the predecessor still refers to this block. +/// +void BasicBlock::removePredecessor(BasicBlock *Pred, + bool DontDeleteUselessPHIs) { + assert((hasNUsesOrMore(16)||// Reduce cost of this assertion for complex CFGs. + find(pred_begin(this), pred_end(this), Pred) != pred_end(this)) && + "removePredecessor: BB is not a predecessor!"); + + if (InstList.empty()) return; + PHINode *APN = dyn_cast<PHINode>(&front()); + if (!APN) return; // Quick exit. + + // If there are exactly two predecessors, then we want to nuke the PHI nodes + // altogether. However, we cannot do this, if this in this case: + // + // Loop: + // %x = phi [X, Loop] + // %x2 = add %x, 1 ;; This would become %x2 = add %x2, 1 + // br Loop ;; %x2 does not dominate all uses + // + // This is because the PHI node input is actually taken from the predecessor + // basic block. The only case this can happen is with a self loop, so we + // check for this case explicitly now. 
+ // + unsigned max_idx = APN->getNumIncomingValues(); + assert(max_idx != 0 && "PHI Node in block with 0 predecessors!?!?!"); + if (max_idx == 2) { + BasicBlock *Other = APN->getIncomingBlock(APN->getIncomingBlock(0) == Pred); + + // Disable PHI elimination! + if (this == Other) max_idx = 3; + } + + // <= Two predecessors BEFORE I remove one? + if (max_idx <= 2 && !DontDeleteUselessPHIs) { + // Yup, loop through and nuke the PHI nodes + while (PHINode *PN = dyn_cast<PHINode>(&front())) { + // Remove the predecessor first. + PN->removeIncomingValue(Pred, !DontDeleteUselessPHIs); + + // If the PHI _HAD_ two uses, replace PHI node with its now *single* value + if (max_idx == 2) { + if (PN->getIncomingValue(0) != PN) + PN->replaceAllUsesWith(PN->getIncomingValue(0)); + else + // We are left with an infinite loop with no entries: kill the PHI. + PN->replaceAllUsesWith(UndefValue::get(PN->getType())); + getInstList().pop_front(); // Remove the PHI node + } + + // If the PHI node already only had one entry, it got deleted by + // removeIncomingValue. + } + } else { + // Okay, now we know that we need to remove predecessor #pred_idx from all + // PHI nodes. Iterate over each PHI node fixing them up + PHINode *PN; + for (iterator II = begin(); (PN = dyn_cast<PHINode>(II)); ) { + ++II; + PN->removeIncomingValue(Pred, false); + // If all incoming values to the Phi are the same, we can replace the Phi + // with that value. + Value* PNV = 0; + if (!DontDeleteUselessPHIs && (PNV = PN->hasConstantValue())) + if (PNV != PN) { + PN->replaceAllUsesWith(PNV); + PN->eraseFromParent(); + } + } + } +} + + +/// splitBasicBlock - This splits a basic block into two at the specified +/// instruction. Note that all instructions BEFORE the specified iterator stay +/// as part of the original basic block, an unconditional branch is added to +/// the new BB, and the rest of the instructions in the BB are moved to the new +/// BB, including the old terminator. This invalidates the iterator. +/// +/// Note that this only works on well formed basic blocks (must have a +/// terminator), and 'I' must not be the end of instruction list (which would +/// cause a degenerate basic block to be formed, having a terminator inside of +/// the basic block). +/// +BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) { + assert(getTerminator() && "Can't use splitBasicBlock on degenerate BB!"); + assert(I != InstList.end() && + "Trying to get me to create degenerate basic block!"); + + BasicBlock *InsertBefore = llvm::next(Function::iterator(this)) + .getNodePtrUnchecked(); + BasicBlock *New = BasicBlock::Create(getContext(), BBName, + getParent(), InsertBefore); + + // Move all of the specified instructions from the original basic block into + // the new basic block. + New->getInstList().splice(New->end(), this->getInstList(), I, end()); + + // Add a branch instruction to the newly formed basic block. + BranchInst::Create(New, this); + + // Now we must loop through all of the successors of the New block (which + // _were_ the successors of the 'this' block), and update any PHI nodes in + // successors. If there were PHI nodes in the successors, then they need to + // know that incoming branches will be from New, not from Old. + // + for (succ_iterator I = succ_begin(New), E = succ_end(New); I != E; ++I) { + // Loop over any phi nodes in the basic block, updating the BB field of + // incoming values... 
+ BasicBlock *Successor = *I; + PHINode *PN; + for (BasicBlock::iterator II = Successor->begin(); + (PN = dyn_cast<PHINode>(II)); ++II) { + int IDX = PN->getBasicBlockIndex(this); + while (IDX != -1) { + PN->setIncomingBlock((unsigned)IDX, New); + IDX = PN->getBasicBlockIndex(this); + } + } + } + return New; +} + +void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) { + TerminatorInst *TI = getTerminator(); + if (!TI) + // Cope with being called on a BasicBlock that doesn't have a terminator + // yet. Clang's CodeGenFunction::EmitReturnBlock() likes to do this. + return; + for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { + BasicBlock *Succ = TI->getSuccessor(i); + // N.B. Succ might not be a complete BasicBlock, so don't assume + // that it ends with a non-phi instruction. + for (iterator II = Succ->begin(), IE = Succ->end(); II != IE; ++II) { + PHINode *PN = dyn_cast<PHINode>(II); + if (!PN) + break; + int i; + while ((i = PN->getBasicBlockIndex(this)) >= 0) + PN->setIncomingBlock(i, New); + } + } +} + +/// isLandingPad - Return true if this basic block is a landing pad. I.e., it's +/// the destination of the 'unwind' edge of an invoke instruction. +bool BasicBlock::isLandingPad() const { + return isa<LandingPadInst>(getFirstNonPHI()); +} + +/// getLandingPadInst() - Return the landingpad instruction associated with +/// the landing pad. +LandingPadInst *BasicBlock::getLandingPadInst() { + return dyn_cast<LandingPadInst>(getFirstNonPHI()); +} +const LandingPadInst *BasicBlock::getLandingPadInst() const { + return dyn_cast<LandingPadInst>(getFirstNonPHI()); +} diff --git a/lib/IR/CMakeLists.txt b/lib/IR/CMakeLists.txt new file mode 100644 index 0000000000..c2a4ee3aae --- /dev/null +++ b/lib/IR/CMakeLists.txt @@ -0,0 +1,51 @@ +add_llvm_library(LLVMCore + AsmWriter.cpp + Attributes.cpp + AutoUpgrade.cpp + BasicBlock.cpp + ConstantFold.cpp + Constants.cpp + Core.cpp + DataLayout.cpp + DebugInfo.cpp + DebugLoc.cpp + DIBuilder.cpp + Dominators.cpp + Function.cpp + GCOV.cpp + GVMaterializer.cpp + Globals.cpp + IRBuilder.cpp + InlineAsm.cpp + Instruction.cpp + Instructions.cpp + IntrinsicInst.cpp + LLVMContext.cpp + LLVMContextImpl.cpp + LeakDetector.cpp + Metadata.cpp + Module.cpp + Pass.cpp + PassManager.cpp + PassRegistry.cpp + PrintModulePass.cpp + Type.cpp + TypeFinder.cpp + Use.cpp + User.cpp + Value.cpp + ValueSymbolTable.cpp + ValueTypes.cpp + Verifier.cpp + ) + +# Workaround: It takes over 20 minutes to compile with msvc10. +# FIXME: Suppressing optimizations to core libraries would not be good thing. +if( MSVC_VERSION LESS 1700 ) +set_property( + SOURCE Function.cpp + PROPERTY COMPILE_FLAGS "/Og-" + ) +endif() + +add_dependencies(LLVMCore intrinsics_gen) diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp new file mode 100644 index 0000000000..bf93d4f956 --- /dev/null +++ b/lib/IR/ConstantFold.cpp @@ -0,0 +1,2074 @@ +//===- ConstantFold.cpp - LLVM constant folder ----------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements folding of constants for LLVM. This implements the +// (internal) ConstantFold.h interface, which is used by the +// ConstantExpr::get* methods to automatically fold constants when possible. 
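+//
+// For example, ConstantExpr::getAdd on two ConstantInts (say i32 2 and i32 3)
+// is folded here directly to the ConstantInt i32 5 instead of producing a
+// constant-expression node.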
+// +// The current constant folding implementation is split into two pieces: the +// pieces that don't need DataLayout, and the pieces that do. This is to avoid +// a dependence in IR on Target. +// +//===----------------------------------------------------------------------===// + +#include "ConstantFold.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MathExtras.h" +#include <limits> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// ConstantFold*Instruction Implementations +//===----------------------------------------------------------------------===// + +/// BitCastConstantVector - Convert the specified vector Constant node to the +/// specified vector type. At this point, we know that the elements of the +/// input vector constant are all simple integer or FP values. +static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) { + + if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy); + if (CV->isNullValue()) return Constant::getNullValue(DstTy); + + // If this cast changes element count then we can't handle it here: + // doing so requires endianness information. This should be handled by + // Analysis/ConstantFolding.cpp + unsigned NumElts = DstTy->getNumElements(); + if (NumElts != CV->getType()->getVectorNumElements()) + return 0; + + Type *DstEltTy = DstTy->getElementType(); + + SmallVector<Constant*, 16> Result; + Type *Ty = IntegerType::get(CV->getContext(), 32); + for (unsigned i = 0; i != NumElts; ++i) { + Constant *C = + ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i)); + C = ConstantExpr::getBitCast(C, DstEltTy); + Result.push_back(C); + } + + return ConstantVector::get(Result); +} + +/// This function determines which opcode to use to fold two constant cast +/// expressions together. It uses CastInst::isEliminableCastPair to determine +/// the opcode. Consequently it's just a wrapper around that function. +/// @brief Determine if it is valid to fold a cast of a cast +static unsigned +foldConstantCastPair( + unsigned opc, ///< opcode of the second cast constant expression + ConstantExpr *Op, ///< the first cast constant expression + Type *DstTy ///< destination type of the first cast +) { + assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!"); + assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type"); + assert(CastInst::isCast(opc) && "Invalid cast opcode"); + + // The types and opcodes for the two Cast constant expressions + Type *SrcTy = Op->getOperand(0)->getType(); + Type *MidTy = Op->getType(); + Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode()); + Instruction::CastOps secondOp = Instruction::CastOps(opc); + + // Assume that pointers are never more than 64 bits wide. + IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext()); + + // Let CastInst::isEliminableCastPair do the heavy lifting.
+ return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy, + FakeIntPtrTy, FakeIntPtrTy, + FakeIntPtrTy); +} + +static Constant *FoldBitCast(Constant *V, Type *DestTy) { + Type *SrcTy = V->getType(); + if (SrcTy == DestTy) + return V; // no-op cast + + // Check to see if we are casting a pointer to an aggregate to a pointer to + // the first element. If so, return the appropriate GEP instruction. + if (PointerType *PTy = dyn_cast<PointerType>(V->getType())) + if (PointerType *DPTy = dyn_cast<PointerType>(DestTy)) + if (PTy->getAddressSpace() == DPTy->getAddressSpace() + && DPTy->getElementType()->isSized()) { + SmallVector<Value*, 8> IdxList; + Value *Zero = + Constant::getNullValue(Type::getInt32Ty(DPTy->getContext())); + IdxList.push_back(Zero); + Type *ElTy = PTy->getElementType(); + while (ElTy != DPTy->getElementType()) { + if (StructType *STy = dyn_cast<StructType>(ElTy)) { + if (STy->getNumElements() == 0) break; + ElTy = STy->getElementType(0); + IdxList.push_back(Zero); + } else if (SequentialType *STy = + dyn_cast<SequentialType>(ElTy)) { + if (ElTy->isPointerTy()) break; // Can't index into pointers! + ElTy = STy->getElementType(); + IdxList.push_back(Zero); + } else { + break; + } + } + + if (ElTy == DPTy->getElementType()) + // This GEP is inbounds because all indices are zero. + return ConstantExpr::getInBoundsGetElementPtr(V, IdxList); + } + + // Handle casts from one vector constant to another. We know that the src + // and dest type have the same size (otherwise its an illegal cast). + if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) { + if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) { + assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() && + "Not cast between same sized vectors!"); + SrcTy = NULL; + // First, check for null. Undef is already handled. + if (isa<ConstantAggregateZero>(V)) + return Constant::getNullValue(DestTy); + + // Handle ConstantVector and ConstantAggregateVector. + return BitCastConstantVector(V, DestPTy); + } + + // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts + // This allows for other simplifications (although some of them + // can only be handled by Analysis/ConstantFolding.cpp). + if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) + return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy); + } + + // Finally, implement bitcast folding now. The code below doesn't handle + // bitcast right. + if (isa<ConstantPointerNull>(V)) // ptr->ptr cast. + return ConstantPointerNull::get(cast<PointerType>(DestTy)); + + // Handle integral constant input. + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + if (DestTy->isIntegerTy()) + // Integral -> Integral. This is a no-op because the bit widths must + // be the same. Consequently, we just fold to V. + return V; + + if (DestTy->isFloatingPointTy()) + return ConstantFP::get(DestTy->getContext(), + APFloat(DestTy->getFltSemantics(), + CI->getValue())); + + // Otherwise, can't fold this (vector?) + return 0; + } + + // Handle ConstantFP input: FP -> Integral. + if (ConstantFP *FP = dyn_cast<ConstantFP>(V)) + return ConstantInt::get(FP->getContext(), + FP->getValueAPF().bitcastToAPInt()); + + return 0; +} + + +/// ExtractConstantBytes - V is an integer constant which only has a subset of +/// its bytes used. The bytes used are indicated by ByteStart (which is the +/// first byte used, counting from the least significant byte) and ByteSize, +/// which is the number of bytes used. 
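+///
+/// For instance (illustrative values): with C = i32 0x11223344, ByteStart = 1
+/// and ByteSize = 2, bytes 1 and 2 counted from the least significant byte
+/// (0x33 and 0x22) are selected, yielding i16 0x2233.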
+/// +/// This function analyzes the specified constant to see if the specified byte +/// range can be returned as a simplified constant. If so, the constant is +/// returned, otherwise null is returned. +/// +static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart, + unsigned ByteSize) { + assert(C->getType()->isIntegerTy() && + (cast<IntegerType>(C->getType())->getBitWidth() & 7) == 0 && + "Non-byte sized integer input"); + unsigned CSize = cast<IntegerType>(C->getType())->getBitWidth()/8; + assert(ByteSize && "Must be accessing some piece"); + assert(ByteStart+ByteSize <= CSize && "Extracting invalid piece from input"); + assert(ByteSize != CSize && "Should not extract everything"); + + // Constant Integers are simple. + if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { + APInt V = CI->getValue(); + if (ByteStart) + V = V.lshr(ByteStart*8); + V = V.trunc(ByteSize*8); + return ConstantInt::get(CI->getContext(), V); + } + + // In the input is a constant expr, we might be able to recursively simplify. + // If not, we definitely can't do anything. + ConstantExpr *CE = dyn_cast<ConstantExpr>(C); + if (CE == 0) return 0; + + switch (CE->getOpcode()) { + default: return 0; + case Instruction::Or: { + Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize); + if (RHS == 0) + return 0; + + // X | -1 -> -1. + if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) + if (RHSC->isAllOnesValue()) + return RHSC; + + Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize); + if (LHS == 0) + return 0; + return ConstantExpr::getOr(LHS, RHS); + } + case Instruction::And: { + Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize); + if (RHS == 0) + return 0; + + // X & 0 -> 0. + if (RHS->isNullValue()) + return RHS; + + Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize); + if (LHS == 0) + return 0; + return ConstantExpr::getAnd(LHS, RHS); + } + case Instruction::LShr: { + ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1)); + if (Amt == 0) + return 0; + unsigned ShAmt = Amt->getZExtValue(); + // Cannot analyze non-byte shifts. + if ((ShAmt & 7) != 0) + return 0; + ShAmt >>= 3; + + // If the extract is known to be all zeros, return zero. + if (ByteStart >= CSize-ShAmt) + return Constant::getNullValue(IntegerType::get(CE->getContext(), + ByteSize*8)); + // If the extract is known to be fully in the input, extract it. + if (ByteStart+ByteSize+ShAmt <= CSize) + return ExtractConstantBytes(CE->getOperand(0), ByteStart+ShAmt, ByteSize); + + // TODO: Handle the 'partially zero' case. + return 0; + } + + case Instruction::Shl: { + ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1)); + if (Amt == 0) + return 0; + unsigned ShAmt = Amt->getZExtValue(); + // Cannot analyze non-byte shifts. + if ((ShAmt & 7) != 0) + return 0; + ShAmt >>= 3; + + // If the extract is known to be all zeros, return zero. + if (ByteStart+ByteSize <= ShAmt) + return Constant::getNullValue(IntegerType::get(CE->getContext(), + ByteSize*8)); + // If the extract is known to be fully in the input, extract it. + if (ByteStart >= ShAmt) + return ExtractConstantBytes(CE->getOperand(0), ByteStart-ShAmt, ByteSize); + + // TODO: Handle the 'partially zero' case. + return 0; + } + + case Instruction::ZExt: { + unsigned SrcBitSize = + cast<IntegerType>(CE->getOperand(0)->getType())->getBitWidth(); + + // If extracting something that is completely zero, return 0. 
+ if (ByteStart*8 >= SrcBitSize) + return Constant::getNullValue(IntegerType::get(CE->getContext(), + ByteSize*8)); + + // If exactly extracting the input, return it. + if (ByteStart == 0 && ByteSize*8 == SrcBitSize) + return CE->getOperand(0); + + // If extracting something completely in the input, if if the input is a + // multiple of 8 bits, recurse. + if ((SrcBitSize&7) == 0 && (ByteStart+ByteSize)*8 <= SrcBitSize) + return ExtractConstantBytes(CE->getOperand(0), ByteStart, ByteSize); + + // Otherwise, if extracting a subset of the input, which is not multiple of + // 8 bits, do a shift and trunc to get the bits. + if ((ByteStart+ByteSize)*8 < SrcBitSize) { + assert((SrcBitSize&7) && "Shouldn't get byte sized case here"); + Constant *Res = CE->getOperand(0); + if (ByteStart) + Res = ConstantExpr::getLShr(Res, + ConstantInt::get(Res->getType(), ByteStart*8)); + return ConstantExpr::getTrunc(Res, IntegerType::get(C->getContext(), + ByteSize*8)); + } + + // TODO: Handle the 'partially zero' case. + return 0; + } + } +} + +/// getFoldedSizeOf - Return a ConstantExpr with type DestTy for sizeof +/// on Ty, with any known factors factored out. If Folded is false, +/// return null if no factoring was possible, to avoid endlessly +/// bouncing an unfoldable expression back into the top-level folder. +/// +static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, + bool Folded) { + if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { + Constant *N = ConstantInt::get(DestTy, ATy->getNumElements()); + Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true); + return ConstantExpr::getNUWMul(E, N); + } + + if (StructType *STy = dyn_cast<StructType>(Ty)) + if (!STy->isPacked()) { + unsigned NumElems = STy->getNumElements(); + // An empty struct has size zero. + if (NumElems == 0) + return ConstantExpr::getNullValue(DestTy); + // Check for a struct with all members having the same size. + Constant *MemberSize = + getFoldedSizeOf(STy->getElementType(0), DestTy, true); + bool AllSame = true; + for (unsigned i = 1; i != NumElems; ++i) + if (MemberSize != + getFoldedSizeOf(STy->getElementType(i), DestTy, true)) { + AllSame = false; + break; + } + if (AllSame) { + Constant *N = ConstantInt::get(DestTy, NumElems); + return ConstantExpr::getNUWMul(MemberSize, N); + } + } + + // Pointer size doesn't depend on the pointee type, so canonicalize them + // to an arbitrary pointee. + if (PointerType *PTy = dyn_cast<PointerType>(Ty)) + if (!PTy->getElementType()->isIntegerTy(1)) + return + getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1), + PTy->getAddressSpace()), + DestTy, true); + + // If there's no interesting folding happening, bail so that we don't create + // a constant that looks like it needs folding but really doesn't. + if (!Folded) + return 0; + + // Base case: Get a regular sizeof expression. + Constant *C = ConstantExpr::getSizeOf(Ty); + C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, + DestTy, false), + C, DestTy); + return C; +} + +/// getFoldedAlignOf - Return a ConstantExpr with type DestTy for alignof +/// on Ty, with any known factors factored out. If Folded is false, +/// return null if no factoring was possible, to avoid endlessly +/// bouncing an unfoldable expression back into the top-level folder. +/// +static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy, + bool Folded) { + // The alignment of an array is equal to the alignment of the + // array element. Note that this is not always true for vectors. 
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { + Constant *C = ConstantExpr::getAlignOf(ATy->getElementType()); + C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, + DestTy, + false), + C, DestTy); + return C; + } + + if (StructType *STy = dyn_cast<StructType>(Ty)) { + // Packed structs always have an alignment of 1. + if (STy->isPacked()) + return ConstantInt::get(DestTy, 1); + + // Otherwise, struct alignment is the maximum alignment of any member. + // Without target data, we can't compare much, but we can check to see + // if all the members have the same alignment. + unsigned NumElems = STy->getNumElements(); + // An empty struct has minimal alignment. + if (NumElems == 0) + return ConstantInt::get(DestTy, 1); + // Check for a struct with all members having the same alignment. + Constant *MemberAlign = + getFoldedAlignOf(STy->getElementType(0), DestTy, true); + bool AllSame = true; + for (unsigned i = 1; i != NumElems; ++i) + if (MemberAlign != getFoldedAlignOf(STy->getElementType(i), DestTy, true)) { + AllSame = false; + break; + } + if (AllSame) + return MemberAlign; + } + + // Pointer alignment doesn't depend on the pointee type, so canonicalize them + // to an arbitrary pointee. + if (PointerType *PTy = dyn_cast<PointerType>(Ty)) + if (!PTy->getElementType()->isIntegerTy(1)) + return + getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(), + 1), + PTy->getAddressSpace()), + DestTy, true); + + // If there's no interesting folding happening, bail so that we don't create + // a constant that looks like it needs folding but really doesn't. + if (!Folded) + return 0; + + // Base case: Get a regular alignof expression. + Constant *C = ConstantExpr::getAlignOf(Ty); + C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, + DestTy, false), + C, DestTy); + return C; +} + +/// getFoldedOffsetOf - Return a ConstantExpr with type DestTy for offsetof +/// on Ty and FieldNo, with any known factors factored out. If Folded is false, +/// return null if no factoring was possible, to avoid endlessly +/// bouncing an unfoldable expression back into the top-level folder. +/// +static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, + Type *DestTy, + bool Folded) { + if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { + Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false, + DestTy, false), + FieldNo, DestTy); + Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true); + return ConstantExpr::getNUWMul(E, N); + } + + if (StructType *STy = dyn_cast<StructType>(Ty)) + if (!STy->isPacked()) { + unsigned NumElems = STy->getNumElements(); + // An empty struct has no members. + if (NumElems == 0) + return 0; + // Check for a struct with all members having the same size. + Constant *MemberSize = + getFoldedSizeOf(STy->getElementType(0), DestTy, true); + bool AllSame = true; + for (unsigned i = 1; i != NumElems; ++i) + if (MemberSize != + getFoldedSizeOf(STy->getElementType(i), DestTy, true)) { + AllSame = false; + break; + } + if (AllSame) { + Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, + false, + DestTy, + false), + FieldNo, DestTy); + return ConstantExpr::getNUWMul(MemberSize, N); + } + } + + // If there's no interesting folding happening, bail so that we don't create + // a constant that looks like it needs folding but really doesn't. + if (!Folded) + return 0; + + // Base case: Get a regular offsetof expression. 
+ Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo); + C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, + DestTy, false), + C, DestTy); + return C; +} + +Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, + Type *DestTy) { + if (isa<UndefValue>(V)) { + // zext(undef) = 0, because the top bits will be zero. + // sext(undef) = 0, because the top bits will all be the same. + // [us]itofp(undef) = 0, because the result value is bounded. + if (opc == Instruction::ZExt || opc == Instruction::SExt || + opc == Instruction::UIToFP || opc == Instruction::SIToFP) + return Constant::getNullValue(DestTy); + return UndefValue::get(DestTy); + } + + if (V->isNullValue() && !DestTy->isX86_MMXTy()) + return Constant::getNullValue(DestTy); + + // If the cast operand is a constant expression, there's a few things we can + // do to try to simplify it. + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { + if (CE->isCast()) { + // Try hard to fold cast of cast because they are often eliminable. + if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy)) + return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy); + } else if (CE->getOpcode() == Instruction::GetElementPtr) { + // If all of the indexes in the GEP are null values, there is no pointer + // adjustment going on. We might as well cast the source pointer. + bool isAllNull = true; + for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) + if (!CE->getOperand(i)->isNullValue()) { + isAllNull = false; + break; + } + if (isAllNull) + // This is casting one pointer type to another, always BitCast + return ConstantExpr::getPointerCast(CE->getOperand(0), DestTy); + } + } + + // If the cast operand is a constant vector, perform the cast by + // operating on each element. In the cast of bitcasts, the element + // count may be mismatched; don't attempt to handle that here. + if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) && + DestTy->isVectorTy() && + DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) { + SmallVector<Constant*, 16> res; + VectorType *DestVecTy = cast<VectorType>(DestTy); + Type *DstEltTy = DestVecTy->getElementType(); + Type *Ty = IntegerType::get(V->getContext(), 32); + for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) { + Constant *C = + ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i)); + res.push_back(ConstantExpr::getCast(opc, C, DstEltTy)); + } + return ConstantVector::get(res); + } + + // We actually have to do a cast now. Perform the cast according to the + // opcode specified. + switch (opc) { + default: + llvm_unreachable("Failed to cast constant expression"); + case Instruction::FPTrunc: + case Instruction::FPExt: + if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) { + bool ignored; + APFloat Val = FPC->getValueAPF(); + Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf : + DestTy->isFloatTy() ? APFloat::IEEEsingle : + DestTy->isDoubleTy() ? APFloat::IEEEdouble : + DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended : + DestTy->isFP128Ty() ? APFloat::IEEEquad : + DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble : + APFloat::Bogus, + APFloat::rmNearestTiesToEven, &ignored); + return ConstantFP::get(V->getContext(), Val); + } + return 0; // Can't fold. 
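+  // Illustrative folds (example values): fptrunc of the ConstantFP double 1.5
+  // to float above yields the float 1.5 via APFloat conversion, and fptoui of
+  // double 3.75 to i32 below yields i32 3 (the conversion rounds toward zero).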
+ case Instruction::FPToUI: + case Instruction::FPToSI: + if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) { + const APFloat &V = FPC->getValueAPF(); + bool ignored; + uint64_t x[2]; + uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth(); + (void) V.convertToInteger(x, DestBitWidth, opc==Instruction::FPToSI, + APFloat::rmTowardZero, &ignored); + APInt Val(DestBitWidth, x); + return ConstantInt::get(FPC->getContext(), Val); + } + return 0; // Can't fold. + case Instruction::IntToPtr: //always treated as unsigned + if (V->isNullValue()) // Is it an integral null value? + return ConstantPointerNull::get(cast<PointerType>(DestTy)); + return 0; // Other pointer types cannot be casted + case Instruction::PtrToInt: // always treated as unsigned + // Is it a null pointer value? + if (V->isNullValue()) + return ConstantInt::get(DestTy, 0); + // If this is a sizeof-like expression, pull out multiplications by + // known factors to expose them to subsequent folding. If it's an + // alignof-like expression, factor out known factors. + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + if (CE->getOpcode() == Instruction::GetElementPtr && + CE->getOperand(0)->isNullValue()) { + Type *Ty = + cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); + if (CE->getNumOperands() == 2) { + // Handle a sizeof-like expression. + Constant *Idx = CE->getOperand(1); + bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne(); + if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) { + Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true, + DestTy, false), + Idx, DestTy); + return ConstantExpr::getMul(C, Idx); + } + } else if (CE->getNumOperands() == 3 && + CE->getOperand(1)->isNullValue()) { + // Handle an alignof-like expression. + if (StructType *STy = dyn_cast<StructType>(Ty)) + if (!STy->isPacked()) { + ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2)); + if (CI->isOne() && + STy->getNumElements() == 2 && + STy->getElementType(0)->isIntegerTy(1)) { + return getFoldedAlignOf(STy->getElementType(1), DestTy, false); + } + } + // Handle an offsetof-like expression. + if (Ty->isStructTy() || Ty->isArrayTy()) { + if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2), + DestTy, false)) + return C; + } + } + } + // Other pointer types cannot be casted + return 0; + case Instruction::UIToFP: + case Instruction::SIToFP: + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + APInt api = CI->getValue(); + APFloat apf(DestTy->getFltSemantics(), + APInt::getNullValue(DestTy->getPrimitiveSizeInBits())); + (void)apf.convertFromAPInt(api, + opc==Instruction::SIToFP, + APFloat::rmNearestTiesToEven); + return ConstantFP::get(V->getContext(), apf); + } + return 0; + case Instruction::ZExt: + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth(); + return ConstantInt::get(V->getContext(), + CI->getValue().zext(BitWidth)); + } + return 0; + case Instruction::SExt: + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth(); + return ConstantInt::get(V->getContext(), + CI->getValue().sext(BitWidth)); + } + return 0; + case Instruction::Trunc: { + uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth(); + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + return ConstantInt::get(V->getContext(), + CI->getValue().trunc(DestBitWidth)); + } + + // The input must be a constantexpr. See if we can simplify this based on + // the bytes we are demanding. 
Only do this if the source and dest are an + // even multiple of a byte. + if ((DestBitWidth & 7) == 0 && + (cast<IntegerType>(V->getType())->getBitWidth() & 7) == 0) + if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8)) + return Res; + + return 0; + } + case Instruction::BitCast: + return FoldBitCast(V, DestTy); + } +} + +Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond, + Constant *V1, Constant *V2) { + // Check for i1 and vector true/false conditions. + if (Cond->isNullValue()) return V2; + if (Cond->isAllOnesValue()) return V1; + + // If the condition is a vector constant, fold the result elementwise. + if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) { + SmallVector<Constant*, 16> Result; + Type *Ty = IntegerType::get(CondV->getContext(), 32); + for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){ + ConstantInt *Cond = dyn_cast<ConstantInt>(CondV->getOperand(i)); + if (Cond == 0) break; + + Constant *V = Cond->isNullValue() ? V2 : V1; + Constant *Res = ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i)); + Result.push_back(Res); + } + + // If we were able to build the vector, return it. + if (Result.size() == V1->getType()->getVectorNumElements()) + return ConstantVector::get(Result); + } + + if (isa<UndefValue>(Cond)) { + if (isa<UndefValue>(V1)) return V1; + return V2; + } + if (isa<UndefValue>(V1)) return V2; + if (isa<UndefValue>(V2)) return V1; + if (V1 == V2) return V1; + + if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) { + if (TrueVal->getOpcode() == Instruction::Select) + if (TrueVal->getOperand(0) == Cond) + return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2); + } + if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) { + if (FalseVal->getOpcode() == Instruction::Select) + if (FalseVal->getOperand(0) == Cond) + return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2)); + } + + return 0; +} + +Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val, + Constant *Idx) { + if (isa<UndefValue>(Val)) // ee(undef, x) -> undef + return UndefValue::get(Val->getType()->getVectorElementType()); + if (Val->isNullValue()) // ee(zero, x) -> zero + return Constant::getNullValue(Val->getType()->getVectorElementType()); + // ee({w,x,y,z}, undef) -> undef + if (isa<UndefValue>(Idx)) + return UndefValue::get(Val->getType()->getVectorElementType()); + + if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) { + uint64_t Index = CIdx->getZExtValue(); + // ee({w,x,y,z}, wrong_value) -> undef + if (Index >= Val->getType()->getVectorNumElements()) + return UndefValue::get(Val->getType()->getVectorElementType()); + return Val->getAggregateElement(Index); + } + return 0; +} + +Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val, + Constant *Elt, + Constant *Idx) { + ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx); + if (!CIdx) return 0; + const APInt &IdxVal = CIdx->getValue(); + + SmallVector<Constant*, 16> Result; + Type *Ty = IntegerType::get(Val->getContext(), 32); + for (unsigned i = 0, e = Val->getType()->getVectorNumElements(); i != e; ++i){ + if (i == IdxVal) { + Result.push_back(Elt); + continue; + } + + Constant *C = + ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i)); + Result.push_back(C); + } + + return ConstantVector::get(Result); +} + +Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, + Constant *V2, + Constant *Mask) { + unsigned MaskNumElts = Mask->getType()->getVectorNumElements(); + Type *EltTy = 
V1->getType()->getVectorElementType(); + + // Undefined shuffle mask -> undefined value. + if (isa<UndefValue>(Mask)) + return UndefValue::get(VectorType::get(EltTy, MaskNumElts)); + + // Don't break the bitcode reader hack. + if (isa<ConstantExpr>(Mask)) return 0; + + unsigned SrcNumElts = V1->getType()->getVectorNumElements(); + + // Loop over the shuffle mask, evaluating each element. + SmallVector<Constant*, 32> Result; + for (unsigned i = 0; i != MaskNumElts; ++i) { + int Elt = ShuffleVectorInst::getMaskValue(Mask, i); + if (Elt == -1) { + Result.push_back(UndefValue::get(EltTy)); + continue; + } + Constant *InElt; + if (unsigned(Elt) >= SrcNumElts*2) + InElt = UndefValue::get(EltTy); + else if (unsigned(Elt) >= SrcNumElts) { + Type *Ty = IntegerType::get(V2->getContext(), 32); + InElt = + ConstantExpr::getExtractElement(V2, + ConstantInt::get(Ty, Elt - SrcNumElts)); + } else { + Type *Ty = IntegerType::get(V1->getContext(), 32); + InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt)); + } + Result.push_back(InElt); + } + + return ConstantVector::get(Result); +} + +Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg, + ArrayRef<unsigned> Idxs) { + // Base case: no indices, so return the entire value. + if (Idxs.empty()) + return Agg; + + if (Constant *C = Agg->getAggregateElement(Idxs[0])) + return ConstantFoldExtractValueInstruction(C, Idxs.slice(1)); + + return 0; +} + +Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg, + Constant *Val, + ArrayRef<unsigned> Idxs) { + // Base case: no indices, so replace the entire value. + if (Idxs.empty()) + return Val; + + unsigned NumElts; + if (StructType *ST = dyn_cast<StructType>(Agg->getType())) + NumElts = ST->getNumElements(); + else if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType())) + NumElts = AT->getNumElements(); + else + NumElts = Agg->getType()->getVectorNumElements(); + + SmallVector<Constant*, 32> Result; + for (unsigned i = 0; i != NumElts; ++i) { + Constant *C = Agg->getAggregateElement(i); + if (C == 0) return 0; + + if (Idxs[0] == i) + C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1)); + + Result.push_back(C); + } + + if (StructType *ST = dyn_cast<StructType>(Agg->getType())) + return ConstantStruct::get(ST, Result); + if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType())) + return ConstantArray::get(AT, Result); + return ConstantVector::get(Result); +} + + +Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, + Constant *C1, Constant *C2) { + // Handle UndefValue up front. + if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) { + switch (Opcode) { + case Instruction::Xor: + if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) + // Handle undef ^ undef -> 0 special case. This is a common + // idiom (misuse). 
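+        // For example, 'xor i32 undef, undef' folds to 'i32 0' here, even
+        // though each undef operand on its own could take any value.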
+ return Constant::getNullValue(C1->getType()); + // Fallthrough + case Instruction::Add: + case Instruction::Sub: + return UndefValue::get(C1->getType()); + case Instruction::And: + if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef & undef -> undef + return C1; + return Constant::getNullValue(C1->getType()); // undef & X -> 0 + case Instruction::Mul: { + ConstantInt *CI; + // X * undef -> undef if X is odd or undef + if (((CI = dyn_cast<ConstantInt>(C1)) && CI->getValue()[0]) || + ((CI = dyn_cast<ConstantInt>(C2)) && CI->getValue()[0]) || + (isa<UndefValue>(C1) && isa<UndefValue>(C2))) + return UndefValue::get(C1->getType()); + + // X * undef -> 0 otherwise + return Constant::getNullValue(C1->getType()); + } + case Instruction::UDiv: + case Instruction::SDiv: + // undef / 1 -> undef + if (Opcode == Instruction::UDiv || Opcode == Instruction::SDiv) + if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) + if (CI2->isOne()) + return C1; + // FALL THROUGH + case Instruction::URem: + case Instruction::SRem: + if (!isa<UndefValue>(C2)) // undef / X -> 0 + return Constant::getNullValue(C1->getType()); + return C2; // X / undef -> undef + case Instruction::Or: // X | undef -> -1 + if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef | undef -> undef + return C1; + return Constant::getAllOnesValue(C1->getType()); // undef | X -> ~0 + case Instruction::LShr: + if (isa<UndefValue>(C2) && isa<UndefValue>(C1)) + return C1; // undef lshr undef -> undef + return Constant::getNullValue(C1->getType()); // X lshr undef -> 0 + // undef lshr X -> 0 + case Instruction::AShr: + if (!isa<UndefValue>(C2)) // undef ashr X --> all ones + return Constant::getAllOnesValue(C1->getType()); + else if (isa<UndefValue>(C1)) + return C1; // undef ashr undef -> undef + else + return C1; // X ashr undef --> X + case Instruction::Shl: + if (isa<UndefValue>(C2) && isa<UndefValue>(C1)) + return C1; // undef shl undef -> undef + // undef << X -> 0 or X << undef -> 0 + return Constant::getNullValue(C1->getType()); + } + } + + // Handle simplifications when the RHS is a constant int. + if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) { + switch (Opcode) { + case Instruction::Add: + if (CI2->equalsInt(0)) return C1; // X + 0 == X + break; + case Instruction::Sub: + if (CI2->equalsInt(0)) return C1; // X - 0 == X + break; + case Instruction::Mul: + if (CI2->equalsInt(0)) return C2; // X * 0 == 0 + if (CI2->equalsInt(1)) + return C1; // X * 1 == X + break; + case Instruction::UDiv: + case Instruction::SDiv: + if (CI2->equalsInt(1)) + return C1; // X / 1 == X + if (CI2->equalsInt(0)) + return UndefValue::get(CI2->getType()); // X / 0 == undef + break; + case Instruction::URem: + case Instruction::SRem: + if (CI2->equalsInt(1)) + return Constant::getNullValue(CI2->getType()); // X % 1 == 0 + if (CI2->equalsInt(0)) + return UndefValue::get(CI2->getType()); // X % 0 == undef + break; + case Instruction::And: + if (CI2->isZero()) return C2; // X & 0 == 0 + if (CI2->isAllOnesValue()) + return C1; // X & -1 == X + + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) { + // (zext i32 to i64) & 4294967295 -> (zext i32 to i64) + if (CE1->getOpcode() == Instruction::ZExt) { + unsigned DstWidth = CI2->getType()->getBitWidth(); + unsigned SrcWidth = + CE1->getOperand(0)->getType()->getPrimitiveSizeInBits(); + APInt PossiblySetBits(APInt::getLowBitsSet(DstWidth, SrcWidth)); + if ((PossiblySetBits & CI2->getValue()) == PossiblySetBits) + return C1; + } + + // If and'ing the address of a global with a constant, fold it. 
+ if (CE1->getOpcode() == Instruction::PtrToInt && + isa<GlobalValue>(CE1->getOperand(0))) { + GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0)); + + // Functions are at least 4-byte aligned. + unsigned GVAlign = GV->getAlignment(); + if (isa<Function>(GV)) + GVAlign = std::max(GVAlign, 4U); + + if (GVAlign > 1) { + unsigned DstWidth = CI2->getType()->getBitWidth(); + unsigned SrcWidth = std::min(DstWidth, Log2_32(GVAlign)); + APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth)); + + // If checking bits we know are clear, return zero. + if ((CI2->getValue() & BitsNotSet) == CI2->getValue()) + return Constant::getNullValue(CI2->getType()); + } + } + } + break; + case Instruction::Or: + if (CI2->equalsInt(0)) return C1; // X | 0 == X + if (CI2->isAllOnesValue()) + return C2; // X | -1 == -1 + break; + case Instruction::Xor: + if (CI2->equalsInt(0)) return C1; // X ^ 0 == X + + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) { + switch (CE1->getOpcode()) { + default: break; + case Instruction::ICmp: + case Instruction::FCmp: + // cmp pred ^ true -> cmp !pred + assert(CI2->equalsInt(1)); + CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate(); + pred = CmpInst::getInversePredicate(pred); + return ConstantExpr::getCompare(pred, CE1->getOperand(0), + CE1->getOperand(1)); + } + } + break; + case Instruction::AShr: + // ashr (zext C to Ty), C2 -> lshr (zext C, CSA), C2 + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) + if (CE1->getOpcode() == Instruction::ZExt) // Top bits known zero. + return ConstantExpr::getLShr(C1, C2); + break; + } + } else if (isa<ConstantInt>(C1)) { + // If C1 is a ConstantInt and C2 is not, swap the operands. + if (Instruction::isCommutative(Opcode)) + return ConstantExpr::get(Opcode, C2, C1); + } + + // At this point we know neither constant is an UndefValue. 
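+  // The cases above covered undef operands and the cheap right-hand-side
+  // identities (X + 0, X * 1, X & -1, and'ing a sufficiently aligned global
+  // address with a small mask, and so on). What follows performs the actual
+  // arithmetic when both operands are simple ConstantInt/ConstantFP values.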
+ if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) { + if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) { + const APInt &C1V = CI1->getValue(); + const APInt &C2V = CI2->getValue(); + switch (Opcode) { + default: + break; + case Instruction::Add: + return ConstantInt::get(CI1->getContext(), C1V + C2V); + case Instruction::Sub: + return ConstantInt::get(CI1->getContext(), C1V - C2V); + case Instruction::Mul: + return ConstantInt::get(CI1->getContext(), C1V * C2V); + case Instruction::UDiv: + assert(!CI2->isNullValue() && "Div by zero handled above"); + return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V)); + case Instruction::SDiv: + assert(!CI2->isNullValue() && "Div by zero handled above"); + if (C2V.isAllOnesValue() && C1V.isMinSignedValue()) + return UndefValue::get(CI1->getType()); // MIN_INT / -1 -> undef + return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V)); + case Instruction::URem: + assert(!CI2->isNullValue() && "Div by zero handled above"); + return ConstantInt::get(CI1->getContext(), C1V.urem(C2V)); + case Instruction::SRem: + assert(!CI2->isNullValue() && "Div by zero handled above"); + if (C2V.isAllOnesValue() && C1V.isMinSignedValue()) + return UndefValue::get(CI1->getType()); // MIN_INT % -1 -> undef + return ConstantInt::get(CI1->getContext(), C1V.srem(C2V)); + case Instruction::And: + return ConstantInt::get(CI1->getContext(), C1V & C2V); + case Instruction::Or: + return ConstantInt::get(CI1->getContext(), C1V | C2V); + case Instruction::Xor: + return ConstantInt::get(CI1->getContext(), C1V ^ C2V); + case Instruction::Shl: { + uint32_t shiftAmt = C2V.getZExtValue(); + if (shiftAmt < C1V.getBitWidth()) + return ConstantInt::get(CI1->getContext(), C1V.shl(shiftAmt)); + else + return UndefValue::get(C1->getType()); // too big shift is undef + } + case Instruction::LShr: { + uint32_t shiftAmt = C2V.getZExtValue(); + if (shiftAmt < C1V.getBitWidth()) + return ConstantInt::get(CI1->getContext(), C1V.lshr(shiftAmt)); + else + return UndefValue::get(C1->getType()); // too big shift is undef + } + case Instruction::AShr: { + uint32_t shiftAmt = C2V.getZExtValue(); + if (shiftAmt < C1V.getBitWidth()) + return ConstantInt::get(CI1->getContext(), C1V.ashr(shiftAmt)); + else + return UndefValue::get(C1->getType()); // too big shift is undef + } + } + } + + switch (Opcode) { + case Instruction::SDiv: + case Instruction::UDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::Shl: + if (CI1->equalsInt(0)) return C1; + break; + default: + break; + } + } else if (ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) { + if (ConstantFP *CFP2 = dyn_cast<ConstantFP>(C2)) { + APFloat C1V = CFP1->getValueAPF(); + APFloat C2V = CFP2->getValueAPF(); + APFloat C3V = C1V; // copy for modification + switch (Opcode) { + default: + break; + case Instruction::FAdd: + (void)C3V.add(C2V, APFloat::rmNearestTiesToEven); + return ConstantFP::get(C1->getContext(), C3V); + case Instruction::FSub: + (void)C3V.subtract(C2V, APFloat::rmNearestTiesToEven); + return ConstantFP::get(C1->getContext(), C3V); + case Instruction::FMul: + (void)C3V.multiply(C2V, APFloat::rmNearestTiesToEven); + return ConstantFP::get(C1->getContext(), C3V); + case Instruction::FDiv: + (void)C3V.divide(C2V, APFloat::rmNearestTiesToEven); + return ConstantFP::get(C1->getContext(), C3V); + case Instruction::FRem: + (void)C3V.mod(C2V, APFloat::rmNearestTiesToEven); + return ConstantFP::get(C1->getContext(), C3V); + } + } + } else if (VectorType *VTy = 
dyn_cast<VectorType>(C1->getType())) { + // Perform elementwise folding. + SmallVector<Constant*, 16> Result; + Type *Ty = IntegerType::get(VTy->getContext(), 32); + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { + Constant *LHS = + ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i)); + Constant *RHS = + ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i)); + + Result.push_back(ConstantExpr::get(Opcode, LHS, RHS)); + } + + return ConstantVector::get(Result); + } + + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) { + // There are many possible foldings we could do here. We should probably + // at least fold add of a pointer with an integer into the appropriate + // getelementptr. This will improve alias analysis a bit. + + // Given ((a + b) + c), if (b + c) folds to something interesting, return + // (a + (b + c)). + if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) { + Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2); + if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode) + return ConstantExpr::get(Opcode, CE1->getOperand(0), T); + } + } else if (isa<ConstantExpr>(C2)) { + // If C2 is a constant expr and C1 isn't, flop them around and fold the + // other way if possible. + if (Instruction::isCommutative(Opcode)) + return ConstantFoldBinaryInstruction(Opcode, C2, C1); + } + + // i1 can be simplified in many cases. + if (C1->getType()->isIntegerTy(1)) { + switch (Opcode) { + case Instruction::Add: + case Instruction::Sub: + return ConstantExpr::getXor(C1, C2); + case Instruction::Mul: + return ConstantExpr::getAnd(C1, C2); + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + // We can assume that C2 == 0. If it were one the result would be + // undefined because the shift value is as large as the bitwidth. + return C1; + case Instruction::SDiv: + case Instruction::UDiv: + // We can assume that C2 == 1. If it were zero the result would be + // undefined through division by zero. + return C1; + case Instruction::URem: + case Instruction::SRem: + // We can assume that C2 == 1. If it were zero the result would be + // undefined through division by zero. + return ConstantInt::getFalse(C1->getContext()); + default: + break; + } + } + + // We don't know how to fold this. + return 0; +} + +/// isZeroSizedType - This type is zero sized if its an array or structure of +/// zero sized types. The only leaf zero sized type is an empty structure. +static bool isMaybeZeroSizedType(Type *Ty) { + if (StructType *STy = dyn_cast<StructType>(Ty)) { + if (STy->isOpaque()) return true; // Can't say. + + // If all of elements have zero size, this does too. + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) + if (!isMaybeZeroSizedType(STy->getElementType(i))) return false; + return true; + + } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { + return isMaybeZeroSizedType(ATy->getElementType()); + } + return false; +} + +/// IdxCompare - Compare the two constants as though they were getelementptr +/// indices. This allows coersion of the types to be the same thing. +/// +/// If the two constants are the "same" (after coersion), return 0. If the +/// first is less than the second, return -1, if the second is less than the +/// first, return 1. If the constants are not integral, return -2. +/// +static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) { + if (C1 == C2) return 0; + + // Ok, we found a different index. 
If they are not ConstantInt, we can't do
+  // anything with them.
+  if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
+    return -2; // don't know!
+
+  // Ok, we have two differing integer indices. Sign extend them to be the same
+  // type. Long is always big enough, so we use it.
+  if (!C1->getType()->isIntegerTy(64))
+    C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(C1->getContext()));
+
+  if (!C2->getType()->isIntegerTy(64))
+    C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(C1->getContext()));
+
+  if (C1 == C2) return 0; // They are equal
+
+  // If the type being indexed over is really just a zero sized type, there is
+  // no pointer difference being made here.
+  if (isMaybeZeroSizedType(ElTy))
+    return -2; // dunno.
+
+  // If they are really different, now that they are the same type, then we
+  // found a difference!
+  if (cast<ConstantInt>(C1)->getSExtValue() <
+      cast<ConstantInt>(C2)->getSExtValue())
+    return -1;
+  else
+    return 1;
+}
+
+/// evaluateFCmpRelation - This function determines if there is anything we can
+/// decide about the two constants provided. This doesn't need to handle simple
+/// things like ConstantFP comparisons, but should instead handle ConstantExprs.
+/// If we can determine that the two constants have a particular relation to
+/// each other, we should return the corresponding FCmpInst predicate,
+/// otherwise return FCmpInst::BAD_FCMP_PREDICATE. This is used below in
+/// ConstantFoldCompareInstruction.
+///
+/// To simplify this code we canonicalize the relation so that the first
+/// operand is always the most "complex" of the two. We consider ConstantFP
+/// to be the simplest, and ConstantExprs to be the most complex.
+static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
+  assert(V1->getType() == V2->getType() &&
+         "Cannot compare values of different types!");
+
+  // Handle degenerate case quickly
+  if (V1 == V2) return FCmpInst::FCMP_OEQ;
+
+  if (!isa<ConstantExpr>(V1)) {
+    if (!isa<ConstantExpr>(V2)) {
+      // We distilled this down to a simple case; use the standard constant
+      // folder.
+      ConstantInt *R = 0;
+      R = dyn_cast<ConstantInt>(
+                      ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
+      if (R && !R->isZero())
+        return FCmpInst::FCMP_OEQ;
+      R = dyn_cast<ConstantInt>(
+                      ConstantExpr::getFCmp(FCmpInst::FCMP_OLT, V1, V2));
+      if (R && !R->isZero())
+        return FCmpInst::FCMP_OLT;
+      R = dyn_cast<ConstantInt>(
+                      ConstantExpr::getFCmp(FCmpInst::FCMP_OGT, V1, V2));
+      if (R && !R->isZero())
+        return FCmpInst::FCMP_OGT;
+
+      // Nothing more we can do
+      return FCmpInst::BAD_FCMP_PREDICATE;
+    }
+
+    // If the first operand is simple and second is ConstantExpr, swap operands.
+    FCmpInst::Predicate SwappedRelation = evaluateFCmpRelation(V2, V1);
+    if (SwappedRelation != FCmpInst::BAD_FCMP_PREDICATE)
+      return FCmpInst::getSwappedPredicate(SwappedRelation);
+  } else {
+    // Ok, the LHS is known to be a constantexpr. The RHS can be any of a
+    // constantexpr or a simple constant.
+    ConstantExpr *CE1 = cast<ConstantExpr>(V1);
+    switch (CE1->getOpcode()) {
+    case Instruction::FPTrunc:
+    case Instruction::FPExt:
+    case Instruction::UIToFP:
+    case Instruction::SIToFP:
+      // We might be able to do something with these but we don't right now.
+      break;
+    default:
+      break;
+    }
+  }
+  // There are MANY other foldings that we could perform here. They will
+  // probably be added on demand, as they seem needed.
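+  // For two bare ConstantFP operands the code above simply defers to the
+  // standard folder, so e.g. comparing 1.0 with 2.0 yields FCMP_OLT; anything
+  // involving a ConstantExpr currently falls through to the bail-out below.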
+ return FCmpInst::BAD_FCMP_PREDICATE; +} + +/// evaluateICmpRelation - This function determines if there is anything we can +/// decide about the two constants provided. This doesn't need to handle simple +/// things like integer comparisons, but should instead handle ConstantExprs +/// and GlobalValues. If we can determine that the two constants have a +/// particular relation to each other, we should return the corresponding ICmp +/// predicate, otherwise return ICmpInst::BAD_ICMP_PREDICATE. +/// +/// To simplify this code we canonicalize the relation so that the first +/// operand is always the most "complex" of the two. We consider simple +/// constants (like ConstantInt) to be the simplest, followed by +/// GlobalValues, followed by ConstantExpr's (the most complex). +/// +static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2, + bool isSigned) { + assert(V1->getType() == V2->getType() && + "Cannot compare different types of values!"); + if (V1 == V2) return ICmpInst::ICMP_EQ; + + if (!isa<ConstantExpr>(V1) && !isa<GlobalValue>(V1) && + !isa<BlockAddress>(V1)) { + if (!isa<GlobalValue>(V2) && !isa<ConstantExpr>(V2) && + !isa<BlockAddress>(V2)) { + // We distilled this down to a simple case, use the standard constant + // folder. + ConstantInt *R = 0; + ICmpInst::Predicate pred = ICmpInst::ICMP_EQ; + R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2)); + if (R && !R->isZero()) + return pred; + pred = isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; + R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2)); + if (R && !R->isZero()) + return pred; + pred = isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2)); + if (R && !R->isZero()) + return pred; + + // If we couldn't figure it out, bail. + return ICmpInst::BAD_ICMP_PREDICATE; + } + + // If the first operand is simple, swap operands. + ICmpInst::Predicate SwappedRelation = + evaluateICmpRelation(V2, V1, isSigned); + if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE) + return ICmpInst::getSwappedPredicate(SwappedRelation); + + } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V1)) { + if (isa<ConstantExpr>(V2)) { // Swap as necessary. + ICmpInst::Predicate SwappedRelation = + evaluateICmpRelation(V2, V1, isSigned); + if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE) + return ICmpInst::getSwappedPredicate(SwappedRelation); + return ICmpInst::BAD_ICMP_PREDICATE; + } + + // Now we know that the RHS is a GlobalValue, BlockAddress or simple + // constant (which, since the types must match, means that it's a + // ConstantPointerNull). + if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) { + // Don't try to decide equality of aliases. + if (!isa<GlobalAlias>(GV) && !isa<GlobalAlias>(GV2)) + if (!GV->hasExternalWeakLinkage() || !GV2->hasExternalWeakLinkage()) + return ICmpInst::ICMP_NE; + } else if (isa<BlockAddress>(V2)) { + return ICmpInst::ICMP_NE; // Globals never equal labels. + } else { + assert(isa<ConstantPointerNull>(V2) && "Canonicalization guarantee!"); + // GlobalVals can never be null unless they have external weak linkage. + // We don't try to evaluate aliases here. + if (!GV->hasExternalWeakLinkage() && !isa<GlobalAlias>(GV)) + return ICmpInst::ICMP_NE; + } + } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(V1)) { + if (isa<ConstantExpr>(V2)) { // Swap as necessary. 
+ ICmpInst::Predicate SwappedRelation = + evaluateICmpRelation(V2, V1, isSigned); + if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE) + return ICmpInst::getSwappedPredicate(SwappedRelation); + return ICmpInst::BAD_ICMP_PREDICATE; + } + + // Now we know that the RHS is a GlobalValue, BlockAddress or simple + // constant (which, since the types must match, means that it is a + // ConstantPointerNull). + if (const BlockAddress *BA2 = dyn_cast<BlockAddress>(V2)) { + // Block address in another function can't equal this one, but block + // addresses in the current function might be the same if blocks are + // empty. + if (BA2->getFunction() != BA->getFunction()) + return ICmpInst::ICMP_NE; + } else { + // Block addresses aren't null, don't equal the address of globals. + assert((isa<ConstantPointerNull>(V2) || isa<GlobalValue>(V2)) && + "Canonicalization guarantee!"); + return ICmpInst::ICMP_NE; + } + } else { + // Ok, the LHS is known to be a constantexpr. The RHS can be any of a + // constantexpr, a global, block address, or a simple constant. + ConstantExpr *CE1 = cast<ConstantExpr>(V1); + Constant *CE1Op0 = CE1->getOperand(0); + + switch (CE1->getOpcode()) { + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::FPExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + break; // We can't evaluate floating point casts or truncations. + + case Instruction::UIToFP: + case Instruction::SIToFP: + case Instruction::BitCast: + case Instruction::ZExt: + case Instruction::SExt: + // If the cast is not actually changing bits, and the second operand is a + // null pointer, do the comparison with the pre-casted value. + if (V2->isNullValue() && + (CE1->getType()->isPointerTy() || CE1->getType()->isIntegerTy())) { + if (CE1->getOpcode() == Instruction::ZExt) isSigned = false; + if (CE1->getOpcode() == Instruction::SExt) isSigned = true; + return evaluateICmpRelation(CE1Op0, + Constant::getNullValue(CE1Op0->getType()), + isSigned); + } + break; + + case Instruction::GetElementPtr: + // Ok, since this is a getelementptr, we know that the constant has a + // pointer type. Check the various cases. + if (isa<ConstantPointerNull>(V2)) { + // If we are comparing a GEP to a null pointer, check to see if the base + // of the GEP equals the null pointer. + if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) { + if (GV->hasExternalWeakLinkage()) + // Weak linkage GVals could be zero or not. We're comparing that + // to null pointer so its greater-or-equal + return isSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; + else + // If its not weak linkage, the GVal must have a non-zero address + // so the result is greater-than + return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + } else if (isa<ConstantPointerNull>(CE1Op0)) { + // If we are indexing from a null pointer, check to see if we have any + // non-zero indices. + for (unsigned i = 1, e = CE1->getNumOperands(); i != e; ++i) + if (!CE1->getOperand(i)->isNullValue()) + // Offsetting from null, must not be equal. + return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + // Only zero indexes from null, must still be zero. + return ICmpInst::ICMP_EQ; + } + // Otherwise, we can't really say if the first operand is null or not. + } else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) { + if (isa<ConstantPointerNull>(CE1Op0)) { + if (GV2->hasExternalWeakLinkage()) + // Weak linkage GVals could be zero or not. We're comparing it to + // a null pointer, so its less-or-equal + return isSigned ? 
ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; + else + // If its not weak linkage, the GVal must have a non-zero address + // so the result is less-than + return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; + } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) { + if (GV == GV2) { + // If this is a getelementptr of the same global, then it must be + // different. Because the types must match, the getelementptr could + // only have at most one index, and because we fold getelementptr's + // with a single zero index, it must be nonzero. + assert(CE1->getNumOperands() == 2 && + !CE1->getOperand(1)->isNullValue() && + "Surprising getelementptr!"); + return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + } else { + // If they are different globals, we don't know what the value is. + return ICmpInst::BAD_ICMP_PREDICATE; + } + } + } else { + ConstantExpr *CE2 = cast<ConstantExpr>(V2); + Constant *CE2Op0 = CE2->getOperand(0); + + // There are MANY other foldings that we could perform here. They will + // probably be added on demand, as they seem needed. + switch (CE2->getOpcode()) { + default: break; + case Instruction::GetElementPtr: + // By far the most common case to handle is when the base pointers are + // obviously to the same global. + if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) { + if (CE1Op0 != CE2Op0) // Don't know relative ordering. + return ICmpInst::BAD_ICMP_PREDICATE; + // Ok, we know that both getelementptr instructions are based on the + // same global. From this, we can precisely determine the relative + // ordering of the resultant pointers. + unsigned i = 1; + + // The logic below assumes that the result of the comparison + // can be determined by finding the first index that differs. + // This doesn't work if there is over-indexing in any + // subsequent indices, so check for that case first. + if (!CE1->isGEPWithNoNotionalOverIndexing() || + !CE2->isGEPWithNoNotionalOverIndexing()) + return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal. + + // Compare all of the operands the GEP's have in common. + gep_type_iterator GTI = gep_type_begin(CE1); + for (;i != CE1->getNumOperands() && i != CE2->getNumOperands(); + ++i, ++GTI) + switch (IdxCompare(CE1->getOperand(i), + CE2->getOperand(i), GTI.getIndexedType())) { + case -1: return isSigned ? ICmpInst::ICMP_SLT:ICmpInst::ICMP_ULT; + case 1: return isSigned ? ICmpInst::ICMP_SGT:ICmpInst::ICMP_UGT; + case -2: return ICmpInst::BAD_ICMP_PREDICATE; + } + + // Ok, we ran out of things they have in common. If any leftovers + // are non-zero then we have a difference, otherwise we are equal. + for (; i < CE1->getNumOperands(); ++i) + if (!CE1->getOperand(i)->isNullValue()) { + if (isa<ConstantInt>(CE1->getOperand(i))) + return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + else + return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal. + } + + for (; i < CE2->getNumOperands(); ++i) + if (!CE2->getOperand(i)->isNullValue()) { + if (isa<ConstantInt>(CE2->getOperand(i))) + return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; + else + return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal. 
+ } + return ICmpInst::ICMP_EQ; + } + } + } + default: + break; + } + } + + return ICmpInst::BAD_ICMP_PREDICATE; +} + +Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred, + Constant *C1, Constant *C2) { + Type *ResultTy; + if (VectorType *VT = dyn_cast<VectorType>(C1->getType())) + ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()), + VT->getNumElements()); + else + ResultTy = Type::getInt1Ty(C1->getContext()); + + // Fold FCMP_FALSE/FCMP_TRUE unconditionally. + if (pred == FCmpInst::FCMP_FALSE) + return Constant::getNullValue(ResultTy); + + if (pred == FCmpInst::FCMP_TRUE) + return Constant::getAllOnesValue(ResultTy); + + // Handle some degenerate cases first + if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) { + // For EQ and NE, we can always pick a value for the undef to make the + // predicate pass or fail, so we can return undef. + // Also, if both operands are undef, we can return undef. + if (ICmpInst::isEquality(ICmpInst::Predicate(pred)) || + (isa<UndefValue>(C1) && isa<UndefValue>(C2))) + return UndefValue::get(ResultTy); + // Otherwise, pick the same value as the non-undef operand, and fold + // it to true or false. + return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred)); + } + + // icmp eq/ne(null,GV) -> false/true + if (C1->isNullValue()) { + if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2)) + // Don't try to evaluate aliases. External weak GV can be null. + if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage()) { + if (pred == ICmpInst::ICMP_EQ) + return ConstantInt::getFalse(C1->getContext()); + else if (pred == ICmpInst::ICMP_NE) + return ConstantInt::getTrue(C1->getContext()); + } + // icmp eq/ne(GV,null) -> false/true + } else if (C2->isNullValue()) { + if (const GlobalValue *GV = dyn_cast<GlobalValue>(C1)) + // Don't try to evaluate aliases. External weak GV can be null. + if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage()) { + if (pred == ICmpInst::ICMP_EQ) + return ConstantInt::getFalse(C1->getContext()); + else if (pred == ICmpInst::ICMP_NE) + return ConstantInt::getTrue(C1->getContext()); + } + } + + // If the comparison is a comparison between two i1's, simplify it. 
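+  // For i1 operands equality is just inverted exclusive-or: (a == b) is
+  // (a ^ ~b) and (a != b) is (a ^ b), which is exactly what is built below.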
+ if (C1->getType()->isIntegerTy(1)) { + switch(pred) { + case ICmpInst::ICMP_EQ: + if (isa<ConstantInt>(C2)) + return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2)); + return ConstantExpr::getXor(ConstantExpr::getNot(C1), C2); + case ICmpInst::ICMP_NE: + return ConstantExpr::getXor(C1, C2); + default: + break; + } + } + + if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) { + APInt V1 = cast<ConstantInt>(C1)->getValue(); + APInt V2 = cast<ConstantInt>(C2)->getValue(); + switch (pred) { + default: llvm_unreachable("Invalid ICmp Predicate"); + case ICmpInst::ICMP_EQ: return ConstantInt::get(ResultTy, V1 == V2); + case ICmpInst::ICMP_NE: return ConstantInt::get(ResultTy, V1 != V2); + case ICmpInst::ICMP_SLT: return ConstantInt::get(ResultTy, V1.slt(V2)); + case ICmpInst::ICMP_SGT: return ConstantInt::get(ResultTy, V1.sgt(V2)); + case ICmpInst::ICMP_SLE: return ConstantInt::get(ResultTy, V1.sle(V2)); + case ICmpInst::ICMP_SGE: return ConstantInt::get(ResultTy, V1.sge(V2)); + case ICmpInst::ICMP_ULT: return ConstantInt::get(ResultTy, V1.ult(V2)); + case ICmpInst::ICMP_UGT: return ConstantInt::get(ResultTy, V1.ugt(V2)); + case ICmpInst::ICMP_ULE: return ConstantInt::get(ResultTy, V1.ule(V2)); + case ICmpInst::ICMP_UGE: return ConstantInt::get(ResultTy, V1.uge(V2)); + } + } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) { + APFloat C1V = cast<ConstantFP>(C1)->getValueAPF(); + APFloat C2V = cast<ConstantFP>(C2)->getValueAPF(); + APFloat::cmpResult R = C1V.compare(C2V); + switch (pred) { + default: llvm_unreachable("Invalid FCmp Predicate"); + case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy); + case FCmpInst::FCMP_TRUE: return Constant::getAllOnesValue(ResultTy); + case FCmpInst::FCMP_UNO: + return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered); + case FCmpInst::FCMP_ORD: + return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered); + case FCmpInst::FCMP_UEQ: + return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || + R==APFloat::cmpEqual); + case FCmpInst::FCMP_OEQ: + return ConstantInt::get(ResultTy, R==APFloat::cmpEqual); + case FCmpInst::FCMP_UNE: + return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual); + case FCmpInst::FCMP_ONE: + return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan || + R==APFloat::cmpGreaterThan); + case FCmpInst::FCMP_ULT: + return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || + R==APFloat::cmpLessThan); + case FCmpInst::FCMP_OLT: + return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan); + case FCmpInst::FCMP_UGT: + return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || + R==APFloat::cmpGreaterThan); + case FCmpInst::FCMP_OGT: + return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan); + case FCmpInst::FCMP_ULE: + return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan); + case FCmpInst::FCMP_OLE: + return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan || + R==APFloat::cmpEqual); + case FCmpInst::FCMP_UGE: + return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan); + case FCmpInst::FCMP_OGE: + return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan || + R==APFloat::cmpEqual); + } + } else if (C1->getType()->isVectorTy()) { + // If we can constant fold the comparison of each element, constant fold + // the whole vector comparison. + SmallVector<Constant*, 4> ResElts; + Type *Ty = IntegerType::get(C1->getContext(), 32); + // Compare the elements, producing an i1 result or constant expr. 
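+    // For example, comparing <2 x i32> <1, 2> with <1, 3> for equality folds
+    // to the vector <i1 true, i1 false>, one extracted element pair at a time.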
+ for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){ + Constant *C1E = + ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i)); + Constant *C2E = + ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i)); + + ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E)); + } + + return ConstantVector::get(ResElts); + } + + if (C1->getType()->isFloatingPointTy()) { + int Result = -1; // -1 = unknown, 0 = known false, 1 = known true. + switch (evaluateFCmpRelation(C1, C2)) { + default: llvm_unreachable("Unknown relation!"); + case FCmpInst::FCMP_UNO: + case FCmpInst::FCMP_ORD: + case FCmpInst::FCMP_UEQ: + case FCmpInst::FCMP_UNE: + case FCmpInst::FCMP_ULT: + case FCmpInst::FCMP_UGT: + case FCmpInst::FCMP_ULE: + case FCmpInst::FCMP_UGE: + case FCmpInst::FCMP_TRUE: + case FCmpInst::FCMP_FALSE: + case FCmpInst::BAD_FCMP_PREDICATE: + break; // Couldn't determine anything about these constants. + case FCmpInst::FCMP_OEQ: // We know that C1 == C2 + Result = (pred == FCmpInst::FCMP_UEQ || pred == FCmpInst::FCMP_OEQ || + pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE || + pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE); + break; + case FCmpInst::FCMP_OLT: // We know that C1 < C2 + Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE || + pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT || + pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE); + break; + case FCmpInst::FCMP_OGT: // We know that C1 > C2 + Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE || + pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT || + pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE); + break; + case FCmpInst::FCMP_OLE: // We know that C1 <= C2 + // We can only partially decide this relation. + if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT) + Result = 0; + else if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT) + Result = 1; + break; + case FCmpInst::FCMP_OGE: // We known that C1 >= C2 + // We can only partially decide this relation. + if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT) + Result = 0; + else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT) + Result = 1; + break; + case FCmpInst::FCMP_ONE: // We know that C1 != C2 + // We can only partially decide this relation. + if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ) + Result = 0; + else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE) + Result = 1; + break; + } + + // If we evaluated the result, return it now. + if (Result != -1) + return ConstantInt::get(ResultTy, Result); + + } else { + // Evaluate the relation between the two constants, per the predicate. + int Result = -1; // -1 = unknown, 0 = known false, 1 = known true. + switch (evaluateICmpRelation(C1, C2, CmpInst::isSigned(pred))) { + default: llvm_unreachable("Unknown relational!"); + case ICmpInst::BAD_ICMP_PREDICATE: + break; // Couldn't determine anything about these constants. + case ICmpInst::ICMP_EQ: // We know the constants are equal! + // If we know the constants are equal, we can decide the result of this + // computation precisely. 
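+    // E.g. once the operands are known equal, 'ule' and 'sge' comparisons fold
+    // to true while 'ult' and 'ne' fold to false; the cases below encode the
+    // analogous tables for the inequality relations.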
+ Result = ICmpInst::isTrueWhenEqual((ICmpInst::Predicate)pred); + break; + case ICmpInst::ICMP_ULT: + switch (pred) { + case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE: + Result = 1; break; + case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE: + Result = 0; break; + } + break; + case ICmpInst::ICMP_SLT: + switch (pred) { + case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE: + Result = 1; break; + case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE: + Result = 0; break; + } + break; + case ICmpInst::ICMP_UGT: + switch (pred) { + case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE: + Result = 1; break; + case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE: + Result = 0; break; + } + break; + case ICmpInst::ICMP_SGT: + switch (pred) { + case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE: + Result = 1; break; + case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE: + Result = 0; break; + } + break; + case ICmpInst::ICMP_ULE: + if (pred == ICmpInst::ICMP_UGT) Result = 0; + if (pred == ICmpInst::ICMP_ULT || pred == ICmpInst::ICMP_ULE) Result = 1; + break; + case ICmpInst::ICMP_SLE: + if (pred == ICmpInst::ICMP_SGT) Result = 0; + if (pred == ICmpInst::ICMP_SLT || pred == ICmpInst::ICMP_SLE) Result = 1; + break; + case ICmpInst::ICMP_UGE: + if (pred == ICmpInst::ICMP_ULT) Result = 0; + if (pred == ICmpInst::ICMP_UGT || pred == ICmpInst::ICMP_UGE) Result = 1; + break; + case ICmpInst::ICMP_SGE: + if (pred == ICmpInst::ICMP_SLT) Result = 0; + if (pred == ICmpInst::ICMP_SGT || pred == ICmpInst::ICMP_SGE) Result = 1; + break; + case ICmpInst::ICMP_NE: + if (pred == ICmpInst::ICMP_EQ) Result = 0; + if (pred == ICmpInst::ICMP_NE) Result = 1; + break; + } + + // If we evaluated the result, return it now. + if (Result != -1) + return ConstantInt::get(ResultTy, Result); + + // If the right hand side is a bitcast, try using its inverse to simplify + // it by moving it to the left hand side. We can't do this if it would turn + // a vector compare into a scalar compare or visa versa. + if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) { + Constant *CE2Op0 = CE2->getOperand(0); + if (CE2->getOpcode() == Instruction::BitCast && + CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy()) { + Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType()); + return ConstantExpr::getICmp(pred, Inverse, CE2Op0); + } + } + + // If the left hand side is an extension, try eliminating it. + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) { + if ((CE1->getOpcode() == Instruction::SExt && ICmpInst::isSigned(pred)) || + (CE1->getOpcode() == Instruction::ZExt && !ICmpInst::isSigned(pred))){ + Constant *CE1Op0 = CE1->getOperand(0); + Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType()); + if (CE1Inverse == CE1Op0) { + // Check whether we can safely truncate the right hand side. + Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType()); + if (ConstantExpr::getZExt(C2Inverse, C2->getType()) == C2) { + return ConstantExpr::getICmp(pred, CE1Inverse, C2Inverse); + } + } + } + } + + if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) || + (C1->isNullValue() && !C2->isNullValue())) { + // If C2 is a constant expr and C1 isn't, flip them around and fold the + // other way if possible. + // Also, if C1 is null and C2 isn't, flip them around. 
+ pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred); + return ConstantExpr::getICmp(pred, C2, C1); + } + } + return 0; +} + +/// isInBoundsIndices - Test whether the given sequence of *normalized* indices +/// is "inbounds". +template<typename IndexTy> +static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) { + // No indices means nothing that could be out of bounds. + if (Idxs.empty()) return true; + + // If the first index is zero, it's in bounds. + if (cast<Constant>(Idxs[0])->isNullValue()) return true; + + // If the first index is one and all the rest are zero, it's in bounds, + // by the one-past-the-end rule. + if (!cast<ConstantInt>(Idxs[0])->isOne()) + return false; + for (unsigned i = 1, e = Idxs.size(); i != e; ++i) + if (!cast<Constant>(Idxs[i])->isNullValue()) + return false; + return true; +} + +template<typename IndexTy> +static Constant *ConstantFoldGetElementPtrImpl(Constant *C, + bool inBounds, + ArrayRef<IndexTy> Idxs) { + if (Idxs.empty()) return C; + Constant *Idx0 = cast<Constant>(Idxs[0]); + if ((Idxs.size() == 1 && Idx0->isNullValue())) + return C; + + if (isa<UndefValue>(C)) { + PointerType *Ptr = cast<PointerType>(C->getType()); + Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs); + assert(Ty != 0 && "Invalid indices for GEP!"); + return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace())); + } + + if (C->isNullValue()) { + bool isNull = true; + for (unsigned i = 0, e = Idxs.size(); i != e; ++i) + if (!cast<Constant>(Idxs[i])->isNullValue()) { + isNull = false; + break; + } + if (isNull) { + PointerType *Ptr = cast<PointerType>(C->getType()); + Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs); + assert(Ty != 0 && "Invalid indices for GEP!"); + return ConstantPointerNull::get(PointerType::get(Ty, + Ptr->getAddressSpace())); + } + } + + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { + // Combine Indices - If the source pointer to this getelementptr instruction + // is a getelementptr instruction, combine the indices of the two + // getelementptr instructions into a single instruction. + // + if (CE->getOpcode() == Instruction::GetElementPtr) { + Type *LastTy = 0; + for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE); + I != E; ++I) + LastTy = *I; + + if ((LastTy && isa<SequentialType>(LastTy)) || Idx0->isNullValue()) { + SmallVector<Value*, 16> NewIndices; + NewIndices.reserve(Idxs.size() + CE->getNumOperands()); + for (unsigned i = 1, e = CE->getNumOperands()-1; i != e; ++i) + NewIndices.push_back(CE->getOperand(i)); + + // Add the last index of the source with the first index of the new GEP. + // Make sure to handle the case when they are actually different types. + Constant *Combined = CE->getOperand(CE->getNumOperands()-1); + // Otherwise it must be an array. + if (!Idx0->isNullValue()) { + Type *IdxTy = Combined->getType(); + if (IdxTy != Idx0->getType()) { + Type *Int64Ty = Type::getInt64Ty(IdxTy->getContext()); + Constant *C1 = ConstantExpr::getSExtOrBitCast(Idx0, Int64Ty); + Constant *C2 = ConstantExpr::getSExtOrBitCast(Combined, Int64Ty); + Combined = ConstantExpr::get(Instruction::Add, C1, C2); + } else { + Combined = + ConstantExpr::get(Instruction::Add, Idx0, Combined); + } + } + + NewIndices.push_back(Combined); + NewIndices.append(Idxs.begin() + 1, Idxs.end()); + return + ConstantExpr::getGetElementPtr(CE->getOperand(0), NewIndices, + inBounds && + cast<GEPOperator>(CE)->isInBounds()); + } + } + + // Attempt to fold casts to the same type away. 
For example, folding: + // + // i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*), + // i64 0, i64 0) + // into: + // + // i32* getelementptr ([3 x i32]* %X, i64 0, i64 0) + // + // Don't fold if the cast is changing address spaces. + if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) { + PointerType *SrcPtrTy = + dyn_cast<PointerType>(CE->getOperand(0)->getType()); + PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType()); + if (SrcPtrTy && DstPtrTy) { + ArrayType *SrcArrayTy = + dyn_cast<ArrayType>(SrcPtrTy->getElementType()); + ArrayType *DstArrayTy = + dyn_cast<ArrayType>(DstPtrTy->getElementType()); + if (SrcArrayTy && DstArrayTy + && SrcArrayTy->getElementType() == DstArrayTy->getElementType() + && SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) + return ConstantExpr::getGetElementPtr((Constant*)CE->getOperand(0), + Idxs, inBounds); + } + } + } + + // Check to see if any array indices are not within the corresponding + // notional array bounds. If so, try to determine if they can be factored + // out into preceding dimensions. + bool Unknown = false; + SmallVector<Constant *, 8> NewIdxs; + Type *Ty = C->getType(); + Type *Prev = 0; + for (unsigned i = 0, e = Idxs.size(); i != e; + Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) { + if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) + if (ATy->getNumElements() <= INT64_MAX && + ATy->getNumElements() != 0 && + CI->getSExtValue() >= (int64_t)ATy->getNumElements()) { + if (isa<SequentialType>(Prev)) { + // It's out of range, but we can factor it into the prior + // dimension. + NewIdxs.resize(Idxs.size()); + ConstantInt *Factor = ConstantInt::get(CI->getType(), + ATy->getNumElements()); + NewIdxs[i] = ConstantExpr::getSRem(CI, Factor); + + Constant *PrevIdx = cast<Constant>(Idxs[i-1]); + Constant *Div = ConstantExpr::getSDiv(CI, Factor); + + // Before adding, extend both operands to i64 to avoid + // overflow trouble. + if (!PrevIdx->getType()->isIntegerTy(64)) + PrevIdx = ConstantExpr::getSExt(PrevIdx, + Type::getInt64Ty(Div->getContext())); + if (!Div->getType()->isIntegerTy(64)) + Div = ConstantExpr::getSExt(Div, + Type::getInt64Ty(Div->getContext())); + + NewIdxs[i-1] = ConstantExpr::getAdd(PrevIdx, Div); + } else { + // It's out of range, but the prior dimension is a struct + // so we can't do anything about it. + Unknown = true; + } + } + } else { + // We don't know if it's in range or not. + Unknown = true; + } + } + + // If we did any factoring, start over with the adjusted indices. + if (!NewIdxs.empty()) { + for (unsigned i = 0, e = Idxs.size(); i != e; ++i) + if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]); + return ConstantExpr::getGetElementPtr(C, NewIdxs, inBounds); + } + + // If all indices are known integers and normalized, we can do a simple + // check for the "inbounds" property. 
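+  // Concretely: a GEP of a GlobalVariable whose indices either begin with 0,
+  // or consist of a leading 1 followed only by zeros (the one-past-the-end
+  // case), is upgraded to an 'inbounds' GEP here.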
+ if (!Unknown && !inBounds && + isa<GlobalVariable>(C) && isInBoundsIndices(Idxs)) + return ConstantExpr::getInBoundsGetElementPtr(C, Idxs); + + return 0; +} + +Constant *llvm::ConstantFoldGetElementPtr(Constant *C, + bool inBounds, + ArrayRef<Constant *> Idxs) { + return ConstantFoldGetElementPtrImpl(C, inBounds, Idxs); +} + +Constant *llvm::ConstantFoldGetElementPtr(Constant *C, + bool inBounds, + ArrayRef<Value *> Idxs) { + return ConstantFoldGetElementPtrImpl(C, inBounds, Idxs); +} diff --git a/lib/IR/ConstantFold.h b/lib/IR/ConstantFold.h new file mode 100644 index 0000000000..e12f27a7cb --- /dev/null +++ b/lib/IR/ConstantFold.h @@ -0,0 +1,56 @@ +//===-- ConstantFolding.h - Internal Constant Folding Interface -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the (internal) constant folding interfaces for LLVM. These +// interfaces are used by the ConstantExpr::get* methods to automatically fold +// constants when possible. +// +// These operators may return a null object if they don't know how to perform +// the specified operation on the specified constant types. +// +//===----------------------------------------------------------------------===// + +#ifndef CONSTANTFOLDING_H +#define CONSTANTFOLDING_H + +#include "llvm/ADT/ArrayRef.h" + +namespace llvm { + class Value; + class Constant; + class Type; + + // Constant fold various types of instruction... + Constant *ConstantFoldCastInstruction( + unsigned opcode, ///< The opcode of the cast + Constant *V, ///< The source constant + Type *DestTy ///< The destination type + ); + Constant *ConstantFoldSelectInstruction(Constant *Cond, + Constant *V1, Constant *V2); + Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx); + Constant *ConstantFoldInsertElementInstruction(Constant *Val, Constant *Elt, + Constant *Idx); + Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2, + Constant *Mask); + Constant *ConstantFoldExtractValueInstruction(Constant *Agg, + ArrayRef<unsigned> Idxs); + Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, + ArrayRef<unsigned> Idxs); + Constant *ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, + Constant *V2); + Constant *ConstantFoldCompareInstruction(unsigned short predicate, + Constant *C1, Constant *C2); + Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds, + ArrayRef<Constant *> Idxs); + Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds, + ArrayRef<Value *> Idxs); +} // End llvm namespace + +#endif diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp new file mode 100644 index 0000000000..0c7effb5ca --- /dev/null +++ b/lib/IR/Constants.cpp @@ -0,0 +1,2769 @@ +//===-- Constants.cpp - Implement Constant nodes --------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Constant* classes. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Constants.h" +#include "ConstantFold.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdarg> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Constant Class +//===----------------------------------------------------------------------===// + +void Constant::anchor() { } + +bool Constant::isNegativeZeroValue() const { + // Floating point values have an explicit -0.0 value. + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this)) + return CFP->isZero() && CFP->isNegative(); + + // Otherwise, just use +0.0. + return isNullValue(); +} + +// Return true iff this constant is positive zero (floating point), negative +// zero (floating point), or a null value. +bool Constant::isZeroValue() const { + // Floating point values have an explicit -0.0 value. + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this)) + return CFP->isZero(); + + // Otherwise, just use +0.0. + return isNullValue(); +} + +bool Constant::isNullValue() const { + // 0 is null. + if (const ConstantInt *CI = dyn_cast<ConstantInt>(this)) + return CI->isZero(); + + // +0.0 is null. + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this)) + return CFP->isZero() && !CFP->isNegative(); + + // constant zero is zero for aggregates and cpnull is null for pointers. + return isa<ConstantAggregateZero>(this) || isa<ConstantPointerNull>(this); +} + +bool Constant::isAllOnesValue() const { + // Check for -1 integers + if (const ConstantInt *CI = dyn_cast<ConstantInt>(this)) + return CI->isMinusOne(); + + // Check for FP which are bitcasted from -1 integers + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this)) + return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue(); + + // Check for constant vectors which are splats of -1 values. + if (const ConstantVector *CV = dyn_cast<ConstantVector>(this)) + if (Constant *Splat = CV->getSplatValue()) + return Splat->isAllOnesValue(); + + // Check for constant vectors which are splats of -1 values. + if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) + if (Constant *Splat = CV->getSplatValue()) + return Splat->isAllOnesValue(); + + return false; +} + +// Constructor to create a '0' constant of arbitrary type... 
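+// For instance, getNullValue(i32) is 'i32 0', getNullValue(double) is '+0.0',
+// pointer types yield a ConstantPointerNull, and struct/array/vector types
+// yield a ConstantAggregateZero.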
+Constant *Constant::getNullValue(Type *Ty) { + switch (Ty->getTypeID()) { + case Type::IntegerTyID: + return ConstantInt::get(Ty, 0); + case Type::HalfTyID: + return ConstantFP::get(Ty->getContext(), + APFloat::getZero(APFloat::IEEEhalf)); + case Type::FloatTyID: + return ConstantFP::get(Ty->getContext(), + APFloat::getZero(APFloat::IEEEsingle)); + case Type::DoubleTyID: + return ConstantFP::get(Ty->getContext(), + APFloat::getZero(APFloat::IEEEdouble)); + case Type::X86_FP80TyID: + return ConstantFP::get(Ty->getContext(), + APFloat::getZero(APFloat::x87DoubleExtended)); + case Type::FP128TyID: + return ConstantFP::get(Ty->getContext(), + APFloat::getZero(APFloat::IEEEquad)); + case Type::PPC_FP128TyID: + return ConstantFP::get(Ty->getContext(), + APFloat(APFloat::PPCDoubleDouble, + APInt::getNullValue(128))); + case Type::PointerTyID: + return ConstantPointerNull::get(cast<PointerType>(Ty)); + case Type::StructTyID: + case Type::ArrayTyID: + case Type::VectorTyID: + return ConstantAggregateZero::get(Ty); + default: + // Function, Label, or Opaque type? + llvm_unreachable("Cannot create a null constant of that type!"); + } +} + +Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) { + Type *ScalarTy = Ty->getScalarType(); + + // Create the base integer constant. + Constant *C = ConstantInt::get(Ty->getContext(), V); + + // Convert an integer to a pointer, if necessary. + if (PointerType *PTy = dyn_cast<PointerType>(ScalarTy)) + C = ConstantExpr::getIntToPtr(C, PTy); + + // Broadcast a scalar to a vector, if necessary. + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + C = ConstantVector::getSplat(VTy->getNumElements(), C); + + return C; +} + +Constant *Constant::getAllOnesValue(Type *Ty) { + if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) + return ConstantInt::get(Ty->getContext(), + APInt::getAllOnesValue(ITy->getBitWidth())); + + if (Ty->isFloatingPointTy()) { + APFloat FL = APFloat::getAllOnesValue(Ty->getPrimitiveSizeInBits(), + !Ty->isPPC_FP128Ty()); + return ConstantFP::get(Ty->getContext(), FL); + } + + VectorType *VTy = cast<VectorType>(Ty); + return ConstantVector::getSplat(VTy->getNumElements(), + getAllOnesValue(VTy->getElementType())); +} + +/// getAggregateElement - For aggregates (struct/array/vector) return the +/// constant that corresponds to the specified element if possible, or null if +/// not. This can return null if the element index is a ConstantExpr, or if +/// 'this' is a constant expr. +Constant *Constant::getAggregateElement(unsigned Elt) const { + if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(this)) + return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : 0; + + if (const ConstantArray *CA = dyn_cast<ConstantArray>(this)) + return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : 0; + + if (const ConstantVector *CV = dyn_cast<ConstantVector>(this)) + return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : 0; + + if (const ConstantAggregateZero *CAZ =dyn_cast<ConstantAggregateZero>(this)) + return CAZ->getElementValue(Elt); + + if (const UndefValue *UV = dyn_cast<UndefValue>(this)) + return UV->getElementValue(Elt); + + if (const ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(this)) + return Elt < CDS->getNumElements() ? 
CDS->getElementAsConstant(Elt) : 0; + return 0; +} + +Constant *Constant::getAggregateElement(Constant *Elt) const { + assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer"); + if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt)) + return getAggregateElement(CI->getZExtValue()); + return 0; +} + + +void Constant::destroyConstantImpl() { + // When a Constant is destroyed, there may be lingering + // references to the constant by other constants in the constant pool. These + // constants are implicitly dependent on the module that is being deleted, + // but they don't know that. Because we only find out when the CPV is + // deleted, we must now notify all of our users (that should only be + // Constants) that they are, in fact, invalid now and should be deleted. + // + while (!use_empty()) { + Value *V = use_back(); +#ifndef NDEBUG // Only in -g mode... + if (!isa<Constant>(V)) { + dbgs() << "While deleting: " << *this + << "\n\nUse still stuck around after Def is destroyed: " + << *V << "\n\n"; + } +#endif + assert(isa<Constant>(V) && "References remain to Constant being destroyed"); + cast<Constant>(V)->destroyConstant(); + + // The constant should remove itself from our use list... + assert((use_empty() || use_back() != V) && "Constant not removed!"); + } + + // Value has no outstanding references it is safe to delete it now... + delete this; +} + +/// canTrap - Return true if evaluation of this constant could trap. This is +/// true for things like constant expressions that could divide by zero. +bool Constant::canTrap() const { + assert(getType()->isFirstClassType() && "Cannot evaluate aggregate vals!"); + // The only thing that could possibly trap are constant exprs. + const ConstantExpr *CE = dyn_cast<ConstantExpr>(this); + if (!CE) return false; + + // ConstantExpr traps if any operands can trap. + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (CE->getOperand(i)->canTrap()) + return true; + + // Otherwise, only specific operations can trap. + switch (CE->getOpcode()) { + default: + return false; + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + // Div and rem can trap if the RHS is not known to be non-zero. + if (!isa<ConstantInt>(CE->getOperand(1)) ||CE->getOperand(1)->isNullValue()) + return true; + return false; + } +} + +/// isThreadDependent - Return true if the value can vary between threads. +bool Constant::isThreadDependent() const { + SmallPtrSet<const Constant*, 64> Visited; + SmallVector<const Constant*, 64> WorkList; + WorkList.push_back(this); + Visited.insert(this); + + while (!WorkList.empty()) { + const Constant *C = WorkList.pop_back_val(); + + if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { + if (GV->isThreadLocal()) + return true; + } + + for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) { + const Constant *D = dyn_cast<Constant>(C->getOperand(I)); + if (!D) + continue; + if (Visited.insert(D)) + WorkList.push_back(D); + } + } + + return false; +} + +/// isConstantUsed - Return true if the constant has users other than constant +/// exprs and other dangling things. 
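+///
+/// Illustrative sketch (assumed caller context; GV and OtherPtrTy are
+/// hypothetical names): a global whose only user is a dangling constant
+/// expression is not considered used.
+///
+///   // GlobalVariable *GV = ...;
+///   // Constant *CE = ConstantExpr::getBitCast(GV, OtherPtrTy);
+///   // While CE itself has no users, GV->isConstantUsed() returns false.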
+bool Constant::isConstantUsed() const { + for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { + const Constant *UC = dyn_cast<Constant>(*UI); + if (UC == 0 || isa<GlobalValue>(UC)) + return true; + + if (UC->isConstantUsed()) + return true; + } + return false; +} + + + +/// getRelocationInfo - This method classifies the entry according to +/// whether or not it may generate a relocation entry. This must be +/// conservative, so if it might codegen to a relocatable entry, it should say +/// so. The return values are: +/// +/// NoRelocation: This constant pool entry is guaranteed to never have a +/// relocation applied to it (because it holds a simple constant like +/// '4'). +/// LocalRelocation: This entry has relocations, but the entries are +/// guaranteed to be resolvable by the static linker, so the dynamic +/// linker will never see them. +/// GlobalRelocations: This entry may have arbitrary relocations. +/// +/// FIXME: This really should not be in IR. +Constant::PossibleRelocationsTy Constant::getRelocationInfo() const { + if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) { + if (GV->hasLocalLinkage() || GV->hasHiddenVisibility()) + return LocalRelocation; // Local to this file/library. + return GlobalRelocations; // Global reference. + } + + if (const BlockAddress *BA = dyn_cast<BlockAddress>(this)) + return BA->getFunction()->getRelocationInfo(); + + // While raw uses of blockaddress need to be relocated, differences between + // two of them don't when they are for labels in the same function. This is a + // common idiom when creating a table for the indirect goto extension, so we + // handle it efficiently here. + if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(this)) + if (CE->getOpcode() == Instruction::Sub) { + ConstantExpr *LHS = dyn_cast<ConstantExpr>(CE->getOperand(0)); + ConstantExpr *RHS = dyn_cast<ConstantExpr>(CE->getOperand(1)); + if (LHS && RHS && + LHS->getOpcode() == Instruction::PtrToInt && + RHS->getOpcode() == Instruction::PtrToInt && + isa<BlockAddress>(LHS->getOperand(0)) && + isa<BlockAddress>(RHS->getOperand(0)) && + cast<BlockAddress>(LHS->getOperand(0))->getFunction() == + cast<BlockAddress>(RHS->getOperand(0))->getFunction()) + return NoRelocation; + } + + PossibleRelocationsTy Result = NoRelocation; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + Result = std::max(Result, + cast<Constant>(getOperand(i))->getRelocationInfo()); + + return Result; +} + +/// removeDeadUsersOfConstant - If the specified constantexpr is dead, remove +/// it. This involves recursively eliminating any dead users of the +/// constantexpr. +static bool removeDeadUsersOfConstant(const Constant *C) { + if (isa<GlobalValue>(C)) return false; // Cannot remove this + + while (!C->use_empty()) { + const Constant *User = dyn_cast<Constant>(C->use_back()); + if (!User) return false; // Non-constant usage; + if (!removeDeadUsersOfConstant(User)) + return false; // Constant wasn't dead + } + + const_cast<Constant*>(C)->destroyConstant(); + return true; +} + + +/// removeDeadConstantUsers - If there are any dead constant users dangling +/// off of this constant, remove them. This method is useful for clients +/// that want to check to see if a global is unused, but don't want to deal +/// with potentially dead constants hanging off of the globals. 
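+///
+/// Hedged usage sketch (assumed caller; GV is a hypothetical global):
+/// callers commonly strip dead constant users before checking whether a
+/// global can be deleted.
+///
+///   // GV->removeDeadConstantUsers();
+///   // if (GV->use_empty())
+///   //   GV->eraseFromParent();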
+void Constant::removeDeadConstantUsers() const { + Value::const_use_iterator I = use_begin(), E = use_end(); + Value::const_use_iterator LastNonDeadUser = E; + while (I != E) { + const Constant *User = dyn_cast<Constant>(*I); + if (User == 0) { + LastNonDeadUser = I; + ++I; + continue; + } + + if (!removeDeadUsersOfConstant(User)) { + // If the constant wasn't dead, remember that this was the last live use + // and move on to the next constant. + LastNonDeadUser = I; + ++I; + continue; + } + + // If the constant was dead, then the iterator is invalidated. + if (LastNonDeadUser == E) { + I = use_begin(); + if (I == E) break; + } else { + I = LastNonDeadUser; + ++I; + } + } +} + + + +//===----------------------------------------------------------------------===// +// ConstantInt +//===----------------------------------------------------------------------===// + +void ConstantInt::anchor() { } + +ConstantInt::ConstantInt(IntegerType *Ty, const APInt& V) + : Constant(Ty, ConstantIntVal, 0, 0), Val(V) { + assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type"); +} + +ConstantInt *ConstantInt::getTrue(LLVMContext &Context) { + LLVMContextImpl *pImpl = Context.pImpl; + if (!pImpl->TheTrueVal) + pImpl->TheTrueVal = ConstantInt::get(Type::getInt1Ty(Context), 1); + return pImpl->TheTrueVal; +} + +ConstantInt *ConstantInt::getFalse(LLVMContext &Context) { + LLVMContextImpl *pImpl = Context.pImpl; + if (!pImpl->TheFalseVal) + pImpl->TheFalseVal = ConstantInt::get(Type::getInt1Ty(Context), 0); + return pImpl->TheFalseVal; +} + +Constant *ConstantInt::getTrue(Type *Ty) { + VectorType *VTy = dyn_cast<VectorType>(Ty); + if (!VTy) { + assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1."); + return ConstantInt::getTrue(Ty->getContext()); + } + assert(VTy->getElementType()->isIntegerTy(1) && + "True must be vector of i1 or i1."); + return ConstantVector::getSplat(VTy->getNumElements(), + ConstantInt::getTrue(Ty->getContext())); +} + +Constant *ConstantInt::getFalse(Type *Ty) { + VectorType *VTy = dyn_cast<VectorType>(Ty); + if (!VTy) { + assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1."); + return ConstantInt::getFalse(Ty->getContext()); + } + assert(VTy->getElementType()->isIntegerTy(1) && + "False must be vector of i1 or i1."); + return ConstantVector::getSplat(VTy->getNumElements(), + ConstantInt::getFalse(Ty->getContext())); +} + + +// Get a ConstantInt from an APInt. Note that the value stored in the DenseMap +// as the key, is a DenseMapAPIntKeyInfo::KeyTy which has provided the +// operator== and operator!= to ensure that the DenseMap doesn't attempt to +// compare APInt's of different widths, which would violate an APInt class +// invariant which generates an assertion. +ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) { + // Get the corresponding integer type for the bit width of the value. + IntegerType *ITy = IntegerType::get(Context, V.getBitWidth()); + // get an existing value or the insertion position + DenseMapAPIntKeyInfo::KeyTy Key(V, ITy); + ConstantInt *&Slot = Context.pImpl->IntConstants[Key]; + if (!Slot) Slot = new ConstantInt(ITy, V); + return Slot; +} + +Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) { + Constant *C = get(cast<IntegerType>(Ty->getScalarType()), V, isSigned); + + // For vectors, broadcast the value. 
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), C); + + return C; +} + +ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, + bool isSigned) { + return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned)); +} + +ConstantInt *ConstantInt::getSigned(IntegerType *Ty, int64_t V) { + return get(Ty, V, true); +} + +Constant *ConstantInt::getSigned(Type *Ty, int64_t V) { + return get(Ty, V, true); +} + +Constant *ConstantInt::get(Type *Ty, const APInt& V) { + ConstantInt *C = get(Ty->getContext(), V); + assert(C->getType() == Ty->getScalarType() && + "ConstantInt type doesn't match the type implied by its value!"); + + // For vectors, broadcast the value. + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), C); + + return C; +} + +ConstantInt *ConstantInt::get(IntegerType* Ty, StringRef Str, + uint8_t radix) { + return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix)); +} + +//===----------------------------------------------------------------------===// +// ConstantFP +//===----------------------------------------------------------------------===// + +static const fltSemantics *TypeToFloatSemantics(Type *Ty) { + if (Ty->isHalfTy()) + return &APFloat::IEEEhalf; + if (Ty->isFloatTy()) + return &APFloat::IEEEsingle; + if (Ty->isDoubleTy()) + return &APFloat::IEEEdouble; + if (Ty->isX86_FP80Ty()) + return &APFloat::x87DoubleExtended; + else if (Ty->isFP128Ty()) + return &APFloat::IEEEquad; + + assert(Ty->isPPC_FP128Ty() && "Unknown FP format"); + return &APFloat::PPCDoubleDouble; +} + +void ConstantFP::anchor() { } + +/// get() - This returns a constant fp for the specified value in the +/// specified type. This should only be used for simple constant values like +/// 2.0/1.0 etc, that are known-valid both as double and as the target format. +Constant *ConstantFP::get(Type *Ty, double V) { + LLVMContext &Context = Ty->getContext(); + + APFloat FV(V); + bool ignored; + FV.convert(*TypeToFloatSemantics(Ty->getScalarType()), + APFloat::rmNearestTiesToEven, &ignored); + Constant *C = get(Context, FV); + + // For vectors, broadcast the value. + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), C); + + return C; +} + + +Constant *ConstantFP::get(Type *Ty, StringRef Str) { + LLVMContext &Context = Ty->getContext(); + + APFloat FV(*TypeToFloatSemantics(Ty->getScalarType()), Str); + Constant *C = get(Context, FV); + + // For vectors, broadcast the value. + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), C); + + return C; +} + + +ConstantFP *ConstantFP::getNegativeZero(Type *Ty) { + LLVMContext &Context = Ty->getContext(); + APFloat apf = cast<ConstantFP>(Constant::getNullValue(Ty))->getValueAPF(); + apf.changeSign(); + return get(Context, apf); +} + + +Constant *ConstantFP::getZeroValueForNegation(Type *Ty) { + Type *ScalarTy = Ty->getScalarType(); + if (ScalarTy->isFloatingPointTy()) { + Constant *C = getNegativeZero(ScalarTy); + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), C); + return C; + } + + return Constant::getNullValue(Ty); +} + + +// ConstantFP accessors. 
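+//
+// Illustrative usage (assumed caller context; Ctx and the literals are
+// examples only): the Type-based overloads above convert the value to the
+// type's format before uniquing it in the context.
+//
+//   Constant *D = ConstantFP::get(Type::getDoubleTy(Ctx), 2.0);
+//   Constant *F = ConstantFP::get(Type::getFloatTy(Ctx), "0.5");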
+ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
+  DenseMapAPFloatKeyInfo::KeyTy Key(V);
+
+  LLVMContextImpl* pImpl = Context.pImpl;
+
+  ConstantFP *&Slot = pImpl->FPConstants[Key];
+
+  if (!Slot) {
+    Type *Ty;
+    if (&V.getSemantics() == &APFloat::IEEEhalf)
+      Ty = Type::getHalfTy(Context);
+    else if (&V.getSemantics() == &APFloat::IEEEsingle)
+      Ty = Type::getFloatTy(Context);
+    else if (&V.getSemantics() == &APFloat::IEEEdouble)
+      Ty = Type::getDoubleTy(Context);
+    else if (&V.getSemantics() == &APFloat::x87DoubleExtended)
+      Ty = Type::getX86_FP80Ty(Context);
+    else if (&V.getSemantics() == &APFloat::IEEEquad)
+      Ty = Type::getFP128Ty(Context);
+    else {
+      assert(&V.getSemantics() == &APFloat::PPCDoubleDouble &&
+             "Unknown FP format");
+      Ty = Type::getPPC_FP128Ty(Context);
+    }
+    Slot = new ConstantFP(Ty, V);
+  }
+
+  return Slot;
+}
+
+ConstantFP *ConstantFP::getInfinity(Type *Ty, bool Negative) {
+  const fltSemantics &Semantics = *TypeToFloatSemantics(Ty);
+  return ConstantFP::get(Ty->getContext(),
+                         APFloat::getInf(Semantics, Negative));
+}
+
+ConstantFP::ConstantFP(Type *Ty, const APFloat& V)
+  : Constant(Ty, ConstantFPVal, 0, 0), Val(V) {
+  assert(&V.getSemantics() == TypeToFloatSemantics(Ty) &&
+         "FP type mismatch");
+}
+
+bool ConstantFP::isExactlyValue(const APFloat &V) const {
+  return Val.bitwiseIsEqual(V);
+}
+
+//===----------------------------------------------------------------------===//
+//                   ConstantAggregateZero Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this CAZ has array or vector type, return a zero
+/// with the right element type.
+Constant *ConstantAggregateZero::getSequentialElement() const {
+  return Constant::getNullValue(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this CAZ has struct type, return a zero with the
+/// right element type for the specified element.
+Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
+  return Constant::getNullValue(getType()->getStructElementType(Elt));
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index if we can, otherwise return null (e.g. if C is a ConstantExpr).
+Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
+  if (isa<SequentialType>(getType()))
+    return getSequentialElement();
+  return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index.
+Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
+  if (isa<SequentialType>(getType()))
+    return getSequentialElement();
+  return getStructElement(Idx);
+}
+
+
+//===----------------------------------------------------------------------===//
+//                         UndefValue Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this undef has array or vector type, return an
+/// undef with the right element type.
+UndefValue *UndefValue::getSequentialElement() const {
+  return UndefValue::get(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this undef has struct type, return an undef with the
+/// right element type for the specified element.
+UndefValue *UndefValue::getStructElement(unsigned Elt) const { + return UndefValue::get(getType()->getStructElementType(Elt)); +} + +/// getElementValue - Return an undef of the right value for the specified GEP +/// index if we can, otherwise return null (e.g. if C is a ConstantExpr). +UndefValue *UndefValue::getElementValue(Constant *C) const { + if (isa<SequentialType>(getType())) + return getSequentialElement(); + return getStructElement(cast<ConstantInt>(C)->getZExtValue()); +} + +/// getElementValue - Return an undef of the right value for the specified GEP +/// index. +UndefValue *UndefValue::getElementValue(unsigned Idx) const { + if (isa<SequentialType>(getType())) + return getSequentialElement(); + return getStructElement(Idx); +} + + + +//===----------------------------------------------------------------------===// +// ConstantXXX Classes +//===----------------------------------------------------------------------===// + +template <typename ItTy, typename EltTy> +static bool rangeOnlyContains(ItTy Start, ItTy End, EltTy Elt) { + for (; Start != End; ++Start) + if (*Start != Elt) + return false; + return true; +} + +ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V) + : Constant(T, ConstantArrayVal, + OperandTraits<ConstantArray>::op_end(this) - V.size(), + V.size()) { + assert(V.size() == T->getNumElements() && + "Invalid initializer vector for constant array"); + for (unsigned i = 0, e = V.size(); i != e; ++i) + assert(V[i]->getType() == T->getElementType() && + "Initializer for array element doesn't match array element type!"); + std::copy(V.begin(), V.end(), op_begin()); +} + +Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) { + // Empty arrays are canonicalized to ConstantAggregateZero. + if (V.empty()) + return ConstantAggregateZero::get(Ty); + + for (unsigned i = 0, e = V.size(); i != e; ++i) { + assert(V[i]->getType() == Ty->getElementType() && + "Wrong type in array element initializer"); + } + LLVMContextImpl *pImpl = Ty->getContext().pImpl; + + // If this is an all-zero array, return a ConstantAggregateZero object. If + // all undef, return an UndefValue, if "all simple", then return a + // ConstantDataArray. + Constant *C = V[0]; + if (isa<UndefValue>(C) && rangeOnlyContains(V.begin(), V.end(), C)) + return UndefValue::get(Ty); + + if (C->isNullValue() && rangeOnlyContains(V.begin(), V.end(), C)) + return ConstantAggregateZero::get(Ty); + + // Check to see if all of the elements are ConstantFP or ConstantInt and if + // the element type is compatible with ConstantDataVector. If so, use it. + if (ConstantDataSequential::isElementTypeCompatible(C->getType())) { + // We speculatively build the elements here even if it turns out that there + // is a constantexpr or something else weird in the array, since it is so + // uncommon for that to happen. 
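+    //
+    // For example (illustrative only), four i8 ConstantInts holding "abc\0"
+    // collapse into a single ConstantDataArray (printed as c"abc\00") rather
+    // than a four-operand ConstantArray.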
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { + if (CI->getType()->isIntegerTy(8)) { + SmallVector<uint8_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(16)) { + SmallVector<uint16_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(32)) { + SmallVector<uint32_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(64)) { + SmallVector<uint64_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } + } + + if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { + if (CFP->getType()->isFloatTy()) { + SmallVector<float, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i])) + Elts.push_back(CFP->getValueAPF().convertToFloat()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } else if (CFP->getType()->isDoubleTy()) { + SmallVector<double, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i])) + Elts.push_back(CFP->getValueAPF().convertToDouble()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataArray::get(C->getContext(), Elts); + } + } + } + + // Otherwise, we really do want to create a ConstantArray. + return pImpl->ArrayConstants.getOrCreate(Ty, V); +} + +/// getTypeForElements - Return an anonymous struct type to use for a constant +/// with the specified set of elements. The list must not be empty. +StructType *ConstantStruct::getTypeForElements(LLVMContext &Context, + ArrayRef<Constant*> V, + bool Packed) { + unsigned VecSize = V.size(); + SmallVector<Type*, 16> EltTypes(VecSize); + for (unsigned i = 0; i != VecSize; ++i) + EltTypes[i] = V[i]->getType(); + + return StructType::get(Context, EltTypes, Packed); +} + + +StructType *ConstantStruct::getTypeForElements(ArrayRef<Constant*> V, + bool Packed) { + assert(!V.empty() && + "ConstantStruct::getTypeForElements cannot be called on empty list"); + return getTypeForElements(V[0]->getContext(), V, Packed); +} + + +ConstantStruct::ConstantStruct(StructType *T, ArrayRef<Constant *> V) + : Constant(T, ConstantStructVal, + OperandTraits<ConstantStruct>::op_end(this) - V.size(), + V.size()) { + assert(V.size() == T->getNumElements() && + "Invalid initializer vector for constant structure"); + for (unsigned i = 0, e = V.size(); i != e; ++i) + assert((T->isOpaque() || V[i]->getType() == T->getElementType(i)) && + "Initializer for struct element doesn't match struct element type!"); + std::copy(V.begin(), V.end(), op_begin()); +} + +// ConstantStruct accessors. 
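+//
+// Illustrative usage (assumed caller context; Ctx, I32 and the values are
+// examples only): the NULL-terminated variadic overload below simply
+// forwards to the ArrayRef form.
+//
+//   Type *I32 = Type::getInt32Ty(Ctx);
+//   StructType *ST = StructType::get(I32, I32, NULL);          // {i32, i32}
+//   Constant *CS = ConstantStruct::get(ST, ConstantInt::get(I32, 1),
+//                                      ConstantInt::get(I32, 2), NULL);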
+Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) { + assert((ST->isOpaque() || ST->getNumElements() == V.size()) && + "Incorrect # elements specified to ConstantStruct::get"); + + // Create a ConstantAggregateZero value if all elements are zeros. + bool isZero = true; + bool isUndef = false; + + if (!V.empty()) { + isUndef = isa<UndefValue>(V[0]); + isZero = V[0]->isNullValue(); + if (isUndef || isZero) { + for (unsigned i = 0, e = V.size(); i != e; ++i) { + if (!V[i]->isNullValue()) + isZero = false; + if (!isa<UndefValue>(V[i])) + isUndef = false; + } + } + } + if (isZero) + return ConstantAggregateZero::get(ST); + if (isUndef) + return UndefValue::get(ST); + + return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V); +} + +Constant *ConstantStruct::get(StructType *T, ...) { + va_list ap; + SmallVector<Constant*, 8> Values; + va_start(ap, T); + while (Constant *Val = va_arg(ap, llvm::Constant*)) + Values.push_back(Val); + va_end(ap); + return get(T, Values); +} + +ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V) + : Constant(T, ConstantVectorVal, + OperandTraits<ConstantVector>::op_end(this) - V.size(), + V.size()) { + for (size_t i = 0, e = V.size(); i != e; i++) + assert(V[i]->getType() == T->getElementType() && + "Initializer for vector element doesn't match vector element type!"); + std::copy(V.begin(), V.end(), op_begin()); +} + +// ConstantVector accessors. +Constant *ConstantVector::get(ArrayRef<Constant*> V) { + assert(!V.empty() && "Vectors can't be empty"); + VectorType *T = VectorType::get(V.front()->getType(), V.size()); + LLVMContextImpl *pImpl = T->getContext().pImpl; + + // If this is an all-undef or all-zero vector, return a + // ConstantAggregateZero or UndefValue. + Constant *C = V[0]; + bool isZero = C->isNullValue(); + bool isUndef = isa<UndefValue>(C); + + if (isZero || isUndef) { + for (unsigned i = 1, e = V.size(); i != e; ++i) + if (V[i] != C) { + isZero = isUndef = false; + break; + } + } + + if (isZero) + return ConstantAggregateZero::get(T); + if (isUndef) + return UndefValue::get(T); + + // Check to see if all of the elements are ConstantFP or ConstantInt and if + // the element type is compatible with ConstantDataVector. If so, use it. + if (ConstantDataSequential::isElementTypeCompatible(C->getType())) { + // We speculatively build the elements here even if it turns out that there + // is a constantexpr or something else weird in the array, since it is so + // uncommon for that to happen. 
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { + if (CI->getType()->isIntegerTy(8)) { + SmallVector<uint8_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(16)) { + SmallVector<uint16_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(32)) { + SmallVector<uint32_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } else if (CI->getType()->isIntegerTy(64)) { + SmallVector<uint64_t, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i])) + Elts.push_back(CI->getZExtValue()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } + } + + if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { + if (CFP->getType()->isFloatTy()) { + SmallVector<float, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i])) + Elts.push_back(CFP->getValueAPF().convertToFloat()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } else if (CFP->getType()->isDoubleTy()) { + SmallVector<double, 16> Elts; + for (unsigned i = 0, e = V.size(); i != e; ++i) + if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i])) + Elts.push_back(CFP->getValueAPF().convertToDouble()); + else + break; + if (Elts.size() == V.size()) + return ConstantDataVector::get(C->getContext(), Elts); + } + } + } + + // Otherwise, the element type isn't compatible with ConstantDataVector, or + // the operand list constants a ConstantExpr or something else strange. + return pImpl->VectorConstants.getOrCreate(T, V); +} + +Constant *ConstantVector::getSplat(unsigned NumElts, Constant *V) { + // If this splat is compatible with ConstantDataVector, use it instead of + // ConstantVector. + if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) && + ConstantDataSequential::isElementTypeCompatible(V->getType())) + return ConstantDataVector::getSplat(NumElts, V); + + SmallVector<Constant*, 32> Elts(NumElts, V); + return get(Elts); +} + + +// Utility function for determining if a ConstantExpr is a CastOp or not. This +// can't be inline because we don't want to #include Instruction.h into +// Constant.h +bool ConstantExpr::isCast() const { + return Instruction::isCast(getOpcode()); +} + +bool ConstantExpr::isCompare() const { + return getOpcode() == Instruction::ICmp || getOpcode() == Instruction::FCmp; +} + +bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const { + if (getOpcode() != Instruction::GetElementPtr) return false; + + gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this); + User::const_op_iterator OI = llvm::next(this->op_begin()); + + // Skip the first index, as it has no static limit. 
+ ++GEPI; + ++OI; + + // The remaining indices must be compile-time known integers within the + // bounds of the corresponding notional static array types. + for (; GEPI != E; ++GEPI, ++OI) { + ConstantInt *CI = dyn_cast<ConstantInt>(*OI); + if (!CI) return false; + if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI)) + if (CI->getValue().getActiveBits() > 64 || + CI->getZExtValue() >= ATy->getNumElements()) + return false; + } + + // All the indices checked out. + return true; +} + +bool ConstantExpr::hasIndices() const { + return getOpcode() == Instruction::ExtractValue || + getOpcode() == Instruction::InsertValue; +} + +ArrayRef<unsigned> ConstantExpr::getIndices() const { + if (const ExtractValueConstantExpr *EVCE = + dyn_cast<ExtractValueConstantExpr>(this)) + return EVCE->Indices; + + return cast<InsertValueConstantExpr>(this)->Indices; +} + +unsigned ConstantExpr::getPredicate() const { + assert(isCompare()); + return ((const CompareConstantExpr*)this)->predicate; +} + +/// getWithOperandReplaced - Return a constant expression identical to this +/// one, but with the specified operand set to the specified value. +Constant * +ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const { + assert(Op->getType() == getOperand(OpNo)->getType() && + "Replacing operand with value of different type!"); + if (getOperand(OpNo) == Op) + return const_cast<ConstantExpr*>(this); + + SmallVector<Constant*, 8> NewOps; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + NewOps.push_back(i == OpNo ? Op : getOperand(i)); + + return getWithOperands(NewOps); +} + +/// getWithOperands - This returns the current constant expression with the +/// operands replaced with the specified values. The specified array must +/// have the same number of operands as our current one. +Constant *ConstantExpr:: +getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const { + assert(Ops.size() == getNumOperands() && "Operand count mismatch!"); + bool AnyChange = Ty != getType(); + for (unsigned i = 0; i != Ops.size(); ++i) + AnyChange |= Ops[i] != getOperand(i); + + if (!AnyChange) // No operands changed, return self. 
+ return const_cast<ConstantExpr*>(this); + + switch (getOpcode()) { + case Instruction::Trunc: + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPTrunc: + case Instruction::FPExt: + case Instruction::UIToFP: + case Instruction::SIToFP: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::BitCast: + return ConstantExpr::getCast(getOpcode(), Ops[0], Ty); + case Instruction::Select: + return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); + case Instruction::InsertElement: + return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]); + case Instruction::ExtractElement: + return ConstantExpr::getExtractElement(Ops[0], Ops[1]); + case Instruction::InsertValue: + return ConstantExpr::getInsertValue(Ops[0], Ops[1], getIndices()); + case Instruction::ExtractValue: + return ConstantExpr::getExtractValue(Ops[0], getIndices()); + case Instruction::ShuffleVector: + return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]); + case Instruction::GetElementPtr: + return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1), + cast<GEPOperator>(this)->isInBounds()); + case Instruction::ICmp: + case Instruction::FCmp: + return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1]); + default: + assert(getNumOperands() == 2 && "Must be binary operator?"); + return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassOptionalData); + } +} + + +//===----------------------------------------------------------------------===// +// isValueValidForType implementations + +bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) { + unsigned NumBits = Ty->getIntegerBitWidth(); // assert okay + if (Ty->isIntegerTy(1)) + return Val == 0 || Val == 1; + if (NumBits >= 64) + return true; // always true, has to fit in largest type + uint64_t Max = (1ll << NumBits) - 1; + return Val <= Max; +} + +bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) { + unsigned NumBits = Ty->getIntegerBitWidth(); + if (Ty->isIntegerTy(1)) + return Val == 0 || Val == 1 || Val == -1; + if (NumBits >= 64) + return true; // always true, has to fit in largest type + int64_t Min = -(1ll << (NumBits-1)); + int64_t Max = (1ll << (NumBits-1)) - 1; + return (Val >= Min && Val <= Max); +} + +bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) { + // convert modifies in place, so make a copy. + APFloat Val2 = APFloat(Val); + bool losesInfo; + switch (Ty->getTypeID()) { + default: + return false; // These can't be represented as floating point! 
+ + // FIXME rounding mode needs to be more flexible + case Type::HalfTyID: { + if (&Val2.getSemantics() == &APFloat::IEEEhalf) + return true; + Val2.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &losesInfo); + return !losesInfo; + } + case Type::FloatTyID: { + if (&Val2.getSemantics() == &APFloat::IEEEsingle) + return true; + Val2.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &losesInfo); + return !losesInfo; + } + case Type::DoubleTyID: { + if (&Val2.getSemantics() == &APFloat::IEEEhalf || + &Val2.getSemantics() == &APFloat::IEEEsingle || + &Val2.getSemantics() == &APFloat::IEEEdouble) + return true; + Val2.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &losesInfo); + return !losesInfo; + } + case Type::X86_FP80TyID: + return &Val2.getSemantics() == &APFloat::IEEEhalf || + &Val2.getSemantics() == &APFloat::IEEEsingle || + &Val2.getSemantics() == &APFloat::IEEEdouble || + &Val2.getSemantics() == &APFloat::x87DoubleExtended; + case Type::FP128TyID: + return &Val2.getSemantics() == &APFloat::IEEEhalf || + &Val2.getSemantics() == &APFloat::IEEEsingle || + &Val2.getSemantics() == &APFloat::IEEEdouble || + &Val2.getSemantics() == &APFloat::IEEEquad; + case Type::PPC_FP128TyID: + return &Val2.getSemantics() == &APFloat::IEEEhalf || + &Val2.getSemantics() == &APFloat::IEEEsingle || + &Val2.getSemantics() == &APFloat::IEEEdouble || + &Val2.getSemantics() == &APFloat::PPCDoubleDouble; + } +} + + +//===----------------------------------------------------------------------===// +// Factory Function Implementation + +ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) { + assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) && + "Cannot create an aggregate zero of non-aggregate type!"); + + ConstantAggregateZero *&Entry = Ty->getContext().pImpl->CAZConstants[Ty]; + if (Entry == 0) + Entry = new ConstantAggregateZero(Ty); + + return Entry; +} + +/// destroyConstant - Remove the constant from the constant table. +/// +void ConstantAggregateZero::destroyConstant() { + getContext().pImpl->CAZConstants.erase(getType()); + destroyConstantImpl(); +} + +/// destroyConstant - Remove the constant from the constant table... +/// +void ConstantArray::destroyConstant() { + getType()->getContext().pImpl->ArrayConstants.remove(this); + destroyConstantImpl(); +} + + +//---- ConstantStruct::get() implementation... +// + +// destroyConstant - Remove the constant from the constant table... +// +void ConstantStruct::destroyConstant() { + getType()->getContext().pImpl->StructConstants.remove(this); + destroyConstantImpl(); +} + +// destroyConstant - Remove the constant from the constant table... +// +void ConstantVector::destroyConstant() { + getType()->getContext().pImpl->VectorConstants.remove(this); + destroyConstantImpl(); +} + +/// getSplatValue - If this is a splat vector constant, meaning that all of +/// the elements have the same value, return that value. Otherwise return 0. +Constant *Constant::getSplatValue() const { + assert(this->getType()->isVectorTy() && "Only valid for vectors!"); + if (isa<ConstantAggregateZero>(this)) + return getNullValue(this->getType()->getVectorElementType()); + if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) + return CV->getSplatValue(); + if (const ConstantVector *CV = dyn_cast<ConstantVector>(this)) + return CV->getSplatValue(); + return 0; +} + +/// getSplatValue - If this is a splat constant, where all of the +/// elements have the same value, return that value. Otherwise return null. 
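+///
+/// Sketch (assumed caller context; Ctx and the names are illustrative):
+/// whether the splat ends up stored as a ConstantVector or as a
+/// ConstantDataVector, getSplatValue() reports the shared element, and a
+/// mixed vector yields null.
+///
+///   // Constant *One   = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
+///   // Constant *Splat = ConstantVector::getSplat(4, One);
+///   // Splat->getSplatValue() == One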
+Constant *ConstantVector::getSplatValue() const { + // Check out first element. + Constant *Elt = getOperand(0); + // Then make sure all remaining elements point to the same value. + for (unsigned I = 1, E = getNumOperands(); I < E; ++I) + if (getOperand(I) != Elt) + return 0; + return Elt; +} + +/// If C is a constant integer then return its value, otherwise C must be a +/// vector of constant integers, all equal, and the common value is returned. +const APInt &Constant::getUniqueInteger() const { + if (const ConstantInt *CI = dyn_cast<ConstantInt>(this)) + return CI->getValue(); + assert(this->getSplatValue() && "Doesn't contain a unique integer!"); + const Constant *C = this->getAggregateElement(0U); + assert(C && isa<ConstantInt>(C) && "Not a vector of numbers!"); + return cast<ConstantInt>(C)->getValue(); +} + + +//---- ConstantPointerNull::get() implementation. +// + +ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) { + ConstantPointerNull *&Entry = Ty->getContext().pImpl->CPNConstants[Ty]; + if (Entry == 0) + Entry = new ConstantPointerNull(Ty); + + return Entry; +} + +// destroyConstant - Remove the constant from the constant table... +// +void ConstantPointerNull::destroyConstant() { + getContext().pImpl->CPNConstants.erase(getType()); + // Free the constant and any dangling references to it. + destroyConstantImpl(); +} + + +//---- UndefValue::get() implementation. +// + +UndefValue *UndefValue::get(Type *Ty) { + UndefValue *&Entry = Ty->getContext().pImpl->UVConstants[Ty]; + if (Entry == 0) + Entry = new UndefValue(Ty); + + return Entry; +} + +// destroyConstant - Remove the constant from the constant table. +// +void UndefValue::destroyConstant() { + // Free the constant and any dangling references to it. + getContext().pImpl->UVConstants.erase(getType()); + destroyConstantImpl(); +} + +//---- BlockAddress::get() implementation. +// + +BlockAddress *BlockAddress::get(BasicBlock *BB) { + assert(BB->getParent() != 0 && "Block must have a parent"); + return get(BB->getParent(), BB); +} + +BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) { + BlockAddress *&BA = + F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)]; + if (BA == 0) + BA = new BlockAddress(F, BB); + + assert(BA->getFunction() == F && "Basic block moved between functions"); + return BA; +} + +BlockAddress::BlockAddress(Function *F, BasicBlock *BB) +: Constant(Type::getInt8PtrTy(F->getContext()), Value::BlockAddressVal, + &Op<0>(), 2) { + setOperand(0, F); + setOperand(1, BB); + BB->AdjustBlockAddressRefCount(1); +} + + +// destroyConstant - Remove the constant from the constant table. +// +void BlockAddress::destroyConstant() { + getFunction()->getType()->getContext().pImpl + ->BlockAddresses.erase(std::make_pair(getFunction(), getBasicBlock())); + getBasicBlock()->AdjustBlockAddressRefCount(-1); + destroyConstantImpl(); +} + +void BlockAddress::replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) { + // This could be replacing either the Basic Block or the Function. In either + // case, we have to remove the map entry. + Function *NewF = getFunction(); + BasicBlock *NewBB = getBasicBlock(); + + if (U == &Op<0>()) + NewF = cast<Function>(To); + else + NewBB = cast<BasicBlock>(To); + + // See if the 'new' entry already exists, if not, just update this in place + // and return early. 
+ BlockAddress *&NewBA = + getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)]; + if (NewBA == 0) { + getBasicBlock()->AdjustBlockAddressRefCount(-1); + + // Remove the old entry, this can't cause the map to rehash (just a + // tombstone will get added). + getContext().pImpl->BlockAddresses.erase(std::make_pair(getFunction(), + getBasicBlock())); + NewBA = this; + setOperand(0, NewF); + setOperand(1, NewBB); + getBasicBlock()->AdjustBlockAddressRefCount(1); + return; + } + + // Otherwise, I do need to replace this with an existing value. + assert(NewBA != this && "I didn't contain From!"); + + // Everyone using this now uses the replacement. + replaceAllUsesWith(NewBA); + + destroyConstant(); +} + +//---- ConstantExpr::get() implementations. +// + +/// This is a utility function to handle folding of casts and lookup of the +/// cast in the ExprConstants map. It is used by the various get* methods below. +static inline Constant *getFoldedCast( + Instruction::CastOps opc, Constant *C, Type *Ty) { + assert(Ty->isFirstClassType() && "Cannot cast to an aggregate type!"); + // Fold a few common cases + if (Constant *FC = ConstantFoldCastInstruction(opc, C, Ty)) + return FC; + + LLVMContextImpl *pImpl = Ty->getContext().pImpl; + + // Look up the constant in the table first to ensure uniqueness. + ExprMapKeyType Key(opc, C); + + return pImpl->ExprConstants.getOrCreate(Ty, Key); +} + +Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) { + Instruction::CastOps opc = Instruction::CastOps(oc); + assert(Instruction::isCast(opc) && "opcode out of range"); + assert(C && Ty && "Null arguments to getCast"); + assert(CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!"); + + switch (opc) { + default: + llvm_unreachable("Invalid cast opcode"); + case Instruction::Trunc: return getTrunc(C, Ty); + case Instruction::ZExt: return getZExt(C, Ty); + case Instruction::SExt: return getSExt(C, Ty); + case Instruction::FPTrunc: return getFPTrunc(C, Ty); + case Instruction::FPExt: return getFPExtend(C, Ty); + case Instruction::UIToFP: return getUIToFP(C, Ty); + case Instruction::SIToFP: return getSIToFP(C, Ty); + case Instruction::FPToUI: return getFPToUI(C, Ty); + case Instruction::FPToSI: return getFPToSI(C, Ty); + case Instruction::PtrToInt: return getPtrToInt(C, Ty); + case Instruction::IntToPtr: return getIntToPtr(C, Ty); + case Instruction::BitCast: return getBitCast(C, Ty); + } +} + +Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) { + if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return getBitCast(C, Ty); + return getZExt(C, Ty); +} + +Constant *ConstantExpr::getSExtOrBitCast(Constant *C, Type *Ty) { + if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return getBitCast(C, Ty); + return getSExt(C, Ty); +} + +Constant *ConstantExpr::getTruncOrBitCast(Constant *C, Type *Ty) { + if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return getBitCast(C, Ty); + return getTrunc(C, Ty); +} + +Constant *ConstantExpr::getPointerCast(Constant *S, Type *Ty) { + assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); + assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && + "Invalid cast"); + + if (Ty->isIntOrIntVectorTy()) + return getPtrToInt(S, Ty); + return getBitCast(S, Ty); +} + +Constant *ConstantExpr::getIntegerCast(Constant *C, Type *Ty, + bool isSigned) { + assert(C->getType()->isIntOrIntVectorTy() && + Ty->isIntOrIntVectorTy() && "Invalid cast"); + unsigned SrcBits = 
C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + Instruction::CastOps opcode = + (SrcBits == DstBits ? Instruction::BitCast : + (SrcBits > DstBits ? Instruction::Trunc : + (isSigned ? Instruction::SExt : Instruction::ZExt))); + return getCast(opcode, C, Ty); +} + +Constant *ConstantExpr::getFPCast(Constant *C, Type *Ty) { + assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && + "Invalid cast"); + unsigned SrcBits = C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + if (SrcBits == DstBits) + return C; // Avoid a useless cast + Instruction::CastOps opcode = + (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt); + return getCast(opcode, C, Ty); +} + +Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isIntOrIntVectorTy() && "Trunc operand must be integer"); + assert(Ty->isIntOrIntVectorTy() && "Trunc produces only integral"); + assert(C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits()&& + "SrcTy must be larger than DestTy for Trunc!"); + + return getFoldedCast(Instruction::Trunc, C, Ty); +} + +Constant *ConstantExpr::getSExt(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isIntOrIntVectorTy() && "SExt operand must be integral"); + assert(Ty->isIntOrIntVectorTy() && "SExt produces only integer"); + assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&& + "SrcTy must be smaller than DestTy for SExt!"); + + return getFoldedCast(Instruction::SExt, C, Ty); +} + +Constant *ConstantExpr::getZExt(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isIntOrIntVectorTy() && "ZEXt operand must be integral"); + assert(Ty->isIntOrIntVectorTy() && "ZExt produces only integer"); + assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&& + "SrcTy must be smaller than DestTy for ZExt!"); + + return getFoldedCast(Instruction::ZExt, C, Ty); +} + +Constant *ConstantExpr::getFPTrunc(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && + C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits()&& + "This is an illegal floating point truncation!"); + return getFoldedCast(Instruction::FPTrunc, C, Ty); +} + +Constant *ConstantExpr::getFPExtend(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && + C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&& + "This is an illegal floating 
point extension!"); + return getFoldedCast(Instruction::FPExt, C, Ty); +} + +Constant *ConstantExpr::getUIToFP(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() && + "This is an illegal uint to floating point cast!"); + return getFoldedCast(Instruction::UIToFP, C, Ty); +} + +Constant *ConstantExpr::getSIToFP(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() && + "This is an illegal sint to floating point cast!"); + return getFoldedCast(Instruction::SIToFP, C, Ty); +} + +Constant *ConstantExpr::getFPToUI(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() && + "This is an illegal floating point to uint cast!"); + return getFoldedCast(Instruction::FPToUI, C, Ty); +} + +Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty) { +#ifndef NDEBUG + bool fromVec = C->getType()->getTypeID() == Type::VectorTyID; + bool toVec = Ty->getTypeID() == Type::VectorTyID; +#endif + assert((fromVec == toVec) && "Cannot convert from scalar to/from vector"); + assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() && + "This is an illegal floating point to sint cast!"); + return getFoldedCast(Instruction::FPToSI, C, Ty); +} + +Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy) { + assert(C->getType()->getScalarType()->isPointerTy() && + "PtrToInt source must be pointer or pointer vector"); + assert(DstTy->getScalarType()->isIntegerTy() && + "PtrToInt destination must be integer or integer vector"); + assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy)); + if (isa<VectorType>(C->getType())) + assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&& + "Invalid cast between a different number of vector elements"); + return getFoldedCast(Instruction::PtrToInt, C, DstTy); +} + +Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy) { + assert(C->getType()->getScalarType()->isIntegerTy() && + "IntToPtr source must be integer or integer vector"); + assert(DstTy->getScalarType()->isPointerTy() && + "IntToPtr destination must be a pointer or pointer vector"); + assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy)); + if (isa<VectorType>(C->getType())) + assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&& + "Invalid cast between a different number of vector elements"); + return getFoldedCast(Instruction::IntToPtr, C, DstTy); +} + +Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy) { + assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) && + "Invalid constantexpr bitcast!"); + + // It is common to ask for a bitcast of a value to its own type, handle this + // speedily. 
+ if (C->getType() == DstTy) return C; + + return getFoldedCast(Instruction::BitCast, C, DstTy); +} + +Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2, + unsigned Flags) { + // Check the operands for consistency first. + assert(Opcode >= Instruction::BinaryOpsBegin && + Opcode < Instruction::BinaryOpsEnd && + "Invalid opcode in binary constant expression"); + assert(C1->getType() == C2->getType() && + "Operand types in binary constant expression should match"); + +#ifndef NDEBUG + switch (Opcode) { + case Instruction::Add: + case Instruction::Sub: + case Instruction::Mul: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isIntOrIntVectorTy() && + "Tried to create an integer operation on a non-integer type!"); + break; + case Instruction::FAdd: + case Instruction::FSub: + case Instruction::FMul: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isFPOrFPVectorTy() && + "Tried to create a floating-point operation on a " + "non-floating-point type!"); + break; + case Instruction::UDiv: + case Instruction::SDiv: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isIntOrIntVectorTy() && + "Tried to create an arithmetic operation on a non-arithmetic type!"); + break; + case Instruction::FDiv: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isFPOrFPVectorTy() && + "Tried to create an arithmetic operation on a non-arithmetic type!"); + break; + case Instruction::URem: + case Instruction::SRem: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isIntOrIntVectorTy() && + "Tried to create an arithmetic operation on a non-arithmetic type!"); + break; + case Instruction::FRem: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isFPOrFPVectorTy() && + "Tried to create an arithmetic operation on a non-arithmetic type!"); + break; + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isIntOrIntVectorTy() && + "Tried to create a logical operation on a non-integral type!"); + break; + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + assert(C1->getType()->isIntOrIntVectorTy() && + "Tried to create a shift operation on a non-integer type!"); + break; + default: + break; + } +#endif + + if (Constant *FC = ConstantFoldBinaryInstruction(Opcode, C1, C2)) + return FC; // Fold a few common cases. + + Constant *ArgVec[] = { C1, C2 }; + ExprMapKeyType Key(Opcode, ArgVec, 0, Flags); + + LLVMContextImpl *pImpl = C1->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(C1->getType(), Key); +} + +Constant *ConstantExpr::getSizeOf(Type* Ty) { + // sizeof is implemented as: (i64) gep (Ty*)null, 1 + // Note that a non-inbounds gep is used, as null isn't within any object. 
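+  //
+  // For example (illustrative), getSizeOf(i32) yields roughly
+  //   i64 ptrtoint (i32* getelementptr (i32* null, i32 1) to i64)
+  // which later phases can fold to the type's allocation size once a
+  // DataLayout is available.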
+ Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1); + Constant *GEP = getGetElementPtr( + Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx); + return getPtrToInt(GEP, + Type::getInt64Ty(Ty->getContext())); +} + +Constant *ConstantExpr::getAlignOf(Type* Ty) { + // alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1 + // Note that a non-inbounds gep is used, as null isn't within any object. + Type *AligningTy = + StructType::get(Type::getInt1Ty(Ty->getContext()), Ty, NULL); + Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo()); + Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0); + Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1); + Constant *Indices[2] = { Zero, One }; + Constant *GEP = getGetElementPtr(NullPtr, Indices); + return getPtrToInt(GEP, + Type::getInt64Ty(Ty->getContext())); +} + +Constant *ConstantExpr::getOffsetOf(StructType* STy, unsigned FieldNo) { + return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()), + FieldNo)); +} + +Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) { + // offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo + // Note that a non-inbounds gep is used, as null isn't within any object. + Constant *GEPIdx[] = { + ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0), + FieldNo + }; + Constant *GEP = getGetElementPtr( + Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx); + return getPtrToInt(GEP, + Type::getInt64Ty(Ty->getContext())); +} + +Constant *ConstantExpr::getCompare(unsigned short Predicate, + Constant *C1, Constant *C2) { + assert(C1->getType() == C2->getType() && "Op types should be identical!"); + + switch (Predicate) { + default: llvm_unreachable("Invalid CmpInst predicate"); + case CmpInst::FCMP_FALSE: case CmpInst::FCMP_OEQ: case CmpInst::FCMP_OGT: + case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLT: case CmpInst::FCMP_OLE: + case CmpInst::FCMP_ONE: case CmpInst::FCMP_ORD: case CmpInst::FCMP_UNO: + case CmpInst::FCMP_UEQ: case CmpInst::FCMP_UGT: case CmpInst::FCMP_UGE: + case CmpInst::FCMP_ULT: case CmpInst::FCMP_ULE: case CmpInst::FCMP_UNE: + case CmpInst::FCMP_TRUE: + return getFCmp(Predicate, C1, C2); + + case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: case CmpInst::ICMP_UGT: + case CmpInst::ICMP_UGE: case CmpInst::ICMP_ULT: case CmpInst::ICMP_ULE: + case CmpInst::ICMP_SGT: case CmpInst::ICMP_SGE: case CmpInst::ICMP_SLT: + case CmpInst::ICMP_SLE: + return getICmp(Predicate, C1, C2); + } +} + +Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2) { + assert(!SelectInst::areInvalidOperands(C, V1, V2)&&"Invalid select operands"); + + if (Constant *SC = ConstantFoldSelectInstruction(C, V1, V2)) + return SC; // Fold common cases + + Constant *ArgVec[] = { C, V1, V2 }; + ExprMapKeyType Key(Instruction::Select, ArgVec); + + LLVMContextImpl *pImpl = C->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(V1->getType(), Key); +} + +Constant *ConstantExpr::getGetElementPtr(Constant *C, ArrayRef<Value *> Idxs, + bool InBounds) { + assert(C->getType()->isPtrOrPtrVectorTy() && + "Non-pointer type for constant GetElementPtr expression"); + + if (Constant *FC = ConstantFoldGetElementPtr(C, InBounds, Idxs)) + return FC; // Fold a few common cases. + + // Get the result type of the getelementptr! 
+ Type *Ty = GetElementPtrInst::getIndexedType(C->getType(), Idxs); + assert(Ty && "GEP indices invalid!"); + unsigned AS = C->getType()->getPointerAddressSpace(); + Type *ReqTy = Ty->getPointerTo(AS); + if (VectorType *VecTy = dyn_cast<VectorType>(C->getType())) + ReqTy = VectorType::get(ReqTy, VecTy->getNumElements()); + + // Look up the constant in the table first to ensure uniqueness + std::vector<Constant*> ArgVec; + ArgVec.reserve(1 + Idxs.size()); + ArgVec.push_back(C); + for (unsigned i = 0, e = Idxs.size(); i != e; ++i) { + assert(Idxs[i]->getType()->isVectorTy() == ReqTy->isVectorTy() && + "getelementptr index type missmatch"); + assert((!Idxs[i]->getType()->isVectorTy() || + ReqTy->getVectorNumElements() == + Idxs[i]->getType()->getVectorNumElements()) && + "getelementptr index type missmatch"); + ArgVec.push_back(cast<Constant>(Idxs[i])); + } + const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec, 0, + InBounds ? GEPOperator::IsInBounds : 0); + + LLVMContextImpl *pImpl = C->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(ReqTy, Key); +} + +Constant * +ConstantExpr::getICmp(unsigned short pred, Constant *LHS, Constant *RHS) { + assert(LHS->getType() == RHS->getType()); + assert(pred >= ICmpInst::FIRST_ICMP_PREDICATE && + pred <= ICmpInst::LAST_ICMP_PREDICATE && "Invalid ICmp Predicate"); + + if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS)) + return FC; // Fold a few common cases... + + // Look up the constant in the table first to ensure uniqueness + Constant *ArgVec[] = { LHS, RHS }; + // Get the key type with both the opcode and predicate + const ExprMapKeyType Key(Instruction::ICmp, ArgVec, pred); + + Type *ResultTy = Type::getInt1Ty(LHS->getContext()); + if (VectorType *VT = dyn_cast<VectorType>(LHS->getType())) + ResultTy = VectorType::get(ResultTy, VT->getNumElements()); + + LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(ResultTy, Key); +} + +Constant * +ConstantExpr::getFCmp(unsigned short pred, Constant *LHS, Constant *RHS) { + assert(LHS->getType() == RHS->getType()); + assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && "Invalid FCmp Predicate"); + + if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS)) + return FC; // Fold a few common cases... + + // Look up the constant in the table first to ensure uniqueness + Constant *ArgVec[] = { LHS, RHS }; + // Get the key type with both the opcode and predicate + const ExprMapKeyType Key(Instruction::FCmp, ArgVec, pred); + + Type *ResultTy = Type::getInt1Ty(LHS->getContext()); + if (VectorType *VT = dyn_cast<VectorType>(LHS->getType())) + ResultTy = VectorType::get(ResultTy, VT->getNumElements()); + + LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(ResultTy, Key); +} + +Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx) { + assert(Val->getType()->isVectorTy() && + "Tried to create extractelement operation on non-vector type!"); + assert(Idx->getType()->isIntegerTy(32) && + "Extractelement index must be i32 type!"); + + if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx)) + return FC; // Fold a few common cases. 
+ + // Look up the constant in the table first to ensure uniqueness + Constant *ArgVec[] = { Val, Idx }; + const ExprMapKeyType Key(Instruction::ExtractElement, ArgVec); + + LLVMContextImpl *pImpl = Val->getContext().pImpl; + Type *ReqTy = Val->getType()->getVectorElementType(); + return pImpl->ExprConstants.getOrCreate(ReqTy, Key); +} + +Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt, + Constant *Idx) { + assert(Val->getType()->isVectorTy() && + "Tried to create insertelement operation on non-vector type!"); + assert(Elt->getType() == Val->getType()->getVectorElementType() && + "Insertelement types must match!"); + assert(Idx->getType()->isIntegerTy(32) && + "Insertelement index must be i32 type!"); + + if (Constant *FC = ConstantFoldInsertElementInstruction(Val, Elt, Idx)) + return FC; // Fold a few common cases. + // Look up the constant in the table first to ensure uniqueness + Constant *ArgVec[] = { Val, Elt, Idx }; + const ExprMapKeyType Key(Instruction::InsertElement, ArgVec); + + LLVMContextImpl *pImpl = Val->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(Val->getType(), Key); +} + +Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2, + Constant *Mask) { + assert(ShuffleVectorInst::isValidOperands(V1, V2, Mask) && + "Invalid shuffle vector constant expr operands!"); + + if (Constant *FC = ConstantFoldShuffleVectorInstruction(V1, V2, Mask)) + return FC; // Fold a few common cases. + + unsigned NElts = Mask->getType()->getVectorNumElements(); + Type *EltTy = V1->getType()->getVectorElementType(); + Type *ShufTy = VectorType::get(EltTy, NElts); + + // Look up the constant in the table first to ensure uniqueness + Constant *ArgVec[] = { V1, V2, Mask }; + const ExprMapKeyType Key(Instruction::ShuffleVector, ArgVec); + + LLVMContextImpl *pImpl = ShufTy->getContext().pImpl; + return pImpl->ExprConstants.getOrCreate(ShufTy, Key); +} + +Constant *ConstantExpr::getInsertValue(Constant *Agg, Constant *Val, + ArrayRef<unsigned> Idxs) { + assert(ExtractValueInst::getIndexedType(Agg->getType(), + Idxs) == Val->getType() && + "insertvalue indices invalid!"); + assert(Agg->getType()->isFirstClassType() && + "Non-first-class type for constant insertvalue expression"); + Constant *FC = ConstantFoldInsertValueInstruction(Agg, Val, Idxs); + assert(FC && "insertvalue constant expr couldn't be folded!"); + return FC; +} + +Constant *ConstantExpr::getExtractValue(Constant *Agg, + ArrayRef<unsigned> Idxs) { + assert(Agg->getType()->isFirstClassType() && + "Tried to create extractelement operation on non-first-class type!"); + + Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs); + (void)ReqTy; + assert(ReqTy && "extractvalue indices invalid!"); + + assert(Agg->getType()->isFirstClassType() && + "Non-first-class type for constant extractvalue expression"); + Constant *FC = ConstantFoldExtractValueInstruction(Agg, Idxs); + assert(FC && "ExtractValue constant expr couldn't be folded!"); + return FC; +} + +Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) { + assert(C->getType()->isIntOrIntVectorTy() && + "Cannot NEG a nonintegral value!"); + return getSub(ConstantFP::getZeroValueForNegation(C->getType()), + C, HasNUW, HasNSW); +} + +Constant *ConstantExpr::getFNeg(Constant *C) { + assert(C->getType()->isFPOrFPVectorTy() && + "Cannot FNEG a non-floating-point value!"); + return getFSub(ConstantFP::getZeroValueForNegation(C->getType()), C); +} + +Constant *ConstantExpr::getNot(Constant *C) { + 
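+  // NOT is materialized as an xor with the all-ones value of C's type,
+  // i.e. ~C is built as C ^ -1.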
assert(C->getType()->isIntOrIntVectorTy() && + "Cannot NOT a nonintegral value!"); + return get(Instruction::Xor, C, Constant::getAllOnesValue(C->getType())); +} + +Constant *ConstantExpr::getAdd(Constant *C1, Constant *C2, + bool HasNUW, bool HasNSW) { + unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | + (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); + return get(Instruction::Add, C1, C2, Flags); +} + +Constant *ConstantExpr::getFAdd(Constant *C1, Constant *C2) { + return get(Instruction::FAdd, C1, C2); +} + +Constant *ConstantExpr::getSub(Constant *C1, Constant *C2, + bool HasNUW, bool HasNSW) { + unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | + (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); + return get(Instruction::Sub, C1, C2, Flags); +} + +Constant *ConstantExpr::getFSub(Constant *C1, Constant *C2) { + return get(Instruction::FSub, C1, C2); +} + +Constant *ConstantExpr::getMul(Constant *C1, Constant *C2, + bool HasNUW, bool HasNSW) { + unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | + (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); + return get(Instruction::Mul, C1, C2, Flags); +} + +Constant *ConstantExpr::getFMul(Constant *C1, Constant *C2) { + return get(Instruction::FMul, C1, C2); +} + +Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact) { + return get(Instruction::UDiv, C1, C2, + isExact ? PossiblyExactOperator::IsExact : 0); +} + +Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact) { + return get(Instruction::SDiv, C1, C2, + isExact ? PossiblyExactOperator::IsExact : 0); +} + +Constant *ConstantExpr::getFDiv(Constant *C1, Constant *C2) { + return get(Instruction::FDiv, C1, C2); +} + +Constant *ConstantExpr::getURem(Constant *C1, Constant *C2) { + return get(Instruction::URem, C1, C2); +} + +Constant *ConstantExpr::getSRem(Constant *C1, Constant *C2) { + return get(Instruction::SRem, C1, C2); +} + +Constant *ConstantExpr::getFRem(Constant *C1, Constant *C2) { + return get(Instruction::FRem, C1, C2); +} + +Constant *ConstantExpr::getAnd(Constant *C1, Constant *C2) { + return get(Instruction::And, C1, C2); +} + +Constant *ConstantExpr::getOr(Constant *C1, Constant *C2) { + return get(Instruction::Or, C1, C2); +} + +Constant *ConstantExpr::getXor(Constant *C1, Constant *C2) { + return get(Instruction::Xor, C1, C2); +} + +Constant *ConstantExpr::getShl(Constant *C1, Constant *C2, + bool HasNUW, bool HasNSW) { + unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | + (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); + return get(Instruction::Shl, C1, C2, Flags); +} + +Constant *ConstantExpr::getLShr(Constant *C1, Constant *C2, bool isExact) { + return get(Instruction::LShr, C1, C2, + isExact ? PossiblyExactOperator::IsExact : 0); +} + +Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) { + return get(Instruction::AShr, C1, C2, + isExact ? PossiblyExactOperator::IsExact : 0); +} + +/// getBinOpIdentity - Return the identity for the given binary operation, +/// i.e. a constant C such that X op C = X and C op X = X for every X. It +/// returns null if the operator doesn't have an identity. +Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty) { + switch (Opcode) { + default: + // Doesn't have an identity. 
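+    // For example, sub and the shifts only have a right identity
+    // (X op 0 == X) but no constant C with C op X == X for every X,
+    // so they fall through to here.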
+ return 0; + + case Instruction::Add: + case Instruction::Or: + case Instruction::Xor: + return Constant::getNullValue(Ty); + + case Instruction::Mul: + return ConstantInt::get(Ty, 1); + + case Instruction::And: + return Constant::getAllOnesValue(Ty); + } +} + +/// getBinOpAbsorber - Return the absorbing element for the given binary +/// operation, i.e. a constant C such that X op C = C and C op X = C for +/// every X. For example, this returns zero for integer multiplication. +/// It returns null if the operator doesn't have an absorbing element. +Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) { + switch (Opcode) { + default: + // Doesn't have an absorber. + return 0; + + case Instruction::Or: + return Constant::getAllOnesValue(Ty); + + case Instruction::And: + case Instruction::Mul: + return Constant::getNullValue(Ty); + } +} + +// destroyConstant - Remove the constant from the constant table... +// +void ConstantExpr::destroyConstant() { + getType()->getContext().pImpl->ExprConstants.remove(this); + destroyConstantImpl(); +} + +const char *ConstantExpr::getOpcodeName() const { + return Instruction::getOpcodeName(getOpcode()); +} + + + +GetElementPtrConstantExpr:: +GetElementPtrConstantExpr(Constant *C, ArrayRef<Constant*> IdxList, + Type *DestTy) + : ConstantExpr(DestTy, Instruction::GetElementPtr, + OperandTraits<GetElementPtrConstantExpr>::op_end(this) + - (IdxList.size()+1), IdxList.size()+1) { + OperandList[0] = C; + for (unsigned i = 0, E = IdxList.size(); i != E; ++i) + OperandList[i+1] = IdxList[i]; +} + +//===----------------------------------------------------------------------===// +// ConstantData* implementations + +void ConstantDataArray::anchor() {} +void ConstantDataVector::anchor() {} + +/// getElementType - Return the element type of the array/vector. +Type *ConstantDataSequential::getElementType() const { + return getType()->getElementType(); +} + +StringRef ConstantDataSequential::getRawDataValues() const { + return StringRef(DataElements, getNumElements()*getElementByteSize()); +} + +/// isElementTypeCompatible - Return true if a ConstantDataSequential can be +/// formed with a vector or array of the specified element type. +/// ConstantDataArray only works with normal float and int types that are +/// stored densely in memory, not with things like i42 or x86_f80. +bool ConstantDataSequential::isElementTypeCompatible(const Type *Ty) { + if (Ty->isFloatTy() || Ty->isDoubleTy()) return true; + if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) { + switch (IT->getBitWidth()) { + case 8: + case 16: + case 32: + case 64: + return true; + default: break; + } + } + return false; +} + +/// getNumElements - Return the number of elements in the array or vector. +unsigned ConstantDataSequential::getNumElements() const { + if (ArrayType *AT = dyn_cast<ArrayType>(getType())) + return AT->getNumElements(); + return getType()->getVectorNumElements(); +} + + +/// getElementByteSize - Return the size in bytes of the elements in the data. +uint64_t ConstantDataSequential::getElementByteSize() const { + return getElementType()->getPrimitiveSizeInBits()/8; +} + +/// getElementPointer - Return the start of the specified element. +const char *ConstantDataSequential::getElementPointer(unsigned Elt) const { + assert(Elt < getNumElements() && "Invalid Elt"); + return DataElements+Elt*getElementByteSize(); +} + + +/// isAllZeros - return true if the array is empty or all zeros. 
+static bool isAllZeros(StringRef Arr) { + for (StringRef::iterator I = Arr.begin(), E = Arr.end(); I != E; ++I) + if (*I != 0) + return false; + return true; +} + +/// getImpl - This is the underlying implementation of all of the +/// ConstantDataSequential::get methods. They all thunk down to here, providing +/// the correct element type. We take the bytes in as a StringRef because +/// we *want* an underlying "char*" to avoid TBAA type punning violations. +Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) { + assert(isElementTypeCompatible(Ty->getSequentialElementType())); + // If the elements are all zero or there are no elements, return a CAZ, which + // is more dense and canonical. + if (isAllZeros(Elements)) + return ConstantAggregateZero::get(Ty); + + // Do a lookup to see if we have already formed one of these. + StringMap<ConstantDataSequential*>::MapEntryTy &Slot = + Ty->getContext().pImpl->CDSConstants.GetOrCreateValue(Elements); + + // The bucket can point to a linked list of different CDS's that have the same + // body but different types. For example, 0,0,0,1 could be a 4 element array + // of i8, or a 1-element array of i32. They'll both end up in the same + /// StringMap bucket, linked up by their Next pointers. Walk the list. + ConstantDataSequential **Entry = &Slot.getValue(); + for (ConstantDataSequential *Node = *Entry; Node != 0; + Entry = &Node->Next, Node = *Entry) + if (Node->getType() == Ty) + return Node; + + // Okay, we didn't get a hit. Create a node of the right class, link it in, + // and return it. + if (isa<ArrayType>(Ty)) + return *Entry = new ConstantDataArray(Ty, Slot.getKeyData()); + + assert(isa<VectorType>(Ty)); + return *Entry = new ConstantDataVector(Ty, Slot.getKeyData()); +} + +void ConstantDataSequential::destroyConstant() { + // Remove the constant from the StringMap. + StringMap<ConstantDataSequential*> &CDSConstants = + getType()->getContext().pImpl->CDSConstants; + + StringMap<ConstantDataSequential*>::iterator Slot = + CDSConstants.find(getRawDataValues()); + + assert(Slot != CDSConstants.end() && "CDS not found in uniquing table"); + + ConstantDataSequential **Entry = &Slot->getValue(); + + // Remove the entry from the hash table. + if ((*Entry)->Next == 0) { + // If there is only one value in the bucket (common case) it must be this + // entry, and removing the entry should remove the bucket completely. + assert((*Entry) == this && "Hash mismatch in ConstantDataSequential"); + getContext().pImpl->CDSConstants.erase(Slot); + } else { + // Otherwise, there are multiple entries linked off the bucket, unlink the + // node we care about but keep the bucket around. + for (ConstantDataSequential *Node = *Entry; ; + Entry = &Node->Next, Node = *Entry) { + assert(Node && "Didn't find entry in its uniquing hash table!"); + // If we found our entry, unlink it from the list and we're done. + if (Node == this) { + *Entry = Node->Next; + break; + } + } + } + + // If we were part of a list, make sure that we don't delete the list that is + // still owned by the uniquing map. + Next = 0; + + // Finally, actually delete it. + destroyConstantImpl(); +} + +/// get() constructors - Return a constant with array type with an element +/// count and element type matching the ArrayRef passed in. Note that this +/// can return a ConstantAggregateZero object. 
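+/// A brief usage sketch (Ctx stands for some LLVMContext; values are made up):
+///   uint32_t Vals[]  = {1, 2, 3};
+///   Constant *C = ConstantDataArray::get(Ctx, Vals);    // [3 x i32] constant
+///   uint32_t Zeros[] = {0, 0};
+///   Constant *Z = ConstantDataArray::get(Ctx, Zeros);   // ConstantAggregateZero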
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) { + Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty); +} +Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){ + Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty); +} +Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){ + Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty); +} +Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){ + Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty); +} +Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) { + Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty); +} +Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) { + Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty); +} + +/// getString - This method constructs a CDS and initializes it with a text +/// string. The default behavior (AddNull==true) causes a null terminator to +/// be placed at the end of the array (increasing the length of the string by +/// one more than the StringRef would normally indicate. Pass AddNull=false +/// to disable this behavior. +Constant *ConstantDataArray::getString(LLVMContext &Context, + StringRef Str, bool AddNull) { + if (!AddNull) { + const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data()); + return get(Context, ArrayRef<uint8_t>(const_cast<uint8_t *>(Data), + Str.size())); + } + + SmallVector<uint8_t, 64> ElementVals; + ElementVals.append(Str.begin(), Str.end()); + ElementVals.push_back(0); + return get(Context, ElementVals); +} + +/// get() constructors - Return a constant with vector type with an element +/// count and element type matching the ArrayRef passed in. Note that this +/// can return a ConstantAggregateZero object. 
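+/// For example (with a made-up context Ctx):
+///   uint8_t Bytes[] = {1, 2, 3, 4};
+///   Constant *V = ConstantDataVector::get(Ctx, Bytes);  // <4 x i8> constant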
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){ + Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty); +} +Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){ + Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty); +} +Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){ + Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty); +} +Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){ + Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty); +} +Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) { + Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty); +} +Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) { + Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size()); + const char *Data = reinterpret_cast<const char *>(Elts.data()); + return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty); +} + +Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) { + assert(isElementTypeCompatible(V->getType()) && + "Element type not compatible with ConstantData"); + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + if (CI->getType()->isIntegerTy(8)) { + SmallVector<uint8_t, 16> Elts(NumElts, CI->getZExtValue()); + return get(V->getContext(), Elts); + } + if (CI->getType()->isIntegerTy(16)) { + SmallVector<uint16_t, 16> Elts(NumElts, CI->getZExtValue()); + return get(V->getContext(), Elts); + } + if (CI->getType()->isIntegerTy(32)) { + SmallVector<uint32_t, 16> Elts(NumElts, CI->getZExtValue()); + return get(V->getContext(), Elts); + } + assert(CI->getType()->isIntegerTy(64) && "Unsupported ConstantData type"); + SmallVector<uint64_t, 16> Elts(NumElts, CI->getZExtValue()); + return get(V->getContext(), Elts); + } + + if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { + if (CFP->getType()->isFloatTy()) { + SmallVector<float, 16> Elts(NumElts, CFP->getValueAPF().convertToFloat()); + return get(V->getContext(), Elts); + } + if (CFP->getType()->isDoubleTy()) { + SmallVector<double, 16> Elts(NumElts, + CFP->getValueAPF().convertToDouble()); + return get(V->getContext(), Elts); + } + } + return ConstantVector::getSplat(NumElts, V); +} + + +/// getElementAsInteger - If this is a sequential container of integers (of +/// any size), return the specified element in the low bits of a uint64_t. +uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const { + assert(isa<IntegerType>(getElementType()) && + "Accessor can only be used when element is an integer"); + const char *EltPtr = getElementPointer(Elt); + + // The data is stored in host byte order, make sure to cast back to the right + // type to load with the right endianness. 
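+  // (No byte swapping is required: the value is read back below through the
+  // same host integer type that was used to store it.)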
+  switch (getElementType()->getIntegerBitWidth()) {
+  default: llvm_unreachable("Invalid bitwidth for CDS");
+  case 8:
+    return *const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(EltPtr));
+  case 16:
+    return *const_cast<uint16_t *>(reinterpret_cast<const uint16_t *>(EltPtr));
+  case 32:
+    return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(EltPtr));
+  case 64:
+    return *const_cast<uint64_t *>(reinterpret_cast<const uint64_t *>(EltPtr));
+  }
+}
+
+/// getElementAsAPFloat - If this is a sequential container of floating point
+/// type, return the specified element as an APFloat.
+APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
+  const char *EltPtr = getElementPointer(Elt);
+
+  switch (getElementType()->getTypeID()) {
+  default:
+    llvm_unreachable("Accessor can only be used when element is float/double!");
+  case Type::FloatTyID: {
+    const float *FloatPtr = reinterpret_cast<const float *>(EltPtr);
+    return APFloat(*const_cast<float *>(FloatPtr));
+  }
+  case Type::DoubleTyID: {
+    const double *DoublePtr = reinterpret_cast<const double *>(EltPtr);
+    return APFloat(*const_cast<double *>(DoublePtr));
+  }
+  }
+}
+
+/// getElementAsFloat - If this is a sequential container of floats, return
+/// the specified element as a float.
+float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
+  assert(getElementType()->isFloatTy() &&
+         "Accessor can only be used when element is a 'float'");
+  const float *EltPtr = reinterpret_cast<const float *>(getElementPointer(Elt));
+  return *const_cast<float *>(EltPtr);
+}
+
+/// getElementAsDouble - If this is a sequential container of doubles, return
+/// the specified element as a double.
+double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
+  assert(getElementType()->isDoubleTy() &&
+         "Accessor can only be used when element is a 'double'");
+  const double *EltPtr =
+    reinterpret_cast<const double *>(getElementPointer(Elt));
+  return *const_cast<double *>(EltPtr);
+}
+
+/// getElementAsConstant - Return a Constant for a specified index's element.
+/// Note that this has to compute a new constant to return, so it isn't as
+/// efficient as getElementAsInteger/Float/Double.
+Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
+  if (getElementType()->isFloatTy() || getElementType()->isDoubleTy())
+    return ConstantFP::get(getContext(), getElementAsAPFloat(Elt));
+
+  return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
+}
+
+/// isString - This method returns true if this is an array of i8.
+bool ConstantDataSequential::isString() const {
+  return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(8);
+}
+
+/// isCString - This method returns true if the array "isString", ends with a
+/// nul byte, and does not contain any other nul bytes.
+bool ConstantDataSequential::isCString() const {
+  if (!isString())
+    return false;
+
+  StringRef Str = getAsString();
+
+  // The last value must be nul.
+  if (Str.back() != 0) return false;
+
+  // Other elements must be non-nul.
+  return Str.drop_back().find(0) == StringRef::npos;
+}
+
+/// getSplatValue - If this is a splat constant, meaning that all of the
+/// elements have the same value, return that value. Otherwise return NULL.
+Constant *ConstantDataVector::getSplatValue() const {
+  const char *Base = getRawDataValues().data();
+
+  // Compare elements 1+ to the 0'th element.
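+  // A plain byte-wise memcmp per element is sufficient here because CDS
+  // elements are stored densely, in host order, with no padding between them.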
+ unsigned EltSize = getElementByteSize(); + for (unsigned i = 1, e = getNumElements(); i != e; ++i) + if (memcmp(Base, Base+i*EltSize, EltSize)) + return 0; + + // If they're all the same, return the 0th one as a representative. + return getElementAsConstant(0); +} + +//===----------------------------------------------------------------------===// +// replaceUsesOfWithOnConstant implementations + +/// replaceUsesOfWithOnConstant - Update this constant array to change uses of +/// 'From' to be uses of 'To'. This must update the uniquing data structures +/// etc. +/// +/// Note that we intentionally replace all uses of From with To here. Consider +/// a large array that uses 'From' 1000 times. By handling this case all here, +/// ConstantArray::replaceUsesOfWithOnConstant is only invoked once, and that +/// single invocation handles all 1000 uses. Handling them one at a time would +/// work, but would be really slow because it would have to unique each updated +/// array instance. +/// +void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To, + Use *U) { + assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!"); + Constant *ToC = cast<Constant>(To); + + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + + SmallVector<Constant*, 8> Values; + LLVMContextImpl::ArrayConstantsTy::LookupKey Lookup; + Lookup.first = cast<ArrayType>(getType()); + Values.reserve(getNumOperands()); // Build replacement array. + + // Fill values with the modified operands of the constant array. Also, + // compute whether this turns into an all-zeros array. + unsigned NumUpdated = 0; + + // Keep track of whether all the values in the array are "ToC". + bool AllSame = true; + for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) { + Constant *Val = cast<Constant>(O->get()); + if (Val == From) { + Val = ToC; + ++NumUpdated; + } + Values.push_back(Val); + AllSame &= Val == ToC; + } + + Constant *Replacement = 0; + if (AllSame && ToC->isNullValue()) { + Replacement = ConstantAggregateZero::get(getType()); + } else if (AllSame && isa<UndefValue>(ToC)) { + Replacement = UndefValue::get(getType()); + } else { + // Check to see if we have this array type already. + Lookup.second = makeArrayRef(Values); + LLVMContextImpl::ArrayConstantsTy::MapTy::iterator I = + pImpl->ArrayConstants.find(Lookup); + + if (I != pImpl->ArrayConstants.map_end()) { + Replacement = I->first; + } else { + // Okay, the new shape doesn't exist in the system yet. Instead of + // creating a new constant array, inserting it, replaceallusesof'ing the + // old with the new, then deleting the old... just update the current one + // in place! + pImpl->ArrayConstants.remove(this); + + // Update to the new value. Optimize for the case when we have a single + // operand that we're changing, but handle bulk updates efficiently. + if (NumUpdated == 1) { + unsigned OperandToUpdate = U - OperandList; + assert(getOperand(OperandToUpdate) == From && + "ReplaceAllUsesWith broken!"); + setOperand(OperandToUpdate, ToC); + } else { + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (getOperand(i) == From) + setOperand(i, ToC); + } + pImpl->ArrayConstants.insert(this); + return; + } + } + + // Otherwise, I do need to replace this with an existing value. + assert(Replacement != this && "I didn't contain From!"); + + // Everyone using this now uses the replacement. + replaceAllUsesWith(Replacement); + + // Delete the old constant! 
+ destroyConstant(); +} + +void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To, + Use *U) { + assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!"); + Constant *ToC = cast<Constant>(To); + + unsigned OperandToUpdate = U-OperandList; + assert(getOperand(OperandToUpdate) == From && "ReplaceAllUsesWith broken!"); + + SmallVector<Constant*, 8> Values; + LLVMContextImpl::StructConstantsTy::LookupKey Lookup; + Lookup.first = cast<StructType>(getType()); + Values.reserve(getNumOperands()); // Build replacement struct. + + // Fill values with the modified operands of the constant struct. Also, + // compute whether this turns into an all-zeros struct. + bool isAllZeros = false; + bool isAllUndef = false; + if (ToC->isNullValue()) { + isAllZeros = true; + for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) { + Constant *Val = cast<Constant>(O->get()); + Values.push_back(Val); + if (isAllZeros) isAllZeros = Val->isNullValue(); + } + } else if (isa<UndefValue>(ToC)) { + isAllUndef = true; + for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) { + Constant *Val = cast<Constant>(O->get()); + Values.push_back(Val); + if (isAllUndef) isAllUndef = isa<UndefValue>(Val); + } + } else { + for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O) + Values.push_back(cast<Constant>(O->get())); + } + Values[OperandToUpdate] = ToC; + + LLVMContextImpl *pImpl = getContext().pImpl; + + Constant *Replacement = 0; + if (isAllZeros) { + Replacement = ConstantAggregateZero::get(getType()); + } else if (isAllUndef) { + Replacement = UndefValue::get(getType()); + } else { + // Check to see if we have this struct type already. + Lookup.second = makeArrayRef(Values); + LLVMContextImpl::StructConstantsTy::MapTy::iterator I = + pImpl->StructConstants.find(Lookup); + + if (I != pImpl->StructConstants.map_end()) { + Replacement = I->first; + } else { + // Okay, the new shape doesn't exist in the system yet. Instead of + // creating a new constant struct, inserting it, replaceallusesof'ing the + // old with the new, then deleting the old... just update the current one + // in place! + pImpl->StructConstants.remove(this); + + // Update to the new value. + setOperand(OperandToUpdate, ToC); + pImpl->StructConstants.insert(this); + return; + } + } + + assert(Replacement != this && "I didn't contain From!"); + + // Everyone using this now uses the replacement. + replaceAllUsesWith(Replacement); + + // Delete the old constant! + destroyConstant(); +} + +void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To, + Use *U) { + assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!"); + + SmallVector<Constant*, 8> Values; + Values.reserve(getNumOperands()); // Build replacement array... + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { + Constant *Val = getOperand(i); + if (Val == From) Val = cast<Constant>(To); + Values.push_back(Val); + } + + Constant *Replacement = get(Values); + assert(Replacement != this && "I didn't contain From!"); + + // Everyone using this now uses the replacement. + replaceAllUsesWith(Replacement); + + // Delete the old constant! 
+ destroyConstant(); +} + +void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV, + Use *U) { + assert(isa<Constant>(ToV) && "Cannot make Constant refer to non-constant!"); + Constant *To = cast<Constant>(ToV); + + SmallVector<Constant*, 8> NewOps; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { + Constant *Op = getOperand(i); + NewOps.push_back(Op == From ? To : Op); + } + + Constant *Replacement = getWithOperands(NewOps); + assert(Replacement != this && "I didn't contain From!"); + + // Everyone using this now uses the replacement. + replaceAllUsesWith(Replacement); + + // Delete the old constant! + destroyConstant(); +} + +Instruction *ConstantExpr::getAsInstruction() { + SmallVector<Value*,4> ValueOperands; + for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) + ValueOperands.push_back(cast<Value>(I)); + + ArrayRef<Value*> Ops(ValueOperands); + + switch (getOpcode()) { + case Instruction::Trunc: + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPTrunc: + case Instruction::FPExt: + case Instruction::UIToFP: + case Instruction::SIToFP: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::BitCast: + return CastInst::Create((Instruction::CastOps)getOpcode(), + Ops[0], getType()); + case Instruction::Select: + return SelectInst::Create(Ops[0], Ops[1], Ops[2]); + case Instruction::InsertElement: + return InsertElementInst::Create(Ops[0], Ops[1], Ops[2]); + case Instruction::ExtractElement: + return ExtractElementInst::Create(Ops[0], Ops[1]); + case Instruction::InsertValue: + return InsertValueInst::Create(Ops[0], Ops[1], getIndices()); + case Instruction::ExtractValue: + return ExtractValueInst::Create(Ops[0], getIndices()); + case Instruction::ShuffleVector: + return new ShuffleVectorInst(Ops[0], Ops[1], Ops[2]); + + case Instruction::GetElementPtr: + if (cast<GEPOperator>(this)->isInBounds()) + return GetElementPtrInst::CreateInBounds(Ops[0], Ops.slice(1)); + else + return GetElementPtrInst::Create(Ops[0], Ops.slice(1)); + + case Instruction::ICmp: + case Instruction::FCmp: + return CmpInst::Create((Instruction::OtherOps)getOpcode(), + getPredicate(), Ops[0], Ops[1]); + + default: + assert(getNumOperands() == 2 && "Must be binary operator?"); + BinaryOperator *BO = + BinaryOperator::Create((Instruction::BinaryOps)getOpcode(), + Ops[0], Ops[1]); + if (isa<OverflowingBinaryOperator>(BO)) { + BO->setHasNoUnsignedWrap(SubclassOptionalData & + OverflowingBinaryOperator::NoUnsignedWrap); + BO->setHasNoSignedWrap(SubclassOptionalData & + OverflowingBinaryOperator::NoSignedWrap); + } + if (isa<PossiblyExactOperator>(BO)) + BO->setIsExact(SubclassOptionalData & PossiblyExactOperator::IsExact); + return BO; + } +} diff --git a/lib/IR/ConstantsContext.h b/lib/IR/ConstantsContext.h new file mode 100644 index 0000000000..e9958589f5 --- /dev/null +++ b/lib/IR/ConstantsContext.h @@ -0,0 +1,774 @@ +//===-- ConstantsContext.h - Constants-related Context Interals -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various helper methods and classes used by +// LLVMContextImpl for creating and managing constants. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CONSTANTSCONTEXT_H +#define LLVM_CONSTANTSCONTEXT_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <map> + +namespace llvm { +template<class ValType> +struct ConstantTraits; + +/// UnaryConstantExpr - This class is private to Constants.cpp, and is used +/// behind the scenes to implement unary constant exprs. +class UnaryConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + UnaryConstantExpr(unsigned Opcode, Constant *C, Type *Ty) + : ConstantExpr(Ty, Opcode, &Op<0>(), 1) { + Op<0>() = C; + } + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// BinaryConstantExpr - This class is private to Constants.cpp, and is used +/// behind the scenes to implement binary constant exprs. +class BinaryConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + BinaryConstantExpr(unsigned Opcode, Constant *C1, Constant *C2, + unsigned Flags) + : ConstantExpr(C1->getType(), Opcode, &Op<0>(), 2) { + Op<0>() = C1; + Op<1>() = C2; + SubclassOptionalData = Flags; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// SelectConstantExpr - This class is private to Constants.cpp, and is used +/// behind the scenes to implement select constant exprs. +class SelectConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly three operands + void *operator new(size_t s) { + return User::operator new(s, 3); + } + SelectConstantExpr(Constant *C1, Constant *C2, Constant *C3) + : ConstantExpr(C2->getType(), Instruction::Select, &Op<0>(), 3) { + Op<0>() = C1; + Op<1>() = C2; + Op<2>() = C3; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// ExtractElementConstantExpr - This class is private to +/// Constants.cpp, and is used behind the scenes to implement +/// extractelement constant exprs. +class ExtractElementConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + ExtractElementConstantExpr(Constant *C1, Constant *C2) + : ConstantExpr(cast<VectorType>(C1->getType())->getElementType(), + Instruction::ExtractElement, &Op<0>(), 2) { + Op<0>() = C1; + Op<1>() = C2; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// InsertElementConstantExpr - This class is private to +/// Constants.cpp, and is used behind the scenes to implement +/// insertelement constant exprs. 
+class InsertElementConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly three operands + void *operator new(size_t s) { + return User::operator new(s, 3); + } + InsertElementConstantExpr(Constant *C1, Constant *C2, Constant *C3) + : ConstantExpr(C1->getType(), Instruction::InsertElement, + &Op<0>(), 3) { + Op<0>() = C1; + Op<1>() = C2; + Op<2>() = C3; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// ShuffleVectorConstantExpr - This class is private to +/// Constants.cpp, and is used behind the scenes to implement +/// shufflevector constant exprs. +class ShuffleVectorConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly three operands + void *operator new(size_t s) { + return User::operator new(s, 3); + } + ShuffleVectorConstantExpr(Constant *C1, Constant *C2, Constant *C3) + : ConstantExpr(VectorType::get( + cast<VectorType>(C1->getType())->getElementType(), + cast<VectorType>(C3->getType())->getNumElements()), + Instruction::ShuffleVector, + &Op<0>(), 3) { + Op<0>() = C1; + Op<1>() = C2; + Op<2>() = C3; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// ExtractValueConstantExpr - This class is private to +/// Constants.cpp, and is used behind the scenes to implement +/// extractvalue constant exprs. +class ExtractValueConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + ExtractValueConstantExpr(Constant *Agg, + const SmallVector<unsigned, 4> &IdxList, + Type *DestTy) + : ConstantExpr(DestTy, Instruction::ExtractValue, &Op<0>(), 1), + Indices(IdxList) { + Op<0>() = Agg; + } + + /// Indices - These identify which value to extract. + const SmallVector<unsigned, 4> Indices; + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +/// InsertValueConstantExpr - This class is private to +/// Constants.cpp, and is used behind the scenes to implement +/// insertvalue constant exprs. +class InsertValueConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 2); + } + InsertValueConstantExpr(Constant *Agg, Constant *Val, + const SmallVector<unsigned, 4> &IdxList, + Type *DestTy) + : ConstantExpr(DestTy, Instruction::InsertValue, &Op<0>(), 2), + Indices(IdxList) { + Op<0>() = Agg; + Op<1>() = Val; + } + + /// Indices - These identify the position for the insertion. + const SmallVector<unsigned, 4> Indices; + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + + +/// GetElementPtrConstantExpr - This class is private to Constants.cpp, and is +/// used behind the scenes to implement getelementpr constant exprs. 
+class GetElementPtrConstantExpr : public ConstantExpr { + virtual void anchor(); + GetElementPtrConstantExpr(Constant *C, ArrayRef<Constant*> IdxList, + Type *DestTy); +public: + static GetElementPtrConstantExpr *Create(Constant *C, + ArrayRef<Constant*> IdxList, + Type *DestTy, + unsigned Flags) { + GetElementPtrConstantExpr *Result = + new(IdxList.size() + 1) GetElementPtrConstantExpr(C, IdxList, DestTy); + Result->SubclassOptionalData = Flags; + return Result; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +// CompareConstantExpr - This class is private to Constants.cpp, and is used +// behind the scenes to implement ICmp and FCmp constant expressions. This is +// needed in order to store the predicate value for these instructions. +class CompareConstantExpr : public ConstantExpr { + virtual void anchor(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + unsigned short predicate; + CompareConstantExpr(Type *ty, Instruction::OtherOps opc, + unsigned short pred, Constant* LHS, Constant* RHS) + : ConstantExpr(ty, opc, &Op<0>(), 2), predicate(pred) { + Op<0>() = LHS; + Op<1>() = RHS; + } + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +template <> +struct OperandTraits<UnaryConstantExpr> : + public FixedNumOperandTraits<UnaryConstantExpr, 1> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryConstantExpr, Value) + +template <> +struct OperandTraits<BinaryConstantExpr> : + public FixedNumOperandTraits<BinaryConstantExpr, 2> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryConstantExpr, Value) + +template <> +struct OperandTraits<SelectConstantExpr> : + public FixedNumOperandTraits<SelectConstantExpr, 3> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectConstantExpr, Value) + +template <> +struct OperandTraits<ExtractElementConstantExpr> : + public FixedNumOperandTraits<ExtractElementConstantExpr, 2> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementConstantExpr, Value) + +template <> +struct OperandTraits<InsertElementConstantExpr> : + public FixedNumOperandTraits<InsertElementConstantExpr, 3> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementConstantExpr, Value) + +template <> +struct OperandTraits<ShuffleVectorConstantExpr> : + public FixedNumOperandTraits<ShuffleVectorConstantExpr, 3> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorConstantExpr, Value) + +template <> +struct OperandTraits<ExtractValueConstantExpr> : + public FixedNumOperandTraits<ExtractValueConstantExpr, 1> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractValueConstantExpr, Value) + +template <> +struct OperandTraits<InsertValueConstantExpr> : + public FixedNumOperandTraits<InsertValueConstantExpr, 2> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueConstantExpr, Value) + +template <> +struct OperandTraits<GetElementPtrConstantExpr> : + public VariadicOperandTraits<GetElementPtrConstantExpr, 1> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrConstantExpr, Value) + + +template <> +struct OperandTraits<CompareConstantExpr> : + public FixedNumOperandTraits<CompareConstantExpr, 2> { +}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CompareConstantExpr, Value) + +struct ExprMapKeyType { + ExprMapKeyType(unsigned opc, + ArrayRef<Constant*> ops, + unsigned short flags = 0, + unsigned short optionalflags = 0, + 
ArrayRef<unsigned> inds = ArrayRef<unsigned>()) + : opcode(opc), subclassoptionaldata(optionalflags), subclassdata(flags), + operands(ops.begin(), ops.end()), indices(inds.begin(), inds.end()) {} + uint8_t opcode; + uint8_t subclassoptionaldata; + uint16_t subclassdata; + std::vector<Constant*> operands; + SmallVector<unsigned, 4> indices; + bool operator==(const ExprMapKeyType& that) const { + return this->opcode == that.opcode && + this->subclassdata == that.subclassdata && + this->subclassoptionaldata == that.subclassoptionaldata && + this->operands == that.operands && + this->indices == that.indices; + } + bool operator<(const ExprMapKeyType & that) const { + if (this->opcode != that.opcode) return this->opcode < that.opcode; + if (this->operands != that.operands) return this->operands < that.operands; + if (this->subclassdata != that.subclassdata) + return this->subclassdata < that.subclassdata; + if (this->subclassoptionaldata != that.subclassoptionaldata) + return this->subclassoptionaldata < that.subclassoptionaldata; + if (this->indices != that.indices) return this->indices < that.indices; + return false; + } + + bool operator!=(const ExprMapKeyType& that) const { + return !(*this == that); + } +}; + +struct InlineAsmKeyType { + InlineAsmKeyType(StringRef AsmString, + StringRef Constraints, bool hasSideEffects, + bool isAlignStack, InlineAsm::AsmDialect asmDialect) + : asm_string(AsmString), constraints(Constraints), + has_side_effects(hasSideEffects), is_align_stack(isAlignStack), + asm_dialect(asmDialect) {} + std::string asm_string; + std::string constraints; + bool has_side_effects; + bool is_align_stack; + InlineAsm::AsmDialect asm_dialect; + bool operator==(const InlineAsmKeyType& that) const { + return this->asm_string == that.asm_string && + this->constraints == that.constraints && + this->has_side_effects == that.has_side_effects && + this->is_align_stack == that.is_align_stack && + this->asm_dialect == that.asm_dialect; + } + bool operator<(const InlineAsmKeyType& that) const { + if (this->asm_string != that.asm_string) + return this->asm_string < that.asm_string; + if (this->constraints != that.constraints) + return this->constraints < that.constraints; + if (this->has_side_effects != that.has_side_effects) + return this->has_side_effects < that.has_side_effects; + if (this->is_align_stack != that.is_align_stack) + return this->is_align_stack < that.is_align_stack; + if (this->asm_dialect != that.asm_dialect) + return this->asm_dialect < that.asm_dialect; + return false; + } + + bool operator!=(const InlineAsmKeyType& that) const { + return !(*this == that); + } +}; + +// The number of operands for each ConstantCreator::create method is +// determined by the ConstantTraits template. +// ConstantCreator - A class that is used to create constants by +// ConstantUniqueMap*. This class should be partially specialized if there is +// something strange that needs to be done to interface to the ctor for the +// constant. 
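+// For example, the ConstantExpr specialization below switches on the opcode
+// stored in ExprMapKeyType and constructs the matching *ConstantExpr subclass.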
+// +template<typename T, typename Alloc> +struct ConstantTraits< std::vector<T, Alloc> > { + static unsigned uses(const std::vector<T, Alloc>& v) { + return v.size(); + } +}; + +template<> +struct ConstantTraits<Constant *> { + static unsigned uses(Constant * const & v) { + return 1; + } +}; + +template<class ConstantClass, class TypeClass, class ValType> +struct ConstantCreator { + static ConstantClass *create(TypeClass *Ty, const ValType &V) { + return new(ConstantTraits<ValType>::uses(V)) ConstantClass(Ty, V); + } +}; + +template<class ConstantClass, class TypeClass> +struct ConstantArrayCreator { + static ConstantClass *create(TypeClass *Ty, ArrayRef<Constant*> V) { + return new(V.size()) ConstantClass(Ty, V); + } +}; + +template<class ConstantClass> +struct ConstantKeyData { + typedef void ValType; + static ValType getValType(ConstantClass *C) { + llvm_unreachable("Unknown Constant type!"); + } +}; + +template<> +struct ConstantCreator<ConstantExpr, Type, ExprMapKeyType> { + static ConstantExpr *create(Type *Ty, const ExprMapKeyType &V, + unsigned short pred = 0) { + if (Instruction::isCast(V.opcode)) + return new UnaryConstantExpr(V.opcode, V.operands[0], Ty); + if ((V.opcode >= Instruction::BinaryOpsBegin && + V.opcode < Instruction::BinaryOpsEnd)) + return new BinaryConstantExpr(V.opcode, V.operands[0], V.operands[1], + V.subclassoptionaldata); + if (V.opcode == Instruction::Select) + return new SelectConstantExpr(V.operands[0], V.operands[1], + V.operands[2]); + if (V.opcode == Instruction::ExtractElement) + return new ExtractElementConstantExpr(V.operands[0], V.operands[1]); + if (V.opcode == Instruction::InsertElement) + return new InsertElementConstantExpr(V.operands[0], V.operands[1], + V.operands[2]); + if (V.opcode == Instruction::ShuffleVector) + return new ShuffleVectorConstantExpr(V.operands[0], V.operands[1], + V.operands[2]); + if (V.opcode == Instruction::InsertValue) + return new InsertValueConstantExpr(V.operands[0], V.operands[1], + V.indices, Ty); + if (V.opcode == Instruction::ExtractValue) + return new ExtractValueConstantExpr(V.operands[0], V.indices, Ty); + if (V.opcode == Instruction::GetElementPtr) { + std::vector<Constant*> IdxList(V.operands.begin()+1, V.operands.end()); + return GetElementPtrConstantExpr::Create(V.operands[0], IdxList, Ty, + V.subclassoptionaldata); + } + + // The compare instructions are weird. We have to encode the predicate + // value and it is combined with the instruction opcode by multiplying + // the opcode by one hundred. We must decode this to get the predicate. + if (V.opcode == Instruction::ICmp) + return new CompareConstantExpr(Ty, Instruction::ICmp, V.subclassdata, + V.operands[0], V.operands[1]); + if (V.opcode == Instruction::FCmp) + return new CompareConstantExpr(Ty, Instruction::FCmp, V.subclassdata, + V.operands[0], V.operands[1]); + llvm_unreachable("Invalid ConstantExpr!"); + } +}; + +template<> +struct ConstantKeyData<ConstantExpr> { + typedef ExprMapKeyType ValType; + static ValType getValType(ConstantExpr *CE) { + std::vector<Constant*> Operands; + Operands.reserve(CE->getNumOperands()); + for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) + Operands.push_back(cast<Constant>(CE->getOperand(i))); + return ExprMapKeyType(CE->getOpcode(), Operands, + CE->isCompare() ? CE->getPredicate() : 0, + CE->getRawSubclassOptionalData(), + CE->hasIndices() ? 
+ CE->getIndices() : ArrayRef<unsigned>()); + } +}; + +template<> +struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType> { + static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) { + return new InlineAsm(Ty, Key.asm_string, Key.constraints, + Key.has_side_effects, Key.is_align_stack, + Key.asm_dialect); + } +}; + +template<> +struct ConstantKeyData<InlineAsm> { + typedef InlineAsmKeyType ValType; + static ValType getValType(InlineAsm *Asm) { + return InlineAsmKeyType(Asm->getAsmString(), Asm->getConstraintString(), + Asm->hasSideEffects(), Asm->isAlignStack(), + Asm->getDialect()); + } +}; + +template<class ValType, class ValRefType, class TypeClass, class ConstantClass, + bool HasLargeKey = false /*true for arrays and structs*/ > +class ConstantUniqueMap { +public: + typedef std::pair<TypeClass*, ValType> MapKey; + typedef std::map<MapKey, ConstantClass *> MapTy; + typedef std::map<ConstantClass *, typename MapTy::iterator> InverseMapTy; +private: + /// Map - This is the main map from the element descriptor to the Constants. + /// This is the primary way we avoid creating two of the same shape + /// constant. + MapTy Map; + + /// InverseMap - If "HasLargeKey" is true, this contains an inverse mapping + /// from the constants to their element in Map. This is important for + /// removal of constants from the array, which would otherwise have to scan + /// through the map with very large keys. + InverseMapTy InverseMap; + +public: + typename MapTy::iterator map_begin() { return Map.begin(); } + typename MapTy::iterator map_end() { return Map.end(); } + + void freeConstants() { + for (typename MapTy::iterator I=Map.begin(), E=Map.end(); + I != E; ++I) { + // Asserts that use_empty(). + delete I->second; + } + } + + /// InsertOrGetItem - Return an iterator for the specified element. + /// If the element exists in the map, the returned iterator points to the + /// entry and Exists=true. If not, the iterator points to the newly + /// inserted entry and returns Exists=false. Newly inserted entries have + /// I->second == 0, and should be filled in. + typename MapTy::iterator InsertOrGetItem(std::pair<MapKey, ConstantClass *> + &InsertVal, + bool &Exists) { + std::pair<typename MapTy::iterator, bool> IP = Map.insert(InsertVal); + Exists = !IP.second; + return IP.first; + } + +private: + typename MapTy::iterator FindExistingElement(ConstantClass *CP) { + if (HasLargeKey) { + typename InverseMapTy::iterator IMI = InverseMap.find(CP); + assert(IMI != InverseMap.end() && IMI->second != Map.end() && + IMI->second->second == CP && + "InverseMap corrupt!"); + return IMI->second; + } + + typename MapTy::iterator I = + Map.find(MapKey(static_cast<TypeClass*>(CP->getType()), + ConstantKeyData<ConstantClass>::getValType(CP))); + if (I == Map.end() || I->second != CP) { + // FIXME: This should not use a linear scan. If this gets to be a + // performance problem, someone should look at this. + for (I = Map.begin(); I != Map.end() && I->second != CP; ++I) + /* empty */; + } + return I; + } + + ConstantClass *Create(TypeClass *Ty, ValRefType V, + typename MapTy::iterator I) { + ConstantClass* Result = + ConstantCreator<ConstantClass,TypeClass,ValType>::create(Ty, V); + + assert(Result->getType() == Ty && "Type specified is not correct!"); + I = Map.insert(I, std::make_pair(MapKey(Ty, V), Result)); + + if (HasLargeKey) // Remember the reverse mapping if needed. 
+ InverseMap.insert(std::make_pair(Result, I)); + + return Result; + } +public: + + /// getOrCreate - Return the specified constant from the map, creating it if + /// necessary. + ConstantClass *getOrCreate(TypeClass *Ty, ValRefType V) { + MapKey Lookup(Ty, V); + ConstantClass* Result = 0; + + typename MapTy::iterator I = Map.find(Lookup); + // Is it in the map? + if (I != Map.end()) + Result = I->second; + + if (!Result) { + // If no preexisting value, create one now... + Result = Create(Ty, V, I); + } + + return Result; + } + + void remove(ConstantClass *CP) { + typename MapTy::iterator I = FindExistingElement(CP); + assert(I != Map.end() && "Constant not found in constant table!"); + assert(I->second == CP && "Didn't find correct element?"); + + if (HasLargeKey) // Remember the reverse mapping if needed. + InverseMap.erase(CP); + + Map.erase(I); + } + + /// MoveConstantToNewSlot - If we are about to change C to be the element + /// specified by I, update our internal data structures to reflect this + /// fact. + void MoveConstantToNewSlot(ConstantClass *C, typename MapTy::iterator I) { + // First, remove the old location of the specified constant in the map. + typename MapTy::iterator OldI = FindExistingElement(C); + assert(OldI != Map.end() && "Constant not found in constant table!"); + assert(OldI->second == C && "Didn't find correct element?"); + + // Remove the old entry from the map. + Map.erase(OldI); + + // Update the inverse map so that we know that this constant is now + // located at descriptor I. + if (HasLargeKey) { + assert(I->second == C && "Bad inversemap entry!"); + InverseMap[C] = I; + } + } + + void dump() const { + DEBUG(dbgs() << "Constant.cpp: ConstantUniqueMap\n"); + } +}; + +// Unique map for aggregate constants +template<class TypeClass, class ConstantClass> +class ConstantAggrUniqueMap { +public: + typedef ArrayRef<Constant*> Operands; + typedef std::pair<TypeClass*, Operands> LookupKey; +private: + struct MapInfo { + typedef DenseMapInfo<ConstantClass*> ConstantClassInfo; + typedef DenseMapInfo<Constant*> ConstantInfo; + typedef DenseMapInfo<TypeClass*> TypeClassInfo; + static inline ConstantClass* getEmptyKey() { + return ConstantClassInfo::getEmptyKey(); + } + static inline ConstantClass* getTombstoneKey() { + return ConstantClassInfo::getTombstoneKey(); + } + static unsigned getHashValue(const ConstantClass *CP) { + SmallVector<Constant*, 8> CPOperands; + CPOperands.reserve(CP->getNumOperands()); + for (unsigned I = 0, E = CP->getNumOperands(); I < E; ++I) + CPOperands.push_back(CP->getOperand(I)); + return getHashValue(LookupKey(CP->getType(), CPOperands)); + } + static bool isEqual(const ConstantClass *LHS, const ConstantClass *RHS) { + return LHS == RHS; + } + static unsigned getHashValue(const LookupKey &Val) { + return hash_combine(Val.first, hash_combine_range(Val.second.begin(), + Val.second.end())); + } + static bool isEqual(const LookupKey &LHS, const ConstantClass *RHS) { + if (RHS == getEmptyKey() || RHS == getTombstoneKey()) + return false; + if (LHS.first != RHS->getType() + || LHS.second.size() != RHS->getNumOperands()) + return false; + for (unsigned I = 0, E = RHS->getNumOperands(); I < E; ++I) { + if (LHS.second[I] != RHS->getOperand(I)) + return false; + } + return true; + } + }; +public: + typedef DenseMap<ConstantClass *, char, MapInfo> MapTy; + +private: + /// Map - This is the main map from the element descriptor to the Constants. + /// This is the primary way we avoid creating two of the same shape + /// constant. 
+ MapTy Map; + +public: + typename MapTy::iterator map_begin() { return Map.begin(); } + typename MapTy::iterator map_end() { return Map.end(); } + + void freeConstants() { + for (typename MapTy::iterator I=Map.begin(), E=Map.end(); + I != E; ++I) { + // Asserts that use_empty(). + delete I->first; + } + } + +private: + typename MapTy::iterator findExistingElement(ConstantClass *CP) { + return Map.find(CP); + } + + ConstantClass *Create(TypeClass *Ty, Operands V, typename MapTy::iterator I) { + ConstantClass* Result = + ConstantArrayCreator<ConstantClass,TypeClass>::create(Ty, V); + + assert(Result->getType() == Ty && "Type specified is not correct!"); + Map[Result] = '\0'; + + return Result; + } +public: + + /// getOrCreate - Return the specified constant from the map, creating it if + /// necessary. + ConstantClass *getOrCreate(TypeClass *Ty, Operands V) { + LookupKey Lookup(Ty, V); + ConstantClass* Result = 0; + + typename MapTy::iterator I = Map.find_as(Lookup); + // Is it in the map? + if (I != Map.end()) + Result = I->first; + + if (!Result) { + // If no preexisting value, create one now... + Result = Create(Ty, V, I); + } + + return Result; + } + + /// Find the constant by lookup key. + typename MapTy::iterator find(LookupKey Lookup) { + return Map.find_as(Lookup); + } + + /// Insert the constant into its proper slot. + void insert(ConstantClass *CP) { + Map[CP] = '\0'; + } + + /// Remove this constant from the map + void remove(ConstantClass *CP) { + typename MapTy::iterator I = findExistingElement(CP); + assert(I != Map.end() && "Constant not found in constant table!"); + assert(I->first == CP && "Didn't find correct element?"); + Map.erase(I); + } + + void dump() const { + DEBUG(dbgs() << "Constant.cpp: ConstantUniqueMap\n"); + } +}; + +} + +#endif diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp new file mode 100644 index 0000000000..983b49c628 --- /dev/null +++ b/lib/IR/Core.cpp @@ -0,0 +1,2458 @@ +//===-- Core.cpp ----------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the common infrastructure (including the C bindings) +// for libLLVMCore.a, which implements the LLVM intermediate representation. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm-c/Core.h" +#include "llvm/Bitcode/ReaderWriter.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/PassManager.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/system_error.h" +#include "llvm/Support/Threading.h" +#include <cassert> +#include <cstdlib> +#include <cstring> + +using namespace llvm; + +void llvm::initializeCore(PassRegistry &Registry) { + initializeDominatorTreePass(Registry); + initializePrintModulePassPass(Registry); + initializePrintFunctionPassPass(Registry); + initializePrintBasicBlockPassPass(Registry); + initializeVerifierPass(Registry); + initializePreVerifierPass(Registry); +} + +void LLVMInitializeCore(LLVMPassRegistryRef R) { + initializeCore(*unwrap(R)); +} + +void LLVMShutdown() { + llvm_shutdown(); +} + +/*===-- Error handling ----------------------------------------------------===*/ + +void LLVMDisposeMessage(char *Message) { + free(Message); +} + + +/*===-- Operations on contexts --------------------------------------------===*/ + +LLVMContextRef LLVMContextCreate() { + return wrap(new LLVMContext()); +} + +LLVMContextRef LLVMGetGlobalContext() { + return wrap(&getGlobalContext()); +} + +void LLVMContextDispose(LLVMContextRef C) { + delete unwrap(C); +} + +unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char* Name, + unsigned SLen) { + return unwrap(C)->getMDKindID(StringRef(Name, SLen)); +} + +unsigned LLVMGetMDKindID(const char* Name, unsigned SLen) { + return LLVMGetMDKindIDInContext(LLVMGetGlobalContext(), Name, SLen); +} + + +/*===-- Operations on modules ---------------------------------------------===*/ + +LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID) { + return wrap(new Module(ModuleID, getGlobalContext())); +} + +LLVMModuleRef LLVMModuleCreateWithNameInContext(const char *ModuleID, + LLVMContextRef C) { + return wrap(new Module(ModuleID, *unwrap(C))); +} + +void LLVMDisposeModule(LLVMModuleRef M) { + delete unwrap(M); +} + +/*--.. Data layout .........................................................--*/ +const char * LLVMGetDataLayout(LLVMModuleRef M) { + return unwrap(M)->getDataLayout().c_str(); +} + +void LLVMSetDataLayout(LLVMModuleRef M, const char *Triple) { + unwrap(M)->setDataLayout(Triple); +} + +/*--.. Target triple .......................................................--*/ +const char * LLVMGetTarget(LLVMModuleRef M) { + return unwrap(M)->getTargetTriple().c_str(); +} + +void LLVMSetTarget(LLVMModuleRef M, const char *Triple) { + unwrap(M)->setTargetTriple(Triple); +} + +void LLVMDumpModule(LLVMModuleRef M) { + unwrap(M)->dump(); +} + +LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename, + char **ErrorMessage) { + std::string error; + raw_fd_ostream dest(Filename, error); + if (!error.empty()) { + *ErrorMessage = strdup(error.c_str()); + return true; + } + + unwrap(M)->print(dest, NULL); + + if (!error.empty()) { + *ErrorMessage = strdup(error.c_str()); + return true; + } + dest.flush(); + return false; +} + +/*--.. 
Operations on inline assembler ......................................--*/ +void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) { + unwrap(M)->setModuleInlineAsm(StringRef(Asm)); +} + + +/*--.. Operations on module contexts ......................................--*/ +LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) { + return wrap(&unwrap(M)->getContext()); +} + + +/*===-- Operations on types -----------------------------------------------===*/ + +/*--.. Operations on all types (mostly) ....................................--*/ + +LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) { + switch (unwrap(Ty)->getTypeID()) { + default: llvm_unreachable("Unhandled TypeID."); + case Type::VoidTyID: + return LLVMVoidTypeKind; + case Type::HalfTyID: + return LLVMHalfTypeKind; + case Type::FloatTyID: + return LLVMFloatTypeKind; + case Type::DoubleTyID: + return LLVMDoubleTypeKind; + case Type::X86_FP80TyID: + return LLVMX86_FP80TypeKind; + case Type::FP128TyID: + return LLVMFP128TypeKind; + case Type::PPC_FP128TyID: + return LLVMPPC_FP128TypeKind; + case Type::LabelTyID: + return LLVMLabelTypeKind; + case Type::MetadataTyID: + return LLVMMetadataTypeKind; + case Type::IntegerTyID: + return LLVMIntegerTypeKind; + case Type::FunctionTyID: + return LLVMFunctionTypeKind; + case Type::StructTyID: + return LLVMStructTypeKind; + case Type::ArrayTyID: + return LLVMArrayTypeKind; + case Type::PointerTyID: + return LLVMPointerTypeKind; + case Type::VectorTyID: + return LLVMVectorTypeKind; + case Type::X86_MMXTyID: + return LLVMX86_MMXTypeKind; + } +} + +LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty) +{ + return unwrap(Ty)->isSized(); +} + +LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty) { + return wrap(&unwrap(Ty)->getContext()); +} + +/*--.. Operations on integer types .........................................--*/ + +LLVMTypeRef LLVMInt1TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getInt1Ty(*unwrap(C)); +} +LLVMTypeRef LLVMInt8TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getInt8Ty(*unwrap(C)); +} +LLVMTypeRef LLVMInt16TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getInt16Ty(*unwrap(C)); +} +LLVMTypeRef LLVMInt32TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getInt32Ty(*unwrap(C)); +} +LLVMTypeRef LLVMInt64TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getInt64Ty(*unwrap(C)); +} +LLVMTypeRef LLVMIntTypeInContext(LLVMContextRef C, unsigned NumBits) { + return wrap(IntegerType::get(*unwrap(C), NumBits)); +} + +LLVMTypeRef LLVMInt1Type(void) { + return LLVMInt1TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMInt8Type(void) { + return LLVMInt8TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMInt16Type(void) { + return LLVMInt16TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMInt32Type(void) { + return LLVMInt32TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMInt64Type(void) { + return LLVMInt64TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMIntType(unsigned NumBits) { + return LLVMIntTypeInContext(LLVMGetGlobalContext(), NumBits); +} + +unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy) { + return unwrap<IntegerType>(IntegerTy)->getBitWidth(); +} + +/*--.. 
Operations on real types ............................................--*/ + +LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getHalfTy(*unwrap(C)); +} +LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getFloatTy(*unwrap(C)); +} +LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getDoubleTy(*unwrap(C)); +} +LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getX86_FP80Ty(*unwrap(C)); +} +LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getFP128Ty(*unwrap(C)); +} +LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getPPC_FP128Ty(*unwrap(C)); +} +LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) { + return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C)); +} + +LLVMTypeRef LLVMHalfType(void) { + return LLVMHalfTypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMFloatType(void) { + return LLVMFloatTypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMDoubleType(void) { + return LLVMDoubleTypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMX86FP80Type(void) { + return LLVMX86FP80TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMFP128Type(void) { + return LLVMFP128TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMPPCFP128Type(void) { + return LLVMPPCFP128TypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMX86MMXType(void) { + return LLVMX86MMXTypeInContext(LLVMGetGlobalContext()); +} + +/*--.. Operations on function types ........................................--*/ + +LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType, + LLVMTypeRef *ParamTypes, unsigned ParamCount, + LLVMBool IsVarArg) { + ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount); + return wrap(FunctionType::get(unwrap(ReturnType), Tys, IsVarArg != 0)); +} + +LLVMBool LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy) { + return unwrap<FunctionType>(FunctionTy)->isVarArg(); +} + +LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy) { + return wrap(unwrap<FunctionType>(FunctionTy)->getReturnType()); +} + +unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy) { + return unwrap<FunctionType>(FunctionTy)->getNumParams(); +} + +void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest) { + FunctionType *Ty = unwrap<FunctionType>(FunctionTy); + for (FunctionType::param_iterator I = Ty->param_begin(), + E = Ty->param_end(); I != E; ++I) + *Dest++ = wrap(*I); +} + +/*--.. 
Operations on struct types ..........................................--*/ + +LLVMTypeRef LLVMStructTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes, + unsigned ElementCount, LLVMBool Packed) { + ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount); + return wrap(StructType::get(*unwrap(C), Tys, Packed != 0)); +} + +LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes, + unsigned ElementCount, LLVMBool Packed) { + return LLVMStructTypeInContext(LLVMGetGlobalContext(), ElementTypes, + ElementCount, Packed); +} + +LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name) +{ + return wrap(StructType::create(*unwrap(C), Name)); +} + +const char *LLVMGetStructName(LLVMTypeRef Ty) +{ + StructType *Type = unwrap<StructType>(Ty); + if (!Type->hasName()) + return 0; + return Type->getName().data(); +} + +void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes, + unsigned ElementCount, LLVMBool Packed) { + ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount); + unwrap<StructType>(StructTy)->setBody(Tys, Packed != 0); +} + +unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy) { + return unwrap<StructType>(StructTy)->getNumElements(); +} + +void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest) { + StructType *Ty = unwrap<StructType>(StructTy); + for (StructType::element_iterator I = Ty->element_begin(), + E = Ty->element_end(); I != E; ++I) + *Dest++ = wrap(*I); +} + +LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy) { + return unwrap<StructType>(StructTy)->isPacked(); +} + +LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy) { + return unwrap<StructType>(StructTy)->isOpaque(); +} + +LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) { + return wrap(unwrap(M)->getTypeByName(Name)); +} + +/*--.. Operations on array, pointer, and vector types (sequence types) .....--*/ + +LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) { + return wrap(ArrayType::get(unwrap(ElementType), ElementCount)); +} + +LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace) { + return wrap(PointerType::get(unwrap(ElementType), AddressSpace)); +} + +LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount) { + return wrap(VectorType::get(unwrap(ElementType), ElementCount)); +} + +LLVMTypeRef LLVMGetElementType(LLVMTypeRef Ty) { + return wrap(unwrap<SequentialType>(Ty)->getElementType()); +} + +unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy) { + return unwrap<ArrayType>(ArrayTy)->getNumElements(); +} + +unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy) { + return unwrap<PointerType>(PointerTy)->getAddressSpace(); +} + +unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy) { + return unwrap<VectorType>(VectorTy)->getNumElements(); +} + +/*--.. Operations on other types ...........................................--*/ + +LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C) { + return wrap(Type::getVoidTy(*unwrap(C))); +} +LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C) { + return wrap(Type::getLabelTy(*unwrap(C))); +} + +LLVMTypeRef LLVMVoidType(void) { + return LLVMVoidTypeInContext(LLVMGetGlobalContext()); +} +LLVMTypeRef LLVMLabelType(void) { + return LLVMLabelTypeInContext(LLVMGetGlobalContext()); +} + +/*===-- Operations on values ----------------------------------------------===*/ + +/*--.. 
Operations on all values ............................................--*/ + +LLVMTypeRef LLVMTypeOf(LLVMValueRef Val) { + return wrap(unwrap(Val)->getType()); +} + +const char *LLVMGetValueName(LLVMValueRef Val) { + return unwrap(Val)->getName().data(); +} + +void LLVMSetValueName(LLVMValueRef Val, const char *Name) { + unwrap(Val)->setName(Name); +} + +void LLVMDumpValue(LLVMValueRef Val) { + unwrap(Val)->dump(); +} + +void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal) { + unwrap(OldVal)->replaceAllUsesWith(unwrap(NewVal)); +} + +int LLVMHasMetadata(LLVMValueRef Inst) { + return unwrap<Instruction>(Inst)->hasMetadata(); +} + +LLVMValueRef LLVMGetMetadata(LLVMValueRef Inst, unsigned KindID) { + return wrap(unwrap<Instruction>(Inst)->getMetadata(KindID)); +} + +void LLVMSetMetadata(LLVMValueRef Inst, unsigned KindID, LLVMValueRef MD) { + unwrap<Instruction>(Inst)->setMetadata(KindID, MD? unwrap<MDNode>(MD) : NULL); +} + +/*--.. Conversion functions ................................................--*/ + +#define LLVM_DEFINE_VALUE_CAST(name) \ + LLVMValueRef LLVMIsA##name(LLVMValueRef Val) { \ + return wrap(static_cast<Value*>(dyn_cast_or_null<name>(unwrap(Val)))); \ + } + +LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DEFINE_VALUE_CAST) + +/*--.. Operations on Uses ..................................................--*/ +LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val) { + Value *V = unwrap(Val); + Value::use_iterator I = V->use_begin(); + if (I == V->use_end()) + return 0; + return wrap(&(I.getUse())); +} + +LLVMUseRef LLVMGetNextUse(LLVMUseRef U) { + Use *Next = unwrap(U)->getNext(); + if (Next) + return wrap(Next); + return 0; +} + +LLVMValueRef LLVMGetUser(LLVMUseRef U) { + return wrap(unwrap(U)->getUser()); +} + +LLVMValueRef LLVMGetUsedValue(LLVMUseRef U) { + return wrap(unwrap(U)->get()); +} + +/*--.. Operations on Users .................................................--*/ +LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index) { + Value *V = unwrap(Val); + if (MDNode *MD = dyn_cast<MDNode>(V)) + return wrap(MD->getOperand(Index)); + return wrap(cast<User>(V)->getOperand(Index)); +} + +void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) { + unwrap<User>(Val)->setOperand(Index, unwrap(Op)); +} + +int LLVMGetNumOperands(LLVMValueRef Val) { + Value *V = unwrap(Val); + if (MDNode *MD = dyn_cast<MDNode>(V)) + return MD->getNumOperands(); + return cast<User>(V)->getNumOperands(); +} + +/*--.. Operations on constants of any type .................................--*/ + +LLVMValueRef LLVMConstNull(LLVMTypeRef Ty) { + return wrap(Constant::getNullValue(unwrap(Ty))); +} + +LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty) { + return wrap(Constant::getAllOnesValue(unwrap(Ty))); +} + +LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty) { + return wrap(UndefValue::get(unwrap(Ty))); +} + +LLVMBool LLVMIsConstant(LLVMValueRef Ty) { + return isa<Constant>(unwrap(Ty)); +} + +LLVMBool LLVMIsNull(LLVMValueRef Val) { + if (Constant *C = dyn_cast<Constant>(unwrap(Val))) + return C->isNullValue(); + return false; +} + +LLVMBool LLVMIsUndef(LLVMValueRef Val) { + return isa<UndefValue>(unwrap(Val)); +} + +LLVMValueRef LLVMConstPointerNull(LLVMTypeRef Ty) { + return + wrap(ConstantPointerNull::get(unwrap<PointerType>(Ty))); +} + +/*--.. 
Operations on metadata nodes ........................................--*/ + +LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str, + unsigned SLen) { + return wrap(MDString::get(*unwrap(C), StringRef(Str, SLen))); +} + +LLVMValueRef LLVMMDString(const char *Str, unsigned SLen) { + return LLVMMDStringInContext(LLVMGetGlobalContext(), Str, SLen); +} + +LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals, + unsigned Count) { + return wrap(MDNode::get(*unwrap(C), + makeArrayRef(unwrap<Value>(Vals, Count), Count))); +} + +LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) { + return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count); +} + +const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) { + if (const MDString *S = dyn_cast<MDString>(unwrap(V))) { + *Len = S->getString().size(); + return S->getString().data(); + } + *Len = 0; + return 0; +} + +unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V) +{ + return cast<MDNode>(unwrap(V))->getNumOperands(); +} + +void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest) +{ + const MDNode *N = cast<MDNode>(unwrap(V)); + const unsigned numOperands = N->getNumOperands(); + for (unsigned i = 0; i < numOperands; i++) + Dest[i] = wrap(N->getOperand(i)); +} + +unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name) +{ + if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) { + return N->getNumOperands(); + } + return 0; +} + +void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest) +{ + NamedMDNode *N = unwrap(M)->getNamedMetadata(name); + if (!N) + return; + for (unsigned i=0;i<N->getNumOperands();i++) + Dest[i] = wrap(N->getOperand(i)); +} + +void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name, + LLVMValueRef Val) +{ + NamedMDNode *N = unwrap(M)->getOrInsertNamedMetadata(name); + if (!N) + return; + MDNode *Op = Val ? unwrap<MDNode>(Val) : NULL; + if (Op) + N->addOperand(Op); +} + +/*--.. 
Operations on scalar constants ......................................--*/ + +LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N, + LLVMBool SignExtend) { + return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), N, SignExtend != 0)); +} + +LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy, + unsigned NumWords, + const uint64_t Words[]) { + IntegerType *Ty = unwrap<IntegerType>(IntTy); + return wrap(ConstantInt::get(Ty->getContext(), + APInt(Ty->getBitWidth(), + makeArrayRef(Words, NumWords)))); +} + +LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char Str[], + uint8_t Radix) { + return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str), + Radix)); +} + +LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char Str[], + unsigned SLen, uint8_t Radix) { + return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str, SLen), + Radix)); +} + +LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N) { + return wrap(ConstantFP::get(unwrap(RealTy), N)); +} + +LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text) { + return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Text))); +} + +LLVMValueRef LLVMConstRealOfStringAndSize(LLVMTypeRef RealTy, const char Str[], + unsigned SLen) { + return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Str, SLen))); +} + +unsigned long long LLVMConstIntGetZExtValue(LLVMValueRef ConstantVal) { + return unwrap<ConstantInt>(ConstantVal)->getZExtValue(); +} + +long long LLVMConstIntGetSExtValue(LLVMValueRef ConstantVal) { + return unwrap<ConstantInt>(ConstantVal)->getSExtValue(); +} + +/*--.. Operations on composite constants ...................................--*/ + +LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str, + unsigned Length, + LLVMBool DontNullTerminate) { + /* Inverted the sense of AddNull because ', 0)' is a + better mnemonic for null termination than ', 1)'. 
*/ + return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length), + DontNullTerminate == 0)); +} +LLVMValueRef LLVMConstStructInContext(LLVMContextRef C, + LLVMValueRef *ConstantVals, + unsigned Count, LLVMBool Packed) { + Constant **Elements = unwrap<Constant>(ConstantVals, Count); + return wrap(ConstantStruct::getAnon(*unwrap(C), makeArrayRef(Elements, Count), + Packed != 0)); +} + +LLVMValueRef LLVMConstString(const char *Str, unsigned Length, + LLVMBool DontNullTerminate) { + return LLVMConstStringInContext(LLVMGetGlobalContext(), Str, Length, + DontNullTerminate); +} +LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy, + LLVMValueRef *ConstantVals, unsigned Length) { + ArrayRef<Constant*> V(unwrap<Constant>(ConstantVals, Length), Length); + return wrap(ConstantArray::get(ArrayType::get(unwrap(ElementTy), Length), V)); +} +LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count, + LLVMBool Packed) { + return LLVMConstStructInContext(LLVMGetGlobalContext(), ConstantVals, Count, + Packed); +} + +LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy, + LLVMValueRef *ConstantVals, + unsigned Count) { + Constant **Elements = unwrap<Constant>(ConstantVals, Count); + StructType *Ty = cast<StructType>(unwrap(StructTy)); + + return wrap(ConstantStruct::get(Ty, makeArrayRef(Elements, Count))); +} + +LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) { + return wrap(ConstantVector::get(makeArrayRef( + unwrap<Constant>(ScalarConstantVals, Size), Size))); +} + +/*-- Opcode mapping */ + +static LLVMOpcode map_to_llvmopcode(int opcode) +{ + switch (opcode) { + default: llvm_unreachable("Unhandled Opcode."); +#define HANDLE_INST(num, opc, clas) case num: return LLVM##opc; +#include "llvm/IR/Instruction.def" +#undef HANDLE_INST + } +} + +static int map_from_llvmopcode(LLVMOpcode code) +{ + switch (code) { +#define HANDLE_INST(num, opc, clas) case LLVM##opc: return num; +#include "llvm/IR/Instruction.def" +#undef HANDLE_INST + } + llvm_unreachable("Unhandled Opcode."); +} + +/*--.. 
Constant expressions ................................................--*/ + +LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) { + return map_to_llvmopcode(unwrap<ConstantExpr>(ConstantVal)->getOpcode()); +} + +LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty) { + return wrap(ConstantExpr::getAlignOf(unwrap(Ty))); +} + +LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty) { + return wrap(ConstantExpr::getSizeOf(unwrap(Ty))); +} + +LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal) { + return wrap(ConstantExpr::getNeg(unwrap<Constant>(ConstantVal))); +} + +LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal) { + return wrap(ConstantExpr::getNSWNeg(unwrap<Constant>(ConstantVal))); +} + +LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal) { + return wrap(ConstantExpr::getNUWNeg(unwrap<Constant>(ConstantVal))); +} + + +LLVMValueRef LLVMConstFNeg(LLVMValueRef ConstantVal) { + return wrap(ConstantExpr::getFNeg(unwrap<Constant>(ConstantVal))); +} + +LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal) { + return wrap(ConstantExpr::getNot(unwrap<Constant>(ConstantVal))); +} + +LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getAdd(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNSWAdd(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNUWAdd(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNUWAdd(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFAdd(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getSub(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNSWSub(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNSWSub(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNUWSub(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNUWSub(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFSub(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getMul(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNSWMul(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getNUWMul(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFMul(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getUDiv(unwrap<Constant>(LHSConstant), + 
unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getSDiv(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstExactSDiv(LLVMValueRef LHSConstant, + LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getExactSDiv(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFDiv(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstURem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getURem(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstSRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getSRem(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFRem(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getAnd(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getOr(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getXor(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstICmp(LLVMIntPredicate Predicate, + LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getICmp(Predicate, + unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate, + LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getFCmp(Predicate, + unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getShl(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getLShr(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) { + return wrap(ConstantExpr::getAShr(unwrap<Constant>(LHSConstant), + unwrap<Constant>(RHSConstant))); +} + +LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal, + LLVMValueRef *ConstantIndices, unsigned NumIndices) { + ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices), + NumIndices); + return wrap(ConstantExpr::getGetElementPtr(unwrap<Constant>(ConstantVal), + IdxList)); +} + +LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal, + LLVMValueRef *ConstantIndices, + unsigned NumIndices) { + Constant* Val = unwrap<Constant>(ConstantVal); + ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices), + NumIndices); + return wrap(ConstantExpr::getInBoundsGetElementPtr(Val, IdxList)); +} + +LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getTrunc(unwrap<Constant>(ConstantVal), + 
unwrap(ToType))); +} + +LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getSExt(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getZExt(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getFPTrunc(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getFPExtend(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getUIToFP(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstSIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getSIToFP(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getFPToUI(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getFPToSI(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getPtrToInt(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getIntToPtr(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getBitCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstZExtOrBitCast(LLVMValueRef ConstantVal, + LLVMTypeRef ToType) { + return wrap(ConstantExpr::getZExtOrBitCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstSExtOrBitCast(LLVMValueRef ConstantVal, + LLVMTypeRef ToType) { + return wrap(ConstantExpr::getSExtOrBitCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstTruncOrBitCast(LLVMValueRef ConstantVal, + LLVMTypeRef ToType) { + return wrap(ConstantExpr::getTruncOrBitCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstPointerCast(LLVMValueRef ConstantVal, + LLVMTypeRef ToType) { + return wrap(ConstantExpr::getPointerCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstIntCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType, + LLVMBool isSigned) { + return wrap(ConstantExpr::getIntegerCast(unwrap<Constant>(ConstantVal), + unwrap(ToType), isSigned)); +} + +LLVMValueRef LLVMConstFPCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { + return wrap(ConstantExpr::getFPCast(unwrap<Constant>(ConstantVal), + unwrap(ToType))); +} + +LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition, + LLVMValueRef ConstantIfTrue, + LLVMValueRef ConstantIfFalse) { + return wrap(ConstantExpr::getSelect(unwrap<Constant>(ConstantCondition), + unwrap<Constant>(ConstantIfTrue), + unwrap<Constant>(ConstantIfFalse))); +} + +LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant, + LLVMValueRef IndexConstant) { + return wrap(ConstantExpr::getExtractElement(unwrap<Constant>(VectorConstant), + unwrap<Constant>(IndexConstant))); +} + 
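/* --- Illustrative usage sketch (editorial addition, not part of the Core.cpp
 * diff above) ---------------------------------------------------------------
 * The wrappers defined above (LLVMConstInt, LLVMConstAdd, LLVMConstICmp,
 * LLVMConstSelect, the cast helpers, ...) just forward to ConstantExpr's
 * folding constructors.  The snippet below is a minimal sketch of how a C
 * client might chain them; it assumes only the llvm-c/Core.h declarations
 * that correspond to the definitions in this file, and the function name
 * exampleFoldConstants is hypothetical. */

#include "llvm-c/Core.h"

static LLVMValueRef exampleFoldConstants(void) {
  /* i32 type from the global context, matching LLVMInt32Type() above. */
  LLVMTypeRef I32 = LLVMInt32Type();
  LLVMValueRef A   = LLVMConstInt(I32, 40, /*SignExtend=*/0);
  LLVMValueRef B   = LLVMConstInt(I32, 2,  /*SignExtend=*/0);
  /* ConstantExpr::getAdd folds this to the ConstantInt 42. */
  LLVMValueRef Sum = LLVMConstAdd(A, B);
  /* Compare against zero (i1 result) and select between the operands. */
  LLVMValueRef IsZero = LLVMConstICmp(LLVMIntEQ, Sum, LLVMConstNull(I32));
  return LLVMConstSelect(IsZero, A, B);
}
/* ------------------------------------------------------------------------ */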
+LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant, + LLVMValueRef ElementValueConstant, + LLVMValueRef IndexConstant) { + return wrap(ConstantExpr::getInsertElement(unwrap<Constant>(VectorConstant), + unwrap<Constant>(ElementValueConstant), + unwrap<Constant>(IndexConstant))); +} + +LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant, + LLVMValueRef VectorBConstant, + LLVMValueRef MaskConstant) { + return wrap(ConstantExpr::getShuffleVector(unwrap<Constant>(VectorAConstant), + unwrap<Constant>(VectorBConstant), + unwrap<Constant>(MaskConstant))); +} + +LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList, + unsigned NumIdx) { + return wrap(ConstantExpr::getExtractValue(unwrap<Constant>(AggConstant), + makeArrayRef(IdxList, NumIdx))); +} + +LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant, + LLVMValueRef ElementValueConstant, + unsigned *IdxList, unsigned NumIdx) { + return wrap(ConstantExpr::getInsertValue(unwrap<Constant>(AggConstant), + unwrap<Constant>(ElementValueConstant), + makeArrayRef(IdxList, NumIdx))); +} + +LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty, const char *AsmString, + const char *Constraints, + LLVMBool HasSideEffects, + LLVMBool IsAlignStack) { + return wrap(InlineAsm::get(dyn_cast<FunctionType>(unwrap(Ty)), AsmString, + Constraints, HasSideEffects, IsAlignStack)); +} + +LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB) { + return wrap(BlockAddress::get(unwrap<Function>(F), unwrap(BB))); +} + +/*--.. Operations on global variables, functions, and aliases (globals) ....--*/ + +LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global) { + return wrap(unwrap<GlobalValue>(Global)->getParent()); +} + +LLVMBool LLVMIsDeclaration(LLVMValueRef Global) { + return unwrap<GlobalValue>(Global)->isDeclaration(); +} + +LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) { + switch (unwrap<GlobalValue>(Global)->getLinkage()) { + case GlobalValue::ExternalLinkage: + return LLVMExternalLinkage; + case GlobalValue::AvailableExternallyLinkage: + return LLVMAvailableExternallyLinkage; + case GlobalValue::LinkOnceAnyLinkage: + return LLVMLinkOnceAnyLinkage; + case GlobalValue::LinkOnceODRLinkage: + return LLVMLinkOnceODRLinkage; + case GlobalValue::LinkOnceODRAutoHideLinkage: + return LLVMLinkOnceODRAutoHideLinkage; + case GlobalValue::WeakAnyLinkage: + return LLVMWeakAnyLinkage; + case GlobalValue::WeakODRLinkage: + return LLVMWeakODRLinkage; + case GlobalValue::AppendingLinkage: + return LLVMAppendingLinkage; + case GlobalValue::InternalLinkage: + return LLVMInternalLinkage; + case GlobalValue::PrivateLinkage: + return LLVMPrivateLinkage; + case GlobalValue::LinkerPrivateLinkage: + return LLVMLinkerPrivateLinkage; + case GlobalValue::LinkerPrivateWeakLinkage: + return LLVMLinkerPrivateWeakLinkage; + case GlobalValue::DLLImportLinkage: + return LLVMDLLImportLinkage; + case GlobalValue::DLLExportLinkage: + return LLVMDLLExportLinkage; + case GlobalValue::ExternalWeakLinkage: + return LLVMExternalWeakLinkage; + case GlobalValue::CommonLinkage: + return LLVMCommonLinkage; + } + + llvm_unreachable("Invalid GlobalValue linkage!"); +} + +void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) { + GlobalValue *GV = unwrap<GlobalValue>(Global); + + switch (Linkage) { + case LLVMExternalLinkage: + GV->setLinkage(GlobalValue::ExternalLinkage); + break; + case LLVMAvailableExternallyLinkage: + GV->setLinkage(GlobalValue::AvailableExternallyLinkage); + break; + case LLVMLinkOnceAnyLinkage: + 
GV->setLinkage(GlobalValue::LinkOnceAnyLinkage); + break; + case LLVMLinkOnceODRLinkage: + GV->setLinkage(GlobalValue::LinkOnceODRLinkage); + break; + case LLVMLinkOnceODRAutoHideLinkage: + GV->setLinkage(GlobalValue::LinkOnceODRAutoHideLinkage); + break; + case LLVMWeakAnyLinkage: + GV->setLinkage(GlobalValue::WeakAnyLinkage); + break; + case LLVMWeakODRLinkage: + GV->setLinkage(GlobalValue::WeakODRLinkage); + break; + case LLVMAppendingLinkage: + GV->setLinkage(GlobalValue::AppendingLinkage); + break; + case LLVMInternalLinkage: + GV->setLinkage(GlobalValue::InternalLinkage); + break; + case LLVMPrivateLinkage: + GV->setLinkage(GlobalValue::PrivateLinkage); + break; + case LLVMLinkerPrivateLinkage: + GV->setLinkage(GlobalValue::LinkerPrivateLinkage); + break; + case LLVMLinkerPrivateWeakLinkage: + GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage); + break; + case LLVMDLLImportLinkage: + GV->setLinkage(GlobalValue::DLLImportLinkage); + break; + case LLVMDLLExportLinkage: + GV->setLinkage(GlobalValue::DLLExportLinkage); + break; + case LLVMExternalWeakLinkage: + GV->setLinkage(GlobalValue::ExternalWeakLinkage); + break; + case LLVMGhostLinkage: + DEBUG(errs() + << "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported."); + break; + case LLVMCommonLinkage: + GV->setLinkage(GlobalValue::CommonLinkage); + break; + } +} + +const char *LLVMGetSection(LLVMValueRef Global) { + return unwrap<GlobalValue>(Global)->getSection().c_str(); +} + +void LLVMSetSection(LLVMValueRef Global, const char *Section) { + unwrap<GlobalValue>(Global)->setSection(Section); +} + +LLVMVisibility LLVMGetVisibility(LLVMValueRef Global) { + return static_cast<LLVMVisibility>( + unwrap<GlobalValue>(Global)->getVisibility()); +} + +void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz) { + unwrap<GlobalValue>(Global) + ->setVisibility(static_cast<GlobalValue::VisibilityTypes>(Viz)); +} + +unsigned LLVMGetAlignment(LLVMValueRef Global) { + return unwrap<GlobalValue>(Global)->getAlignment(); +} + +void LLVMSetAlignment(LLVMValueRef Global, unsigned Bytes) { + unwrap<GlobalValue>(Global)->setAlignment(Bytes); +} + +/*--.. 
Operations on global variables ......................................--*/ + +LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name) { + return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false, + GlobalValue::ExternalLinkage, 0, Name)); +} + +LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty, + const char *Name, + unsigned AddressSpace) { + return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false, + GlobalValue::ExternalLinkage, 0, Name, 0, + GlobalVariable::NotThreadLocal, AddressSpace)); +} + +LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name) { + return wrap(unwrap(M)->getNamedGlobal(Name)); +} + +LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M) { + Module *Mod = unwrap(M); + Module::global_iterator I = Mod->global_begin(); + if (I == Mod->global_end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M) { + Module *Mod = unwrap(M); + Module::global_iterator I = Mod->global_end(); + if (I == Mod->global_begin()) + return 0; + return wrap(--I); +} + +LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar) { + GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar); + Module::global_iterator I = GV; + if (++I == GV->getParent()->global_end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar) { + GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar); + Module::global_iterator I = GV; + if (I == GV->getParent()->global_begin()) + return 0; + return wrap(--I); +} + +void LLVMDeleteGlobal(LLVMValueRef GlobalVar) { + unwrap<GlobalVariable>(GlobalVar)->eraseFromParent(); +} + +LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar) { + GlobalVariable* GV = unwrap<GlobalVariable>(GlobalVar); + if ( !GV->hasInitializer() ) + return 0; + return wrap(GV->getInitializer()); +} + +void LLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) { + unwrap<GlobalVariable>(GlobalVar) + ->setInitializer(unwrap<Constant>(ConstantVal)); +} + +LLVMBool LLVMIsThreadLocal(LLVMValueRef GlobalVar) { + return unwrap<GlobalVariable>(GlobalVar)->isThreadLocal(); +} + +void LLVMSetThreadLocal(LLVMValueRef GlobalVar, LLVMBool IsThreadLocal) { + unwrap<GlobalVariable>(GlobalVar)->setThreadLocal(IsThreadLocal != 0); +} + +LLVMBool LLVMIsGlobalConstant(LLVMValueRef GlobalVar) { + return unwrap<GlobalVariable>(GlobalVar)->isConstant(); +} + +void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant) { + unwrap<GlobalVariable>(GlobalVar)->setConstant(IsConstant != 0); +} + +/*--.. Operations on aliases ......................................--*/ + +LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee, + const char *Name) { + return wrap(new GlobalAlias(unwrap(Ty), GlobalValue::ExternalLinkage, Name, + unwrap<Constant>(Aliasee), unwrap (M))); +} + +/*--.. 
Operations on functions .............................................--*/ + +LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name, + LLVMTypeRef FunctionTy) { + return wrap(Function::Create(unwrap<FunctionType>(FunctionTy), + GlobalValue::ExternalLinkage, Name, unwrap(M))); +} + +LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name) { + return wrap(unwrap(M)->getFunction(Name)); +} + +LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M) { + Module *Mod = unwrap(M); + Module::iterator I = Mod->begin(); + if (I == Mod->end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M) { + Module *Mod = unwrap(M); + Module::iterator I = Mod->end(); + if (I == Mod->begin()) + return 0; + return wrap(--I); +} + +LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Module::iterator I = Func; + if (++I == Func->getParent()->end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Module::iterator I = Func; + if (I == Func->getParent()->begin()) + return 0; + return wrap(--I); +} + +void LLVMDeleteFunction(LLVMValueRef Fn) { + unwrap<Function>(Fn)->eraseFromParent(); +} + +unsigned LLVMGetIntrinsicID(LLVMValueRef Fn) { + if (Function *F = dyn_cast<Function>(unwrap(Fn))) + return F->getIntrinsicID(); + return 0; +} + +unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn) { + return unwrap<Function>(Fn)->getCallingConv(); +} + +void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC) { + return unwrap<Function>(Fn)->setCallingConv( + static_cast<CallingConv::ID>(CC)); +} + +const char *LLVMGetGC(LLVMValueRef Fn) { + Function *F = unwrap<Function>(Fn); + return F->hasGC()? F->getGC() : 0; +} + +void LLVMSetGC(LLVMValueRef Fn, const char *GC) { + Function *F = unwrap<Function>(Fn); + if (GC) + F->setGC(GC); + else + F->clearGC(); +} + +void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { + Function *Func = unwrap<Function>(Fn); + const AttributeSet PAL = Func->getAttributes(); + AttrBuilder B(PA); + const AttributeSet PALnew = + PAL.addAttributes(Func->getContext(), AttributeSet::FunctionIndex, + AttributeSet::get(Func->getContext(), + AttributeSet::FunctionIndex, B)); + Func->setAttributes(PALnew); +} + +void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { + Function *Func = unwrap<Function>(Fn); + const AttributeSet PAL = Func->getAttributes(); + AttrBuilder B(PA); + const AttributeSet PALnew = + PAL.removeAttributes(Func->getContext(), AttributeSet::FunctionIndex, + AttributeSet::get(Func->getContext(), + AttributeSet::FunctionIndex, B)); + Func->setAttributes(PALnew); +} + +LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + const AttributeSet PAL = Func->getAttributes(); + return (LLVMAttribute)PAL.Raw(AttributeSet::FunctionIndex); +} + +/*--.. 
Operations on parameters ............................................--*/ + +unsigned LLVMCountParams(LLVMValueRef FnRef) { + // This function is strictly redundant to + // LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(FnRef))) + return unwrap<Function>(FnRef)->arg_size(); +} + +void LLVMGetParams(LLVMValueRef FnRef, LLVMValueRef *ParamRefs) { + Function *Fn = unwrap<Function>(FnRef); + for (Function::arg_iterator I = Fn->arg_begin(), + E = Fn->arg_end(); I != E; I++) + *ParamRefs++ = wrap(I); +} + +LLVMValueRef LLVMGetParam(LLVMValueRef FnRef, unsigned index) { + Function::arg_iterator AI = unwrap<Function>(FnRef)->arg_begin(); + while (index --> 0) + AI++; + return wrap(AI); +} + +LLVMValueRef LLVMGetParamParent(LLVMValueRef V) { + return wrap(unwrap<Argument>(V)->getParent()); +} + +LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Function::arg_iterator I = Func->arg_begin(); + if (I == Func->arg_end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Function::arg_iterator I = Func->arg_end(); + if (I == Func->arg_begin()) + return 0; + return wrap(--I); +} + +LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg) { + Argument *A = unwrap<Argument>(Arg); + Function::arg_iterator I = A; + if (++I == A->getParent()->arg_end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) { + Argument *A = unwrap<Argument>(Arg); + Function::arg_iterator I = A; + if (I == A->getParent()->arg_begin()) + return 0; + return wrap(--I); +} + +void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) { + Argument *A = unwrap<Argument>(Arg); + AttrBuilder B(PA); + A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); +} + +void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) { + Argument *A = unwrap<Argument>(Arg); + AttrBuilder B(PA); + A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); +} + +LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) { + Argument *A = unwrap<Argument>(Arg); + return (LLVMAttribute)A->getParent()->getAttributes(). + Raw(A->getArgNo()+1); +} + + +void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) { + Argument *A = unwrap<Argument>(Arg); + AttrBuilder B; + B.addAlignmentAttr(align); + A->addAttr(AttributeSet::get(A->getContext(),A->getArgNo() + 1, B)); +} + +/*--.. 
Operations on basic blocks ..........................................--*/ + +LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB) { + return wrap(static_cast<Value*>(unwrap(BB))); +} + +LLVMBool LLVMValueIsBasicBlock(LLVMValueRef Val) { + return isa<BasicBlock>(unwrap(Val)); +} + +LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val) { + return wrap(unwrap<BasicBlock>(Val)); +} + +LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB) { + return wrap(unwrap(BB)->getParent()); +} + +LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB) { + return wrap(unwrap(BB)->getTerminator()); +} + +unsigned LLVMCountBasicBlocks(LLVMValueRef FnRef) { + return unwrap<Function>(FnRef)->size(); +} + +void LLVMGetBasicBlocks(LLVMValueRef FnRef, LLVMBasicBlockRef *BasicBlocksRefs){ + Function *Fn = unwrap<Function>(FnRef); + for (Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) + *BasicBlocksRefs++ = wrap(I); +} + +LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn) { + return wrap(&unwrap<Function>(Fn)->getEntryBlock()); +} + +LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Function::iterator I = Func->begin(); + if (I == Func->end()) + return 0; + return wrap(I); +} + +LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn) { + Function *Func = unwrap<Function>(Fn); + Function::iterator I = Func->end(); + if (I == Func->begin()) + return 0; + return wrap(--I); +} + +LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB) { + BasicBlock *Block = unwrap(BB); + Function::iterator I = Block; + if (++I == Block->getParent()->end()) + return 0; + return wrap(I); +} + +LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB) { + BasicBlock *Block = unwrap(BB); + Function::iterator I = Block; + if (I == Block->getParent()->begin()) + return 0; + return wrap(--I); +} + +LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C, + LLVMValueRef FnRef, + const char *Name) { + return wrap(BasicBlock::Create(*unwrap(C), Name, unwrap<Function>(FnRef))); +} + +LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef FnRef, const char *Name) { + return LLVMAppendBasicBlockInContext(LLVMGetGlobalContext(), FnRef, Name); +} + +LLVMBasicBlockRef LLVMInsertBasicBlockInContext(LLVMContextRef C, + LLVMBasicBlockRef BBRef, + const char *Name) { + BasicBlock *BB = unwrap(BBRef); + return wrap(BasicBlock::Create(*unwrap(C), Name, BB->getParent(), BB)); +} + +LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef BBRef, + const char *Name) { + return LLVMInsertBasicBlockInContext(LLVMGetGlobalContext(), BBRef, Name); +} + +void LLVMDeleteBasicBlock(LLVMBasicBlockRef BBRef) { + unwrap(BBRef)->eraseFromParent(); +} + +void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BBRef) { + unwrap(BBRef)->removeFromParent(); +} + +void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) { + unwrap(BB)->moveBefore(unwrap(MovePos)); +} + +void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) { + unwrap(BB)->moveAfter(unwrap(MovePos)); +} + +/*--.. 
Operations on instructions ..........................................--*/ + +LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst) { + return wrap(unwrap<Instruction>(Inst)->getParent()); +} + +LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB) { + BasicBlock *Block = unwrap(BB); + BasicBlock::iterator I = Block->begin(); + if (I == Block->end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB) { + BasicBlock *Block = unwrap(BB); + BasicBlock::iterator I = Block->end(); + if (I == Block->begin()) + return 0; + return wrap(--I); +} + +LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst) { + Instruction *Instr = unwrap<Instruction>(Inst); + BasicBlock::iterator I = Instr; + if (++I == Instr->getParent()->end()) + return 0; + return wrap(I); +} + +LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst) { + Instruction *Instr = unwrap<Instruction>(Inst); + BasicBlock::iterator I = Instr; + if (I == Instr->getParent()->begin()) + return 0; + return wrap(--I); +} + +void LLVMInstructionEraseFromParent(LLVMValueRef Inst) { + unwrap<Instruction>(Inst)->eraseFromParent(); +} + +LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst) { + if (ICmpInst *I = dyn_cast<ICmpInst>(unwrap(Inst))) + return (LLVMIntPredicate)I->getPredicate(); + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst))) + if (CE->getOpcode() == Instruction::ICmp) + return (LLVMIntPredicate)CE->getPredicate(); + return (LLVMIntPredicate)0; +} + +LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst) { + if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst))) + return map_to_llvmopcode(C->getOpcode()); + return (LLVMOpcode)0; +} + +/*--.. Call and invoke instructions ........................................--*/ + +unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) { + Value *V = unwrap(Instr); + if (CallInst *CI = dyn_cast<CallInst>(V)) + return CI->getCallingConv(); + if (InvokeInst *II = dyn_cast<InvokeInst>(V)) + return II->getCallingConv(); + llvm_unreachable("LLVMGetInstructionCallConv applies only to call and invoke!"); +} + +void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) { + Value *V = unwrap(Instr); + if (CallInst *CI = dyn_cast<CallInst>(V)) + return CI->setCallingConv(static_cast<CallingConv::ID>(CC)); + else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) + return II->setCallingConv(static_cast<CallingConv::ID>(CC)); + llvm_unreachable("LLVMSetInstructionCallConv applies only to call and invoke!"); +} + +void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, + LLVMAttribute PA) { + CallSite Call = CallSite(unwrap<Instruction>(Instr)); + AttrBuilder B(PA); + Call.setAttributes( + Call.getAttributes().addAttributes(Call->getContext(), index, + AttributeSet::get(Call->getContext(), + index, B))); +} + +void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index, + LLVMAttribute PA) { + CallSite Call = CallSite(unwrap<Instruction>(Instr)); + AttrBuilder B(PA); + Call.setAttributes(Call.getAttributes() + .removeAttributes(Call->getContext(), index, + AttributeSet::get(Call->getContext(), + index, B))); +} + +void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index, + unsigned align) { + CallSite Call = CallSite(unwrap<Instruction>(Instr)); + AttrBuilder B; + B.addAlignmentAttr(align); + Call.setAttributes(Call.getAttributes() + .addAttributes(Call->getContext(), index, + AttributeSet::get(Call->getContext(), + index, B))); +} + +/*--.. 
Operations on call instructions (only) ..............................--*/ + +LLVMBool LLVMIsTailCall(LLVMValueRef Call) { + return unwrap<CallInst>(Call)->isTailCall(); +} + +void LLVMSetTailCall(LLVMValueRef Call, LLVMBool isTailCall) { + unwrap<CallInst>(Call)->setTailCall(isTailCall); +} + +/*--.. Operations on switch instructions (only) ............................--*/ + +LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef Switch) { + return wrap(unwrap<SwitchInst>(Switch)->getDefaultDest()); +} + +/*--.. Operations on phi nodes .............................................--*/ + +void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues, + LLVMBasicBlockRef *IncomingBlocks, unsigned Count) { + PHINode *PhiVal = unwrap<PHINode>(PhiNode); + for (unsigned I = 0; I != Count; ++I) + PhiVal->addIncoming(unwrap(IncomingValues[I]), unwrap(IncomingBlocks[I])); +} + +unsigned LLVMCountIncoming(LLVMValueRef PhiNode) { + return unwrap<PHINode>(PhiNode)->getNumIncomingValues(); +} + +LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index) { + return wrap(unwrap<PHINode>(PhiNode)->getIncomingValue(Index)); +} + +LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index) { + return wrap(unwrap<PHINode>(PhiNode)->getIncomingBlock(Index)); +} + + +/*===-- Instruction builders ----------------------------------------------===*/ + +LLVMBuilderRef LLVMCreateBuilderInContext(LLVMContextRef C) { + return wrap(new IRBuilder<>(*unwrap(C))); +} + +LLVMBuilderRef LLVMCreateBuilder(void) { + return LLVMCreateBuilderInContext(LLVMGetGlobalContext()); +} + +void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block, + LLVMValueRef Instr) { + BasicBlock *BB = unwrap(Block); + Instruction *I = Instr? unwrap<Instruction>(Instr) : (Instruction*) BB->end(); + unwrap(Builder)->SetInsertPoint(BB, I); +} + +void LLVMPositionBuilderBefore(LLVMBuilderRef Builder, LLVMValueRef Instr) { + Instruction *I = unwrap<Instruction>(Instr); + unwrap(Builder)->SetInsertPoint(I->getParent(), I); +} + +void LLVMPositionBuilderAtEnd(LLVMBuilderRef Builder, LLVMBasicBlockRef Block) { + BasicBlock *BB = unwrap(Block); + unwrap(Builder)->SetInsertPoint(BB); +} + +LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder) { + return wrap(unwrap(Builder)->GetInsertBlock()); +} + +void LLVMClearInsertionPosition(LLVMBuilderRef Builder) { + unwrap(Builder)->ClearInsertionPoint(); +} + +void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr) { + unwrap(Builder)->Insert(unwrap<Instruction>(Instr)); +} + +void LLVMInsertIntoBuilderWithName(LLVMBuilderRef Builder, LLVMValueRef Instr, + const char *Name) { + unwrap(Builder)->Insert(unwrap<Instruction>(Instr), Name); +} + +void LLVMDisposeBuilder(LLVMBuilderRef Builder) { + delete unwrap(Builder); +} + +/*--.. Metadata builders ...................................................--*/ + +void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L) { + MDNode *Loc = L ? unwrap<MDNode>(L) : NULL; + unwrap(Builder)->SetCurrentDebugLocation(DebugLoc::getFromDILocation(Loc)); +} + +LLVMValueRef LLVMGetCurrentDebugLocation(LLVMBuilderRef Builder) { + return wrap(unwrap(Builder)->getCurrentDebugLocation() + .getAsMDNode(unwrap(Builder)->getContext())); +} + +void LLVMSetInstDebugLocation(LLVMBuilderRef Builder, LLVMValueRef Inst) { + unwrap(Builder)->SetInstDebugLocation(unwrap<Instruction>(Inst)); +} + + +/*--.. 
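A short sketch of the builder lifecycle wrapped above (illustrative only, not part of this patch); C and BB are assumed to be a context and a block created earlier through the C API:

  #include "llvm-c/Core.h"

  // Create a builder in a specific context, park it at the end of a block,
  // emit one instruction, and free the builder; every LLVMBuild* wrapper
  // below inserts at the builder's current position.
  static void emitReturn(LLVMContextRef C, LLVMBasicBlockRef BB) {
    LLVMBuilderRef B = LLVMCreateBuilderInContext(C);
    LLVMPositionBuilderAtEnd(B, BB);
    LLVMBuildRetVoid(B);     // defined with the instruction builders below
    LLVMDisposeBuilder(B);   // frees only the builder, never the emitted IR
  }
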
Instruction builders ................................................--*/ + +LLVMValueRef LLVMBuildRetVoid(LLVMBuilderRef B) { + return wrap(unwrap(B)->CreateRetVoid()); +} + +LLVMValueRef LLVMBuildRet(LLVMBuilderRef B, LLVMValueRef V) { + return wrap(unwrap(B)->CreateRet(unwrap(V))); +} + +LLVMValueRef LLVMBuildAggregateRet(LLVMBuilderRef B, LLVMValueRef *RetVals, + unsigned N) { + return wrap(unwrap(B)->CreateAggregateRet(unwrap(RetVals), N)); +} + +LLVMValueRef LLVMBuildBr(LLVMBuilderRef B, LLVMBasicBlockRef Dest) { + return wrap(unwrap(B)->CreateBr(unwrap(Dest))); +} + +LLVMValueRef LLVMBuildCondBr(LLVMBuilderRef B, LLVMValueRef If, + LLVMBasicBlockRef Then, LLVMBasicBlockRef Else) { + return wrap(unwrap(B)->CreateCondBr(unwrap(If), unwrap(Then), unwrap(Else))); +} + +LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef B, LLVMValueRef V, + LLVMBasicBlockRef Else, unsigned NumCases) { + return wrap(unwrap(B)->CreateSwitch(unwrap(V), unwrap(Else), NumCases)); +} + +LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr, + unsigned NumDests) { + return wrap(unwrap(B)->CreateIndirectBr(unwrap(Addr), NumDests)); +} + +LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn, + LLVMValueRef *Args, unsigned NumArgs, + LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch, + const char *Name) { + return wrap(unwrap(B)->CreateInvoke(unwrap(Fn), unwrap(Then), unwrap(Catch), + makeArrayRef(unwrap(Args), NumArgs), + Name)); +} + +LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty, + LLVMValueRef PersFn, unsigned NumClauses, + const char *Name) { + return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty), + cast<Function>(unwrap(PersFn)), + NumClauses, Name)); +} + +LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn) { + return wrap(unwrap(B)->CreateResume(unwrap(Exn))); +} + +LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef B) { + return wrap(unwrap(B)->CreateUnreachable()); +} + +void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal, + LLVMBasicBlockRef Dest) { + unwrap<SwitchInst>(Switch)->addCase(unwrap<ConstantInt>(OnVal), unwrap(Dest)); +} + +void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest) { + unwrap<IndirectBrInst>(IndirectBr)->addDestination(unwrap(Dest)); +} + +void LLVMAddClause(LLVMValueRef LandingPad, LLVMValueRef ClauseVal) { + unwrap<LandingPadInst>(LandingPad)-> + addClause(cast<Constant>(unwrap(ClauseVal))); +} + +void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val) { + unwrap<LandingPadInst>(LandingPad)->setCleanup(Val); +} + +/*--.. 
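A sketch of the terminator builders above (illustrative only, not part of this patch); it assumes Fn is a global-context function whose first parameter is an i32, and it uses the standard LLVMGetParam, LLVMInt32Type and LLVMConstInt helpers from llvm-c/Core.h, which are defined outside this section:

  #include "llvm-c/Core.h"

  // Switch on the first i32 argument of Fn: one explicit case plus a
  // default, each block returning a constant.
  static void buildDispatch(LLVMValueRef Fn) {
    LLVMBasicBlockRef Entry = LLVMAppendBasicBlock(Fn, "entry");
    LLVMBasicBlockRef One   = LLVMAppendBasicBlock(Fn, "one");
    LLVMBasicBlockRef Other = LLVMAppendBasicBlock(Fn, "other");

    LLVMBuilderRef B = LLVMCreateBuilder();
    LLVMPositionBuilderAtEnd(B, Entry);
    LLVMValueRef Sw = LLVMBuildSwitch(B, LLVMGetParam(Fn, 0), Other, 1);
    LLVMAddCase(Sw, LLVMConstInt(LLVMInt32Type(), 1, 0), One);

    LLVMPositionBuilderAtEnd(B, One);
    LLVMBuildRet(B, LLVMConstInt(LLVMInt32Type(), 10, 0));
    LLVMPositionBuilderAtEnd(B, Other);
    LLVMBuildRet(B, LLVMConstInt(LLVMInt32Type(), 0, 0));
    LLVMDisposeBuilder(B);
  }
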
Arithmetic ..........................................................--*/ + +LLVMValueRef LLVMBuildAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateAdd(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNSWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNSWAdd(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNUWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNUWAdd(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFAdd(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateSub(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNSWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNSWSub(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNUWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNUWSub(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFSub(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateMul(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNSWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNSWMul(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNUWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateNUWMul(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFMul(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateUDiv(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateSDiv(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildExactSDiv(LLVMBuilderRef B, LLVMValueRef LHS, + LLVMValueRef RHS, const char *Name) { + return wrap(unwrap(B)->CreateExactSDiv(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFDiv(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildURem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateURem(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildSRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateSRem(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFRem(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildShl(LLVMBuilderRef B, 
LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateShl(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildLShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateLShr(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildAShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateAShr(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildAnd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateAnd(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildOr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateOr(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildXor(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateXor(unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op, + LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateBinOp(Instruction::BinaryOps(map_from_llvmopcode(Op)), unwrap(LHS), + unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) { + return wrap(unwrap(B)->CreateNeg(unwrap(V), Name)); +} + +LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V, + const char *Name) { + return wrap(unwrap(B)->CreateNSWNeg(unwrap(V), Name)); +} + +LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V, + const char *Name) { + return wrap(unwrap(B)->CreateNUWNeg(unwrap(V), Name)); +} + +LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) { + return wrap(unwrap(B)->CreateFNeg(unwrap(V), Name)); +} + +LLVMValueRef LLVMBuildNot(LLVMBuilderRef B, LLVMValueRef V, const char *Name) { + return wrap(unwrap(B)->CreateNot(unwrap(V), Name)); +} + +/*--.. 
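A sketch of the arithmetic wrappers above (illustrative only, not part of this patch); B is assumed to be a builder already positioned in the body of Fn, whose first two parameters are i32, and LLVMGetParam comes from llvm-c/Core.h:

  #include "llvm-c/Core.h"

  // (a + b) * (a - b): each wrapper returns the value it created, so the
  // results chain directly, just as with IRBuilder in C++.
  static LLVMValueRef buildDiffOfSquares(LLVMBuilderRef B, LLVMValueRef Fn) {
    LLVMValueRef A   = LLVMGetParam(Fn, 0);
    LLVMValueRef Bv  = LLVMGetParam(Fn, 1);
    LLVMValueRef Sum = LLVMBuildAdd(B, A, Bv, "sum");
    LLVMValueRef Dif = LLVMBuildSub(B, A, Bv, "dif");
    return LLVMBuildMul(B, Sum, Dif, "prod");
  }
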
Memory ..............................................................--*/ + +LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty, + const char *Name) { + Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext()); + Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty)); + AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy); + Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(), + ITy, unwrap(Ty), AllocSize, + 0, 0, ""); + return wrap(unwrap(B)->Insert(Malloc, Twine(Name))); +} + +LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty, + LLVMValueRef Val, const char *Name) { + Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext()); + Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty)); + AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy); + Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(), + ITy, unwrap(Ty), AllocSize, + unwrap(Val), 0, ""); + return wrap(unwrap(B)->Insert(Malloc, Twine(Name))); +} + +LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef B, LLVMTypeRef Ty, + const char *Name) { + return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), 0, Name)); +} + +LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef B, LLVMTypeRef Ty, + LLVMValueRef Val, const char *Name) { + return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), unwrap(Val), Name)); +} + +LLVMValueRef LLVMBuildFree(LLVMBuilderRef B, LLVMValueRef PointerVal) { + return wrap(unwrap(B)->Insert( + CallInst::CreateFree(unwrap(PointerVal), unwrap(B)->GetInsertBlock()))); +} + + +LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal, + const char *Name) { + return wrap(unwrap(B)->CreateLoad(unwrap(PointerVal), Name)); +} + +LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val, + LLVMValueRef PointerVal) { + return wrap(unwrap(B)->CreateStore(unwrap(Val), unwrap(PointerVal))); +} + +LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer, + LLVMValueRef *Indices, unsigned NumIndices, + const char *Name) { + ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices); + return wrap(unwrap(B)->CreateGEP(unwrap(Pointer), IdxList, Name)); +} + +LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer, + LLVMValueRef *Indices, unsigned NumIndices, + const char *Name) { + ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices); + return wrap(unwrap(B)->CreateInBoundsGEP(unwrap(Pointer), IdxList, Name)); +} + +LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer, + unsigned Idx, const char *Name) { + return wrap(unwrap(B)->CreateStructGEP(unwrap(Pointer), Idx, Name)); +} + +LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str, + const char *Name) { + return wrap(unwrap(B)->CreateGlobalString(Str, Name)); +} + +LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str, + const char *Name) { + return wrap(unwrap(B)->CreateGlobalStringPtr(Str, Name)); +} + +LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) { + Value *P = unwrap<Value>(MemAccessInst); + if (LoadInst *LI = dyn_cast<LoadInst>(P)) + return LI->isVolatile(); + return cast<StoreInst>(P)->isVolatile(); +} + +void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) { + Value *P = unwrap<Value>(MemAccessInst); + if (LoadInst *LI = dyn_cast<LoadInst>(P)) + return LI->setVolatile(isVolatile); + return cast<StoreInst>(P)->setVolatile(isVolatile); +} + +/*--.. 
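A sketch tying the memory wrappers above together (illustrative only, not part of this patch); it assumes V is an i32 value, B is a builder positioned inside a function body, and LLVMInt32Type comes from llvm-c/Core.h:

  #include "llvm-c/Core.h"

  // Round-trip a value through a fresh stack slot with alloca/store/load.
  static LLVMValueRef spillAndReload(LLVMBuilderRef B, LLVMValueRef V) {
    LLVMValueRef Slot = LLVMBuildAlloca(B, LLVMInt32Type(), "slot");
    LLVMBuildStore(B, V, Slot);
    return LLVMBuildLoad(B, Slot, "reload");
  }
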
Casts ...............................................................--*/ + +LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateTrunc(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildZExt(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateZExt(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildSExt(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateSExt(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildFPToUI(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateFPToUI(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildFPToSI(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateFPToSI(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildUIToFP(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateUIToFP(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildSIToFP(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateSIToFP(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildFPTrunc(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateFPTrunc(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildFPExt(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateFPExt(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildPtrToInt(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreatePtrToInt(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildIntToPtr(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateIntToPtr(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildBitCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateBitCast(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildZExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateZExtOrBitCast(unwrap(Val), unwrap(DestTy), + Name)); +} + +LLVMValueRef LLVMBuildSExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateSExtOrBitCast(unwrap(Val), unwrap(DestTy), + Name)); +} + +LLVMValueRef LLVMBuildTruncOrBitCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateTruncOrBitCast(unwrap(Val), unwrap(DestTy), + Name)); +} + +LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateCast(Instruction::CastOps(map_from_llvmopcode(Op)), unwrap(Val), + unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreatePointerCast(unwrap(Val), unwrap(DestTy), Name)); +} + +LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateIntCast(unwrap(Val), 
unwrap(DestTy), + /*isSigned*/true, Name)); +} + +LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef B, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name) { + return wrap(unwrap(B)->CreateFPCast(unwrap(Val), unwrap(DestTy), Name)); +} + +/*--.. Comparisons .........................................................--*/ + +LLVMValueRef LLVMBuildICmp(LLVMBuilderRef B, LLVMIntPredicate Op, + LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateICmp(static_cast<ICmpInst::Predicate>(Op), + unwrap(LHS), unwrap(RHS), Name)); +} + +LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef B, LLVMRealPredicate Op, + LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name) { + return wrap(unwrap(B)->CreateFCmp(static_cast<FCmpInst::Predicate>(Op), + unwrap(LHS), unwrap(RHS), Name)); +} + +/*--.. Miscellaneous instructions ..........................................--*/ + +LLVMValueRef LLVMBuildPhi(LLVMBuilderRef B, LLVMTypeRef Ty, const char *Name) { + return wrap(unwrap(B)->CreatePHI(unwrap(Ty), 0, Name)); +} + +LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, + LLVMValueRef *Args, unsigned NumArgs, + const char *Name) { + return wrap(unwrap(B)->CreateCall(unwrap(Fn), + makeArrayRef(unwrap(Args), NumArgs), + Name)); +} + +LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If, + LLVMValueRef Then, LLVMValueRef Else, + const char *Name) { + return wrap(unwrap(B)->CreateSelect(unwrap(If), unwrap(Then), unwrap(Else), + Name)); +} + +LLVMValueRef LLVMBuildVAArg(LLVMBuilderRef B, LLVMValueRef List, + LLVMTypeRef Ty, const char *Name) { + return wrap(unwrap(B)->CreateVAArg(unwrap(List), unwrap(Ty), Name)); +} + +LLVMValueRef LLVMBuildExtractElement(LLVMBuilderRef B, LLVMValueRef VecVal, + LLVMValueRef Index, const char *Name) { + return wrap(unwrap(B)->CreateExtractElement(unwrap(VecVal), unwrap(Index), + Name)); +} + +LLVMValueRef LLVMBuildInsertElement(LLVMBuilderRef B, LLVMValueRef VecVal, + LLVMValueRef EltVal, LLVMValueRef Index, + const char *Name) { + return wrap(unwrap(B)->CreateInsertElement(unwrap(VecVal), unwrap(EltVal), + unwrap(Index), Name)); +} + +LLVMValueRef LLVMBuildShuffleVector(LLVMBuilderRef B, LLVMValueRef V1, + LLVMValueRef V2, LLVMValueRef Mask, + const char *Name) { + return wrap(unwrap(B)->CreateShuffleVector(unwrap(V1), unwrap(V2), + unwrap(Mask), Name)); +} + +LLVMValueRef LLVMBuildExtractValue(LLVMBuilderRef B, LLVMValueRef AggVal, + unsigned Index, const char *Name) { + return wrap(unwrap(B)->CreateExtractValue(unwrap(AggVal), Index, Name)); +} + +LLVMValueRef LLVMBuildInsertValue(LLVMBuilderRef B, LLVMValueRef AggVal, + LLVMValueRef EltVal, unsigned Index, + const char *Name) { + return wrap(unwrap(B)->CreateInsertValue(unwrap(AggVal), unwrap(EltVal), + Index, Name)); +} + +LLVMValueRef LLVMBuildIsNull(LLVMBuilderRef B, LLVMValueRef Val, + const char *Name) { + return wrap(unwrap(B)->CreateIsNull(unwrap(Val), Name)); +} + +LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef B, LLVMValueRef Val, + const char *Name) { + return wrap(unwrap(B)->CreateIsNotNull(unwrap(Val), Name)); +} + +LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS, + LLVMValueRef RHS, const char *Name) { + return wrap(unwrap(B)->CreatePtrDiff(unwrap(LHS), unwrap(RHS), Name)); +} + + +/*===-- Module providers --------------------------------------------------===*/ + +LLVMModuleProviderRef +LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M) { + return reinterpret_cast<LLVMModuleProviderRef>(M); +} + +void LLVMDisposeModuleProvider(LLVMModuleProviderRef 
MP) { + delete unwrap(MP); +} + + +/*===-- Memory buffers ----------------------------------------------------===*/ + +LLVMBool LLVMCreateMemoryBufferWithContentsOfFile( + const char *Path, + LLVMMemoryBufferRef *OutMemBuf, + char **OutMessage) { + + OwningPtr<MemoryBuffer> MB; + error_code ec; + if (!(ec = MemoryBuffer::getFile(Path, MB))) { + *OutMemBuf = wrap(MB.take()); + return 0; + } + + *OutMessage = strdup(ec.message().c_str()); + return 1; +} + +LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf, + char **OutMessage) { + OwningPtr<MemoryBuffer> MB; + error_code ec; + if (!(ec = MemoryBuffer::getSTDIN(MB))) { + *OutMemBuf = wrap(MB.take()); + return 0; + } + + *OutMessage = strdup(ec.message().c_str()); + return 1; +} + +LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRange( + const char *InputData, + size_t InputDataLength, + const char *BufferName, + LLVMBool RequiresNullTerminator) { + + return wrap(MemoryBuffer::getMemBuffer( + StringRef(InputData, InputDataLength), + StringRef(BufferName), + RequiresNullTerminator)); +} + +LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRangeCopy( + const char *InputData, + size_t InputDataLength, + const char *BufferName) { + + return wrap(MemoryBuffer::getMemBufferCopy( + StringRef(InputData, InputDataLength), + StringRef(BufferName))); +} + + +void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) { + delete unwrap(MemBuf); +} + +/*===-- Pass Registry -----------------------------------------------------===*/ + +LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void) { + return wrap(PassRegistry::getPassRegistry()); +} + +/*===-- Pass Manager ------------------------------------------------------===*/ + +LLVMPassManagerRef LLVMCreatePassManager() { + return wrap(new PassManager()); +} + +LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) { + return wrap(new FunctionPassManager(unwrap(M))); +} + +LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) { + return LLVMCreateFunctionPassManagerForModule( + reinterpret_cast<LLVMModuleRef>(P)); +} + +LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) { + return unwrap<PassManager>(PM)->run(*unwrap(M)); +} + +LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) { + return unwrap<FunctionPassManager>(FPM)->doInitialization(); +} + +LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) { + return unwrap<FunctionPassManager>(FPM)->run(*unwrap<Function>(F)); +} + +LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) { + return unwrap<FunctionPassManager>(FPM)->doFinalization(); +} + +void LLVMDisposePassManager(LLVMPassManagerRef PM) { + delete unwrap(PM); +} + +/*===-- Threading ------------------------------------------------------===*/ + +LLVMBool LLVMStartMultithreaded() { + return llvm_start_multithreaded(); +} + +void LLVMStopMultithreaded() { + llvm_stop_multithreaded(); +} + +LLVMBool LLVMIsMultithreaded() { + return llvm_is_multithreaded(); +} diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp new file mode 100644 index 0000000000..5ee36abc6b --- /dev/null +++ b/lib/IR/DIBuilder.cpp @@ -0,0 +1,1106 @@ +//===--- DIBuilder.cpp - Debug Information Builder ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements the DIBuilder. +// +//===----------------------------------------------------------------------===// + +#include "llvm/DIBuilder.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/DebugInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Dwarf.h" + +using namespace llvm; +using namespace llvm::dwarf; + +static Constant *GetTagConstant(LLVMContext &VMContext, unsigned Tag) { + assert((Tag & LLVMDebugVersionMask) == 0 && + "Tag too large for debug encoding!"); + return ConstantInt::get(Type::getInt32Ty(VMContext), Tag | LLVMDebugVersion); +} + +DIBuilder::DIBuilder(Module &m) + : M(m), VMContext(M.getContext()), TheCU(0), TempEnumTypes(0), + TempRetainTypes(0), TempSubprograms(0), TempGVs(0), DeclareFn(0), + ValueFn(0) +{} + +/// finalize - Construct any deferred debug info descriptors. +void DIBuilder::finalize() { + DIArray Enums = getOrCreateArray(AllEnumTypes); + DIType(TempEnumTypes).replaceAllUsesWith(Enums); + + DIArray RetainTypes = getOrCreateArray(AllRetainTypes); + DIType(TempRetainTypes).replaceAllUsesWith(RetainTypes); + + DIArray SPs = getOrCreateArray(AllSubprograms); + DIType(TempSubprograms).replaceAllUsesWith(SPs); + for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) { + DISubprogram SP(SPs.getElement(i)); + SmallVector<Value *, 4> Variables; + if (NamedMDNode *NMD = getFnSpecificMDNode(M, SP)) { + for (unsigned ii = 0, ee = NMD->getNumOperands(); ii != ee; ++ii) + Variables.push_back(NMD->getOperand(ii)); + NMD->eraseFromParent(); + } + if (MDNode *Temp = SP.getVariablesNodes()) { + DIArray AV = getOrCreateArray(Variables); + DIType(Temp).replaceAllUsesWith(AV); + } + } + + DIArray GVs = getOrCreateArray(AllGVs); + DIType(TempGVs).replaceAllUsesWith(GVs); +} + +/// getNonCompileUnitScope - If N is compile unit return NULL otherwise return +/// N. +static MDNode *getNonCompileUnitScope(MDNode *N) { + if (DIDescriptor(N).isCompileUnit()) + return NULL; + return N; +} + +/// createCompileUnit - A CompileUnit provides an anchor for all debugging +/// information generated during this instance of compilation. +void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename, + StringRef Directory, StringRef Producer, + bool isOptimized, StringRef Flags, + unsigned RunTimeVer, StringRef SplitName) { + assert(((Lang <= dwarf::DW_LANG_Python && Lang >= dwarf::DW_LANG_C89) || + (Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) && + "Invalid Language tag"); + assert(!Filename.empty() && + "Unable to create compile unit without filename"); + Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; + TempEnumTypes = MDNode::getTemporary(VMContext, TElts); + + TempRetainTypes = MDNode::getTemporary(VMContext, TElts); + + TempSubprograms = MDNode::getTemporary(VMContext, TElts); + + TempGVs = MDNode::getTemporary(VMContext, TElts); + + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_compile_unit), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + ConstantInt::get(Type::getInt32Ty(VMContext), Lang), + MDString::get(VMContext, Filename), + MDString::get(VMContext, Directory), + MDString::get(VMContext, Producer), + // isMain field can be removed when we remove the legacy debug info. 
+ ConstantInt::get(Type::getInt1Ty(VMContext), true), // isMain + ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized), + MDString::get(VMContext, Flags), + ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeVer), + TempEnumTypes, + TempRetainTypes, + TempSubprograms, + TempGVs, + MDString::get(VMContext, SplitName) + }; + TheCU = DICompileUnit(MDNode::get(VMContext, Elts)); + + // Create a named metadata so that it is easier to find cu in a module. + NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu"); + NMD->addOperand(TheCU); +} + +/// createFile - Create a file descriptor to hold debugging information +/// for a file. +DIFile DIBuilder::createFile(StringRef Filename, StringRef Directory) { + assert(TheCU && "Unable to create DW_TAG_file_type without CompileUnit"); + assert(!Filename.empty() && "Unable to create file without name"); + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_file_type), + MDString::get(VMContext, Filename), + MDString::get(VMContext, Directory), + NULL // TheCU + }; + return DIFile(MDNode::get(VMContext, Elts)); +} + +/// createEnumerator - Create a single enumerator value. +DIEnumerator DIBuilder::createEnumerator(StringRef Name, uint64_t Val) { + assert(!Name.empty() && "Unable to create enumerator without name"); + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_enumerator), + MDString::get(VMContext, Name), + ConstantInt::get(Type::getInt64Ty(VMContext), Val) + }; + return DIEnumerator(MDNode::get(VMContext, Elts)); +} + +/// createNullPtrType - Create C++0x nullptr type. +DIType DIBuilder::createNullPtrType(StringRef Name) { + assert(!Name.empty() && "Unable to create type without name"); + // nullptr is encoded in DIBasicType format. Line number, filename, + // ,size, alignment, offset and flags are always empty here. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_unspecified_type), + NULL, //TheCU, + MDString::get(VMContext, Name), + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags; + ConstantInt::get(Type::getInt32Ty(VMContext), 0) // Encoding + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createBasicType - Create debugging information entry for a basic +/// type, e.g 'char'. +DIBasicType +DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits, + uint64_t AlignInBits, unsigned Encoding) { + assert(!Name.empty() && "Unable to create type without name"); + // Basic types are encoded in DIBasicType format. Line number, filename, + // offset and flags are always empty here. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_base_type), + NULL, //TheCU, + MDString::get(VMContext, Name), + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags; + ConstantInt::get(Type::getInt32Ty(VMContext), Encoding) + }; + return DIBasicType(MDNode::get(VMContext, Elts)); +} + +/// createQualifiedType - Create debugging information entry for a qualified +/// type, e.g. 'const int'. 
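+// An illustrative sketch (not part of this patch) of how a front end would
+// typically drive the entry points above: create the compile unit once, then
+// files and basic types, and call finalize() before the module is emitted.
+// The file names and the wrapper function are hypothetical.
+//
+//   #include "llvm/DIBuilder.h"
+//   #include "llvm/IR/Module.h"
+//   #include "llvm/Support/Dwarf.h"
+//   using namespace llvm;
+//
+//   static void emitExampleDebugInfo(Module &M) {
+//     DIBuilder DIB(M);
+//     DIB.createCompileUnit(dwarf::DW_LANG_C99, "example.c", "/tmp",
+//                           "example-frontend", /*isOptimized=*/false,
+//                           /*Flags=*/"", /*RunTimeVer=*/0, /*SplitName=*/"");
+//     DIFile File = DIB.createFile("example.c", "/tmp");
+//     DIBasicType IntTy =
+//         DIB.createBasicType("int", 32, 32, dwarf::DW_ATE_signed);
+//     (void)File; (void)IntTy;  // consumed by later create* calls
+//     DIB.finalize();           // resolves the temporary list nodes
+//   }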
+DIDerivedType DIBuilder::createQualifiedType(unsigned Tag, DIType FromTy) { + // Qualified types are encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, Tag), + NULL, //TheCU, + MDString::get(VMContext, StringRef()), // Empty name. + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + FromTy + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createPointerType - Create debugging information entry for a pointer. +DIDerivedType +DIBuilder::createPointerType(DIType PointeeTy, uint64_t SizeInBits, + uint64_t AlignInBits, StringRef Name) { + // Pointer types are encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_pointer_type), + NULL, //TheCU, + MDString::get(VMContext, Name), + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + PointeeTy + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +DIDerivedType DIBuilder::createMemberPointerType(DIType PointeeTy, DIType Base) { + // Pointer types are encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_ptr_to_member_type), + NULL, //TheCU, + NULL, + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + PointeeTy, + Base + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createReferenceType - Create debugging information entry for a reference +/// type. +DIDerivedType DIBuilder::createReferenceType(unsigned Tag, DIType RTy) { + assert(RTy.Verify() && "Unable to create reference type"); + // References are encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, Tag), + NULL, // TheCU, + NULL, // Name + NULL, // Filename + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + RTy + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createTypedef - Create debugging information entry for a typedef. +DIDerivedType DIBuilder::createTypedef(DIType Ty, StringRef Name, DIFile File, + unsigned LineNo, DIDescriptor Context) { + // typedefs are encoded in DIDerivedType format. 
+ assert(Ty.Verify() && "Invalid typedef type!"); + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_typedef), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + Ty + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createFriend - Create debugging information entry for a 'friend'. +DIType DIBuilder::createFriend(DIType Ty, DIType FriendTy) { + // typedefs are encoded in DIDerivedType format. + assert(Ty.Verify() && "Invalid type!"); + assert(FriendTy.Verify() && "Invalid friend type!"); + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_friend), + Ty, + NULL, // Name + Ty.getFile(), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags + FriendTy + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createInheritance - Create debugging information entry to establish +/// inheritance relationship between two types. +DIDerivedType DIBuilder::createInheritance( + DIType Ty, DIType BaseTy, uint64_t BaseOffset, unsigned Flags) { + assert(Ty.Verify() && "Unable to create inheritance"); + // TAG_inheritance is encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_inheritance), + Ty, + NULL, // Name + Ty.getFile(), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size + ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align + ConstantInt::get(Type::getInt64Ty(VMContext), BaseOffset), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + BaseTy + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createMemberType - Create debugging information entry for a member. +DIDerivedType DIBuilder::createMemberType( + DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, + unsigned Flags, DIType Ty) { + // TAG_member is encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_member), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + Ty + }; + return DIDerivedType(MDNode::get(VMContext, Elts)); +} + +/// createStaticMemberType - Create debugging information entry for a +/// C++ static data member. +DIType DIBuilder::createStaticMemberType(DIDescriptor Scope, StringRef Name, + DIFile File, unsigned LineNumber, + DIType Ty, unsigned Flags, + llvm::Value *Val) { + // TAG_member is encoded in DIDerivedType format. 
+ Flags |= DIDescriptor::FlagStaticMember; + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_member), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), 0/*SizeInBits*/), + ConstantInt::get(Type::getInt64Ty(VMContext), 0/*AlignInBits*/), + ConstantInt::get(Type::getInt64Ty(VMContext), 0/*OffsetInBits*/), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + Ty, + Val + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createObjCIVar - Create debugging information entry for Objective-C +/// instance variable. +DIType DIBuilder::createObjCIVar(StringRef Name, + DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + DIType Ty, StringRef PropertyName, + StringRef GetterName, StringRef SetterName, + unsigned PropertyAttributes) { + // TAG_member is encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_member), + getNonCompileUnitScope(File), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + Ty, + MDString::get(VMContext, PropertyName), + MDString::get(VMContext, GetterName), + MDString::get(VMContext, SetterName), + ConstantInt::get(Type::getInt32Ty(VMContext), PropertyAttributes) + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createObjCIVar - Create debugging information entry for Objective-C +/// instance variable. +DIType DIBuilder::createObjCIVar(StringRef Name, + DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + DIType Ty, MDNode *PropertyNode) { + // TAG_member is encoded in DIDerivedType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_member), + getNonCompileUnitScope(File), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + Ty, + PropertyNode + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createObjCProperty - Create debugging information entry for Objective-C +/// property. +DIObjCProperty DIBuilder::createObjCProperty(StringRef Name, + DIFile File, unsigned LineNumber, + StringRef GetterName, + StringRef SetterName, + unsigned PropertyAttributes, + DIType Ty) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_APPLE_property), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + MDString::get(VMContext, GetterName), + MDString::get(VMContext, SetterName), + ConstantInt::get(Type::getInt32Ty(VMContext), PropertyAttributes), + Ty + }; + return DIObjCProperty(MDNode::get(VMContext, Elts)); +} + +/// createTemplateTypeParameter - Create debugging information for template +/// type parameter. 
+DITemplateTypeParameter +DIBuilder::createTemplateTypeParameter(DIDescriptor Context, StringRef Name, + DIType Ty, MDNode *File, unsigned LineNo, + unsigned ColumnNo) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_template_type_parameter), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + Ty, + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), + ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo) + }; + return DITemplateTypeParameter(MDNode::get(VMContext, Elts)); +} + +/// createTemplateValueParameter - Create debugging information for template +/// value parameter. +DITemplateValueParameter +DIBuilder::createTemplateValueParameter(DIDescriptor Context, StringRef Name, + DIType Ty, uint64_t Val, + MDNode *File, unsigned LineNo, + unsigned ColumnNo) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_template_value_parameter), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + Ty, + ConstantInt::get(Type::getInt64Ty(VMContext), Val), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), + ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo) + }; + return DITemplateValueParameter(MDNode::get(VMContext, Elts)); +} + +/// createClassType - Create debugging information entry for a class. +DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name, + DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + DIType DerivedFrom, DIArray Elements, + MDNode *VTableHolder, + MDNode *TemplateParams) { + assert((!Context || Context.Verify()) && + "createClassType should be called with a valid Context"); + // TAG_class_type is encoded in DICompositeType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_class_type), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), OffsetInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + DerivedFrom, + Elements, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + VTableHolder, + TemplateParams + }; + DIType R(MDNode::get(VMContext, Elts)); + assert(R.Verify() && "createClassType should return a verifiable DIType"); + return R; +} + +/// createStructType - Create debugging information entry for a struct. +DICompositeType DIBuilder::createStructType(DIDescriptor Context, + StringRef Name, DIFile File, + unsigned LineNumber, + uint64_t SizeInBits, + uint64_t AlignInBits, + unsigned Flags, DIType DerivedFrom, + DIArray Elements, + unsigned RunTimeLang, + MDNode *VTableHolder) { + // TAG_structure_type is encoded in DICompositeType format. 
+ Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_structure_type), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + DerivedFrom, + Elements, + ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang), + VTableHolder, + NULL, + }; + DICompositeType R(MDNode::get(VMContext, Elts)); + assert(R.Verify() && "createStructType should return a verifiable DIType"); + return R; +} + +/// createUnionType - Create debugging information entry for an union. +DICompositeType DIBuilder::createUnionType( + DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags, DIArray Elements, + unsigned RunTimeLang) { + // TAG_union_type is encoded in DICompositeType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_union_type), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + NULL, + Elements, + ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + return DICompositeType(MDNode::get(VMContext, Elts)); +} + +/// createSubroutineType - Create subroutine type. +DICompositeType +DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) { + // TAG_subroutine_type is encoded in DICompositeType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_subroutine_type), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + MDString::get(VMContext, ""), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + NULL, + ParameterTypes, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + return DICompositeType(MDNode::get(VMContext, Elts)); +} + +/// createEnumerationType - Create debugging information entry for an +/// enumeration. +DICompositeType DIBuilder::createEnumerationType( + DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, DIArray Elements, + DIType ClassType) { + // TAG_enumeration_type is encoded in DICompositeType format. 
+ Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ClassType, + Elements, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + MDNode *Node = MDNode::get(VMContext, Elts); + AllEnumTypes.push_back(Node); + return DICompositeType(Node); +} + +/// createArrayType - Create debugging information entry for an array. +DICompositeType DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits, + DIType Ty, DIArray Subscripts) { + // TAG_array_type is encoded in DICompositeType format. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_array_type), + NULL, //TheCU, + MDString::get(VMContext, ""), + NULL, //TheCU, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), Size), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + Ty, + Subscripts, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + return DICompositeType(MDNode::get(VMContext, Elts)); +} + +/// createVectorType - Create debugging information entry for a vector. +DIType DIBuilder::createVectorType(uint64_t Size, uint64_t AlignInBits, + DIType Ty, DIArray Subscripts) { + + // A vector is an array type with the FlagVector flag applied. + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_array_type), + NULL, //TheCU, + MDString::get(VMContext, ""), + NULL, //TheCU, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), Size), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), DIType::FlagVector), + Ty, + Subscripts, + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createArtificialType - Create a new DIType with "artificial" flag set. +DIType DIBuilder::createArtificialType(DIType Ty) { + if (Ty.isArtificial()) + return Ty; + + SmallVector<Value *, 9> Elts; + MDNode *N = Ty; + assert (N && "Unexpected input DIType!"); + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) + Elts.push_back(V); + else + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + } + + unsigned CurFlags = Ty.getFlags(); + CurFlags = CurFlags | DIType::FlagArtificial; + + // Flags are stored at this slot. + Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags); + + return DIType(MDNode::get(VMContext, Elts)); +} + +/// createObjectPointerType - Create a new type with both the object pointer +/// and artificial flags set. 
+DIType DIBuilder::createObjectPointerType(DIType Ty) { + if (Ty.isObjectPointer()) + return Ty; + + SmallVector<Value *, 9> Elts; + MDNode *N = Ty; + assert (N && "Unexpected input DIType!"); + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) + Elts.push_back(V); + else + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + } + + unsigned CurFlags = Ty.getFlags(); + CurFlags = CurFlags | (DIType::FlagObjectPointer | DIType::FlagArtificial); + + // Flags are stored at this slot. + Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags); + + return DIType(MDNode::get(VMContext, Elts)); +} + +/// retainType - Retain DIType in a module even if it is not referenced +/// through debug info anchors. +void DIBuilder::retainType(DIType T) { + AllRetainTypes.push_back(T); +} + +/// createUnspecifiedParameter - Create unspeicified type descriptor +/// for the subroutine type. +DIDescriptor DIBuilder::createUnspecifiedParameter() { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_unspecified_parameters) + }; + return DIDescriptor(MDNode::get(VMContext, Elts)); +} + +/// createTemporaryType - Create a temporary forward-declared type. +DIType DIBuilder::createTemporaryType() { + // Give the temporary MDNode a tag. It doesn't matter what tag we + // use here as long as DIType accepts it. + Value *Elts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; + MDNode *Node = MDNode::getTemporary(VMContext, Elts); + return DIType(Node); +} + +/// createTemporaryType - Create a temporary forward-declared type. +DIType DIBuilder::createTemporaryType(DIFile F) { + // Give the temporary MDNode a tag. It doesn't matter what tag we + // use here as long as DIType accepts it. + Value *Elts[] = { + GetTagConstant(VMContext, DW_TAG_base_type), + TheCU, + NULL, + F + }; + MDNode *Node = MDNode::getTemporary(VMContext, Elts); + return DIType(Node); +} + +/// createForwardDecl - Create a temporary forward-declared type that +/// can be RAUW'd if the full type is seen. +DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, + DIDescriptor Scope, DIFile F, + unsigned Line, unsigned RuntimeLang, + uint64_t SizeInBits, + uint64_t AlignInBits) { + // Create a temporary MDNode. + Value *Elts[] = { + GetTagConstant(VMContext, Tag), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + F, + ConstantInt::get(Type::getInt32Ty(VMContext), Line), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), + DIDescriptor::FlagFwdDecl), + NULL, + DIArray(), + ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang) + }; + MDNode *Node = MDNode::getTemporary(VMContext, Elts); + assert(DIType(Node).Verify() && + "createForwardDecl result should be verifiable"); + return DIType(Node); +} + +/// getOrCreateArray - Get a DIArray, create one if required. +DIArray DIBuilder::getOrCreateArray(ArrayRef<Value *> Elements) { + if (Elements.empty()) { + Value *Null = Constant::getNullValue(Type::getInt32Ty(VMContext)); + return DIArray(MDNode::get(VMContext, Null)); + } + return DIArray(MDNode::get(VMContext, Elements)); +} + +/// getOrCreateSubrange - Create a descriptor for a value range. This +/// implicitly uniques the values returned. 
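+// An illustrative sketch (not part of this patch) combining getOrCreateArray
+// above with getOrCreateSubrange below to describe a fixed-size C array such
+// as 'int a[10]'; DIB and IntTy are assumed to be the surrounding DIBuilder
+// and a basic type from createBasicType.
+//
+//   Value *Sub = DIB.getOrCreateSubrange(0, 10);   // ten elements, 0..9
+//   DIArray Subscripts = DIB.getOrCreateArray(Sub);
+//   DICompositeType ArrayTy =
+//       DIB.createArrayType(/*Size=*/320, /*AlignInBits=*/32,
+//                           IntTy, Subscripts);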
+DISubrange DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Count) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_subrange_type), + ConstantInt::get(Type::getInt64Ty(VMContext), Lo), + ConstantInt::get(Type::getInt64Ty(VMContext), Count) + }; + + return DISubrange(MDNode::get(VMContext, Elts)); +} + +/// createGlobalVariable - Create a new descriptor for the specified global. +DIGlobalVariable DIBuilder:: +createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber, + DIType Ty, bool isLocalToUnit, Value *Val) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_variable), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + NULL, // TheCU, + MDString::get(VMContext, Name), + MDString::get(VMContext, Name), + MDString::get(VMContext, Name), + F, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + Ty, + ConstantInt::get(Type::getInt32Ty(VMContext), isLocalToUnit), + ConstantInt::get(Type::getInt32Ty(VMContext), 1), /* isDefinition*/ + Val, + DIDescriptor() + }; + MDNode *Node = MDNode::get(VMContext, Elts); + AllGVs.push_back(Node); + return DIGlobalVariable(Node); +} + +/// createStaticVariable - Create a new descriptor for the specified static +/// variable. +DIGlobalVariable DIBuilder:: +createStaticVariable(DIDescriptor Context, StringRef Name, + StringRef LinkageName, DIFile F, unsigned LineNumber, + DIType Ty, bool isLocalToUnit, Value *Val, MDNode *Decl) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_variable), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + MDString::get(VMContext, Name), + MDString::get(VMContext, LinkageName), + F, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), + Ty, + ConstantInt::get(Type::getInt32Ty(VMContext), isLocalToUnit), + ConstantInt::get(Type::getInt32Ty(VMContext), 1), /* isDefinition*/ + Val, + DIDescriptor(Decl) + }; + MDNode *Node = MDNode::get(VMContext, Elts); + AllGVs.push_back(Node); + return DIGlobalVariable(Node); +} + +/// createVariable - Create a new descriptor for the specified variable. +DIVariable DIBuilder::createLocalVariable(unsigned Tag, DIDescriptor Scope, + StringRef Name, DIFile File, + unsigned LineNo, DIType Ty, + bool AlwaysPreserve, unsigned Flags, + unsigned ArgNo) { + DIDescriptor Context(getNonCompileUnitScope(Scope)); + assert((!Context || Context.Verify()) && + "createLocalVariable should be called with a valid Context"); + assert(Ty.Verify() && + "createLocalVariable should be called with a valid type"); + Value *Elts[] = { + GetTagConstant(VMContext, Tag), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), (LineNo | (ArgNo << 24))), + Ty, + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + Constant::getNullValue(Type::getInt32Ty(VMContext)) + }; + MDNode *Node = MDNode::get(VMContext, Elts); + if (AlwaysPreserve) { + // The optimizer may remove local variable. If there is an interest + // to preserve variable info in such situation then stash it in a + // named mdnode. + DISubprogram Fn(getDISubprogram(Scope)); + NamedMDNode *FnLocals = getOrInsertFnSpecificMDNode(M, Fn); + FnLocals->addOperand(Node); + } + assert(DIVariable(Node).Verify() && + "createLocalVariable should return a verifiable DIVariable"); + return DIVariable(Node); +} + +/// createComplexVariable - Create a new descriptor for the specified variable +/// which has a complex address expression for its address. 
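+// An illustrative sketch (not part of this patch) of describing a local
+// variable with createLocalVariable above; DIB, SP, File and IntTy are
+// assumed to be the builder plus a DISubprogram, DIFile and DIType created
+// earlier with it.
+//
+//   DIVariable Var =
+//       DIB.createLocalVariable(dwarf::DW_TAG_auto_variable, SP, "x", File,
+//                               /*LineNo=*/7, IntTy,
+//                               /*AlwaysPreserve=*/true, /*Flags=*/0,
+//                               /*ArgNo=*/0);
+//   // The result is normally handed to insertDeclare (see below) so the
+//   // variable's storage is visible to the debugger.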
+DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope, + StringRef Name, DIFile F, + unsigned LineNo, + DIType Ty, ArrayRef<Value *> Addr, + unsigned ArgNo) { + SmallVector<Value *, 15> Elts; + Elts.push_back(GetTagConstant(VMContext, Tag)); + Elts.push_back(getNonCompileUnitScope(Scope)), + Elts.push_back(MDString::get(VMContext, Name)); + Elts.push_back(F); + Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), + (LineNo | (ArgNo << 24)))); + Elts.push_back(Ty); + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + Elts.append(Addr.begin(), Addr.end()); + + return DIVariable(MDNode::get(VMContext, Elts)); +} + +/// createFunction - Create a new descriptor for the specified function. +DISubprogram DIBuilder::createFunction(DIDescriptor Context, + StringRef Name, + StringRef LinkageName, + DIFile File, unsigned LineNo, + DIType Ty, + bool isLocalToUnit, bool isDefinition, + unsigned ScopeLine, + unsigned Flags, bool isOptimized, + Function *Fn, + MDNode *TParams, + MDNode *Decl) { + Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_subprogram), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + MDString::get(VMContext, Name), + MDString::get(VMContext, LinkageName), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), + Ty, + ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit), + ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + NULL, + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized), + Fn, + TParams, + Decl, + MDNode::getTemporary(VMContext, TElts), + ConstantInt::get(Type::getInt32Ty(VMContext), ScopeLine) + }; + MDNode *Node = MDNode::get(VMContext, Elts); + + // Create a named metadata so that we do not lose this mdnode. + if (isDefinition) + AllSubprograms.push_back(Node); + return DISubprogram(Node); +} + +/// createMethod - Create a new descriptor for the specified C++ method. 
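For illustration only (editorial note, not part of this commit): createMethod, defined just below, is createFunction's counterpart for C++ member functions; VK carries the DWARF virtuality and VIndex the vtable slot. A hedged sketch, assuming DIB is a DIBuilder and ClassTy, SubroutineTy and File are previously built descriptors:

    // A virtual method 'Shape::draw()' occupying vtable slot 3.
    DISubprogram Method =
        DIB.createMethod(ClassTy, "draw", "_ZN5Shape4drawEv", File, /*LineNo=*/10,
                         SubroutineTy, /*isLocalToUnit=*/false,
                         /*isDefinition=*/false, dwarf::DW_VIRTUALITY_virtual,
                         /*VIndex=*/3, /*VTableHolder=*/ClassTy, /*Flags=*/0,
                         /*isOptimized=*/false, /*Fn=*/0, /*TParam=*/0);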
+DISubprogram DIBuilder::createMethod(DIDescriptor Context, + StringRef Name, + StringRef LinkageName, + DIFile F, + unsigned LineNo, DIType Ty, + bool isLocalToUnit, + bool isDefinition, + unsigned VK, unsigned VIndex, + MDNode *VTableHolder, + unsigned Flags, + bool isOptimized, + Function *Fn, + MDNode *TParam) { + Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_subprogram), + Constant::getNullValue(Type::getInt32Ty(VMContext)), + getNonCompileUnitScope(Context), + MDString::get(VMContext, Name), + MDString::get(VMContext, Name), + MDString::get(VMContext, LinkageName), + F, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), + Ty, + ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit), + ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition), + ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK), + ConstantInt::get(Type::getInt32Ty(VMContext), VIndex), + VTableHolder, + ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized), + Fn, + TParam, + Constant::getNullValue(Type::getInt32Ty(VMContext)), + MDNode::getTemporary(VMContext, TElts), + // FIXME: Do we want to use different scope/lines? + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo) + }; + MDNode *Node = MDNode::get(VMContext, Elts); + if (isDefinition) + AllSubprograms.push_back(Node); + return DISubprogram(Node); +} + +/// createNameSpace - This creates new descriptor for a namespace +/// with the specified parent scope. +DINameSpace DIBuilder::createNameSpace(DIDescriptor Scope, StringRef Name, + DIFile File, unsigned LineNo) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_namespace), + getNonCompileUnitScope(Scope), + MDString::get(VMContext, Name), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo) + }; + DINameSpace R(MDNode::get(VMContext, Elts)); + assert(R.Verify() && + "createNameSpace should return a verifiable DINameSpace"); + return R; +} + +/// createLexicalBlockFile - This creates a new MDNode that encapsulates +/// an existing scope with a new filename. +DILexicalBlockFile DIBuilder::createLexicalBlockFile(DIDescriptor Scope, + DIFile File) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block), + Scope, + File + }; + DILexicalBlockFile R(MDNode::get(VMContext, Elts)); + assert( + R.Verify() && + "createLexicalBlockFile should return a verifiable DILexicalBlockFile"); + return R; +} + +DILexicalBlock DIBuilder::createLexicalBlock(DIDescriptor Scope, DIFile File, + unsigned Line, unsigned Col) { + // Defeat MDNode uniqing for lexical blocks by using unique id. + static unsigned int unique_id = 0; + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block), + getNonCompileUnitScope(Scope), + ConstantInt::get(Type::getInt32Ty(VMContext), Line), + ConstantInt::get(Type::getInt32Ty(VMContext), Col), + File, + ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++) + }; + DILexicalBlock R(MDNode::get(VMContext, Elts)); + assert(R.Verify() && + "createLexicalBlock should return a verifiable DILexicalBlock"); + return R; +} + +/// insertDeclare - Insert a new llvm.dbg.declare intrinsic call. 
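For illustration only (editorial note, not part of this commit): insertDeclare, defined just below, is how a front end ties a stack slot to the DIVariable describing it. A minimal sketch, assuming DIB is a DIBuilder, Alloca an AllocaInst*, and DbgVar a DIVariable created with createLocalVariable above:

    // Emit the llvm.dbg.declare at the end of the alloca's block; the overload
    // below re-positions it before the terminator if one already exists.
    DIB.insertDeclare(Alloca, DbgVar, Alloca->getParent());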
+Instruction *DIBuilder::insertDeclare(Value *Storage, DIVariable VarInfo, + Instruction *InsertBefore) { + assert(Storage && "no storage passed to dbg.declare"); + assert(VarInfo.Verify() && "empty DIVariable passed to dbg.declare"); + if (!DeclareFn) + DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); + + Value *Args[] = { MDNode::get(Storage->getContext(), Storage), VarInfo }; + return CallInst::Create(DeclareFn, Args, "", InsertBefore); +} + +/// insertDeclare - Insert a new llvm.dbg.declare intrinsic call. +Instruction *DIBuilder::insertDeclare(Value *Storage, DIVariable VarInfo, + BasicBlock *InsertAtEnd) { + assert(Storage && "no storage passed to dbg.declare"); + assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.declare"); + if (!DeclareFn) + DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); + + Value *Args[] = { MDNode::get(Storage->getContext(), Storage), VarInfo }; + + // If this block already has a terminator then insert this intrinsic + // before the terminator. + if (TerminatorInst *T = InsertAtEnd->getTerminator()) + return CallInst::Create(DeclareFn, Args, "", T); + else + return CallInst::Create(DeclareFn, Args, "", InsertAtEnd); +} + +/// insertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call. +Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V, uint64_t Offset, + DIVariable VarInfo, + Instruction *InsertBefore) { + assert(V && "no value passed to dbg.value"); + assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.value"); + if (!ValueFn) + ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value); + + Value *Args[] = { MDNode::get(V->getContext(), V), + ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset), + VarInfo }; + return CallInst::Create(ValueFn, Args, "", InsertBefore); +} + +/// insertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call. +Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V, uint64_t Offset, + DIVariable VarInfo, + BasicBlock *InsertAtEnd) { + assert(V && "no value passed to dbg.value"); + assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.value"); + if (!ValueFn) + ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value); + + Value *Args[] = { MDNode::get(V->getContext(), V), + ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset), + VarInfo }; + return CallInst::Create(ValueFn, Args, "", InsertAtEnd); +} diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp new file mode 100644 index 0000000000..f09de3a731 --- /dev/null +++ b/lib/IR/DataLayout.cpp @@ -0,0 +1,725 @@ +//===-- DataLayout.cpp - Data size & alignment routines --------------------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines layout properties related to datatype size/offset/alignment +// information. +// +// This structure should be created once, filled in if the defaults are not +// correct and then passed around by const&. None of the members functions +// require modification to the object. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/DataLayout.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/Mutex.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdlib> +using namespace llvm; + +// Handle the Pass registration stuff necessary to use DataLayout's. + +// Register the default SparcV9 implementation... +INITIALIZE_PASS(DataLayout, "datalayout", "Data Layout", false, true) +char DataLayout::ID = 0; + +//===----------------------------------------------------------------------===// +// Support for StructLayout +//===----------------------------------------------------------------------===// + +StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { + assert(!ST->isOpaque() && "Cannot get layout of opaque structs"); + StructAlignment = 0; + StructSize = 0; + NumElements = ST->getNumElements(); + + // Loop over each of the elements, placing them in memory. + for (unsigned i = 0, e = NumElements; i != e; ++i) { + Type *Ty = ST->getElementType(i); + unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty); + + // Add padding if necessary to align the data element properly. + if ((StructSize & (TyAlign-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, TyAlign); + + // Keep track of maximum alignment constraint. + StructAlignment = std::max(TyAlign, StructAlignment); + + MemberOffsets[i] = StructSize; + StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item + } + + // Empty structures have alignment of 1 byte. + if (StructAlignment == 0) StructAlignment = 1; + + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if ((StructSize & (StructAlignment-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, StructAlignment); +} + + +/// getElementContainingOffset - Given a valid offset into the structure, +/// return the structure index that contains it. +unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const { + const uint64_t *SI = + std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset); + assert(SI != &MemberOffsets[0] && "Offset not in structure type!"); + --SI; + assert(*SI <= Offset && "upper_bound didn't work"); + assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) && + (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) && + "Upper bound didn't work!"); + + // Multiple fields can have the same offset if any of them are zero sized. + // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop + // at the i32 element, because it is the last element at that offset. This is + // the right one to return, because anything after it will have a higher + // offset, implying that this element is non-empty. 
+ return SI-&MemberOffsets[0]; +} + +//===----------------------------------------------------------------------===// +// LayoutAlignElem, LayoutAlign support +//===----------------------------------------------------------------------===// + +LayoutAlignElem +LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + LayoutAlignElem retval; + retval.AlignType = align_type; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const { + return (AlignType == rhs.AlignType + && ABIAlign == rhs.ABIAlign + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const LayoutAlignElem +DataLayout::InvalidAlignmentElem = LayoutAlignElem::get(INVALID_ALIGN, 0, 0, 0); + +//===----------------------------------------------------------------------===// +// PointerAlignElem, PointerAlign support +//===----------------------------------------------------------------------===// + +PointerAlignElem +PointerAlignElem::get(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + PointerAlignElem retval; + retval.AddressSpace = addr_space; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +PointerAlignElem::operator==(const PointerAlignElem &rhs) const { + return (ABIAlign == rhs.ABIAlign + && AddressSpace == rhs.AddressSpace + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const PointerAlignElem +DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U); + +//===----------------------------------------------------------------------===// +// DataLayout Class Implementation +//===----------------------------------------------------------------------===// + +void DataLayout::init(StringRef Desc) { + initializeDataLayoutPass(*PassRegistry::getPassRegistry()); + + LayoutMap = 0; + LittleEndian = false; + StackNaturalAlign = 0; + + // Default alignments + setAlignment(INTEGER_ALIGN, 1, 1, 1); // i1 + setAlignment(INTEGER_ALIGN, 1, 1, 8); // i8 + setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16 + setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32 + setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64 + setAlignment(FLOAT_ALIGN, 2, 2, 16); // half + setAlignment(FLOAT_ALIGN, 4, 4, 32); // float + setAlignment(FLOAT_ALIGN, 8, 8, 64); // double + setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ... + setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ... + setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ... + setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct + setPointerAlignment(0, 8, 8, 8); + + parseSpecifier(Desc); +} + +/// Checked version of split, to ensure mandatory subparts. +static std::pair<StringRef, StringRef> split(StringRef Str, char Separator) { + assert(!Str.empty() && "parse error, string can't be empty here"); + std::pair<StringRef, StringRef> Split = Str.split(Separator); + assert((!Split.second.empty() || Split.first == Str) && + "a trailing separator is not allowed"); + return Split; +} + +/// Get an unsinged integer, including error checks. 
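For illustration only (editorial note, not part of this commit): a worked example of how getInt and the inBytes helper below are used while parsing a token such as "i64:64:64" from a datalayout string:

    unsigned BitSize  = getInt("64");           // 64, the bit width of the type
    unsigned ABIAlign = inBytes(getInt("64"));  // 8, the alignment converted to bytes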
+static unsigned getInt(StringRef R) { + unsigned Result; + bool error = R.getAsInteger(10, Result); (void)error; + assert(!error && "not a number, or does not fit in an unsigned int"); + return Result; +} + +/// Convert bits into bytes. Assert if not a byte width multiple. +static unsigned inBytes(unsigned Bits) { + assert(Bits % 8 == 0 && "number of bits must be a byte width multiple"); + return Bits / 8; +} + +void DataLayout::parseSpecifier(StringRef Desc) { + + while (!Desc.empty()) { + + // Split at '-'. + std::pair<StringRef, StringRef> Split = split(Desc, '-'); + Desc = Split.second; + + // Split at ':'. + Split = split(Split.first, ':'); + + // Aliases used below. + StringRef &Tok = Split.first; // Current token. + StringRef &Rest = Split.second; // The rest of the string. + + char Specifier = Tok.front(); + Tok = Tok.substr(1); + + switch (Specifier) { + case 'E': + LittleEndian = false; + break; + case 'e': + LittleEndian = true; + break; + case 'p': { + // Address space. + unsigned AddrSpace = Tok.empty() ? 0 : getInt(Tok); + assert(AddrSpace < 1 << 24 && + "Invalid address space, must be a 24bit integer"); + + // Size. + Split = split(Rest, ':'); + unsigned PointerMemSize = inBytes(getInt(Tok)); + + // ABI alignment. + Split = split(Rest, ':'); + unsigned PointerABIAlign = inBytes(getInt(Tok)); + + // Preferred alignment. + unsigned PointerPrefAlign = PointerABIAlign; + if (!Rest.empty()) { + Split = split(Rest, ':'); + PointerPrefAlign = inBytes(getInt(Tok)); + } + + setPointerAlignment(AddrSpace, PointerABIAlign, PointerPrefAlign, + PointerMemSize); + break; + } + case 'i': + case 'v': + case 'f': + case 'a': + case 's': { + AlignTypeEnum AlignType; + switch (Specifier) { + default: + case 'i': AlignType = INTEGER_ALIGN; break; + case 'v': AlignType = VECTOR_ALIGN; break; + case 'f': AlignType = FLOAT_ALIGN; break; + case 'a': AlignType = AGGREGATE_ALIGN; break; + case 's': AlignType = STACK_ALIGN; break; + } + + // Bit size. + unsigned Size = Tok.empty() ? 0 : getInt(Tok); + + // ABI alignment. + Split = split(Rest, ':'); + unsigned ABIAlign = inBytes(getInt(Tok)); + + // Preferred alignment. + unsigned PrefAlign = ABIAlign; + if (!Rest.empty()) { + Split = split(Rest, ':'); + PrefAlign = inBytes(getInt(Tok)); + } + + setAlignment(AlignType, ABIAlign, PrefAlign, Size); + + break; + } + case 'n': // Native integer types. + for (;;) { + unsigned Width = getInt(Tok); + assert(Width != 0 && "width must be non-zero"); + LegalIntWidths.push_back(Width); + if (Rest.empty()) + break; + Split = split(Rest, ':'); + } + break; + case 'S': { // Stack natural alignment. + StackNaturalAlign = inBytes(getInt(Tok)); + break; + } + default: + llvm_unreachable("Unknown specifier in datalayout string"); + break; + } + } +} + +/// Default ctor. +/// +/// @note This has to exist, because this is a pass, but it should never be +/// used. +DataLayout::DataLayout() : ImmutablePass(ID) { + report_fatal_error("Bad DataLayout ctor used. 
" + "Tool did not specify a DataLayout to use?"); +} + +DataLayout::DataLayout(const Module *M) + : ImmutablePass(ID) { + init(M->getDataLayout()); +} + +void +DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield"); + assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield"); + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)align_type && + Alignments[i].TypeBitWidth == bit_width) { + // Update the abi, preferred alignments. + Alignments[i].ABIAlign = abi_align; + Alignments[i].PrefAlign = pref_align; + return; + } + } + + Alignments.push_back(LayoutAlignElem::get(align_type, abi_align, + pref_align, bit_width)); +} + +void +DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + DenseMap<unsigned,PointerAlignElem>::iterator val = Pointers.find(addr_space); + if (val == Pointers.end()) { + Pointers[addr_space] = PointerAlignElem::get(addr_space, + abi_align, pref_align, bit_width); + } else { + val->second.ABIAlign = abi_align; + val->second.PrefAlign = pref_align; + val->second.TypeBitWidth = bit_width; + } +} + +/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or +/// preferred if ABIInfo = false) the layout wants for the specified datatype. +unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, + uint32_t BitWidth, bool ABIInfo, + Type *Ty) const { + // Check to see if we have an exact match and remember the best match we see. + int BestMatchIdx = -1; + int LargestInt = -1; + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)AlignType && + Alignments[i].TypeBitWidth == BitWidth) + return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign; + + // The best match so far depends on what we're looking for. + if (AlignType == INTEGER_ALIGN && + Alignments[i].AlignType == INTEGER_ALIGN) { + // The "best match" for integers is the smallest size that is larger than + // the BitWidth requested. + if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 || + Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth)) + BestMatchIdx = i; + // However, if there isn't one that's larger, then we must use the + // largest one we have (see below) + if (LargestInt == -1 || + Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth) + LargestInt = i; + } + } + + // Okay, we didn't find an exact solution. Fall back here depending on what + // is being looked for. + if (BestMatchIdx == -1) { + // If we didn't find an integer alignment, fall back on most conservative. + if (AlignType == INTEGER_ALIGN) { + BestMatchIdx = LargestInt; + } else { + assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!"); + + // By default, use natural alignment for vector types. This is consistent + // with what clang and llvm-gcc do. + unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType()); + Align *= cast<VectorType>(Ty)->getNumElements(); + // If the alignment is not a power of 2, round up to the next power of 2. + // This happens for non-power-of-2 length vectors. 
+ if (Align & (Align-1)) + Align = NextPowerOf2(Align); + return Align; + } + } + + // Since we got a "best match" index, just return it. + return ABIInfo ? Alignments[BestMatchIdx].ABIAlign + : Alignments[BestMatchIdx].PrefAlign; +} + +namespace { + +class StructLayoutMap { + typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy; + LayoutInfoTy LayoutInfo; + +public: + virtual ~StructLayoutMap() { + // Remove any layouts. + for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end(); + I != E; ++I) { + StructLayout *Value = I->second; + Value->~StructLayout(); + free(Value); + } + } + + StructLayout *&operator[](StructType *STy) { + return LayoutInfo[STy]; + } + + // for debugging... + virtual void dump() const {} +}; + +} // end anonymous namespace + +DataLayout::~DataLayout() { + delete static_cast<StructLayoutMap*>(LayoutMap); +} + +const StructLayout *DataLayout::getStructLayout(StructType *Ty) const { + if (!LayoutMap) + LayoutMap = new StructLayoutMap(); + + StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap); + StructLayout *&SL = (*STM)[Ty]; + if (SL) return SL; + + // Otherwise, create the struct layout. Because it is variable length, we + // malloc it, then use placement new. + int NumElts = Ty->getNumElements(); + StructLayout *L = + (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t)); + + // Set SL before calling StructLayout's ctor. The ctor could cause other + // entries to be added to TheMap, invalidating our reference. + SL = L; + + new (L) StructLayout(Ty, *this); + + return L; +} + +std::string DataLayout::getStringRepresentation() const { + std::string Result; + raw_string_ostream OS(Result); + + OS << (LittleEndian ? "e" : "E"); + SmallVector<unsigned, 8> addrSpaces; + // Lets get all of the known address spaces and sort them + // into increasing order so that we can emit the string + // in a cleaner format. + for (DenseMap<unsigned, PointerAlignElem>::const_iterator + pib = Pointers.begin(), pie = Pointers.end(); + pib != pie; ++pib) { + addrSpaces.push_back(pib->first); + } + std::sort(addrSpaces.begin(), addrSpaces.end()); + for (SmallVector<unsigned, 8>::iterator asb = addrSpaces.begin(), + ase = addrSpaces.end(); asb != ase; ++asb) { + const PointerAlignElem &PI = Pointers.find(*asb)->second; + OS << "-p"; + if (PI.AddressSpace) { + OS << PI.AddressSpace; + } + OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8 + << ':' << PI.PrefAlign*8; + } + OS << "-S" << StackNaturalAlign*8; + + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + const LayoutAlignElem &AI = Alignments[i]; + OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':' + << AI.ABIAlign*8 << ':' << AI.PrefAlign*8; + } + + if (!LegalIntWidths.empty()) { + OS << "-n" << (unsigned)LegalIntWidths[0]; + + for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i) + OS << ':' << (unsigned)LegalIntWidths[i]; + } + return OS.str(); +} + + +uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const { + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + case Type::LabelTyID: + return getPointerSizeInBits(0); + case Type::PointerTyID: { + unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace(); + return getPointerSizeInBits(AS); + } + case Type::ArrayTyID: { + ArrayType *ATy = cast<ArrayType>(Ty); + return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements(); + } + case Type::StructTyID: + // Get the layout annotation... which is lazily created on demand. 
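// (Editorial example, not part of the original source: with the default
//  alignments installed by init(), a struct { i8, i32 } is laid out as the i8
//  at offset 0, three bytes of padding, and the i32 at offset 4, so this case
//  returns 64 bits.)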
+ return getStructLayout(cast<StructType>(Ty))->getSizeInBits(); + case Type::IntegerTyID: + return cast<IntegerType>(Ty)->getBitWidth(); + case Type::HalfTyID: + return 16; + case Type::FloatTyID: + return 32; + case Type::DoubleTyID: + case Type::X86_MMXTyID: + return 64; + case Type::PPC_FP128TyID: + case Type::FP128TyID: + return 128; + // In memory objects this is always aligned to a higher boundary, but + // only 80 bits contain information. + case Type::X86_FP80TyID: + return 80; + case Type::VectorTyID: { + VectorType *VTy = cast<VectorType>(Ty); + return VTy->getNumElements()*getTypeSizeInBits(VTy->getElementType()); + } + default: + llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type"); + } +} + +/*! + \param abi_or_pref Flag that determines which alignment is returned. true + returns the ABI alignment, false returns the preferred alignment. + \param Ty The underlying type for which alignment is determined. + + Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref + == false) for the requested type \a Ty. + */ +unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { + int AlignType = -1; + + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + // Early escape for the non-numeric types. + case Type::LabelTyID: + return (abi_or_pref + ? getPointerABIAlignment(0) + : getPointerPrefAlignment(0)); + case Type::PointerTyID: { + unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace(); + return (abi_or_pref + ? getPointerABIAlignment(AS) + : getPointerPrefAlignment(AS)); + } + case Type::ArrayTyID: + return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref); + + case Type::StructTyID: { + // Packed structure types always have an ABI alignment of one. + if (cast<StructType>(Ty)->isPacked() && abi_or_pref) + return 1; + + // Get the layout annotation... which is lazily created on demand. + const StructLayout *Layout = getStructLayout(cast<StructType>(Ty)); + unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty); + return std::max(Align, Layout->getAlignment()); + } + case Type::IntegerTyID: + AlignType = INTEGER_ALIGN; + break; + case Type::HalfTyID: + case Type::FloatTyID: + case Type::DoubleTyID: + // PPC_FP128TyID and FP128TyID have different data contents, but the + // same size and alignment, so they look the same here. + case Type::PPC_FP128TyID: + case Type::FP128TyID: + case Type::X86_FP80TyID: + AlignType = FLOAT_ALIGN; + break; + case Type::X86_MMXTyID: + case Type::VectorTyID: + AlignType = VECTOR_ALIGN; + break; + default: + llvm_unreachable("Bad type for getAlignment!!!"); + } + + return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty), + abi_or_pref, Ty); +} + +unsigned DataLayout::getABITypeAlignment(Type *Ty) const { + return getAlignment(Ty, true); +} + +/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for +/// an integer type of the specified bitwidth. 
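For illustration only (editorial note, not part of this commit): a sketch of how the exact-match and round-up logic of getAlignmentInfo plays out through this helper, assuming DL is a DataLayout whose specifier did not override the integer defaults from init():

    unsigned A32  = DL.getABIIntegerTypeAlignment(32);   // 4: exact i32 entry
    unsigned A36  = DL.getABIIntegerTypeAlignment(36);   // 4: rounds up to the i64 entry (default ABI align is 4)
    unsigned A256 = DL.getABIIntegerTypeAlignment(256);  // 4: no larger entry, so the largest integer entry wins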
+unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const { + return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0); +} + + +unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const { + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) + if (Alignments[i].AlignType == STACK_ALIGN) + return Alignments[i].ABIAlign; + + return getABITypeAlignment(Ty); +} + +unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const { + return getAlignment(Ty, false); +} + +unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const { + unsigned Align = getPrefTypeAlignment(Ty); + assert(!(Align & (Align-1)) && "Alignment is not a power of two!"); + return Log2_32(Align); +} + +/// getIntPtrType - Return an integer type with size at least as big as that +/// of a pointer in the given address space. +IntegerType *DataLayout::getIntPtrType(LLVMContext &C, + unsigned AddressSpace) const { + return IntegerType::get(C, getPointerSizeInBits(AddressSpace)); +} + +/// getIntPtrType - Return an integer (vector of integer) type with size at +/// least as big as that of a pointer of the given pointer (vector of pointer) +/// type. +Type *DataLayout::getIntPtrType(Type *Ty) const { + assert(Ty->isPtrOrPtrVectorTy() && + "Expected a pointer or pointer vector type."); + unsigned NumBits = getTypeSizeInBits(Ty->getScalarType()); + IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits); + if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) + return VectorType::get(IntTy, VecTy->getNumElements()); + return IntTy; +} + +uint64_t DataLayout::getIndexedOffset(Type *ptrTy, + ArrayRef<Value *> Indices) const { + Type *Ty = ptrTy; + assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()"); + uint64_t Result = 0; + + generic_gep_type_iterator<Value* const*> + TI = gep_type_begin(ptrTy, Indices); + for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX; + ++CurIDX, ++TI) { + if (StructType *STy = dyn_cast<StructType>(*TI)) { + assert(Indices[CurIDX]->getType() == + Type::getInt32Ty(ptrTy->getContext()) && + "Illegal struct idx"); + unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue(); + + // Get structure layout information... + const StructLayout *Layout = getStructLayout(STy); + + // Add in the offset, as calculated by the structure layout info... + Result += Layout->getElementOffset(FieldNo); + + // Update Ty to refer to current element + Ty = STy->getElementType(FieldNo); + } else { + // Update Ty to refer to current element + Ty = cast<SequentialType>(Ty)->getElementType(); + + // Get the array index and the size of each array element. + if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue()) + Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty); + } + } + + return Result; +} + +/// getPreferredAlignment - Return the preferred alignment of the specified +/// global. This includes an explicitly requested alignment (if the global +/// has one). +unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const { + Type *ElemType = GV->getType()->getElementType(); + unsigned Alignment = getPrefTypeAlignment(ElemType); + unsigned GVAlignment = GV->getAlignment(); + if (GVAlignment >= Alignment) { + Alignment = GVAlignment; + } else if (GVAlignment != 0) { + Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType)); + } + + if (GV->hasInitializer() && GVAlignment == 0) { + if (Alignment < 16) { + // If the global is not external, see if it is large. If so, give it a + // larger alignment. 
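// (Editorial example, not part of the original source: a global such as
//  '@buf = global [64 x i32] zeroinitializer' with no explicit alignment is
//  2048 bits, so the check below raises its preferred alignment to 16 bytes.)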
+ if (getTypeSizeInBits(ElemType) > 128) + Alignment = 16; // 16-byte alignment. + } + } + return Alignment; +} + +/// getPreferredAlignmentLog - Return the preferred alignment of the +/// specified global, returned in log form. This includes an explicitly +/// requested alignment (if the global has one). +unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const { + return Log2_32(getPreferredAlignment(GV)); +} diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp new file mode 100644 index 0000000000..e85d4adf77 --- /dev/null +++ b/lib/IR/DebugInfo.cpp @@ -0,0 +1,1192 @@ +//===--- DebugInfo.cpp - Debug Information Helper Classes -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the helper classes used to build and interpret debug +// information in LLVM IR form. +// +//===----------------------------------------------------------------------===// + +#include "llvm/DebugInfo.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Dwarf.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; +using namespace llvm::dwarf; + +//===----------------------------------------------------------------------===// +// DIDescriptor +//===----------------------------------------------------------------------===// + +DIDescriptor::DIDescriptor(const DIFile F) : DbgNode(F.DbgNode) { +} + +DIDescriptor::DIDescriptor(const DISubprogram F) : DbgNode(F.DbgNode) { +} + +DIDescriptor::DIDescriptor(const DILexicalBlockFile F) : DbgNode(F.DbgNode) { +} + +DIDescriptor::DIDescriptor(const DILexicalBlock F) : DbgNode(F.DbgNode) { +} + +DIDescriptor::DIDescriptor(const DIVariable F) : DbgNode(F.DbgNode) { +} + +DIDescriptor::DIDescriptor(const DIType F) : DbgNode(F.DbgNode) { +} + +bool DIDescriptor::Verify() const { + return DbgNode && + (DIDerivedType(DbgNode).Verify() || + DICompositeType(DbgNode).Verify() || DIBasicType(DbgNode).Verify() || + DIVariable(DbgNode).Verify() || DISubprogram(DbgNode).Verify() || + DIGlobalVariable(DbgNode).Verify() || DIFile(DbgNode).Verify() || + DICompileUnit(DbgNode).Verify() || DINameSpace(DbgNode).Verify() || + DILexicalBlock(DbgNode).Verify() || + DILexicalBlockFile(DbgNode).Verify() || + DISubrange(DbgNode).Verify() || DIEnumerator(DbgNode).Verify() || + DIObjCProperty(DbgNode).Verify() || + DITemplateTypeParameter(DbgNode).Verify() || + DITemplateValueParameter(DbgNode).Verify()); +} + +StringRef +DIDescriptor::getStringField(unsigned Elt) const { + if (DbgNode == 0) + return StringRef(); + + if (Elt < DbgNode->getNumOperands()) + if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getOperand(Elt))) + return MDS->getString(); + + return StringRef(); +} + +uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const { + if (DbgNode == 0) + return 0; + + if (Elt < DbgNode->getNumOperands()) + if (ConstantInt *CI + = dyn_cast_or_null<ConstantInt>(DbgNode->getOperand(Elt))) + return CI->getZExtValue(); + + return 0; +} + +int64_t 
DIDescriptor::getInt64Field(unsigned Elt) const { + if (DbgNode == 0) + return 0; + + if (Elt < DbgNode->getNumOperands()) + if (ConstantInt *CI + = dyn_cast_or_null<ConstantInt>(DbgNode->getOperand(Elt))) + return CI->getSExtValue(); + + return 0; +} + +DIDescriptor DIDescriptor::getDescriptorField(unsigned Elt) const { + if (DbgNode == 0) + return DIDescriptor(); + + if (Elt < DbgNode->getNumOperands()) + return + DIDescriptor(dyn_cast_or_null<const MDNode>(DbgNode->getOperand(Elt))); + return DIDescriptor(); +} + +GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const { + if (DbgNode == 0) + return 0; + + if (Elt < DbgNode->getNumOperands()) + return dyn_cast_or_null<GlobalVariable>(DbgNode->getOperand(Elt)); + return 0; +} + +Constant *DIDescriptor::getConstantField(unsigned Elt) const { + if (DbgNode == 0) + return 0; + + if (Elt < DbgNode->getNumOperands()) + return dyn_cast_or_null<Constant>(DbgNode->getOperand(Elt)); + return 0; +} + +Function *DIDescriptor::getFunctionField(unsigned Elt) const { + if (DbgNode == 0) + return 0; + + if (Elt < DbgNode->getNumOperands()) + return dyn_cast_or_null<Function>(DbgNode->getOperand(Elt)); + return 0; +} + +void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) { + if (DbgNode == 0) + return; + + if (Elt < DbgNode->getNumOperands()) { + MDNode *Node = const_cast<MDNode*>(DbgNode); + Node->replaceOperandWith(Elt, F); + } +} + +unsigned DIVariable::getNumAddrElements() const { + return DbgNode->getNumOperands()-8; +} + +/// getInlinedAt - If this variable is inlined then return inline location. +MDNode *DIVariable::getInlinedAt() const { + return dyn_cast_or_null<MDNode>(DbgNode->getOperand(7)); +} + +//===----------------------------------------------------------------------===// +// Predicates +//===----------------------------------------------------------------------===// + +/// isBasicType - Return true if the specified tag is legal for +/// DIBasicType. +bool DIDescriptor::isBasicType() const { + if (!DbgNode) return false; + switch (getTag()) { + case dwarf::DW_TAG_base_type: + case dwarf::DW_TAG_unspecified_type: + return true; + default: + return false; + } +} + +/// isDerivedType - Return true if the specified tag is legal for DIDerivedType. +bool DIDescriptor::isDerivedType() const { + if (!DbgNode) return false; + switch (getTag()) { + case dwarf::DW_TAG_typedef: + case dwarf::DW_TAG_pointer_type: + case dwarf::DW_TAG_ptr_to_member_type: + case dwarf::DW_TAG_reference_type: + case dwarf::DW_TAG_rvalue_reference_type: + case dwarf::DW_TAG_const_type: + case dwarf::DW_TAG_volatile_type: + case dwarf::DW_TAG_restrict_type: + case dwarf::DW_TAG_member: + case dwarf::DW_TAG_inheritance: + case dwarf::DW_TAG_friend: + return true; + default: + // CompositeTypes are currently modelled as DerivedTypes. + return isCompositeType(); + } +} + +/// isCompositeType - Return true if the specified tag is legal for +/// DICompositeType. +bool DIDescriptor::isCompositeType() const { + if (!DbgNode) return false; + switch (getTag()) { + case dwarf::DW_TAG_array_type: + case dwarf::DW_TAG_structure_type: + case dwarf::DW_TAG_union_type: + case dwarf::DW_TAG_enumeration_type: + case dwarf::DW_TAG_subroutine_type: + case dwarf::DW_TAG_class_type: + return true; + default: + return false; + } +} + +/// isVariable - Return true if the specified tag is legal for DIVariable. 
+bool DIDescriptor::isVariable() const { + if (!DbgNode) return false; + switch (getTag()) { + case dwarf::DW_TAG_auto_variable: + case dwarf::DW_TAG_arg_variable: + return true; + default: + return false; + } +} + +/// isType - Return true if the specified tag is legal for DIType. +bool DIDescriptor::isType() const { + return isBasicType() || isCompositeType() || isDerivedType(); +} + +/// isSubprogram - Return true if the specified tag is legal for +/// DISubprogram. +bool DIDescriptor::isSubprogram() const { + return DbgNode && getTag() == dwarf::DW_TAG_subprogram; +} + +/// isGlobalVariable - Return true if the specified tag is legal for +/// DIGlobalVariable. +bool DIDescriptor::isGlobalVariable() const { + return DbgNode && (getTag() == dwarf::DW_TAG_variable || + getTag() == dwarf::DW_TAG_constant); +} + +/// isGlobal - Return true if the specified tag is legal for DIGlobal. +bool DIDescriptor::isGlobal() const { + return isGlobalVariable(); +} + +/// isUnspecifiedParmeter - Return true if the specified tag is +/// DW_TAG_unspecified_parameters. +bool DIDescriptor::isUnspecifiedParameter() const { + return DbgNode && getTag() == dwarf::DW_TAG_unspecified_parameters; +} + +/// isScope - Return true if the specified tag is one of the scope +/// related tag. +bool DIDescriptor::isScope() const { + if (!DbgNode) return false; + switch (getTag()) { + case dwarf::DW_TAG_compile_unit: + case dwarf::DW_TAG_lexical_block: + case dwarf::DW_TAG_subprogram: + case dwarf::DW_TAG_namespace: + return true; + default: + break; + } + return false; +} + +/// isTemplateTypeParameter - Return true if the specified tag is +/// DW_TAG_template_type_parameter. +bool DIDescriptor::isTemplateTypeParameter() const { + return DbgNode && getTag() == dwarf::DW_TAG_template_type_parameter; +} + +/// isTemplateValueParameter - Return true if the specified tag is +/// DW_TAG_template_value_parameter. +bool DIDescriptor::isTemplateValueParameter() const { + return DbgNode && getTag() == dwarf::DW_TAG_template_value_parameter; +} + +/// isCompileUnit - Return true if the specified tag is DW_TAG_compile_unit. +bool DIDescriptor::isCompileUnit() const { + return DbgNode && getTag() == dwarf::DW_TAG_compile_unit; +} + +/// isFile - Return true if the specified tag is DW_TAG_file_type. +bool DIDescriptor::isFile() const { + return DbgNode && getTag() == dwarf::DW_TAG_file_type; +} + +/// isNameSpace - Return true if the specified tag is DW_TAG_namespace. +bool DIDescriptor::isNameSpace() const { + return DbgNode && getTag() == dwarf::DW_TAG_namespace; +} + +/// isLexicalBlockFile - Return true if the specified descriptor is a +/// lexical block with an extra file. +bool DIDescriptor::isLexicalBlockFile() const { + return DbgNode && getTag() == dwarf::DW_TAG_lexical_block && + (DbgNode->getNumOperands() == 3); +} + +/// isLexicalBlock - Return true if the specified tag is DW_TAG_lexical_block. +bool DIDescriptor::isLexicalBlock() const { + return DbgNode && getTag() == dwarf::DW_TAG_lexical_block && + (DbgNode->getNumOperands() > 3); +} + +/// isSubrange - Return true if the specified tag is DW_TAG_subrange_type. +bool DIDescriptor::isSubrange() const { + return DbgNode && getTag() == dwarf::DW_TAG_subrange_type; +} + +/// isEnumerator - Return true if the specified tag is DW_TAG_enumerator. 
+bool DIDescriptor::isEnumerator() const { + return DbgNode && getTag() == dwarf::DW_TAG_enumerator; +} + +/// isObjCProperty - Return true if the specified tag is DW_TAG +bool DIDescriptor::isObjCProperty() const { + return DbgNode && getTag() == dwarf::DW_TAG_APPLE_property; +} +//===----------------------------------------------------------------------===// +// Simple Descriptor Constructors and other Methods +//===----------------------------------------------------------------------===// + +DIType::DIType(const MDNode *N) : DIScope(N) { + if (!N) return; + if (!isBasicType() && !isDerivedType() && !isCompositeType()) { + DbgNode = 0; + } +} + +unsigned DIArray::getNumElements() const { + if (!DbgNode) + return 0; + return DbgNode->getNumOperands(); +} + +/// replaceAllUsesWith - Replace all uses of debug info referenced by +/// this descriptor. +void DIType::replaceAllUsesWith(DIDescriptor &D) { + if (!DbgNode) + return; + + // Since we use a TrackingVH for the node, its easy for clients to manufacture + // legitimate situations where they want to replaceAllUsesWith() on something + // which, due to uniquing, has merged with the source. We shield clients from + // this detail by allowing a value to be replaced with replaceAllUsesWith() + // itself. + if (DbgNode != D) { + MDNode *Node = const_cast<MDNode*>(DbgNode); + const MDNode *DN = D; + const Value *V = cast_or_null<Value>(DN); + Node->replaceAllUsesWith(const_cast<Value*>(V)); + MDNode::deleteTemporary(Node); + } +} + +/// replaceAllUsesWith - Replace all uses of debug info referenced by +/// this descriptor. +void DIType::replaceAllUsesWith(MDNode *D) { + if (!DbgNode) + return; + + // Since we use a TrackingVH for the node, its easy for clients to manufacture + // legitimate situations where they want to replaceAllUsesWith() on something + // which, due to uniquing, has merged with the source. We shield clients from + // this detail by allowing a value to be replaced with replaceAllUsesWith() + // itself. + if (DbgNode != D) { + MDNode *Node = const_cast<MDNode*>(DbgNode); + const MDNode *DN = D; + const Value *V = cast_or_null<Value>(DN); + Node->replaceAllUsesWith(const_cast<Value*>(V)); + MDNode::deleteTemporary(Node); + } +} + +/// isUnsignedDIType - Return true if type encoding is unsigned. +bool DIType::isUnsignedDIType() { + DIDerivedType DTy(DbgNode); + if (DTy.Verify()) + return DTy.getTypeDerivedFrom().isUnsignedDIType(); + + DIBasicType BTy(DbgNode); + if (BTy.Verify()) { + unsigned Encoding = BTy.getEncoding(); + if (Encoding == dwarf::DW_ATE_unsigned || + Encoding == dwarf::DW_ATE_unsigned_char || + Encoding == dwarf::DW_ATE_boolean) + return true; + } + return false; +} + +/// Verify - Verify that a compile unit is well formed. +bool DICompileUnit::Verify() const { + if (!isCompileUnit()) + return false; + StringRef N = getFilename(); + if (N.empty()) + return false; + // It is possible that directory and produce string is empty. + return DbgNode->getNumOperands() == 15; +} + +/// Verify - Verify that an ObjC property is well formed. +bool DIObjCProperty::Verify() const { + if (!isObjCProperty()) + return false; + + DIType Ty = getType(); + if (!Ty.Verify()) return false; + + // Don't worry about the rest of the strings for now. + return DbgNode->getNumOperands() == 8; +} + +/// Verify - Verify that a type descriptor is well formed. 
+bool DIType::Verify() const { + if (!isType()) + return false; + if (getContext() && !getContext().Verify()) + return false; + unsigned Tag = getTag(); + if (!isBasicType() && Tag != dwarf::DW_TAG_const_type && + Tag != dwarf::DW_TAG_volatile_type && Tag != dwarf::DW_TAG_pointer_type && + Tag != dwarf::DW_TAG_ptr_to_member_type && + Tag != dwarf::DW_TAG_reference_type && + Tag != dwarf::DW_TAG_rvalue_reference_type && + Tag != dwarf::DW_TAG_restrict_type && + Tag != dwarf::DW_TAG_array_type && + Tag != dwarf::DW_TAG_enumeration_type && + Tag != dwarf::DW_TAG_subroutine_type && + getFilename().empty()) + return false; + return true; +} + +/// Verify - Verify that a basic type descriptor is well formed. +bool DIBasicType::Verify() const { + return isBasicType() && DbgNode->getNumOperands() == 10; +} + +/// Verify - Verify that a derived type descriptor is well formed. +bool DIDerivedType::Verify() const { + return isDerivedType() && DbgNode->getNumOperands() >= 10 && + DbgNode->getNumOperands() <= 14; +} + +/// Verify - Verify that a composite type descriptor is well formed. +bool DICompositeType::Verify() const { + if (!isCompositeType()) + return false; + if (getContext() && !getContext().Verify()) + return false; + + return DbgNode->getNumOperands() >= 10 && DbgNode->getNumOperands() <= 14; +} + +/// Verify - Verify that a subprogram descriptor is well formed. +bool DISubprogram::Verify() const { + if (!isSubprogram()) + return false; + + if (getContext() && !getContext().Verify()) + return false; + + DICompositeType Ty = getType(); + if (!Ty.Verify()) + return false; + return DbgNode->getNumOperands() == 21; +} + +/// Verify - Verify that a global variable descriptor is well formed. +bool DIGlobalVariable::Verify() const { + if (!isGlobalVariable()) + return false; + + if (getDisplayName().empty()) + return false; + + if (getContext() && !getContext().Verify()) + return false; + + DIType Ty = getType(); + if (!Ty.Verify()) + return false; + + if (!getGlobal() && !getConstant()) + return false; + + return DbgNode->getNumOperands() == 13; +} + +/// Verify - Verify that a variable descriptor is well formed. +bool DIVariable::Verify() const { + if (!isVariable()) + return false; + + if (getContext() && !getContext().Verify()) + return false; + + DIType Ty = getType(); + if (!Ty.Verify()) + return false; + + return DbgNode->getNumOperands() >= 8; +} + +/// Verify - Verify that a location descriptor is well formed. +bool DILocation::Verify() const { + if (!DbgNode) + return false; + + return DbgNode->getNumOperands() == 4; +} + +/// Verify - Verify that a namespace descriptor is well formed. +bool DINameSpace::Verify() const { + if (!isNameSpace()) + return false; + return DbgNode->getNumOperands() == 5; +} + +/// \brief Verify that the file descriptor is well formed. +bool DIFile::Verify() const { + return isFile() && DbgNode->getNumOperands() == 4; +} + +/// \brief Verify that the enumerator descriptor is well formed. +bool DIEnumerator::Verify() const { + return isEnumerator() && DbgNode->getNumOperands() == 3; +} + +/// \brief Verify that the subrange descriptor is well formed. +bool DISubrange::Verify() const { + return isSubrange() && DbgNode->getNumOperands() == 3; +} + +/// \brief Verify that the lexical block descriptor is well formed. +bool DILexicalBlock::Verify() const { + return isLexicalBlock() && DbgNode->getNumOperands() == 6; +} + +/// \brief Verify that the file-scoped lexical block descriptor is well formed. 
+bool DILexicalBlockFile::Verify() const { + return isLexicalBlockFile() && DbgNode->getNumOperands() == 3; +} + +/// \brief Verify that the template type parameter descriptor is well formed. +bool DITemplateTypeParameter::Verify() const { + return isTemplateTypeParameter() && DbgNode->getNumOperands() == 7; +} + +/// \brief Verify that the template value parameter descriptor is well formed. +bool DITemplateValueParameter::Verify() const { + return isTemplateValueParameter() && DbgNode->getNumOperands() == 8; +} + +/// getOriginalTypeSize - If this type is derived from a base type then +/// return base type size. +uint64_t DIDerivedType::getOriginalTypeSize() const { + unsigned Tag = getTag(); + + if (Tag != dwarf::DW_TAG_member && Tag != dwarf::DW_TAG_typedef && + Tag != dwarf::DW_TAG_const_type && Tag != dwarf::DW_TAG_volatile_type && + Tag != dwarf::DW_TAG_restrict_type) + return getSizeInBits(); + + DIType BaseType = getTypeDerivedFrom(); + + // If this type is not derived from any type then take conservative approach. + if (!BaseType.isValid()) + return getSizeInBits(); + + // If this is a derived type, go ahead and get the base type, unless it's a + // reference then it's just the size of the field. Pointer types have no need + // of this since they're a different type of qualification on the type. + if (BaseType.getTag() == dwarf::DW_TAG_reference_type || + BaseType.getTag() == dwarf::DW_TAG_rvalue_reference_type) + return getSizeInBits(); + + if (BaseType.isDerivedType()) + return DIDerivedType(BaseType).getOriginalTypeSize(); + + return BaseType.getSizeInBits(); +} + +/// getObjCProperty - Return property node, if this ivar is associated with one. +MDNode *DIDerivedType::getObjCProperty() const { + if (DbgNode->getNumOperands() <= 10) + return NULL; + return dyn_cast_or_null<MDNode>(DbgNode->getOperand(10)); +} + +/// isInlinedFnArgument - Return true if this variable provides debugging +/// information for an inlined function arguments. +bool DIVariable::isInlinedFnArgument(const Function *CurFn) { + assert(CurFn && "Invalid function"); + if (!getContext().isSubprogram()) + return false; + // This variable is not inlined function argument if its scope + // does not describe current function. + return !DISubprogram(getContext()).describes(CurFn); +} + +/// describes - Return true if this subprogram provides debugging +/// information for the function F. 
+bool DISubprogram::describes(const Function *F) { + assert(F && "Invalid function"); + if (F == getFunction()) + return true; + StringRef Name = getLinkageName(); + if (Name.empty()) + Name = getName(); + if (F->getName() == Name) + return true; + return false; +} + +unsigned DISubprogram::isOptimized() const { + assert (DbgNode && "Invalid subprogram descriptor!"); + if (DbgNode->getNumOperands() == 16) + return getUnsignedField(15); + return 0; +} + +MDNode *DISubprogram::getVariablesNodes() const { + if (!DbgNode || DbgNode->getNumOperands() <= 19) + return NULL; + return dyn_cast_or_null<MDNode>(DbgNode->getOperand(19)); +} + +DIArray DISubprogram::getVariables() const { + if (!DbgNode || DbgNode->getNumOperands() <= 19) + return DIArray(); + if (MDNode *T = dyn_cast_or_null<MDNode>(DbgNode->getOperand(19))) + return DIArray(T); + return DIArray(); +} + +StringRef DIScope::getFilename() const { + if (!DbgNode) + return StringRef(); + if (isLexicalBlockFile()) + return DILexicalBlockFile(DbgNode).getFilename(); + if (isLexicalBlock()) + return DILexicalBlock(DbgNode).getFilename(); + if (isSubprogram()) + return DISubprogram(DbgNode).getFilename(); + if (isCompileUnit()) + return DICompileUnit(DbgNode).getFilename(); + if (isNameSpace()) + return DINameSpace(DbgNode).getFilename(); + if (isType()) + return DIType(DbgNode).getFilename(); + if (isFile()) + return DIFile(DbgNode).getFilename(); + llvm_unreachable("Invalid DIScope!"); +} + +StringRef DIScope::getDirectory() const { + if (!DbgNode) + return StringRef(); + if (isLexicalBlockFile()) + return DILexicalBlockFile(DbgNode).getDirectory(); + if (isLexicalBlock()) + return DILexicalBlock(DbgNode).getDirectory(); + if (isSubprogram()) + return DISubprogram(DbgNode).getDirectory(); + if (isCompileUnit()) + return DICompileUnit(DbgNode).getDirectory(); + if (isNameSpace()) + return DINameSpace(DbgNode).getDirectory(); + if (isType()) + return DIType(DbgNode).getDirectory(); + if (isFile()) + return DIFile(DbgNode).getDirectory(); + llvm_unreachable("Invalid DIScope!"); +} + +DIArray DICompileUnit::getEnumTypes() const { + if (!DbgNode || DbgNode->getNumOperands() < 14) + return DIArray(); + + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(10))) + return DIArray(N); + return DIArray(); +} + +DIArray DICompileUnit::getRetainedTypes() const { + if (!DbgNode || DbgNode->getNumOperands() < 14) + return DIArray(); + + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(11))) + return DIArray(N); + return DIArray(); +} + +DIArray DICompileUnit::getSubprograms() const { + if (!DbgNode || DbgNode->getNumOperands() < 14) + return DIArray(); + + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(12))) + return DIArray(N); + return DIArray(); +} + + +DIArray DICompileUnit::getGlobalVariables() const { + if (!DbgNode || DbgNode->getNumOperands() < 14) + return DIArray(); + + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(13))) + return DIArray(N); + return DIArray(); +} + +/// fixupObjcLikeName - Replace contains special characters used +/// in a typical Objective-C names with '.' in a given string. 
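For illustration only (editorial note, not part of this commit): fixupObjcLikeName, defined just below, only starts rewriting once it has seen a '[', so ordinary C/C++ identifiers pass through unchanged while Objective-C method names are flattened into something safe to splice into a metadata name such as "llvm.dbg.lv.<name>":

    // Hypothetical input/output pair:
    //   "+[NSString stringWithFormat:]"  ->  "+.NSString.stringWithFormat.."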
+static void fixupObjcLikeName(StringRef Str, SmallVectorImpl<char> &Out) { + bool isObjCLike = false; + for (size_t i = 0, e = Str.size(); i < e; ++i) { + char C = Str[i]; + if (C == '[') + isObjCLike = true; + + if (isObjCLike && (C == '[' || C == ']' || C == ' ' || C == ':' || + C == '+' || C == '(' || C == ')')) + Out.push_back('.'); + else + Out.push_back(C); + } +} + +/// getFnSpecificMDNode - Return a NameMDNode, if available, that is +/// suitable to hold function specific information. +NamedMDNode *llvm::getFnSpecificMDNode(const Module &M, DISubprogram Fn) { + SmallString<32> Name = StringRef("llvm.dbg.lv."); + StringRef FName = "fn"; + if (Fn.getFunction()) + FName = Fn.getFunction()->getName(); + else + FName = Fn.getName(); + char One = '\1'; + if (FName.startswith(StringRef(&One, 1))) + FName = FName.substr(1); + fixupObjcLikeName(FName, Name); + return M.getNamedMetadata(Name.str()); +} + +/// getOrInsertFnSpecificMDNode - Return a NameMDNode that is suitable +/// to hold function specific information. +NamedMDNode *llvm::getOrInsertFnSpecificMDNode(Module &M, DISubprogram Fn) { + SmallString<32> Name = StringRef("llvm.dbg.lv."); + StringRef FName = "fn"; + if (Fn.getFunction()) + FName = Fn.getFunction()->getName(); + else + FName = Fn.getName(); + char One = '\1'; + if (FName.startswith(StringRef(&One, 1))) + FName = FName.substr(1); + fixupObjcLikeName(FName, Name); + + return M.getOrInsertNamedMetadata(Name.str()); +} + +/// createInlinedVariable - Create a new inlined variable based on current +/// variable. +/// @param DV Current Variable. +/// @param InlinedScope Location at current variable is inlined. +DIVariable llvm::createInlinedVariable(MDNode *DV, MDNode *InlinedScope, + LLVMContext &VMContext) { + SmallVector<Value *, 16> Elts; + // Insert inlined scope as 7th element. + for (unsigned i = 0, e = DV->getNumOperands(); i != e; ++i) + i == 7 ? Elts.push_back(InlinedScope) : + Elts.push_back(DV->getOperand(i)); + return DIVariable(MDNode::get(VMContext, Elts)); +} + +/// cleanseInlinedVariable - Remove inlined scope from the variable. +DIVariable llvm::cleanseInlinedVariable(MDNode *DV, LLVMContext &VMContext) { + SmallVector<Value *, 16> Elts; + // Insert inlined scope as 7th element. + for (unsigned i = 0, e = DV->getNumOperands(); i != e; ++i) + i == 7 ? + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))): + Elts.push_back(DV->getOperand(i)); + return DIVariable(MDNode::get(VMContext, Elts)); +} + +/// getDISubprogram - Find subprogram that is enclosing this scope. +DISubprogram llvm::getDISubprogram(const MDNode *Scope) { + DIDescriptor D(Scope); + if (D.isSubprogram()) + return DISubprogram(Scope); + + if (D.isLexicalBlockFile()) + return getDISubprogram(DILexicalBlockFile(Scope).getContext()); + + if (D.isLexicalBlock()) + return getDISubprogram(DILexicalBlock(Scope).getContext()); + + return DISubprogram(); +} + +/// getDICompositeType - Find underlying composite type. +DICompositeType llvm::getDICompositeType(DIType T) { + if (T.isCompositeType()) + return DICompositeType(T); + + if (T.isDerivedType()) + return getDICompositeType(DIDerivedType(T).getTypeDerivedFrom()); + + return DICompositeType(); +} + +/// isSubprogramContext - Return true if Context is either a subprogram +/// or another context nested inside a subprogram. 
+bool llvm::isSubprogramContext(const MDNode *Context) { + if (!Context) + return false; + DIDescriptor D(Context); + if (D.isSubprogram()) + return true; + if (D.isType()) + return isSubprogramContext(DIType(Context).getContext()); + return false; +} + +//===----------------------------------------------------------------------===// +// DebugInfoFinder implementations. +//===----------------------------------------------------------------------===// + +/// processModule - Process entire module and collect debug info. +void DebugInfoFinder::processModule(const Module &M) { + if (NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu")) { + for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) { + DICompileUnit CU(CU_Nodes->getOperand(i)); + addCompileUnit(CU); + DIArray GVs = CU.getGlobalVariables(); + for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i) { + DIGlobalVariable DIG(GVs.getElement(i)); + if (addGlobalVariable(DIG)) + processType(DIG.getType()); + } + DIArray SPs = CU.getSubprograms(); + for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) + processSubprogram(DISubprogram(SPs.getElement(i))); + DIArray EnumTypes = CU.getEnumTypes(); + for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i) + processType(DIType(EnumTypes.getElement(i))); + DIArray RetainedTypes = CU.getRetainedTypes(); + for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i) + processType(DIType(RetainedTypes.getElement(i))); + // FIXME: We really shouldn't be bailing out after visiting just one CU + return; + } + } +} + +/// processLocation - Process DILocation. +void DebugInfoFinder::processLocation(DILocation Loc) { + if (!Loc.Verify()) return; + DIDescriptor S(Loc.getScope()); + if (S.isCompileUnit()) + addCompileUnit(DICompileUnit(S)); + else if (S.isSubprogram()) + processSubprogram(DISubprogram(S)); + else if (S.isLexicalBlock()) + processLexicalBlock(DILexicalBlock(S)); + else if (S.isLexicalBlockFile()) { + DILexicalBlockFile DBF = DILexicalBlockFile(S); + processLexicalBlock(DILexicalBlock(DBF.getScope())); + } + processLocation(Loc.getOrigLocation()); +} + +/// processType - Process DIType. +void DebugInfoFinder::processType(DIType DT) { + if (!addType(DT)) + return; + if (DT.isCompositeType()) { + DICompositeType DCT(DT); + processType(DCT.getTypeDerivedFrom()); + DIArray DA = DCT.getTypeArray(); + for (unsigned i = 0, e = DA.getNumElements(); i != e; ++i) { + DIDescriptor D = DA.getElement(i); + if (D.isType()) + processType(DIType(D)); + else if (D.isSubprogram()) + processSubprogram(DISubprogram(D)); + } + } else if (DT.isDerivedType()) { + DIDerivedType DDT(DT); + processType(DDT.getTypeDerivedFrom()); + } +} + +/// processLexicalBlock +void DebugInfoFinder::processLexicalBlock(DILexicalBlock LB) { + DIScope Context = LB.getContext(); + if (Context.isLexicalBlock()) + return processLexicalBlock(DILexicalBlock(Context)); + else if (Context.isLexicalBlockFile()) { + DILexicalBlockFile DBF = DILexicalBlockFile(Context); + return processLexicalBlock(DILexicalBlock(DBF.getScope())); + } + else + return processSubprogram(DISubprogram(Context)); +} + +/// processSubprogram - Process DISubprogram. +void DebugInfoFinder::processSubprogram(DISubprogram SP) { + if (!addSubprogram(SP)) + return; + processType(SP.getType()); +} + +/// processDeclare - Process DbgDeclareInst. 
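processModule above, together with processDeclare defined just below for llvm.dbg.declare intrinsics, is typically all a client needs to drive the finder; a sketch follows (the accessors for the collected CU/SP/GV/type lists live in the DebugInfoFinder class declaration, which is not part of this diff):

#include "llvm/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/InstIterator.h"

// Illustrative sketch: collect module-level debug info, then feed every
// llvm.dbg.declare to the finder so local variables are covered too.
static void collectDebugInfo(const llvm::Module &M) {
  using namespace llvm;
  DebugInfoFinder Finder;
  Finder.processModule(M);
  for (Module::const_iterator F = M.begin(), FE = M.end(); F != FE; ++F)
    for (const_inst_iterator I = inst_begin(&*F), E = inst_end(&*F);
         I != E; ++I)
      if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(&*I))
        Finder.processDeclare(DDI);
}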
+void DebugInfoFinder::processDeclare(const DbgDeclareInst *DDI) { + MDNode *N = dyn_cast<MDNode>(DDI->getVariable()); + if (!N) return; + + DIDescriptor DV(N); + if (!DV.isVariable()) + return; + + if (!NodesSeen.insert(DV)) + return; + processType(DIVariable(N).getType()); +} + +/// addType - Add type into Tys. +bool DebugInfoFinder::addType(DIType DT) { + if (!DT.isValid()) + return false; + + if (!NodesSeen.insert(DT)) + return false; + + TYs.push_back(DT); + return true; +} + +/// addCompileUnit - Add compile unit into CUs. +bool DebugInfoFinder::addCompileUnit(DICompileUnit CU) { + if (!CU.Verify()) + return false; + + if (!NodesSeen.insert(CU)) + return false; + + CUs.push_back(CU); + return true; +} + +/// addGlobalVariable - Add global variable into GVs. +bool DebugInfoFinder::addGlobalVariable(DIGlobalVariable DIG) { + if (!DIDescriptor(DIG).isGlobalVariable()) + return false; + + if (!NodesSeen.insert(DIG)) + return false; + + GVs.push_back(DIG); + return true; +} + +// addSubprogram - Add subprgoram into SPs. +bool DebugInfoFinder::addSubprogram(DISubprogram SP) { + if (!DIDescriptor(SP).isSubprogram()) + return false; + + if (!NodesSeen.insert(SP)) + return false; + + SPs.push_back(SP); + return true; +} + +//===----------------------------------------------------------------------===// +// DIDescriptor: dump routines for all descriptors. +//===----------------------------------------------------------------------===// + +/// dump - Print descriptor to dbgs() with a newline. +void DIDescriptor::dump() const { + print(dbgs()); dbgs() << '\n'; +} + +/// print - Print descriptor. +void DIDescriptor::print(raw_ostream &OS) const { + if (!DbgNode) return; + + if (const char *Tag = dwarf::TagString(getTag())) + OS << "[ " << Tag << " ]"; + + if (this->isSubrange()) { + DISubrange(DbgNode).printInternal(OS); + } else if (this->isCompileUnit()) { + DICompileUnit(DbgNode).printInternal(OS); + } else if (this->isFile()) { + DIFile(DbgNode).printInternal(OS); + } else if (this->isEnumerator()) { + DIEnumerator(DbgNode).printInternal(OS); + } else if (this->isBasicType()) { + DIType(DbgNode).printInternal(OS); + } else if (this->isDerivedType()) { + DIDerivedType(DbgNode).printInternal(OS); + } else if (this->isCompositeType()) { + DICompositeType(DbgNode).printInternal(OS); + } else if (this->isSubprogram()) { + DISubprogram(DbgNode).printInternal(OS); + } else if (this->isGlobalVariable()) { + DIGlobalVariable(DbgNode).printInternal(OS); + } else if (this->isVariable()) { + DIVariable(DbgNode).printInternal(OS); + } else if (this->isObjCProperty()) { + DIObjCProperty(DbgNode).printInternal(OS); + } else if (this->isScope()) { + DIScope(DbgNode).printInternal(OS); + } +} + +void DISubrange::printInternal(raw_ostream &OS) const { + int64_t Count = getCount(); + if (Count != -1) + OS << " [" << getLo() << ", " << Count - 1 << ']'; + else + OS << " [unbounded]"; +} + +void DIScope::printInternal(raw_ostream &OS) const { + OS << " [" << getDirectory() << "/" << getFilename() << ']'; +} + +void DICompileUnit::printInternal(raw_ostream &OS) const { + DIScope::printInternal(OS); + if (const char *Lang = dwarf::LanguageString(getLanguage())) + OS << " [" << Lang << ']'; +} + +void DIEnumerator::printInternal(raw_ostream &OS) const { + OS << " [" << getName() << " :: " << getEnumValue() << ']'; +} + +void DIType::printInternal(raw_ostream &OS) const { + if (!DbgNode) return; + + StringRef Res = getName(); + if (!Res.empty()) + OS << " [" << Res << "]"; + + // TODO: Print context? 
+ + OS << " [line " << getLineNumber() + << ", size " << getSizeInBits() + << ", align " << getAlignInBits() + << ", offset " << getOffsetInBits(); + if (isBasicType()) + if (const char *Enc = + dwarf::AttributeEncodingString(DIBasicType(DbgNode).getEncoding())) + OS << ", enc " << Enc; + OS << "]"; + + if (isPrivate()) + OS << " [private]"; + else if (isProtected()) + OS << " [protected]"; + + if (isArtificial()) + OS << " [artificial]"; + + if (isForwardDecl()) + OS << " [fwd]"; + if (isVector()) + OS << " [vector]"; + if (isStaticMember()) + OS << " [static]"; +} + +void DIDerivedType::printInternal(raw_ostream &OS) const { + DIType::printInternal(OS); + OS << " [from " << getTypeDerivedFrom().getName() << ']'; +} + +void DICompositeType::printInternal(raw_ostream &OS) const { + DIType::printInternal(OS); + DIArray A = getTypeArray(); + OS << " [" << A.getNumElements() << " elements]"; +} + +void DISubprogram::printInternal(raw_ostream &OS) const { + // TODO : Print context + OS << " [line " << getLineNumber() << ']'; + + if (isLocalToUnit()) + OS << " [local]"; + + if (isDefinition()) + OS << " [def]"; + + if (getScopeLineNumber() != getLineNumber()) + OS << " [scope " << getScopeLineNumber() << "]"; + + if (isPrivate()) + OS << " [private]"; + else if (isProtected()) + OS << " [protected]"; + + StringRef Res = getName(); + if (!Res.empty()) + OS << " [" << Res << ']'; +} + +void DIGlobalVariable::printInternal(raw_ostream &OS) const { + StringRef Res = getName(); + if (!Res.empty()) + OS << " [" << Res << ']'; + + OS << " [line " << getLineNumber() << ']'; + + // TODO : Print context + + if (isLocalToUnit()) + OS << " [local]"; + + if (isDefinition()) + OS << " [def]"; +} + +void DIVariable::printInternal(raw_ostream &OS) const { + StringRef Res = getName(); + if (!Res.empty()) + OS << " [" << Res << ']'; + + OS << " [line " << getLineNumber() << ']'; +} + +void DIObjCProperty::printInternal(raw_ostream &OS) const { + StringRef Name = getObjCPropertyName(); + if (!Name.empty()) + OS << " [" << Name << ']'; + + OS << " [line " << getLineNumber() + << ", properties " << getUnsignedField(6) << ']'; +} + +static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS, + const LLVMContext &Ctx) { + if (!DL.isUnknown()) { // Print source line info. + DIScope Scope(DL.getScope(Ctx)); + // Omit the directory, because it's likely to be long and uninteresting. 
+ if (Scope.Verify()) + CommentOS << Scope.getFilename(); + else + CommentOS << "<unknown>"; + CommentOS << ':' << DL.getLine(); + if (DL.getCol() != 0) + CommentOS << ':' << DL.getCol(); + DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx)); + if (!InlinedAtDL.isUnknown()) { + CommentOS << " @[ "; + printDebugLoc(InlinedAtDL, CommentOS, Ctx); + CommentOS << " ]"; + } + } +} + +void DIVariable::printExtendedName(raw_ostream &OS) const { + const LLVMContext &Ctx = DbgNode->getContext(); + StringRef Res = getName(); + if (!Res.empty()) + OS << Res << "," << getLineNumber(); + if (MDNode *InlinedAt = getInlinedAt()) { + DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt); + if (!InlinedAtDL.isUnknown()) { + OS << " @["; + printDebugLoc(InlinedAtDL, OS, Ctx); + OS << "]"; + } + } +} diff --git a/lib/IR/DebugLoc.cpp b/lib/IR/DebugLoc.cpp new file mode 100644 index 0000000000..c57b5a3053 --- /dev/null +++ b/lib/IR/DebugLoc.cpp @@ -0,0 +1,315 @@ +//===-- DebugLoc.cpp - Implement DebugLoc class ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/DebugLoc.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/DebugInfo.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// DebugLoc Implementation +//===----------------------------------------------------------------------===// + +MDNode *DebugLoc::getScope(const LLVMContext &Ctx) const { + if (ScopeIdx == 0) return 0; + + if (ScopeIdx > 0) { + // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at + // position specified. + assert(unsigned(ScopeIdx) <= Ctx.pImpl->ScopeRecords.size() && + "Invalid ScopeIdx!"); + return Ctx.pImpl->ScopeRecords[ScopeIdx-1].get(); + } + + // Otherwise, the index is in the ScopeInlinedAtRecords array. + assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() && + "Invalid ScopeIdx"); + return Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].first.get(); +} + +MDNode *DebugLoc::getInlinedAt(const LLVMContext &Ctx) const { + // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at + // position specified. Zero is invalid. + if (ScopeIdx >= 0) return 0; + + // Otherwise, the index is in the ScopeInlinedAtRecords array. + assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() && + "Invalid ScopeIdx"); + return Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].second.get(); +} + +/// Return both the Scope and the InlinedAt values. +void DebugLoc::getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA, + const LLVMContext &Ctx) const { + if (ScopeIdx == 0) { + Scope = IA = 0; + return; + } + + if (ScopeIdx > 0) { + // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at + // position specified. + assert(unsigned(ScopeIdx) <= Ctx.pImpl->ScopeRecords.size() && + "Invalid ScopeIdx!"); + Scope = Ctx.pImpl->ScopeRecords[ScopeIdx-1].get(); + IA = 0; + return; + } + + // Otherwise, the index is in the ScopeInlinedAtRecords array. 
+ assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() && + "Invalid ScopeIdx"); + Scope = Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].first.get(); + IA = Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].second.get(); +} + + +DebugLoc DebugLoc::get(unsigned Line, unsigned Col, + MDNode *Scope, MDNode *InlinedAt) { + DebugLoc Result; + + // If no scope is available, this is an unknown location. + if (Scope == 0) return Result; + + // Saturate line and col to "unknown". + if (Col > 255) Col = 0; + if (Line >= (1 << 24)) Line = 0; + Result.LineCol = Line | (Col << 24); + + LLVMContext &Ctx = Scope->getContext(); + + // If there is no inlined-at location, use the ScopeRecords array. + if (InlinedAt == 0) + Result.ScopeIdx = Ctx.pImpl->getOrAddScopeRecordIdxEntry(Scope, 0); + else + Result.ScopeIdx = Ctx.pImpl->getOrAddScopeInlinedAtIdxEntry(Scope, + InlinedAt, 0); + + return Result; +} + +/// getAsMDNode - This method converts the compressed DebugLoc node into a +/// DILocation compatible MDNode. +MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const { + if (isUnknown()) return 0; + + MDNode *Scope, *IA; + getScopeAndInlinedAt(Scope, IA, Ctx); + assert(Scope && "If scope is null, this should be isUnknown()"); + + LLVMContext &Ctx2 = Scope->getContext(); + Type *Int32 = Type::getInt32Ty(Ctx2); + Value *Elts[] = { + ConstantInt::get(Int32, getLine()), ConstantInt::get(Int32, getCol()), + Scope, IA + }; + return MDNode::get(Ctx2, Elts); +} + +/// getFromDILocation - Translate the DILocation quad into a DebugLoc. +DebugLoc DebugLoc::getFromDILocation(MDNode *N) { + DILocation Loc(N); + MDNode *Scope = Loc.getScope(); + if (Scope == 0) return DebugLoc(); + return get(Loc.getLineNumber(), Loc.getColumnNumber(), Scope, + Loc.getOrigLocation()); +} + +/// getFromDILexicalBlock - Translate the DILexicalBlock into a DebugLoc. +DebugLoc DebugLoc::getFromDILexicalBlock(MDNode *N) { + DILexicalBlock LexBlock(N); + MDNode *Scope = LexBlock.getContext(); + if (Scope == 0) return DebugLoc(); + return get(LexBlock.getLineNumber(), LexBlock.getColumnNumber(), Scope, NULL); +} + +void DebugLoc::dump(const LLVMContext &Ctx) const { +#ifndef NDEBUG + if (!isUnknown()) { + dbgs() << getLine(); + if (getCol() != 0) + dbgs() << ',' << getCol(); + DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(getInlinedAt(Ctx)); + if (!InlinedAtDL.isUnknown()) { + dbgs() << " @ "; + InlinedAtDL.dump(Ctx); + } else + dbgs() << "\n"; + } +#endif +} + +//===----------------------------------------------------------------------===// +// DenseMap specialization +//===----------------------------------------------------------------------===// + +unsigned DenseMapInfo<DebugLoc>::getHashValue(const DebugLoc &Key) { + return static_cast<unsigned>(hash_combine(Key.LineCol, Key.ScopeIdx)); +} + +//===----------------------------------------------------------------------===// +// LLVMContextImpl Implementation +//===----------------------------------------------------------------------===// + +int LLVMContextImpl::getOrAddScopeRecordIdxEntry(MDNode *Scope, + int ExistingIdx) { + // If we already have an entry for this scope, return it. + int &Idx = ScopeRecordIdx[Scope]; + if (Idx) return Idx; + + // If we don't have an entry, but ExistingIdx is specified, use it. + if (ExistingIdx) + return Idx = ExistingIdx; + + // Otherwise add a new entry. + + // Start out ScopeRecords with a minimal reasonable size to avoid + // excessive reallocation starting out. 
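A short sketch of the public entry point above; the saturation thresholds (column above 255, line at or above 1<<24) and the null-scope behaviour are exactly those enforced by DebugLoc::get, and setDebugLoc is the usual way the result is attached:

#include "llvm/IR/Instruction.h"
#include "llvm/Support/DebugLoc.h"

// Illustrative sketch: build a location and attach it to an instruction.
// A null scope yields the default "unknown" DebugLoc; out-of-range line or
// column values are silently saturated to 0 by get().
static void tagInstruction(llvm::Instruction *I, llvm::MDNode *Scope,
                           unsigned Line, unsigned Col) {
  llvm::DebugLoc DL = llvm::DebugLoc::get(Line, Col, Scope, /*InlinedAt=*/0);
  I->setDebugLoc(DL);
}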
+ if (ScopeRecords.empty()) + ScopeRecords.reserve(128); + + // Index is biased by 1 for index. + Idx = ScopeRecords.size()+1; + ScopeRecords.push_back(DebugRecVH(Scope, this, Idx)); + return Idx; +} + +int LLVMContextImpl::getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA, + int ExistingIdx) { + // If we already have an entry, return it. + int &Idx = ScopeInlinedAtIdx[std::make_pair(Scope, IA)]; + if (Idx) return Idx; + + // If we don't have an entry, but ExistingIdx is specified, use it. + if (ExistingIdx) + return Idx = ExistingIdx; + + // Start out ScopeInlinedAtRecords with a minimal reasonable size to avoid + // excessive reallocation starting out. + if (ScopeInlinedAtRecords.empty()) + ScopeInlinedAtRecords.reserve(128); + + // Index is biased by 1 and negated. + Idx = -ScopeInlinedAtRecords.size()-1; + ScopeInlinedAtRecords.push_back(std::make_pair(DebugRecVH(Scope, this, Idx), + DebugRecVH(IA, this, Idx))); + return Idx; +} + + +//===----------------------------------------------------------------------===// +// DebugRecVH Implementation +//===----------------------------------------------------------------------===// + +/// deleted - The MDNode this is pointing to got deleted, so this pointer needs +/// to drop to null and we need remove our entry from the DenseMap. +void DebugRecVH::deleted() { + // If this is a non-canonical reference, just drop the value to null, we know + // it doesn't have a map entry. + if (Idx == 0) { + setValPtr(0); + return; + } + + MDNode *Cur = get(); + + // If the index is positive, it is an entry in ScopeRecords. + if (Idx > 0) { + assert(Ctx->ScopeRecordIdx[Cur] == Idx && "Mapping out of date!"); + Ctx->ScopeRecordIdx.erase(Cur); + // Reset this VH to null and we're done. + setValPtr(0); + Idx = 0; + return; + } + + // Otherwise, it is an entry in ScopeInlinedAtRecords, we don't know if it + // is the scope or the inlined-at record entry. + assert(unsigned(-Idx-1) < Ctx->ScopeInlinedAtRecords.size()); + std::pair<DebugRecVH, DebugRecVH> &Entry = Ctx->ScopeInlinedAtRecords[-Idx-1]; + assert((this == &Entry.first || this == &Entry.second) && + "Mapping out of date!"); + + MDNode *OldScope = Entry.first.get(); + MDNode *OldInlinedAt = Entry.second.get(); + assert(OldScope != 0 && OldInlinedAt != 0 && + "Entry should be non-canonical if either val dropped to null"); + + // Otherwise, we do have an entry in it, nuke it and we're done. + assert(Ctx->ScopeInlinedAtIdx[std::make_pair(OldScope, OldInlinedAt)] == Idx&& + "Mapping out of date"); + Ctx->ScopeInlinedAtIdx.erase(std::make_pair(OldScope, OldInlinedAt)); + + // Reset this VH to null. Drop both 'Idx' values to null to indicate that + // we're in non-canonical form now. + setValPtr(0); + Entry.first.Idx = Entry.second.Idx = 0; +} + +void DebugRecVH::allUsesReplacedWith(Value *NewVa) { + // If being replaced with a non-mdnode value (e.g. undef) handle this as if + // the mdnode got deleted. + MDNode *NewVal = dyn_cast<MDNode>(NewVa); + if (NewVal == 0) return deleted(); + + // If this is a non-canonical reference, just change it, we know it already + // doesn't have a map entry. + if (Idx == 0) { + setValPtr(NewVa); + return; + } + + MDNode *OldVal = get(); + assert(OldVal != NewVa && "Node replaced with self?"); + + // If the index is positive, it is an entry in ScopeRecords. 
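For readers following the index arithmetic above, a toy model of the ScopeIdx encoding (plain ints instead of DebugRecVH handles; all names here are invented for the illustration): positive values are 1-based slots in ScopeRecords, negative values are negated 1-based slots in ScopeInlinedAtRecords, and 0 means unknown:

#include <cassert>
#include <vector>

// Toy model of the biasing only; the real tables hold DebugRecVH entries.
static int addScopeOnly(std::vector<int> &ScopeRecords, int Payload) {
  ScopeRecords.push_back(Payload);
  return (int)ScopeRecords.size();            // biased by +1, so never 0
}
static int addScopeInlinedAt(std::vector<int> &InlinedAtRecords, int Payload) {
  InlinedAtRecords.push_back(Payload);
  return -(int)InlinedAtRecords.size();       // biased by 1 and negated
}
static int lookup(const std::vector<int> &ScopeRecords,
                  const std::vector<int> &InlinedAtRecords, int Idx) {
  assert(Idx != 0 && "0 encodes an unknown location");
  return Idx > 0 ? ScopeRecords[Idx - 1] : InlinedAtRecords[-Idx - 1];
}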
+ if (Idx > 0) { + assert(Ctx->ScopeRecordIdx[OldVal] == Idx && "Mapping out of date!"); + Ctx->ScopeRecordIdx.erase(OldVal); + setValPtr(NewVal); + + int NewEntry = Ctx->getOrAddScopeRecordIdxEntry(NewVal, Idx); + + // If NewVal already has an entry, this becomes a non-canonical reference, + // just drop Idx to 0 to signify this. + if (NewEntry != Idx) + Idx = 0; + return; + } + + // Otherwise, it is an entry in ScopeInlinedAtRecords, we don't know if it + // is the scope or the inlined-at record entry. + assert(unsigned(-Idx-1) < Ctx->ScopeInlinedAtRecords.size()); + std::pair<DebugRecVH, DebugRecVH> &Entry = Ctx->ScopeInlinedAtRecords[-Idx-1]; + assert((this == &Entry.first || this == &Entry.second) && + "Mapping out of date!"); + + MDNode *OldScope = Entry.first.get(); + MDNode *OldInlinedAt = Entry.second.get(); + assert(OldScope != 0 && OldInlinedAt != 0 && + "Entry should be non-canonical if either val dropped to null"); + + // Otherwise, we do have an entry in it, nuke it and we're done. + assert(Ctx->ScopeInlinedAtIdx[std::make_pair(OldScope, OldInlinedAt)] == Idx&& + "Mapping out of date"); + Ctx->ScopeInlinedAtIdx.erase(std::make_pair(OldScope, OldInlinedAt)); + + // Reset this VH to the new value. + setValPtr(NewVal); + + int NewIdx = Ctx->getOrAddScopeInlinedAtIdxEntry(Entry.first.get(), + Entry.second.get(), Idx); + // If NewVal already has an entry, this becomes a non-canonical reference, + // just drop Idx to 0 to signify this. + if (NewIdx != Idx) { + std::pair<DebugRecVH, DebugRecVH> &Entry=Ctx->ScopeInlinedAtRecords[-Idx-1]; + Entry.first.Idx = Entry.second.Idx = 0; + } +} diff --git a/lib/IR/Dominators.cpp b/lib/IR/Dominators.cpp new file mode 100644 index 0000000000..a1160cdc83 --- /dev/null +++ b/lib/IR/Dominators.cpp @@ -0,0 +1,302 @@ +//===- Dominators.cpp - Dominator Calculation -----------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements simple dominator construction algorithms for finding +// forward dominators. Postdominators are available in libanalysis, but are not +// included in libvmcore, because it's not needed. Forward dominators are +// needed to support the Verifier pass. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/Dominators.h" +#include "llvm/ADT/DepthFirstIterator.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/DominatorInternals.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/IR/Instructions.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +using namespace llvm; + +// Always verify dominfo if expensive checking is enabled. 
+#ifdef XDEBUG +static bool VerifyDomInfo = true; +#else +static bool VerifyDomInfo = false; +#endif +static cl::opt<bool,true> +VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo), + cl::desc("Verify dominator info (time consuming)")); + +bool BasicBlockEdge::isSingleEdge() const { + const TerminatorInst *TI = Start->getTerminator(); + unsigned NumEdgesToEnd = 0; + for (unsigned int i = 0, n = TI->getNumSuccessors(); i < n; ++i) { + if (TI->getSuccessor(i) == End) + ++NumEdgesToEnd; + if (NumEdgesToEnd >= 2) + return false; + } + assert(NumEdgesToEnd == 1); + return true; +} + +//===----------------------------------------------------------------------===// +// DominatorTree Implementation +//===----------------------------------------------------------------------===// +// +// Provide public access to DominatorTree information. Implementation details +// can be found in DominatorInternals.h. +// +//===----------------------------------------------------------------------===// + +TEMPLATE_INSTANTIATION(class llvm::DomTreeNodeBase<BasicBlock>); +TEMPLATE_INSTANTIATION(class llvm::DominatorTreeBase<BasicBlock>); + +char DominatorTree::ID = 0; +INITIALIZE_PASS(DominatorTree, "domtree", + "Dominator Tree Construction", true, true) + +bool DominatorTree::runOnFunction(Function &F) { + DT->recalculate(F); + return false; +} + +void DominatorTree::verifyAnalysis() const { + if (!VerifyDomInfo) return; + + Function &F = *getRoot()->getParent(); + + DominatorTree OtherDT; + OtherDT.getBase().recalculate(F); + if (compare(OtherDT)) { + errs() << "DominatorTree is not up to date!\nComputed:\n"; + print(errs()); + errs() << "\nActual:\n"; + OtherDT.print(errs()); + abort(); + } +} + +void DominatorTree::print(raw_ostream &OS, const Module *) const { + DT->print(OS); +} + +// dominates - Return true if Def dominates a use in User. This performs +// the special checks necessary if Def and User are in the same basic block. +// Note that Def doesn't dominate a use in Def itself! +bool DominatorTree::dominates(const Instruction *Def, + const Instruction *User) const { + const BasicBlock *UseBB = User->getParent(); + const BasicBlock *DefBB = Def->getParent(); + + // Any unreachable use is dominated, even if Def == User. + if (!isReachableFromEntry(UseBB)) + return true; + + // Unreachable definitions don't dominate anything. + if (!isReachableFromEntry(DefBB)) + return false; + + // An instruction doesn't dominate a use in itself. + if (Def == User) + return false; + + // The value defined by an invoke dominates an instruction only if + // it dominates every instruction in UseBB. + // A PHI is dominated only if the instruction dominates every possible use + // in the UseBB. + if (isa<InvokeInst>(Def) || isa<PHINode>(User)) + return dominates(Def, UseBB); + + if (DefBB != UseBB) + return dominates(DefBB, UseBB); + + // Loop through the basic block until we find Def or User. + BasicBlock::const_iterator I = DefBB->begin(); + for (; &*I != Def && &*I != User; ++I) + /*empty*/; + + return &*I == Def; +} + +// true if Def would dominate a use in any instruction in UseBB. +// note that dominates(Def, Def->getParent()) is false. +bool DominatorTree::dominates(const Instruction *Def, + const BasicBlock *UseBB) const { + const BasicBlock *DefBB = Def->getParent(); + + // Any unreachable use is dominated, even if DefBB == UseBB. + if (!isReachableFromEntry(UseBB)) + return true; + + // Unreachable definitions don't dominate anything. 
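A sketch of how a client normally reaches this analysis; the pass name CheckReach and what it prints are invented for illustration, but requesting DominatorTree through getAnalysisUsage is the standard pattern that the INITIALIZE_PASS above supports:

#include "llvm/Analysis/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Illustrative sketch: a pass that consumes the dominator tree built above.
struct CheckReach : public llvm::FunctionPass {
  static char ID;
  CheckReach() : llvm::FunctionPass(ID) {}

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.addRequired<llvm::DominatorTree>();
    AU.setPreservesAll();
  }

  virtual bool runOnFunction(llvm::Function &F) {
    llvm::DominatorTree &DT = getAnalysis<llvm::DominatorTree>();
    for (llvm::Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!DT.isReachableFromEntry(&*BB))
        llvm::errs() << "unreachable block: " << BB->getName() << "\n";
    return false;
  }
};
}
char CheckReach::ID = 0;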
+ if (!isReachableFromEntry(DefBB)) + return false; + + if (DefBB == UseBB) + return false; + + const InvokeInst *II = dyn_cast<InvokeInst>(Def); + if (!II) + return dominates(DefBB, UseBB); + + // Invoke results are only usable in the normal destination, not in the + // exceptional destination. + BasicBlock *NormalDest = II->getNormalDest(); + BasicBlockEdge E(DefBB, NormalDest); + return dominates(E, UseBB); +} + +bool DominatorTree::dominates(const BasicBlockEdge &BBE, + const BasicBlock *UseBB) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + + // If the BB the edge ends in doesn't dominate the use BB, then the + // edge also doesn't. + const BasicBlock *Start = BBE.getStart(); + const BasicBlock *End = BBE.getEnd(); + if (!dominates(End, UseBB)) + return false; + + // Simple case: if the end BB has a single predecessor, the fact that it + // dominates the use block implies that the edge also does. + if (End->getSinglePredecessor()) + return true; + + // The normal edge from the invoke is critical. Conceptually, what we would + // like to do is split it and check if the new block dominates the use. + // With X being the new block, the graph would look like: + // + // DefBB + // /\ . . + // / \ . . + // / \ . . + // / \ | | + // A X B C + // | \ | / + // . \|/ + // . NormalDest + // . + // + // Given the definition of dominance, NormalDest is dominated by X iff X + // dominates all of NormalDest's predecessors (X, B, C in the example). X + // trivially dominates itself, so we only have to find if it dominates the + // other predecessors. Since the only way out of X is via NormalDest, X can + // only properly dominate a node if NormalDest dominates that node too. + for (const_pred_iterator PI = pred_begin(End), E = pred_end(End); + PI != E; ++PI) { + const BasicBlock *BB = *PI; + if (BB == Start) + continue; + + if (!dominates(End, BB)) + return false; + } + return true; +} + +bool DominatorTree::dominates(const BasicBlockEdge &BBE, + const Use &U) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + + Instruction *UserInst = cast<Instruction>(U.getUser()); + // A PHI in the end of the edge is dominated by it. + PHINode *PN = dyn_cast<PHINode>(UserInst); + if (PN && PN->getParent() == BBE.getEnd() && + PN->getIncomingBlock(U) == BBE.getStart()) + return true; + + // Otherwise use the edge-dominates-block query, which + // handles the crazy critical edge cases properly. + const BasicBlock *UseBB; + if (PN) + UseBB = PN->getIncomingBlock(U); + else + UseBB = UserInst->getParent(); + return dominates(BBE, UseBB); +} + +bool DominatorTree::dominates(const Instruction *Def, + const Use &U) const { + Instruction *UserInst = cast<Instruction>(U.getUser()); + const BasicBlock *DefBB = Def->getParent(); + + // Determine the block in which the use happens. PHI nodes use + // their operands on edges; simulate this by thinking of the use + // happening at the end of the predecessor block. + const BasicBlock *UseBB; + if (PHINode *PN = dyn_cast<PHINode>(UserInst)) + UseBB = PN->getIncomingBlock(U); + else + UseBB = UserInst->getParent(); + + // Any unreachable use is dominated, even if Def == User. 
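The BasicBlockEdge overloads above exist chiefly for invoke results; a minimal sketch of the intended query, mirroring what the Instruction/Use overload that follows does when the definition is an invoke:

#include "llvm/Analysis/Dominators.h"
#include "llvm/IR/Instructions.h"

// Illustrative sketch: an invoke's result is only available along the edge
// to its normal destination, so the query goes through BasicBlockEdge.
static bool invokeResultDominatesUse(llvm::DominatorTree &DT,
                                     const llvm::InvokeInst *II,
                                     const llvm::Use &U) {
  llvm::BasicBlockEdge E(II->getParent(), II->getNormalDest());
  return DT.dominates(E, U);   // asserts the edge is a single edge
}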
+ if (!isReachableFromEntry(UseBB)) + return true; + + // Unreachable definitions don't dominate anything. + if (!isReachableFromEntry(DefBB)) + return false; + + // Invoke instructions define their return values on the edges + // to their normal successors, so we have to handle them specially. + // Among other things, this means they don't dominate anything in + // their own block, except possibly a phi, so we don't need to + // walk the block in any case. + if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) { + BasicBlock *NormalDest = II->getNormalDest(); + BasicBlockEdge E(DefBB, NormalDest); + return dominates(E, U); + } + + // If the def and use are in different blocks, do a simple CFG dominator + // tree query. + if (DefBB != UseBB) + return dominates(DefBB, UseBB); + + // Ok, def and use are in the same block. If the def is an invoke, it + // doesn't dominate anything in the block. If it's a PHI, it dominates + // everything in the block. + if (isa<PHINode>(UserInst)) + return true; + + // Otherwise, just loop through the basic block until we find Def or User. + BasicBlock::const_iterator I = DefBB->begin(); + for (; &*I != Def && &*I != UserInst; ++I) + /*empty*/; + + return &*I != UserInst; +} + +bool DominatorTree::isReachableFromEntry(const Use &U) const { + Instruction *I = dyn_cast<Instruction>(U.getUser()); + + // ConstantExprs aren't really reachable from the entry block, but they + // don't need to be treated like unreachable code either. + if (!I) return true; + + // PHI nodes use their operands on their incoming edges. + if (PHINode *PN = dyn_cast<PHINode>(I)) + return isReachableFromEntry(PN->getIncomingBlock(U)); + + // Everything else uses their operands in their own block. + return isReachableFromEntry(I->getParent()); +} diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp new file mode 100644 index 0000000000..5559a6c56e --- /dev/null +++ b/lib/IR/Function.cpp @@ -0,0 +1,707 @@ +//===-- Function.cpp - Implement the Global object classes ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Function class for the IR library. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Function.h" +#include "LLVMContextImpl.h" +#include "SymbolTableListTraitsImpl.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/InstIterator.h" +#include "llvm/Support/LeakDetector.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/RWMutex.h" +#include "llvm/Support/StringPool.h" +#include "llvm/Support/Threading.h" +using namespace llvm; + +// Explicit instantiations of SymbolTableListTraits since some of the methods +// are not in the public header file... 
+template class llvm::SymbolTableListTraits<Argument, Function>; +template class llvm::SymbolTableListTraits<BasicBlock, Function>; + +//===----------------------------------------------------------------------===// +// Argument Implementation +//===----------------------------------------------------------------------===// + +void Argument::anchor() { } + +Argument::Argument(Type *Ty, const Twine &Name, Function *Par) + : Value(Ty, Value::ArgumentVal) { + Parent = 0; + + // Make sure that we get added to a function + LeakDetector::addGarbageObject(this); + + if (Par) + Par->getArgumentList().push_back(this); + setName(Name); +} + +void Argument::setParent(Function *parent) { + if (getParent()) + LeakDetector::addGarbageObject(this); + Parent = parent; + if (getParent()) + LeakDetector::removeGarbageObject(this); +} + +/// getArgNo - Return the index of this formal argument in its containing +/// function. For example in "void foo(int a, float b)" a is 0 and b is 1. +unsigned Argument::getArgNo() const { + const Function *F = getParent(); + assert(F && "Argument is not in a function"); + + Function::const_arg_iterator AI = F->arg_begin(); + unsigned ArgIdx = 0; + for (; &*AI != this; ++AI) + ++ArgIdx; + + return ArgIdx; +} + +/// hasByValAttr - Return true if this argument has the byval attribute on it +/// in its containing function. +bool Argument::hasByValAttr() const { + if (!getType()->isPointerTy()) return false; + return getParent()->getAttributes(). + hasAttribute(getArgNo()+1, Attribute::ByVal); +} + +unsigned Argument::getParamAlignment() const { + assert(getType()->isPointerTy() && "Only pointers have alignments"); + return getParent()->getParamAlignment(getArgNo()+1); + +} + +/// hasNestAttr - Return true if this argument has the nest attribute on +/// it in its containing function. +bool Argument::hasNestAttr() const { + if (!getType()->isPointerTy()) return false; + return getParent()->getAttributes(). + hasAttribute(getArgNo()+1, Attribute::Nest); +} + +/// hasNoAliasAttr - Return true if this argument has the noalias attribute on +/// it in its containing function. +bool Argument::hasNoAliasAttr() const { + if (!getType()->isPointerTy()) return false; + return getParent()->getAttributes(). + hasAttribute(getArgNo()+1, Attribute::NoAlias); +} + +/// hasNoCaptureAttr - Return true if this argument has the nocapture attribute +/// on it in its containing function. +bool Argument::hasNoCaptureAttr() const { + if (!getType()->isPointerTy()) return false; + return getParent()->getAttributes(). + hasAttribute(getArgNo()+1, Attribute::NoCapture); +} + +/// hasSRetAttr - Return true if this argument has the sret attribute on +/// it in its containing function. +bool Argument::hasStructRetAttr() const { + if (!getType()->isPointerTy()) return false; + if (this != getParent()->arg_begin()) + return false; // StructRet param must be first param + return getParent()->getAttributes(). + hasAttribute(1, Attribute::StructRet); +} + +/// addAttr - Add attributes to an argument. +void Argument::addAttr(AttributeSet AS) { + assert(AS.getNumSlots() <= 1 && + "Trying to add more than one attribute set to an argument!"); + AttrBuilder B(AS, AS.getSlotIndex(0)); + getParent()->addAttributes(getArgNo() + 1, + AttributeSet::get(Parent->getContext(), + getArgNo() + 1, B)); +} + +/// removeAttr - Remove attributes from an argument. 
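Seen from a consumer, the Argument predicates above are thin wrappers over the parent function's attribute set at index getArgNo()+1; a sketch that uses only calls shown in this file:

#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative sketch: summarize the pointer-argument attributes of F.
static void describeArgs(const llvm::Function &F) {
  using namespace llvm;
  for (Function::const_arg_iterator AI = F.arg_begin(), AE = F.arg_end();
       AI != AE; ++AI) {
    outs() << "arg #" << AI->getArgNo();
    if (AI->hasByValAttr())
      outs() << " byval (align " << AI->getParamAlignment() << ")";
    if (AI->hasNestAttr())
      outs() << " nest";
    if (AI->hasNoAliasAttr())
      outs() << " noalias";
    if (AI->hasStructRetAttr())
      outs() << " sret";
    outs() << "\n";
  }
}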
+void Argument::removeAttr(AttributeSet AS) { + assert(AS.getNumSlots() <= 1 && + "Trying to remove more than one attribute set from an argument!"); + AttrBuilder B(AS, AS.getSlotIndex(0)); + getParent()->removeAttributes(getArgNo() + 1, + AttributeSet::get(Parent->getContext(), + getArgNo() + 1, B)); +} + +//===----------------------------------------------------------------------===// +// Helper Methods in Function +//===----------------------------------------------------------------------===// + +LLVMContext &Function::getContext() const { + return getType()->getContext(); +} + +FunctionType *Function::getFunctionType() const { + return cast<FunctionType>(getType()->getElementType()); +} + +bool Function::isVarArg() const { + return getFunctionType()->isVarArg(); +} + +Type *Function::getReturnType() const { + return getFunctionType()->getReturnType(); +} + +void Function::removeFromParent() { + getParent()->getFunctionList().remove(this); +} + +void Function::eraseFromParent() { + getParent()->getFunctionList().erase(this); +} + +//===----------------------------------------------------------------------===// +// Function Implementation +//===----------------------------------------------------------------------===// + +Function::Function(FunctionType *Ty, LinkageTypes Linkage, + const Twine &name, Module *ParentModule) + : GlobalValue(PointerType::getUnqual(Ty), + Value::FunctionVal, 0, 0, Linkage, name) { + assert(FunctionType::isValidReturnType(getReturnType()) && + "invalid return type"); + SymTab = new ValueSymbolTable(); + + // If the function has arguments, mark them as lazily built. + if (Ty->getNumParams()) + setValueSubclassData(1); // Set the "has lazy arguments" bit. + + // Make sure that we get added to a function + LeakDetector::addGarbageObject(this); + + if (ParentModule) + ParentModule->getFunctionList().push_back(this); + + // Ensure intrinsics have the right parameter attributes. + if (unsigned IID = getIntrinsicID()) + setAttributes(Intrinsic::getAttributes(getContext(), Intrinsic::ID(IID))); + +} + +Function::~Function() { + dropAllReferences(); // After this it is safe to delete instructions. + + // Delete all of the method arguments and unlink from symbol table... + ArgumentList.clear(); + delete SymTab; + + // Remove the function from the on-the-side GC table. + clearGC(); + + // Remove the intrinsicID from the Cache. + if(getValueName() && isIntrinsic()) + getContext().pImpl->IntrinsicIDCache.erase(this); +} + +void Function::BuildLazyArguments() const { + // Create the arguments vector, all arguments start out unnamed. + FunctionType *FT = getFunctionType(); + for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { + assert(!FT->getParamType(i)->isVoidTy() && + "Cannot have void typed arguments!"); + ArgumentList.push_back(new Argument(FT->getParamType(i))); + } + + // Clear the lazy arguments bit. + unsigned SDC = getSubclassDataFromValue(); + const_cast<Function*>(this)->setValueSubclassData(SDC &= ~1); +} + +size_t Function::arg_size() const { + return getFunctionType()->getNumParams(); +} +bool Function::arg_empty() const { + return getFunctionType()->getNumParams() == 0; +} + +void Function::setParent(Module *parent) { + if (getParent()) + LeakDetector::addGarbageObject(this); + Parent = parent; + if (getParent()) + LeakDetector::removeGarbageObject(this); +} + +// dropAllReferences() - This function causes all the subinstructions to "let +// go" of all references that they are maintaining. 
This allows one to +// 'delete' a whole class at a time, even though there may be circular +// references... first all references are dropped, and all use counts go to +// zero. Then everything is deleted for real. Note that no operations are +// valid on an object that has "dropped all references", except operator +// delete. +// +void Function::dropAllReferences() { + for (iterator I = begin(), E = end(); I != E; ++I) + I->dropAllReferences(); + + // Delete all basic blocks. They are now unused, except possibly by + // blockaddresses, but BasicBlock's destructor takes care of those. + while (!BasicBlocks.empty()) + BasicBlocks.begin()->eraseFromParent(); +} + +void Function::addAttribute(unsigned i, Attribute::AttrKind attr) { + AttributeSet PAL = getAttributes(); + PAL = PAL.addAttribute(getContext(), i, attr); + setAttributes(PAL); +} + +void Function::addAttributes(unsigned i, AttributeSet attrs) { + AttributeSet PAL = getAttributes(); + PAL = PAL.addAttributes(getContext(), i, attrs); + setAttributes(PAL); +} + +void Function::removeAttributes(unsigned i, AttributeSet attrs) { + AttributeSet PAL = getAttributes(); + PAL = PAL.removeAttributes(getContext(), i, attrs); + setAttributes(PAL); +} + +// Maintain the GC name for each function in an on-the-side table. This saves +// allocating an additional word in Function for programs which do not use GC +// (i.e., most programs) at the cost of increased overhead for clients which do +// use GC. +static DenseMap<const Function*,PooledStringPtr> *GCNames; +static StringPool *GCNamePool; +static ManagedStatic<sys::SmartRWMutex<true> > GCLock; + +bool Function::hasGC() const { + sys::SmartScopedReader<true> Reader(*GCLock); + return GCNames && GCNames->count(this); +} + +const char *Function::getGC() const { + assert(hasGC() && "Function has no collector"); + sys::SmartScopedReader<true> Reader(*GCLock); + return *(*GCNames)[this]; +} + +void Function::setGC(const char *Str) { + sys::SmartScopedWriter<true> Writer(*GCLock); + if (!GCNamePool) + GCNamePool = new StringPool(); + if (!GCNames) + GCNames = new DenseMap<const Function*,PooledStringPtr>(); + (*GCNames)[this] = GCNamePool->intern(Str); +} + +void Function::clearGC() { + sys::SmartScopedWriter<true> Writer(*GCLock); + if (GCNames) { + GCNames->erase(this); + if (GCNames->empty()) { + delete GCNames; + GCNames = 0; + if (GCNamePool->empty()) { + delete GCNamePool; + GCNamePool = 0; + } + } + } +} + +/// copyAttributesFrom - copy all additional attributes (those not needed to +/// create a Function) from the Function Src to this one. +void Function::copyAttributesFrom(const GlobalValue *Src) { + assert(isa<Function>(Src) && "Expected a Function!"); + GlobalValue::copyAttributesFrom(Src); + const Function *SrcF = cast<Function>(Src); + setCallingConv(SrcF->getCallingConv()); + setAttributes(SrcF->getAttributes()); + if (SrcF->hasGC()) + setGC(SrcF->getGC()); + else + clearGC(); +} + +/// getIntrinsicID - This method returns the ID number of the specified +/// function, or Intrinsic::not_intrinsic if the function is not an +/// intrinsic, or if the pointer is null. This value is always defined to be +/// zero to allow easy checking for whether a function is intrinsic or not. The +/// particular intrinsic functions which correspond to this value are defined in +/// llvm/Intrinsics.h. Results are cached in the LLVM context, subsequent +/// requests for the same ID return results much faster from the cache. 
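The GC-name handling above trades a word in every Function for a shared, lock-protected side table; a minimal usage sketch (the strategy name "shadow-stack" is only an illustrative string):

#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative sketch: the accessors above hide the side table completely.
static void tagForGC(llvm::Function &F) {
  if (!F.hasGC())
    F.setGC("shadow-stack");
  llvm::outs() << F.getName() << " uses GC '" << F.getGC() << "'\n";
}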
+/// +unsigned Function::getIntrinsicID() const { + const ValueName *ValName = this->getValueName(); + if (!ValName || !isIntrinsic()) + return 0; + + LLVMContextImpl::IntrinsicIDCacheTy &IntrinsicIDCache = + getContext().pImpl->IntrinsicIDCache; + if(!IntrinsicIDCache.count(this)) { + unsigned Id = lookupIntrinsicID(); + IntrinsicIDCache[this]=Id; + return Id; + } + return IntrinsicIDCache[this]; +} + +/// This private method does the actual lookup of an intrinsic ID when the query +/// could not be answered from the cache. +unsigned Function::lookupIntrinsicID() const { + const ValueName *ValName = this->getValueName(); + unsigned Len = ValName->getKeyLength(); + const char *Name = ValName->getKeyData(); + +#define GET_FUNCTION_RECOGNIZER +#include "llvm/IR/Intrinsics.gen" +#undef GET_FUNCTION_RECOGNIZER + + return 0; +} + +std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) { + assert(id < num_intrinsics && "Invalid intrinsic ID!"); + static const char * const Table[] = { + "not_intrinsic", +#define GET_INTRINSIC_NAME_TABLE +#include "llvm/IR/Intrinsics.gen" +#undef GET_INTRINSIC_NAME_TABLE + }; + if (Tys.empty()) + return Table[id]; + std::string Result(Table[id]); + for (unsigned i = 0; i < Tys.size(); ++i) { + if (PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) { + Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) + + EVT::getEVT(PTyp->getElementType()).getEVTString(); + } + else if (Tys[i]) + Result += "." + EVT::getEVT(Tys[i]).getEVTString(); + } + return Result; +} + + +/// IIT_Info - These are enumerators that describe the entries returned by the +/// getIntrinsicInfoTableEntries function. +/// +/// NOTE: This must be kept in synch with the copy in TblGen/IntrinsicEmitter! +enum IIT_Info { + // Common values should be encoded with 0-15. + IIT_Done = 0, + IIT_I1 = 1, + IIT_I8 = 2, + IIT_I16 = 3, + IIT_I32 = 4, + IIT_I64 = 5, + IIT_F16 = 6, + IIT_F32 = 7, + IIT_F64 = 8, + IIT_V2 = 9, + IIT_V4 = 10, + IIT_V8 = 11, + IIT_V16 = 12, + IIT_V32 = 13, + IIT_PTR = 14, + IIT_ARG = 15, + + // Values from 16+ are only encodable with the inefficient encoding. 
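As a concrete instance of the getName() mangling above (the choice of llvm.ctlz is an illustrative assumption; any overloaded intrinsic is suffixed the same way, each overloaded type contributing "." plus its EVT string):

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <string>

// Illustrative sketch: overloading llvm.ctlz on i32 is expected to yield
// the mangled name "llvm.ctlz.i32" under the suffix scheme above.
static std::string ctlzNameFor32Bit(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  return llvm::Intrinsic::getName(llvm::Intrinsic::ctlz, I32);
}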
+ IIT_MMX = 16, + IIT_METADATA = 17, + IIT_EMPTYSTRUCT = 18, + IIT_STRUCT2 = 19, + IIT_STRUCT3 = 20, + IIT_STRUCT4 = 21, + IIT_STRUCT5 = 22, + IIT_EXTEND_VEC_ARG = 23, + IIT_TRUNC_VEC_ARG = 24, + IIT_ANYPTR = 25 +}; + + +static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos, + SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) { + IIT_Info Info = IIT_Info(Infos[NextElt++]); + unsigned StructElts = 2; + using namespace Intrinsic; + + switch (Info) { + case IIT_Done: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0)); + return; + case IIT_MMX: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0)); + return; + case IIT_METADATA: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0)); + return; + case IIT_F16: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0)); + return; + case IIT_F32: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0)); + return; + case IIT_F64: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0)); + return; + case IIT_I1: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1)); + return; + case IIT_I8: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8)); + return; + case IIT_I16: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer,16)); + return; + case IIT_I32: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32)); + return; + case IIT_I64: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64)); + return; + case IIT_V2: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 2)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_V4: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 4)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_V8: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 8)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_V16: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 16)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_V32: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 32)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_PTR: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0)); + DecodeIITType(NextElt, Infos, OutputTable); + return; + case IIT_ANYPTR: { // [ANYPTR addrspace, subtype] + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, + Infos[NextElt++])); + DecodeIITType(NextElt, Infos, OutputTable); + return; + } + case IIT_ARG: { + unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]); + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo)); + return; + } + case IIT_EXTEND_VEC_ARG: { + unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]); + OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendVecArgument, + ArgInfo)); + return; + } + case IIT_TRUNC_VEC_ARG: { + unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]); + OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncVecArgument, + ArgInfo)); + return; + } + case IIT_EMPTYSTRUCT: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0)); + return; + case IIT_STRUCT5: ++StructElts; // FALL THROUGH. + case IIT_STRUCT4: ++StructElts; // FALL THROUGH. + case IIT_STRUCT3: ++StructElts; // FALL THROUGH. 
+ case IIT_STRUCT2: { + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts)); + + for (unsigned i = 0; i != StructElts; ++i) + DecodeIITType(NextElt, Infos, OutputTable); + return; + } + } + llvm_unreachable("unhandled"); +} + + +#define GET_INTRINSIC_GENERATOR_GLOBAL +#include "llvm/IR/Intrinsics.gen" +#undef GET_INTRINSIC_GENERATOR_GLOBAL + +void Intrinsic::getIntrinsicInfoTableEntries(ID id, + SmallVectorImpl<IITDescriptor> &T){ + // Check to see if the intrinsic's type was expressible by the table. + unsigned TableVal = IIT_Table[id-1]; + + // Decode the TableVal into an array of IITValues. + SmallVector<unsigned char, 8> IITValues; + ArrayRef<unsigned char> IITEntries; + unsigned NextElt = 0; + if ((TableVal >> 31) != 0) { + // This is an offset into the IIT_LongEncodingTable. + IITEntries = IIT_LongEncodingTable; + + // Strip sentinel bit. + NextElt = (TableVal << 1) >> 1; + } else { + // Decode the TableVal into an array of IITValues. If the entry was encoded + // into a single word in the table itself, decode it now. + do { + IITValues.push_back(TableVal & 0xF); + TableVal >>= 4; + } while (TableVal); + + IITEntries = IITValues; + NextElt = 0; + } + + // Okay, decode the table into the output vector of IITDescriptors. + DecodeIITType(NextElt, IITEntries, T); + while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0) + DecodeIITType(NextElt, IITEntries, T); +} + + +static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos, + ArrayRef<Type*> Tys, LLVMContext &Context) { + using namespace Intrinsic; + IITDescriptor D = Infos.front(); + Infos = Infos.slice(1); + + switch (D.Kind) { + case IITDescriptor::Void: return Type::getVoidTy(Context); + case IITDescriptor::MMX: return Type::getX86_MMXTy(Context); + case IITDescriptor::Metadata: return Type::getMetadataTy(Context); + case IITDescriptor::Half: return Type::getHalfTy(Context); + case IITDescriptor::Float: return Type::getFloatTy(Context); + case IITDescriptor::Double: return Type::getDoubleTy(Context); + + case IITDescriptor::Integer: + return IntegerType::get(Context, D.Integer_Width); + case IITDescriptor::Vector: + return VectorType::get(DecodeFixedType(Infos, Tys, Context),D.Vector_Width); + case IITDescriptor::Pointer: + return PointerType::get(DecodeFixedType(Infos, Tys, Context), + D.Pointer_AddressSpace); + case IITDescriptor::Struct: { + Type *Elts[5]; + assert(D.Struct_NumElements <= 5 && "Can't handle this yet"); + for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i) + Elts[i] = DecodeFixedType(Infos, Tys, Context); + return StructType::get(Context, ArrayRef<Type*>(Elts,D.Struct_NumElements)); + } + + case IITDescriptor::Argument: + return Tys[D.getArgumentNumber()]; + case IITDescriptor::ExtendVecArgument: + return VectorType::getExtendedElementVectorType(cast<VectorType>( + Tys[D.getArgumentNumber()])); + + case IITDescriptor::TruncVecArgument: + return VectorType::getTruncatedElementVectorType(cast<VectorType>( + Tys[D.getArgumentNumber()])); + } + llvm_unreachable("unhandled"); +} + + + +FunctionType *Intrinsic::getType(LLVMContext &Context, + ID id, ArrayRef<Type*> Tys) { + SmallVector<IITDescriptor, 8> Table; + getIntrinsicInfoTableEntries(id, Table); + + ArrayRef<IITDescriptor> TableRef = Table; + Type *ResultTy = DecodeFixedType(TableRef, Tys, Context); + + SmallVector<Type*, 8> ArgTys; + while (!TableRef.empty()) + ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context)); + + return FunctionType::get(ResultTy, ArgTys, false); +} + +bool 
Intrinsic::isOverloaded(ID id) { +#define GET_INTRINSIC_OVERLOAD_TABLE +#include "llvm/IR/Intrinsics.gen" +#undef GET_INTRINSIC_OVERLOAD_TABLE +} + +/// This defines the "Intrinsic::getAttributes(ID id)" method. +#define GET_INTRINSIC_ATTRIBUTES +#include "llvm/IR/Intrinsics.gen" +#undef GET_INTRINSIC_ATTRIBUTES + +Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) { + // There can never be multiple globals with the same name of different types, + // because intrinsics must be a specific type. + return + cast<Function>(M->getOrInsertFunction(getName(id, Tys), + getType(M->getContext(), id, Tys))); +} + +// This defines the "Intrinsic::getIntrinsicForGCCBuiltin()" method. +#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN +#include "llvm/IR/Intrinsics.gen" +#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN + +/// hasAddressTaken - returns true if there are any uses of this function +/// other than direct calls or invokes to it. +bool Function::hasAddressTaken(const User* *PutOffender) const { + for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) { + const User *U = *I; + if (isa<BlockAddress>(U)) + continue; + if (!isa<CallInst>(U) && !isa<InvokeInst>(U)) + return PutOffender ? (*PutOffender = U, true) : true; + ImmutableCallSite CS(cast<Instruction>(U)); + if (!CS.isCallee(I)) + return PutOffender ? (*PutOffender = U, true) : true; + } + return false; +} + +bool Function::isDefTriviallyDead() const { + // Check the linkage + if (!hasLinkOnceLinkage() && !hasLocalLinkage() && + !hasAvailableExternallyLinkage()) + return false; + + // Check if the function is used by anything other than a blockaddress. + for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) + if (!isa<BlockAddress>(*I)) + return false; + + return true; +} + +/// callsFunctionThatReturnsTwice - Return true if the function has a call to +/// setjmp or other function that gcc recognizes as "returning twice". +bool Function::callsFunctionThatReturnsTwice() const { + for (const_inst_iterator + I = inst_begin(this), E = inst_end(this); I != E; ++I) { + const CallInst* callInst = dyn_cast<CallInst>(&*I); + if (!callInst) + continue; + if (callInst->canReturnTwice()) + return true; + } + + return false; +} + diff --git a/lib/IR/GCOV.cpp b/lib/IR/GCOV.cpp new file mode 100644 index 0000000000..ea2f0a6d55 --- /dev/null +++ b/lib/IR/GCOV.cpp @@ -0,0 +1,283 @@ +//===- GCOVr.cpp - LLVM coverage tool -------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// GCOV implements the interface to read and write coverage files that use +// 'gcov' format. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/GCOV.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/MemoryObject.h" +#include "llvm/Support/system_error.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// GCOVFile implementation. + +/// ~GCOVFile - Delete GCOVFile and its content. +GCOVFile::~GCOVFile() { + DeleteContainerPointers(Functions); +} + +/// isGCDAFile - Return true if Format identifies a .gcda file. 
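Two short sketches of entry points defined above: Intrinsic::getDeclaration(), which combines the name mangling and signature reconstruction into a module-level declaration, and Function::hasAddressTaken(), which reports escaping uses. The intrinsic choice and the diagnostic text are illustrative:

#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative sketch: declare (or fetch) the i32 overload of llvm.ctlz.
static llvm::Function *declareCtlz32(llvm::Module &M) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(M.getContext());
  return llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::ctlz, I32);
}

// Illustrative sketch: report functions whose address escapes, i.e. that
// have a use which is neither a direct call/invoke nor a blockaddress.
static void reportAddressTaken(const llvm::Module &M) {
  for (llvm::Module::const_iterator F = M.begin(), E = M.end(); F != E; ++F)
    if (F->hasAddressTaken())
      llvm::errs() << F->getName() << " has its address taken\n";
}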
+static bool isGCDAFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404; +} + +/// isGCNOFile - Return true if Format identifies a .gcno file. +static bool isGCNOFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404; +} + +/// read - Read GCOV buffer. +bool GCOVFile::read(GCOVBuffer &Buffer) { + GCOV::GCOVFormat Format = Buffer.readGCOVFormat(); + if (Format == GCOV::InvalidGCOV) + return false; + + unsigned i = 0; + while (1) { + GCOVFunction *GFun = NULL; + if (isGCDAFile(Format)) { + // Use existing function while reading .gcda file. + assert(i < Functions.size() && ".gcda data does not match .gcno data"); + GFun = Functions[i]; + } else if (isGCNOFile(Format)){ + GFun = new GCOVFunction(); + Functions.push_back(GFun); + } + if (!GFun || !GFun->read(Buffer, Format)) + break; + ++i; + } + return true; +} + +/// dump - Dump GCOVFile content on standard out for debugging purposes. +void GCOVFile::dump() { + for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(), + E = Functions.end(); I != E; ++I) + (*I)->dump(); +} + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVFile::collectLineCounts(FileInfo &FI) { + for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(), + E = Functions.end(); I != E; ++I) + (*I)->collectLineCounts(FI); + FI.print(); +} + +//===----------------------------------------------------------------------===// +// GCOVFunction implementation. + +/// ~GCOVFunction - Delete GCOVFunction and its content. +GCOVFunction::~GCOVFunction() { + DeleteContainerPointers(Blocks); +} + +/// read - Read a aunction from the buffer. Return false if buffer cursor +/// does not point to a function tag. +bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) { + if (!Buff.readFunctionTag()) + return false; + + Buff.readInt(); // Function header length + Ident = Buff.readInt(); + Buff.readInt(); // Checksum #1 + if (Format != GCOV::GCNO_402) + Buff.readInt(); // Checksum #2 + + Name = Buff.readString(); + if (Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404) + Filename = Buff.readString(); + + if (Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404) { + Buff.readArcTag(); + uint32_t Count = Buff.readInt() / 2; + for (unsigned i = 0, e = Count; i != e; ++i) { + Blocks[i]->addCount(Buff.readInt64()); + } + return true; + } + + LineNumber = Buff.readInt(); + + // read blocks. + bool BlockTagFound = Buff.readBlockTag(); + (void)BlockTagFound; + assert(BlockTagFound && "Block Tag not found!"); + uint32_t BlockCount = Buff.readInt(); + for (int i = 0, e = BlockCount; i != e; ++i) { + Buff.readInt(); // Block flags; + Blocks.push_back(new GCOVBlock(i)); + } + + // read edges. + while (Buff.readEdgeTag()) { + uint32_t EdgeCount = (Buff.readInt() - 1) / 2; + uint32_t BlockNo = Buff.readInt(); + assert(BlockNo < BlockCount && "Unexpected Block number!"); + for (int i = 0, e = EdgeCount; i != e; ++i) { + Blocks[BlockNo]->addEdge(Buff.readInt()); + Buff.readInt(); // Edge flag + } + } + + // read line table. 
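A sketch of driving GCOVFile::read above roughly the way the llvm-cov tool does: the .gcno buffer supplies functions, blocks and lines, the matching .gcda buffer overlays the counts, and collectLineCounts() folds everything into a FileInfo. The file-name parameters and the GCOVBuffer construction are assumptions based on this API:

#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/GCOV.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/system_error.h"

// Illustrative sketch: read structure from .gcno, counts from .gcda, then
// print the annotated sources (collectLineCounts also calls FI.print()).
static bool readCoverage(llvm::GCOVFile &GF, const char *GCNOPath,
                         const char *GCDAPath) {
  using namespace llvm;
  OwningPtr<MemoryBuffer> GCNO, GCDA;
  if (MemoryBuffer::getFileOrSTDIN(GCNOPath, GCNO))
    return false;
  GCOVBuffer GCNOBuf(GCNO.take());
  if (!GF.read(GCNOBuf))
    return false;
  if (MemoryBuffer::getFileOrSTDIN(GCDAPath, GCDA))
    return false;
  GCOVBuffer GCDABuf(GCDA.take());
  if (!GF.read(GCDABuf))
    return false;
  FileInfo FI;
  GF.collectLineCounts(FI);
  return true;
}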
+ while (Buff.readLineTag()) { + uint32_t LineTableLength = Buff.readInt(); + uint32_t Size = Buff.getCursor() + LineTableLength*4; + uint32_t BlockNo = Buff.readInt(); + assert(BlockNo < BlockCount && "Unexpected Block number!"); + GCOVBlock *Block = Blocks[BlockNo]; + Buff.readInt(); // flag + while (Buff.getCursor() != (Size - 4)) { + StringRef Filename = Buff.readString(); + if (Buff.getCursor() == (Size - 4)) break; + while (uint32_t L = Buff.readInt()) + Block->addLine(Filename, L); + } + Buff.readInt(); // flag + } + return true; +} + +/// dump - Dump GCOVFunction content on standard out for debugging purposes. +void GCOVFunction::dump() { + outs() << "===== " << Name << " @ " << Filename << ":" << LineNumber << "\n"; + for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(), + E = Blocks.end(); I != E; ++I) + (*I)->dump(); +} + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVFunction::collectLineCounts(FileInfo &FI) { + for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(), + E = Blocks.end(); I != E; ++I) + (*I)->collectLineCounts(FI); +} + +//===----------------------------------------------------------------------===// +// GCOVBlock implementation. + +/// ~GCOVBlock - Delete GCOVBlock and its content. +GCOVBlock::~GCOVBlock() { + Edges.clear(); + DeleteContainerSeconds(Lines); +} + +void GCOVBlock::addLine(StringRef Filename, uint32_t LineNo) { + GCOVLines *&LinesForFile = Lines[Filename]; + if (!LinesForFile) + LinesForFile = new GCOVLines(); + LinesForFile->add(LineNo); +} + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVBlock::collectLineCounts(FileInfo &FI) { + for (StringMap<GCOVLines *>::iterator I = Lines.begin(), + E = Lines.end(); I != E; ++I) + I->second->collectLineCounts(FI, I->first(), Counter); +} + +/// dump - Dump GCOVBlock content on standard out for debugging purposes. +void GCOVBlock::dump() { + outs() << "Block : " << Number << " Counter : " << Counter << "\n"; + if (!Edges.empty()) { + outs() << "\tEdges : "; + for (SmallVector<uint32_t, 16>::iterator I = Edges.begin(), E = Edges.end(); + I != E; ++I) + outs() << (*I) << ","; + outs() << "\n"; + } + if (!Lines.empty()) { + outs() << "\tLines : "; + for (StringMap<GCOVLines *>::iterator LI = Lines.begin(), + LE = Lines.end(); LI != LE; ++LI) { + outs() << LI->first() << " -> "; + LI->second->dump(); + outs() << "\n"; + } + } +} + +//===----------------------------------------------------------------------===// +// GCOVLines implementation. + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVLines::collectLineCounts(FileInfo &FI, StringRef Filename, + uint32_t Count) { + for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(), + E = Lines.end(); I != E; ++I) + FI.addLineCount(Filename, *I, Count); +} + +/// dump - Dump GCOVLines content on standard out for debugging purposes. +void GCOVLines::dump() { + for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(), + E = Lines.end(); I != E; ++I) + outs() << (*I) << ","; +} + +//===----------------------------------------------------------------------===// +// FileInfo implementation. + +/// addLineCount - Add line count for the given line number in a file. 
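+/// +/// A minimal usage sketch (hypothetical client code, not part of this file; +/// the file name, line number and count are made up): +/// FileInfo FI; +/// FI.addLineCount("foo.c", 10, 3); // line 10 of foo.c executed 3 times +/// FI.print(); // write the annotated listing to stdout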
+void FileInfo::addLineCount(StringRef Filename, uint32_t Line, uint32_t Count) { + if (LineInfo.find(Filename) == LineInfo.end()) { + OwningPtr<MemoryBuffer> Buff; + if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) { + errs() << Filename << ": " << ec.message() << "\n"; + return; + } + StringRef AllLines = Buff.take()->getBuffer(); + LineCounts L(AllLines.count('\n')+2); + L[Line-1] = Count; + LineInfo[Filename] = L; + return; + } + LineCounts &L = LineInfo[Filename]; + L[Line-1] = Count; +} + +/// print - Print source files with collected line count information. +void FileInfo::print() { + for (StringMap<LineCounts>::iterator I = LineInfo.begin(), E = LineInfo.end(); + I != E; ++I) { + StringRef Filename = I->first(); + outs() << Filename << "\n"; + LineCounts &L = LineInfo[Filename]; + OwningPtr<MemoryBuffer> Buff; + if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) { + errs() << Filename << ": " << ec.message() << "\n"; + return; + } + StringRef AllLines = Buff.take()->getBuffer(); + for (unsigned i = 0, e = L.size(); i != e; ++i) { + if (L[i]) + outs() << L[i] << ":\t"; + else + outs() << " :\t"; + std::pair<StringRef, StringRef> P = AllLines.split('\n'); + if (AllLines != P.first) + outs() << P.first; + outs() << "\n"; + AllLines = P.second; + } + } +} + + diff --git a/lib/IR/GVMaterializer.cpp b/lib/IR/GVMaterializer.cpp new file mode 100644 index 0000000000..f77a9c908d --- /dev/null +++ b/lib/IR/GVMaterializer.cpp @@ -0,0 +1,18 @@ +//===-- GVMaterializer.cpp - Base implementation for GV materializers -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Minimal implementation of the abstract interface for materializing +// GlobalValues. +// +//===----------------------------------------------------------------------===// + +#include "llvm/GVMaterializer.h" +using namespace llvm; + +GVMaterializer::~GVMaterializer() {} diff --git a/lib/IR/Globals.cpp b/lib/IR/Globals.cpp new file mode 100644 index 0000000000..6d547f3edf --- /dev/null +++ b/lib/IR/Globals.cpp @@ -0,0 +1,269 @@ +//===-- Globals.cpp - Implement the GlobalValue & GlobalVariable class ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the GlobalValue & GlobalVariable classes for the IR +// library. 
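+// +// A minimal usage sketch (hypothetical client code, not part of this file; +// M, Int32Ty and Init are assumed to be an existing Module, an i32 type and a +// constant initializer): +// new GlobalVariable(M, Int32Ty, /*isConstant=*/true, +// GlobalValue::InternalLinkage, Init, "answer");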
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/GlobalValue.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LeakDetector.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// GlobalValue Class +//===----------------------------------------------------------------------===// + +bool GlobalValue::isMaterializable() const { + return getParent() && getParent()->isMaterializable(this); +} +bool GlobalValue::isDematerializable() const { + return getParent() && getParent()->isDematerializable(this); +} +bool GlobalValue::Materialize(std::string *ErrInfo) { + return getParent()->Materialize(this, ErrInfo); +} +void GlobalValue::Dematerialize() { + getParent()->Dematerialize(this); +} + +/// Override destroyConstant to make sure it doesn't get called on +/// GlobalValue's because they shouldn't be treated like other constants. +void GlobalValue::destroyConstant() { + llvm_unreachable("You can't GV->destroyConstant()!"); +} + +/// copyAttributesFrom - copy all additional attributes (those not needed to +/// create a GlobalValue) from the GlobalValue Src to this one. +void GlobalValue::copyAttributesFrom(const GlobalValue *Src) { + setAlignment(Src->getAlignment()); + setSection(Src->getSection()); + setVisibility(Src->getVisibility()); + setUnnamedAddr(Src->hasUnnamedAddr()); +} + +void GlobalValue::setAlignment(unsigned Align) { + assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); + assert(Align <= MaximumAlignment && + "Alignment is greater than MaximumAlignment!"); + Alignment = Log2_32(Align) + 1; + assert(getAlignment() == Align && "Alignment representation error!"); +} + +bool GlobalValue::isDeclaration() const { + // Globals are definitions if they have an initializer. + if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this)) + return GV->getNumOperands() == 0; + + // Functions are definitions if they have a body. + if (const Function *F = dyn_cast<Function>(this)) + return F->empty(); + + // Aliases are always definitions. 
+ assert(isa<GlobalAlias>(this)); + return false; +} + +//===----------------------------------------------------------------------===// +// GlobalVariable Implementation +//===----------------------------------------------------------------------===// + +GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link, + Constant *InitVal, + const Twine &Name, ThreadLocalMode TLMode, + unsigned AddressSpace, + bool isExternallyInitialized) + : GlobalValue(PointerType::get(Ty, AddressSpace), + Value::GlobalVariableVal, + OperandTraits<GlobalVariable>::op_begin(this), + InitVal != 0, Link, Name), + isConstantGlobal(constant), threadLocalMode(TLMode), + isExternallyInitializedConstant(isExternallyInitialized) { + if (InitVal) { + assert(InitVal->getType() == Ty && + "Initializer should be the same type as the GlobalVariable!"); + Op<0>() = InitVal; + } + + LeakDetector::addGarbageObject(this); +} + +GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant, + LinkageTypes Link, Constant *InitVal, + const Twine &Name, + GlobalVariable *Before, ThreadLocalMode TLMode, + unsigned AddressSpace, + bool isExternallyInitialized) + : GlobalValue(PointerType::get(Ty, AddressSpace), + Value::GlobalVariableVal, + OperandTraits<GlobalVariable>::op_begin(this), + InitVal != 0, Link, Name), + isConstantGlobal(constant), threadLocalMode(TLMode), + isExternallyInitializedConstant(isExternallyInitialized) { + if (InitVal) { + assert(InitVal->getType() == Ty && + "Initializer should be the same type as the GlobalVariable!"); + Op<0>() = InitVal; + } + + LeakDetector::addGarbageObject(this); + + if (Before) + Before->getParent()->getGlobalList().insert(Before, this); + else + M.getGlobalList().push_back(this); +} + +void GlobalVariable::setParent(Module *parent) { + if (getParent()) + LeakDetector::addGarbageObject(this); + Parent = parent; + if (getParent()) + LeakDetector::removeGarbageObject(this); +} + +void GlobalVariable::removeFromParent() { + getParent()->getGlobalList().remove(this); +} + +void GlobalVariable::eraseFromParent() { + getParent()->getGlobalList().erase(this); +} + +void GlobalVariable::replaceUsesOfWithOnConstant(Value *From, Value *To, + Use *U) { + // If you call this, then you better know this GVar has a constant + // initializer worth replacing. Enforce that here. + assert(getNumOperands() == 1 && + "Attempt to replace uses of Constants on a GVar with no initializer"); + + // And, since you know it has an initializer, the From value better be + // the initializer :) + assert(getOperand(0) == From && + "Attempt to replace wrong constant initializer in GVar"); + + // And, you better have a constant for the replacement value + assert(isa<Constant>(To) && + "Attempt to replace GVar initializer with non-constant"); + + // Okay, preconditions out of the way, replace the constant initializer. + this->setOperand(0, cast<Constant>(To)); +} + +void GlobalVariable::setInitializer(Constant *InitVal) { + if (InitVal == 0) { + if (hasInitializer()) { + Op<0>().set(0); + NumOperands = 0; + } + } else { + assert(InitVal->getType() == getType()->getElementType() && + "Initializer type must match GlobalVariable type"); + if (!hasInitializer()) + NumOperands = 1; + Op<0>().set(InitVal); + } +} + +/// copyAttributesFrom - copy all additional attributes (those not needed to +/// create a GlobalVariable) from the GlobalVariable Src to this one. 
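+/// +/// For example, a pass that clones a global into another module would +/// typically call NewGV->copyAttributesFrom(OldGV) (hypothetical names) so the +/// copy keeps the original's section, alignment, visibility, unnamed_addr and +/// thread-local mode.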
+void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) { + assert(isa<GlobalVariable>(Src) && "Expected a GlobalVariable!"); + GlobalValue::copyAttributesFrom(Src); + const GlobalVariable *SrcVar = cast<GlobalVariable>(Src); + setThreadLocal(SrcVar->isThreadLocal()); +} + + +//===----------------------------------------------------------------------===// +// GlobalAlias Implementation +//===----------------------------------------------------------------------===// + +GlobalAlias::GlobalAlias(Type *Ty, LinkageTypes Link, + const Twine &Name, Constant* aliasee, + Module *ParentModule) + : GlobalValue(Ty, Value::GlobalAliasVal, &Op<0>(), 1, Link, Name) { + LeakDetector::addGarbageObject(this); + + if (aliasee) + assert(aliasee->getType() == Ty && "Alias and aliasee types should match!"); + Op<0>() = aliasee; + + if (ParentModule) + ParentModule->getAliasList().push_back(this); +} + +void GlobalAlias::setParent(Module *parent) { + if (getParent()) + LeakDetector::addGarbageObject(this); + Parent = parent; + if (getParent()) + LeakDetector::removeGarbageObject(this); +} + +void GlobalAlias::removeFromParent() { + getParent()->getAliasList().remove(this); +} + +void GlobalAlias::eraseFromParent() { + getParent()->getAliasList().erase(this); +} + +void GlobalAlias::setAliasee(Constant *Aliasee) { + assert((!Aliasee || Aliasee->getType() == getType()) && + "Alias and aliasee types should match!"); + + setOperand(0, Aliasee); +} + +const GlobalValue *GlobalAlias::getAliasedGlobal() const { + const Constant *C = getAliasee(); + if (C == 0) return 0; + + if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) + return GV; + + const ConstantExpr *CE = cast<ConstantExpr>(C); + assert((CE->getOpcode() == Instruction::BitCast || + CE->getOpcode() == Instruction::GetElementPtr) && + "Unsupported aliasee"); + + return cast<GlobalValue>(CE->getOperand(0)); +} + +const GlobalValue *GlobalAlias::resolveAliasedGlobal(bool stopOnWeak) const { + SmallPtrSet<const GlobalValue*, 3> Visited; + + // Check if we need to stop early. + if (stopOnWeak && mayBeOverridden()) + return this; + + const GlobalValue *GV = getAliasedGlobal(); + Visited.insert(GV); + + // Iterate over aliasing chain, stopping on weak alias if necessary. + while (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) { + if (stopOnWeak && GA->mayBeOverridden()) + break; + + GV = GA->getAliasedGlobal(); + + if (!Visited.insert(GV)) + return 0; + } + + return GV; +} diff --git a/lib/IR/IRBuilder.cpp b/lib/IR/IRBuilder.cpp new file mode 100644 index 0000000000..435e54f0ea --- /dev/null +++ b/lib/IR/IRBuilder.cpp @@ -0,0 +1,153 @@ +//===---- IRBuilder.cpp - Builder for LLVM Instrs -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the IRBuilder class, which is used as a convenient way +// to create LLVM instructions with a consistent and simplified interface. 
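+// +// A minimal usage sketch (hypothetical client code, not part of this file; +// BB, X and Y are assumed to be an existing BasicBlock and two i32 values): +// IRBuilder<> B(BB); // insertion point at the end of BB +// Value *Sum = B.CreateAdd(X, Y, "sum"); // emit an i32 'add' named %sum +// B.CreateRet(Sum); // emit 'ret i32 %sum'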
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/LLVMContext.h" +using namespace llvm; + +/// CreateGlobalString - Make a new global variable with an initializer that +/// has array of i8 type filled in with the nul terminated string value +/// specified. If Name is specified, it is the name of the global variable +/// created. +Value *IRBuilderBase::CreateGlobalString(StringRef Str, const Twine &Name) { + Constant *StrConstant = ConstantDataArray::getString(Context, Str); + Module &M = *BB->getParent()->getParent(); + GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(), + true, GlobalValue::PrivateLinkage, + StrConstant); + GV->setName(Name); + GV->setUnnamedAddr(true); + return GV; +} + +Type *IRBuilderBase::getCurrentFunctionReturnType() const { + assert(BB && BB->getParent() && "No current function!"); + return BB->getParent()->getReturnType(); +} + +Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) { + PointerType *PT = cast<PointerType>(Ptr->getType()); + if (PT->getElementType()->isIntegerTy(8)) + return Ptr; + + // Otherwise, we need to insert a bitcast. + PT = getInt8PtrTy(PT->getAddressSpace()); + BitCastInst *BCI = new BitCastInst(Ptr, PT, ""); + BB->getInstList().insert(InsertPt, BCI); + SetInstDebugLocation(BCI); + return BCI; +} + +static CallInst *createCallHelper(Value *Callee, ArrayRef<Value *> Ops, + IRBuilderBase *Builder) { + CallInst *CI = CallInst::Create(Callee, Ops, ""); + Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(),CI); + Builder->SetInstDebugLocation(CI); + return CI; +} + +CallInst *IRBuilderBase:: +CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align, + bool isVolatile, MDNode *TBAATag) { + Ptr = getCastedInt8PtrValue(Ptr); + Value *Ops[] = { Ptr, Val, Size, getInt32(Align), getInt1(isVolatile) }; + Type *Tys[] = { Ptr->getType(), Size->getType() }; + Module *M = BB->getParent()->getParent(); + Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); + + CallInst *CI = createCallHelper(TheFn, Ops, this); + + // Set the TBAA info if present. + if (TBAATag) + CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); + + return CI; +} + +CallInst *IRBuilderBase:: +CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, + bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag) { + Dst = getCastedInt8PtrValue(Dst); + Src = getCastedInt8PtrValue(Src); + + Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) }; + Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() }; + Module *M = BB->getParent()->getParent(); + Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys); + + CallInst *CI = createCallHelper(TheFn, Ops, this); + + // Set the TBAA info if present. + if (TBAATag) + CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); + + // Set the TBAA Struct info if present. 
+ if (TBAAStructTag) + CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag); + + return CI; +} + +CallInst *IRBuilderBase:: +CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align, + bool isVolatile, MDNode *TBAATag) { + Dst = getCastedInt8PtrValue(Dst); + Src = getCastedInt8PtrValue(Src); + + Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) }; + Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() }; + Module *M = BB->getParent()->getParent(); + Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys); + + CallInst *CI = createCallHelper(TheFn, Ops, this); + + // Set the TBAA info if present. + if (TBAATag) + CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); + + return CI; +} + +CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) { + assert(isa<PointerType>(Ptr->getType()) && + "lifetime.start only applies to pointers."); + Ptr = getCastedInt8PtrValue(Ptr); + if (!Size) + Size = getInt64(-1); + else + assert(Size->getType() == getInt64Ty() && + "lifetime.start requires the size to be an i64"); + Value *Ops[] = { Size, Ptr }; + Module *M = BB->getParent()->getParent(); + Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_start); + return createCallHelper(TheFn, Ops, this); +} + +CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) { + assert(isa<PointerType>(Ptr->getType()) && + "lifetime.end only applies to pointers."); + Ptr = getCastedInt8PtrValue(Ptr); + if (!Size) + Size = getInt64(-1); + else + assert(Size->getType() == getInt64Ty() && + "lifetime.end requires the size to be an i64"); + Value *Ops[] = { Size, Ptr }; + Module *M = BB->getParent()->getParent(); + Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_end); + return createCallHelper(TheFn, Ops, this); +} diff --git a/lib/IR/InlineAsm.cpp b/lib/IR/InlineAsm.cpp new file mode 100644 index 0000000000..9f2a9fea4b --- /dev/null +++ b/lib/IR/InlineAsm.cpp @@ -0,0 +1,295 @@ +//===-- InlineAsm.cpp - Implement the InlineAsm class ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the InlineAsm class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/InlineAsm.h" +#include "ConstantsContext.h" +#include "LLVMContextImpl.h" +#include "llvm/IR/DerivedTypes.h" +#include <algorithm> +#include <cctype> +using namespace llvm; + +// Implement the first virtual method in this class in this file so the +// InlineAsm vtable is emitted here. 
+InlineAsm::~InlineAsm() { +} + + +InlineAsm *InlineAsm::get(FunctionType *Ty, StringRef AsmString, + StringRef Constraints, bool hasSideEffects, + bool isAlignStack, AsmDialect asmDialect) { + InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack, + asmDialect); + LLVMContextImpl *pImpl = Ty->getContext().pImpl; + return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(Ty), Key); +} + +InlineAsm::InlineAsm(PointerType *Ty, const std::string &asmString, + const std::string &constraints, bool hasSideEffects, + bool isAlignStack, AsmDialect asmDialect) + : Value(Ty, Value::InlineAsmVal), + AsmString(asmString), Constraints(constraints), + HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack), + Dialect(asmDialect) { + + // Do various checks on the constraint string and type. + assert(Verify(getFunctionType(), constraints) && + "Function type not legal for constraints!"); +} + +void InlineAsm::destroyConstant() { + getType()->getContext().pImpl->InlineAsms.remove(this); + delete this; +} + +FunctionType *InlineAsm::getFunctionType() const { + return cast<FunctionType>(getType()->getElementType()); +} + +///Default constructor. +InlineAsm::ConstraintInfo::ConstraintInfo() : + Type(isInput), isEarlyClobber(false), + MatchingInput(-1), isCommutative(false), + isIndirect(false), isMultipleAlternative(false), + currentAlternativeIndex(0) { +} + +/// Copy constructor. +InlineAsm::ConstraintInfo::ConstraintInfo(const ConstraintInfo &other) : + Type(other.Type), isEarlyClobber(other.isEarlyClobber), + MatchingInput(other.MatchingInput), isCommutative(other.isCommutative), + isIndirect(other.isIndirect), Codes(other.Codes), + isMultipleAlternative(other.isMultipleAlternative), + multipleAlternatives(other.multipleAlternatives), + currentAlternativeIndex(other.currentAlternativeIndex) { +} + +/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the +/// fields in this structure. If the constraint string is not understood, +/// return true, otherwise return false. +bool InlineAsm::ConstraintInfo::Parse(StringRef Str, + InlineAsm::ConstraintInfoVector &ConstraintsSoFar) { + StringRef::iterator I = Str.begin(), E = Str.end(); + unsigned multipleAlternativeCount = Str.count('|') + 1; + unsigned multipleAlternativeIndex = 0; + ConstraintCodeVector *pCodes = &Codes; + + // Initialize + isMultipleAlternative = (multipleAlternativeCount > 1 ? true : false); + if (isMultipleAlternative) { + multipleAlternatives.resize(multipleAlternativeCount); + pCodes = &multipleAlternatives[0].Codes; + } + Type = isInput; + isEarlyClobber = false; + MatchingInput = -1; + isCommutative = false; + isIndirect = false; + currentAlternativeIndex = 0; + + // Parse prefixes. + if (*I == '~') { + Type = isClobber; + ++I; + } else if (*I == '=') { + ++I; + Type = isOutput; + } + + if (*I == '*') { + isIndirect = true; + ++I; + } + + if (I == E) return true; // Just a prefix, like "==" or "~". + + // Parse the modifiers. + bool DoneWithModifiers = false; + while (!DoneWithModifiers) { + switch (*I) { + default: + DoneWithModifiers = true; + break; + case '&': // Early clobber. + if (Type != isOutput || // Cannot early clobber anything but output. + isEarlyClobber) // Reject &&&&&& + return true; + isEarlyClobber = true; + break; + case '%': // Commutative. + if (Type == isClobber || // Cannot commute clobbers. + isCommutative) // Reject %%%%% + return true; + isCommutative = true; + break; + case '#': // Comment. + case '*': // Register preferencing. + return true; // Not supported. 
+ } + + if (!DoneWithModifiers) { + ++I; + if (I == E) return true; // Just prefixes and modifiers! + } + } + + // Parse the various constraints. + while (I != E) { + if (*I == '{') { // Physical register reference. + // Find the end of the register name. + StringRef::iterator ConstraintEnd = std::find(I+1, E, '}'); + if (ConstraintEnd == E) return true; // "{foo" + pCodes->push_back(std::string(I, ConstraintEnd+1)); + I = ConstraintEnd+1; + } else if (isdigit(static_cast<unsigned char>(*I))) { // Matching Constraint + // Maximal munch numbers. + StringRef::iterator NumStart = I; + while (I != E && isdigit(static_cast<unsigned char>(*I))) + ++I; + pCodes->push_back(std::string(NumStart, I)); + unsigned N = atoi(pCodes->back().c_str()); + // Check that this is a valid matching constraint! + if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput|| + Type != isInput) + return true; // Invalid constraint number. + + // If Operand N already has a matching input, reject this. An output + // can't be constrained to the same value as multiple inputs. + if (isMultipleAlternative) { + InlineAsm::SubConstraintInfo &scInfo = + ConstraintsSoFar[N].multipleAlternatives[multipleAlternativeIndex]; + if (scInfo.MatchingInput != -1) + return true; + // Note that operand #n has a matching input. + scInfo.MatchingInput = ConstraintsSoFar.size(); + } else { + if (ConstraintsSoFar[N].hasMatchingInput()) + return true; + // Note that operand #n has a matching input. + ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size(); + } + } else if (*I == '|') { + multipleAlternativeIndex++; + pCodes = &multipleAlternatives[multipleAlternativeIndex].Codes; + ++I; + } else if (*I == '^') { + // Multi-letter constraint + // FIXME: For now assuming these are 2-character constraints. + pCodes->push_back(std::string(I+1, I+3)); + I += 3; + } else { + // Single letter constraint. + pCodes->push_back(std::string(I, I+1)); + ++I; + } + } + + return false; +} + +/// selectAlternative - Point this constraint to the alternative constraint +/// indicated by the index. +void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) { + if (index < multipleAlternatives.size()) { + currentAlternativeIndex = index; + InlineAsm::SubConstraintInfo &scInfo = + multipleAlternatives[currentAlternativeIndex]; + MatchingInput = scInfo.MatchingInput; + Codes = scInfo.Codes; + } +} + +InlineAsm::ConstraintInfoVector +InlineAsm::ParseConstraints(StringRef Constraints) { + ConstraintInfoVector Result; + + // Scan the constraints string. + for (StringRef::iterator I = Constraints.begin(), + E = Constraints.end(); I != E; ) { + ConstraintInfo Info; + + // Find the end of this constraint. + StringRef::iterator ConstraintEnd = std::find(I, E, ','); + + if (ConstraintEnd == I || // Empty constraint like ",," + Info.Parse(StringRef(I, ConstraintEnd-I), Result)) { + Result.clear(); // Erroneous constraint? + break; + } + + Result.push_back(Info); + + // ConstraintEnd may be either the next comma or the end of the string. In + // the former case, we skip the comma. + I = ConstraintEnd; + if (I != E) { + ++I; + if (I == E) { Result.clear(); break; } // don't allow "xyz," + } + } + + return Result; +} + +/// Verify - Verify that the specified constraint string is reasonable for the +/// specified function type, and otherwise validate the constraint string. 
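+/// +/// For example (illustrative only), for a function type 'i32 (i32)' the +/// constraint string "=r,r" (one register output followed by one register +/// input) verifies, whereas "r,=r" is rejected because outputs must precede +/// inputs, as is any constraint string for a vararg function type.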
+bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) { + if (Ty->isVarArg()) return false; + + ConstraintInfoVector Constraints = ParseConstraints(ConstStr); + + // Error parsing constraints. + if (Constraints.empty() && !ConstStr.empty()) return false; + + unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0; + unsigned NumIndirect = 0; + + for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { + switch (Constraints[i].Type) { + case InlineAsm::isOutput: + if ((NumInputs-NumIndirect) != 0 || NumClobbers != 0) + return false; // outputs before inputs and clobbers. + if (!Constraints[i].isIndirect) { + ++NumOutputs; + break; + } + ++NumIndirect; + // FALLTHROUGH for Indirect Outputs. + case InlineAsm::isInput: + if (NumClobbers) return false; // inputs before clobbers. + ++NumInputs; + break; + case InlineAsm::isClobber: + ++NumClobbers; + break; + } + } + + switch (NumOutputs) { + case 0: + if (!Ty->getReturnType()->isVoidTy()) return false; + break; + case 1: + if (Ty->getReturnType()->isStructTy()) return false; + break; + default: + StructType *STy = dyn_cast<StructType>(Ty->getReturnType()); + if (STy == 0 || STy->getNumElements() != NumOutputs) + return false; + break; + } + + if (Ty->getNumParams() != NumInputs) return false; + return true; +} + diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp new file mode 100644 index 0000000000..2b5a0b39c3 --- /dev/null +++ b/lib/IR/Instruction.cpp @@ -0,0 +1,555 @@ +//===-- Instruction.cpp - Implement the Instruction class -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Instruction class for the IR library. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/LeakDetector.h" +using namespace llvm; + +Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps, + Instruction *InsertBefore) + : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) { + // Make sure that we get added to a basicblock + LeakDetector::addGarbageObject(this); + + // If requested, insert this instruction into a basic block... + if (InsertBefore) { + assert(InsertBefore->getParent() && + "Instruction to insert before is not in a basic block!"); + InsertBefore->getParent()->getInstList().insert(InsertBefore, this); + } +} + +Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps, + BasicBlock *InsertAtEnd) + : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) { + // Make sure that we get added to a basicblock + LeakDetector::addGarbageObject(this); + + // append this instruction into the basic block + assert(InsertAtEnd && "Basic block to append to may not be NULL!"); + InsertAtEnd->getInstList().push_back(this); +} + + +// Out of line virtual method, so the vtable, etc has a home. 
+Instruction::~Instruction() { + assert(Parent == 0 && "Instruction still linked in the program!"); + if (hasMetadataHashEntry()) + clearMetadataHashEntries(); +} + + +void Instruction::setParent(BasicBlock *P) { + if (getParent()) { + if (!P) LeakDetector::addGarbageObject(this); + } else { + if (P) LeakDetector::removeGarbageObject(this); + } + + Parent = P; +} + +void Instruction::removeFromParent() { + getParent()->getInstList().remove(this); +} + +void Instruction::eraseFromParent() { + getParent()->getInstList().erase(this); +} + +/// insertBefore - Insert an unlinked instruction into a basic block +/// immediately before the specified instruction. +void Instruction::insertBefore(Instruction *InsertPos) { + InsertPos->getParent()->getInstList().insert(InsertPos, this); +} + +/// insertAfter - Insert an unlinked instruction into a basic block +/// immediately after the specified instruction. +void Instruction::insertAfter(Instruction *InsertPos) { + InsertPos->getParent()->getInstList().insertAfter(InsertPos, this); +} + +/// moveBefore - Unlink this instruction from its current basic block and +/// insert it into the basic block that MovePos lives in, right before +/// MovePos. +void Instruction::moveBefore(Instruction *MovePos) { + MovePos->getParent()->getInstList().splice(MovePos,getParent()->getInstList(), + this); +} + +/// Set or clear the unsafe-algebra flag on this instruction, which must be an +/// operator which supports this flag. See LangRef.html for the meaning of this +/// flag. +void Instruction::setHasUnsafeAlgebra(bool B) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B); +} + +/// Set or clear the NoNaNs flag on this instruction, which must be an operator +/// which supports this flag. See LangRef.html for the meaning of this flag. +void Instruction::setHasNoNaNs(bool B) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setHasNoNaNs(B); +} + +/// Set or clear the no-infs flag on this instruction, which must be an operator +/// which supports this flag. See LangRef.html for the meaning of this flag. +void Instruction::setHasNoInfs(bool B) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setHasNoInfs(B); +} + +/// Set or clear the no-signed-zeros flag on this instruction, which must be an +/// operator which supports this flag. See LangRef.html for the meaning of this +/// flag. +void Instruction::setHasNoSignedZeros(bool B) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setHasNoSignedZeros(B); +} + +/// Set or clear the allow-reciprocal flag on this instruction, which must be an +/// operator which supports this flag. See LangRef.html for the meaning of this +/// flag. +void Instruction::setHasAllowReciprocal(bool B) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setHasAllowReciprocal(B); +} + +/// Convenience function for setting all the fast-math flags on this +/// instruction, which must be an operator which supports these flags. See +/// LangRef.html for the meaning of these flags. +void Instruction::setFastMathFlags(FastMathFlags FMF) { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + cast<FPMathOperator>(this)->setFastMathFlags(FMF); +} + +/// Determine whether the unsafe-algebra flag is set.
+bool Instruction::hasUnsafeAlgebra() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->hasUnsafeAlgebra(); +} + +/// Determine whether the no-NaNs flag is set. +bool Instruction::hasNoNaNs() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->hasNoNaNs(); +} + +/// Determine whether the no-infs flag is set. +bool Instruction::hasNoInfs() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->hasNoInfs(); +} + +/// Determine whether the no-signed-zeros flag is set. +bool Instruction::hasNoSignedZeros() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->hasNoSignedZeros(); +} + +/// Determine whether the allow-reciprocal flag is set. +bool Instruction::hasAllowReciprocal() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->hasAllowReciprocal(); +} + +/// Convenience function for getting all the fast-math flags, which must be an +/// operator which supports these flags. See LangRef.html for the meaning of +/// these flags. +FastMathFlags Instruction::getFastMathFlags() const { + assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); + return cast<FPMathOperator>(this)->getFastMathFlags(); +} + +/// Copy I's fast-math flags +void Instruction::copyFastMathFlags(const Instruction *I) { + setFastMathFlags(I->getFastMathFlags()); +} + + +const char *Instruction::getOpcodeName(unsigned OpCode) { + switch (OpCode) { + // Terminators + case Ret: return "ret"; + case Br: return "br"; + case Switch: return "switch"; + case IndirectBr: return "indirectbr"; + case Invoke: return "invoke"; + case Resume: return "resume"; + case Unreachable: return "unreachable"; + + // Standard binary operators... + case Add: return "add"; + case FAdd: return "fadd"; + case Sub: return "sub"; + case FSub: return "fsub"; + case Mul: return "mul"; + case FMul: return "fmul"; + case UDiv: return "udiv"; + case SDiv: return "sdiv"; + case FDiv: return "fdiv"; + case URem: return "urem"; + case SRem: return "srem"; + case FRem: return "frem"; + + // Logical operators... + case And: return "and"; + case Or : return "or"; + case Xor: return "xor"; + + // Memory instructions... + case Alloca: return "alloca"; + case Load: return "load"; + case Store: return "store"; + case AtomicCmpXchg: return "cmpxchg"; + case AtomicRMW: return "atomicrmw"; + case Fence: return "fence"; + case GetElementPtr: return "getelementptr"; + + // Convert instructions... + case Trunc: return "trunc"; + case ZExt: return "zext"; + case SExt: return "sext"; + case FPTrunc: return "fptrunc"; + case FPExt: return "fpext"; + case FPToUI: return "fptoui"; + case FPToSI: return "fptosi"; + case UIToFP: return "uitofp"; + case SIToFP: return "sitofp"; + case IntToPtr: return "inttoptr"; + case PtrToInt: return "ptrtoint"; + case BitCast: return "bitcast"; + + // Other instructions...
+ case ICmp: return "icmp"; + case FCmp: return "fcmp"; + case PHI: return "phi"; + case Select: return "select"; + case Call: return "call"; + case Shl: return "shl"; + case LShr: return "lshr"; + case AShr: return "ashr"; + case VAArg: return "va_arg"; + case ExtractElement: return "extractelement"; + case InsertElement: return "insertelement"; + case ShuffleVector: return "shufflevector"; + case ExtractValue: return "extractvalue"; + case InsertValue: return "insertvalue"; + case LandingPad: return "landingpad"; + + default: return "<Invalid operator> "; + } +} + +/// isIdenticalTo - Return true if the specified instruction is exactly +/// identical to the current one. This means that all operands match and any +/// extra information (e.g. load is volatile) agree. +bool Instruction::isIdenticalTo(const Instruction *I) const { + return isIdenticalToWhenDefined(I) && + SubclassOptionalData == I->SubclassOptionalData; +} + +/// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it +/// ignores the SubclassOptionalData flags, which specify conditions +/// under which the instruction's result is undefined. +bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const { + if (getOpcode() != I->getOpcode() || + getNumOperands() != I->getNumOperands() || + getType() != I->getType()) + return false; + + // We have two instructions of identical opcode and #operands. Check to see + // if all operands are the same. + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (getOperand(i) != I->getOperand(i)) + return false; + + // Check special state that is a part of some instructions. + if (const LoadInst *LI = dyn_cast<LoadInst>(this)) + return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() && + LI->getAlignment() == cast<LoadInst>(I)->getAlignment() && + LI->getOrdering() == cast<LoadInst>(I)->getOrdering() && + LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope(); + if (const StoreInst *SI = dyn_cast<StoreInst>(this)) + return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() && + SI->getAlignment() == cast<StoreInst>(I)->getAlignment() && + SI->getOrdering() == cast<StoreInst>(I)->getOrdering() && + SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope(); + if (const CmpInst *CI = dyn_cast<CmpInst>(this)) + return CI->getPredicate() == cast<CmpInst>(I)->getPredicate(); + if (const CallInst *CI = dyn_cast<CallInst>(this)) + return CI->isTailCall() == cast<CallInst>(I)->isTailCall() && + CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() && + CI->getAttributes() == cast<CallInst>(I)->getAttributes(); + if (const InvokeInst *CI = dyn_cast<InvokeInst>(this)) + return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() && + CI->getAttributes() == cast<InvokeInst>(I)->getAttributes(); + if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this)) + return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices(); + if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this)) + return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices(); + if (const FenceInst *FI = dyn_cast<FenceInst>(this)) + return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() && + FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope(); + if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this)) + return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() && + CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() && + CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope(); + if (const
AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this)) + return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() && + RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() && + RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() && + RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope(); + if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) { + const PHINode *otherPHI = cast<PHINode>(I); + for (unsigned i = 0, e = thisPHI->getNumOperands(); i != e; ++i) { + if (thisPHI->getIncomingBlock(i) != otherPHI->getIncomingBlock(i)) + return false; + } + return true; + } + return true; +} + +// isSameOperationAs +// This should be kept in sync with isEquivalentOperation in +// lib/Transforms/IPO/MergeFunctions.cpp. +bool Instruction::isSameOperationAs(const Instruction *I, + unsigned flags) const { + bool IgnoreAlignment = flags & CompareIgnoringAlignment; + bool UseScalarTypes = flags & CompareUsingScalarTypes; + + if (getOpcode() != I->getOpcode() || + getNumOperands() != I->getNumOperands() || + (UseScalarTypes ? + getType()->getScalarType() != I->getType()->getScalarType() : + getType() != I->getType())) + return false; + + // We have two instructions of identical opcode and #operands. Check to see + // if all operands are the same type + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (UseScalarTypes ? + getOperand(i)->getType()->getScalarType() != + I->getOperand(i)->getType()->getScalarType() : + getOperand(i)->getType() != I->getOperand(i)->getType()) + return false; + + // Check special state that is a part of some instructions. + if (const LoadInst *LI = dyn_cast<LoadInst>(this)) + return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() && + (LI->getAlignment() == cast<LoadInst>(I)->getAlignment() || + IgnoreAlignment) && + LI->getOrdering() == cast<LoadInst>(I)->getOrdering() && + LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope(); + if (const StoreInst *SI = dyn_cast<StoreInst>(this)) + return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() && + (SI->getAlignment() == cast<StoreInst>(I)->getAlignment() || + IgnoreAlignment) && + SI->getOrdering() == cast<StoreInst>(I)->getOrdering() && + SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope(); + if (const CmpInst *CI = dyn_cast<CmpInst>(this)) + return CI->getPredicate() == cast<CmpInst>(I)->getPredicate(); + if (const CallInst *CI = dyn_cast<CallInst>(this)) + return CI->isTailCall() == cast<CallInst>(I)->isTailCall() && + CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() && + CI->getAttributes() == cast<CallInst>(I)->getAttributes(); + if (const InvokeInst *CI = dyn_cast<InvokeInst>(this)) + return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() && + CI->getAttributes() == + cast<InvokeInst>(I)->getAttributes(); + if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this)) + return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices(); + if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this)) + return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices(); + if (const FenceInst *FI = dyn_cast<FenceInst>(this)) + return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() && + FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope(); + if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this)) + return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() && + CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() && + CXI->getSynchScope() == 
cast<AtomicCmpXchgInst>(I)->getSynchScope(); + if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this)) + return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() && + RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() && + RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() && + RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope(); + + return true; +} + +/// isUsedOutsideOfBlock - Return true if there are any uses of I outside of the +/// specified block. Note that PHI nodes are considered to evaluate their +/// operands in the corresponding predecessor block. +bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const { + for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { + // PHI nodes use values in the corresponding predecessor block. For other + // instructions, just check to see whether the parent of the use matches up. + const User *U = *UI; + const PHINode *PN = dyn_cast<PHINode>(U); + if (PN == 0) { + if (cast<Instruction>(U)->getParent() != BB) + return true; + continue; + } + + if (PN->getIncomingBlock(UI) != BB) + return true; + } + return false; +} + +/// mayReadFromMemory - Return true if this instruction may read memory. +/// +bool Instruction::mayReadFromMemory() const { + switch (getOpcode()) { + default: return false; + case Instruction::VAArg: + case Instruction::Load: + case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory + case Instruction::AtomicCmpXchg: + case Instruction::AtomicRMW: + return true; + case Instruction::Call: + return !cast<CallInst>(this)->doesNotAccessMemory(); + case Instruction::Invoke: + return !cast<InvokeInst>(this)->doesNotAccessMemory(); + case Instruction::Store: + return !cast<StoreInst>(this)->isUnordered(); + } +} + +/// mayWriteToMemory - Return true if this instruction may modify memory. +/// +bool Instruction::mayWriteToMemory() const { + switch (getOpcode()) { + default: return false; + case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory + case Instruction::Store: + case Instruction::VAArg: + case Instruction::AtomicCmpXchg: + case Instruction::AtomicRMW: + return true; + case Instruction::Call: + return !cast<CallInst>(this)->onlyReadsMemory(); + case Instruction::Invoke: + return !cast<InvokeInst>(this)->onlyReadsMemory(); + case Instruction::Load: + return !cast<LoadInst>(this)->isUnordered(); + } +} + +bool Instruction::mayThrow() const { + if (const CallInst *CI = dyn_cast<CallInst>(this)) + return !CI->doesNotThrow(); + return isa<ResumeInst>(this); +} + +bool Instruction::mayReturn() const { + if (const CallInst *CI = dyn_cast<CallInst>(this)) + return !CI->doesNotReturn(); + return true; +} + +/// isAssociative - Return true if the instruction is associative: +/// +/// Associative operators satisfy: x op (y op z) === (x op y) op z +/// +/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
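+/// +/// For example, '(a + b) + c' may be rewritten as 'a + (b + c)' for the +/// integer Add operator, while FAdd and FMul only reassociate when the +/// unsafe-algebra fast-math flag is set (see isAssociative() below).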
+/// +bool Instruction::isAssociative(unsigned Opcode) { + return Opcode == And || Opcode == Or || Opcode == Xor || + Opcode == Add || Opcode == Mul; +} + +bool Instruction::isAssociative() const { + unsigned Opcode = getOpcode(); + if (isAssociative(Opcode)) + return true; + + switch (Opcode) { + case FMul: + case FAdd: + return cast<FPMathOperator>(this)->hasUnsafeAlgebra(); + default: + return false; + } +} + +/// isCommutative - Return true if the instruction is commutative: +/// +/// Commutative operators satisfy: (x op y) === (y op x) +/// +/// In LLVM, these are the associative operators, plus SetEQ and SetNE, when +/// applied to any type. +/// +bool Instruction::isCommutative(unsigned op) { + switch (op) { + case Add: + case FAdd: + case Mul: + case FMul: + case And: + case Or: + case Xor: + return true; + default: + return false; + } +} + +/// isIdempotent - Return true if the instruction is idempotent: +/// +/// Idempotent operators satisfy: x op x === x +/// +/// In LLVM, the And and Or operators are idempotent. +/// +bool Instruction::isIdempotent(unsigned Opcode) { + return Opcode == And || Opcode == Or; +} + +/// isNilpotent - Return true if the instruction is nilpotent: +/// +/// Nilpotent operators satisfy: x op x === Id, +/// +/// where Id is the identity for the operator, i.e. a constant such that +/// x op Id === x and Id op x === x for all x. +/// +/// In LLVM, the Xor operator is nilpotent. +/// +bool Instruction::isNilpotent(unsigned Opcode) { + return Opcode == Xor; +} + +Instruction *Instruction::clone() const { + Instruction *New = clone_impl(); + New->SubclassOptionalData = SubclassOptionalData; + if (!hasMetadata()) + return New; + + // Otherwise, enumerate and copy over metadata from the old instruction to the + // new one. + SmallVector<std::pair<unsigned, MDNode*>, 4> TheMDs; + getAllMetadataOtherThanDebugLoc(TheMDs); + for (unsigned i = 0, e = TheMDs.size(); i != e; ++i) + New->setMetadata(TheMDs[i].first, TheMDs[i].second); + + New->setDebugLoc(getDebugLoc()); + return New; +} diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp new file mode 100644 index 0000000000..2e3a525826 --- /dev/null +++ b/lib/IR/Instructions.cpp @@ -0,0 +1,3553 @@ +//===-- Instructions.cpp - Implement the LLVM instructions ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements all of the non-inline methods for the LLVM instruction +// classes. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Instructions.h" +#include "LLVMContextImpl.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/ConstantRange.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// CallSite Class +//===----------------------------------------------------------------------===// + +User::op_iterator CallSite::getCallee() const { + Instruction *II(getInstruction()); + return isCall() + ? 
cast<CallInst>(II)->op_end() - 1 // Skip Callee + : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee +} + +//===----------------------------------------------------------------------===// +// TerminatorInst Class +//===----------------------------------------------------------------------===// + +// Out of line virtual method, so the vtable, etc has a home. +TerminatorInst::~TerminatorInst() { +} + +//===----------------------------------------------------------------------===// +// UnaryInstruction Class +//===----------------------------------------------------------------------===// + +// Out of line virtual method, so the vtable, etc has a home. +UnaryInstruction::~UnaryInstruction() { +} + +//===----------------------------------------------------------------------===// +// SelectInst Class +//===----------------------------------------------------------------------===// + +/// areInvalidOperands - Return a string if the specified operands are invalid +/// for a select operation, otherwise return null. +const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) { + if (Op1->getType() != Op2->getType()) + return "both values to select must have same type"; + + if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) { + // Vector select. + if (VT->getElementType() != Type::getInt1Ty(Op0->getContext())) + return "vector select condition element type must be i1"; + VectorType *ET = dyn_cast<VectorType>(Op1->getType()); + if (ET == 0) + return "selected values for vector select must be vectors"; + if (ET->getNumElements() != VT->getNumElements()) + return "vector select requires selected vectors to have " + "the same vector length as select condition"; + } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) { + return "select condition must be i1 or <n x i1>"; + } + return 0; +} + + +//===----------------------------------------------------------------------===// +// PHINode Class +//===----------------------------------------------------------------------===// + +PHINode::PHINode(const PHINode &PN) + : Instruction(PN.getType(), Instruction::PHI, + allocHungoffUses(PN.getNumOperands()), PN.getNumOperands()), + ReservedSpace(PN.getNumOperands()) { + std::copy(PN.op_begin(), PN.op_end(), op_begin()); + std::copy(PN.block_begin(), PN.block_end(), block_begin()); + SubclassOptionalData = PN.SubclassOptionalData; +} + +PHINode::~PHINode() { + dropHungoffUses(); +} + +Use *PHINode::allocHungoffUses(unsigned N) const { + // Allocate the array of Uses of the incoming values, followed by a pointer + // (with bottom bit set) to the User, followed by the array of pointers to + // the incoming basic blocks. + size_t size = N * sizeof(Use) + sizeof(Use::UserRef) + + N * sizeof(BasicBlock*); + Use *Begin = static_cast<Use*>(::operator new(size)); + Use *End = Begin + N; + (void) new(End) Use::UserRef(const_cast<PHINode*>(this), 1); + return Use::initTags(Begin, End); +} + +// removeIncomingValue - Remove an incoming value. This is useful if a +// predecessor basic block is deleted. +Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) { + Value *Removed = getIncomingValue(Idx); + + // Move everything after this operand down. + // + // FIXME: we could just swap with the end of the list, then erase. However, + // clients might not expect this to happen. The code as it is thrashes the + // use/def lists, which is kinda lame. 
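+ // Shift the remaining incoming values and their matching blocks left by one + // to close the gap, keeping the value and block arrays in sync.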
+ std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx); + std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx); + + // Nuke the last value. + Op<-1>().set(0); + --NumOperands; + + // If the PHI node is dead, because it has zero entries, nuke it now. + if (getNumOperands() == 0 && DeletePHIIfEmpty) { + // If anyone is using this PHI, make them use a dummy value instead... + replaceAllUsesWith(UndefValue::get(getType())); + eraseFromParent(); + } + return Removed; +} + +/// growOperands - grow operands - This grows the operand list in response +/// to a push_back style of operation. This grows the number of ops by 1.5 +/// times. +/// +void PHINode::growOperands() { + unsigned e = getNumOperands(); + unsigned NumOps = e + e / 2; + if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common. + + Use *OldOps = op_begin(); + BasicBlock **OldBlocks = block_begin(); + + ReservedSpace = NumOps; + OperandList = allocHungoffUses(ReservedSpace); + + std::copy(OldOps, OldOps + e, op_begin()); + std::copy(OldBlocks, OldBlocks + e, block_begin()); + + Use::zap(OldOps, OldOps + e, true); +} + +/// hasConstantValue - If the specified PHI node always merges together the same +/// value, return the value, otherwise return null. +Value *PHINode::hasConstantValue() const { + // Exploit the fact that phi nodes always have at least one entry. + Value *ConstantValue = getIncomingValue(0); + for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i) + if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) { + if (ConstantValue != this) + return 0; // Incoming values not all the same. + // The case where the first value is this PHI. + ConstantValue = getIncomingValue(i); + } + if (ConstantValue == this) + return UndefValue::get(getType()); + return ConstantValue; +} + +//===----------------------------------------------------------------------===// +// LandingPadInst Implementation +//===----------------------------------------------------------------------===// + +LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedValues, const Twine &NameStr, + Instruction *InsertBefore) + : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertBefore) { + init(PersonalityFn, 1 + NumReservedValues, NameStr); +} + +LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedValues, const Twine &NameStr, + BasicBlock *InsertAtEnd) + : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertAtEnd) { + init(PersonalityFn, 1 + NumReservedValues, NameStr); +} + +LandingPadInst::LandingPadInst(const LandingPadInst &LP) + : Instruction(LP.getType(), Instruction::LandingPad, + allocHungoffUses(LP.getNumOperands()), LP.getNumOperands()), + ReservedSpace(LP.getNumOperands()) { + Use *OL = OperandList, *InOL = LP.OperandList; + for (unsigned I = 0, E = ReservedSpace; I != E; ++I) + OL[I] = InOL[I]; + + setCleanup(LP.isCleanup()); +} + +LandingPadInst::~LandingPadInst() { + dropHungoffUses(); +} + +LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedClauses, + const Twine &NameStr, + Instruction *InsertBefore) { + return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr, + InsertBefore); +} + +LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedClauses, + const Twine &NameStr, + BasicBlock *InsertAtEnd) { + return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr, + InsertAtEnd); +} + +void 
LandingPadInst::init(Value *PersFn, unsigned NumReservedValues, + const Twine &NameStr) { + ReservedSpace = NumReservedValues; + NumOperands = 1; + OperandList = allocHungoffUses(ReservedSpace); + OperandList[0] = PersFn; + setName(NameStr); + setCleanup(false); +} + +/// growOperands - grow operands - This grows the operand list in response to a +/// push_back style of operation. This grows the number of ops by 2 times. +void LandingPadInst::growOperands(unsigned Size) { + unsigned e = getNumOperands(); + if (ReservedSpace >= e + Size) return; + ReservedSpace = (e + Size / 2) * 2; + + Use *NewOps = allocHungoffUses(ReservedSpace); + Use *OldOps = OperandList; + for (unsigned i = 0; i != e; ++i) + NewOps[i] = OldOps[i]; + + OperandList = NewOps; + Use::zap(OldOps, OldOps + e, true); +} + +void LandingPadInst::addClause(Value *Val) { + unsigned OpNo = getNumOperands(); + growOperands(1); + assert(OpNo < ReservedSpace && "Growing didn't work!"); + ++NumOperands; + OperandList[OpNo] = Val; +} + +//===----------------------------------------------------------------------===// +// CallInst Implementation +//===----------------------------------------------------------------------===// + +CallInst::~CallInst() { +} + +void CallInst::init(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr) { + assert(NumOperands == Args.size() + 1 && "NumOperands not set up?"); + Op<-1>() = Func; + +#ifndef NDEBUG + FunctionType *FTy = + cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType()); + + assert((Args.size() == FTy->getNumParams() || + (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && + "Calling a function with bad signature!"); + + for (unsigned i = 0; i != Args.size(); ++i) + assert((i >= FTy->getNumParams() || + FTy->getParamType(i) == Args[i]->getType()) && + "Calling a function with a bad signature!"); +#endif + + std::copy(Args.begin(), Args.end(), op_begin()); + setName(NameStr); +} + +void CallInst::init(Value *Func, const Twine &NameStr) { + assert(NumOperands == 1 && "NumOperands not set up?"); + Op<-1>() = Func; + +#ifndef NDEBUG + FunctionType *FTy = + cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType()); + + assert(FTy->getNumParams() == 0 && "Calling a function with bad signature"); +#endif + + setName(NameStr); +} + +CallInst::CallInst(Value *Func, const Twine &Name, + Instruction *InsertBefore) + : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Call, + OperandTraits<CallInst>::op_end(this) - 1, + 1, InsertBefore) { + init(Func, Name); +} + +CallInst::CallInst(Value *Func, const Twine &Name, + BasicBlock *InsertAtEnd) + : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Call, + OperandTraits<CallInst>::op_end(this) - 1, + 1, InsertAtEnd) { + init(Func, Name); +} + +CallInst::CallInst(const CallInst &CI) + : Instruction(CI.getType(), Instruction::Call, + OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(), + CI.getNumOperands()) { + setAttributes(CI.getAttributes()); + setTailCall(CI.isTailCall()); + setCallingConv(CI.getCallingConv()); + + std::copy(CI.op_begin(), CI.op_end(), op_begin()); + SubclassOptionalData = CI.SubclassOptionalData; +} + +void CallInst::addAttribute(unsigned i, Attribute::AttrKind attr) { + AttributeSet PAL = getAttributes(); + PAL = PAL.addAttribute(getContext(), i, attr); + setAttributes(PAL); +} + +void CallInst::removeAttribute(unsigned i, Attribute attr) { + 
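// Rebuild the attribute set without 'attr': wrap the single attribute in an
// AttrBuilder and ask AttributeSet to drop it from index 'i'.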
AttributeSet PAL = getAttributes(); + AttrBuilder B(attr); + LLVMContext &Context = getContext(); + PAL = PAL.removeAttributes(Context, i, + AttributeSet::get(Context, i, B)); + setAttributes(PAL); +} + +bool CallInst::hasFnAttr(Attribute::AttrKind A) const { + if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A); + return false; +} + +bool CallInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const { + if (AttributeList.hasAttribute(i, A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getAttributes().hasAttribute(i, A); + return false; +} + +/// IsConstantOne - Return true only if val is constant int 1 +static bool IsConstantOne(Value *val) { + assert(val && "IsConstantOne does not work with NULL val"); + return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne(); +} + +static Instruction *createMalloc(Instruction *InsertBefore, + BasicBlock *InsertAtEnd, Type *IntPtrTy, + Type *AllocTy, Value *AllocSize, + Value *ArraySize, Function *MallocF, + const Twine &Name) { + assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && + "createMalloc needs either InsertBefore or InsertAtEnd"); + + // malloc(type) becomes: + // bitcast (i8* malloc(typeSize)) to type* + // malloc(type, arraySize) becomes: + // bitcast (i8 *malloc(typeSize*arraySize)) to type* + if (!ArraySize) + ArraySize = ConstantInt::get(IntPtrTy, 1); + else if (ArraySize->getType() != IntPtrTy) { + if (InsertBefore) + ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false, + "", InsertBefore); + else + ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false, + "", InsertAtEnd); + } + + if (!IsConstantOne(ArraySize)) { + if (IsConstantOne(AllocSize)) { + AllocSize = ArraySize; // Operand * 1 = Operand + } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) { + Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy, + false /*ZExt*/); + // Malloc arg is constant product of type size and array size + AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize)); + } else { + // Multiply type size by the array size... + if (InsertBefore) + AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize, + "mallocsize", InsertBefore); + else + AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize, + "mallocsize", InsertAtEnd); + } + } + + assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size"); + // Create the call to Malloc. + BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd; + Module* M = BB->getParent()->getParent(); + Type *BPTy = Type::getInt8PtrTy(BB->getContext()); + Value *MallocFunc = MallocF; + if (!MallocFunc) + // prototype malloc as "void *malloc(size_t)" + MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, NULL); + PointerType *AllocPtrType = PointerType::getUnqual(AllocTy); + CallInst *MCall = NULL; + Instruction *Result = NULL; + if (InsertBefore) { + MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore); + Result = MCall; + if (Result->getType() != AllocPtrType) + // Create a cast instruction to convert to the right type... 
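// (Illustration with hypothetical names, assuming a 64-bit IntPtrTy: for
//  AllocTy == %struct.Foo and a non-constant array size %n, the sequence
//  built by this function is roughly
//    %mallocsize = mul i64 %n, %elementsize
//    %malloccall = tail call i8* @malloc(i64 %mallocsize)
//    %obj        = bitcast i8* %malloccall to %struct.Foo*
//  where the multiply is emitted only when the array size is not the
//  constant 1 and %elementsize stands for the caller-supplied AllocSize.)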
+ Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore); + } else { + MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall"); + Result = MCall; + if (Result->getType() != AllocPtrType) { + InsertAtEnd->getInstList().push_back(MCall); + // Create a cast instruction to convert to the right type... + Result = new BitCastInst(MCall, AllocPtrType, Name); + } + } + MCall->setTailCall(); + if (Function *F = dyn_cast<Function>(MallocFunc)) { + MCall->setCallingConv(F->getCallingConv()); + if (!F->doesNotAlias(0)) F->setDoesNotAlias(0); + } + assert(!MCall->getType()->isVoidTy() && "Malloc has void return type"); + + return Result; +} + +/// CreateMalloc - Generate the IR for a call to malloc: +/// 1. Compute the malloc call's argument as the specified type's size, +/// possibly multiplied by the array size if the array size is not +/// constant 1. +/// 2. Call malloc with that argument. +/// 3. Bitcast the result of the malloc call to the specified type. +Instruction *CallInst::CreateMalloc(Instruction *InsertBefore, + Type *IntPtrTy, Type *AllocTy, + Value *AllocSize, Value *ArraySize, + Function * MallocF, + const Twine &Name) { + return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize, + ArraySize, MallocF, Name); +} + +/// CreateMalloc - Generate the IR for a call to malloc: +/// 1. Compute the malloc call's argument as the specified type's size, +/// possibly multiplied by the array size if the array size is not +/// constant 1. +/// 2. Call malloc with that argument. +/// 3. Bitcast the result of the malloc call to the specified type. +/// Note: This function does not add the bitcast to the basic block, that is the +/// responsibility of the caller. +Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, + Type *IntPtrTy, Type *AllocTy, + Value *AllocSize, Value *ArraySize, + Function *MallocF, const Twine &Name) { + return createMalloc(NULL, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, + ArraySize, MallocF, Name); +} + +static Instruction* createFree(Value* Source, Instruction *InsertBefore, + BasicBlock *InsertAtEnd) { + assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && + "createFree needs either InsertBefore or InsertAtEnd"); + assert(Source->getType()->isPointerTy() && + "Can not free something of nonpointer type!"); + + BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd; + Module* M = BB->getParent()->getParent(); + + Type *VoidTy = Type::getVoidTy(M->getContext()); + Type *IntPtrTy = Type::getInt8PtrTy(M->getContext()); + // prototype free as "void free(void*)" + Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, NULL); + CallInst* Result = NULL; + Value *PtrCast = Source; + if (InsertBefore) { + if (Source->getType() != IntPtrTy) + PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore); + Result = CallInst::Create(FreeFunc, PtrCast, "", InsertBefore); + } else { + if (Source->getType() != IntPtrTy) + PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd); + Result = CallInst::Create(FreeFunc, PtrCast, ""); + } + Result->setTailCall(); + if (Function *F = dyn_cast<Function>(FreeFunc)) + Result->setCallingConv(F->getCallingConv()); + + return Result; +} + +/// CreateFree - Generate the IR for a call to the builtin free function. +Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) { + return createFree(Source, InsertBefore, NULL); +} + +/// CreateFree - Generate the IR for a call to the builtin free function. 
+/// Note: This function does not add the call to the basic block, that is the +/// responsibility of the caller. +Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) { + Instruction* FreeCall = createFree(Source, NULL, InsertAtEnd); + assert(FreeCall && "CreateFree did not create a CallInst"); + return FreeCall; +} + +//===----------------------------------------------------------------------===// +// InvokeInst Implementation +//===----------------------------------------------------------------------===// + +void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException, + ArrayRef<Value *> Args, const Twine &NameStr) { + assert(NumOperands == 3 + Args.size() && "NumOperands not set up?"); + Op<-3>() = Fn; + Op<-2>() = IfNormal; + Op<-1>() = IfException; + +#ifndef NDEBUG + FunctionType *FTy = + cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()); + + assert(((Args.size() == FTy->getNumParams()) || + (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && + "Invoking a function with bad signature"); + + for (unsigned i = 0, e = Args.size(); i != e; i++) + assert((i >= FTy->getNumParams() || + FTy->getParamType(i) == Args[i]->getType()) && + "Invoking a function with a bad signature!"); +#endif + + std::copy(Args.begin(), Args.end(), op_begin()); + setName(NameStr); +} + +InvokeInst::InvokeInst(const InvokeInst &II) + : TerminatorInst(II.getType(), Instruction::Invoke, + OperandTraits<InvokeInst>::op_end(this) + - II.getNumOperands(), + II.getNumOperands()) { + setAttributes(II.getAttributes()); + setCallingConv(II.getCallingConv()); + std::copy(II.op_begin(), II.op_end(), op_begin()); + SubclassOptionalData = II.SubclassOptionalData; +} + +BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const { + return getSuccessor(idx); +} +unsigned InvokeInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} +void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) { + return setSuccessor(idx, B); +} + +bool InvokeInst::hasFnAttr(Attribute::AttrKind A) const { + if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A); + return false; +} + +bool InvokeInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const { + if (AttributeList.hasAttribute(i, A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getAttributes().hasAttribute(i, A); + return false; +} + +void InvokeInst::addAttribute(unsigned i, Attribute::AttrKind attr) { + AttributeSet PAL = getAttributes(); + PAL = PAL.addAttribute(getContext(), i, attr); + setAttributes(PAL); +} + +void InvokeInst::removeAttribute(unsigned i, Attribute attr) { + AttributeSet PAL = getAttributes(); + AttrBuilder B(attr); + PAL = PAL.removeAttributes(getContext(), i, + AttributeSet::get(getContext(), i, B)); + setAttributes(PAL); +} + +LandingPadInst *InvokeInst::getLandingPadInst() const { + return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI()); +} + +//===----------------------------------------------------------------------===// +// ReturnInst Implementation +//===----------------------------------------------------------------------===// + +ReturnInst::ReturnInst(const ReturnInst &RI) + : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret, + OperandTraits<ReturnInst>::op_end(this) - + RI.getNumOperands(), + RI.getNumOperands()) { + if (RI.getNumOperands()) + Op<0>() = RI.Op<0>(); + 
SubclassOptionalData = RI.SubclassOptionalData; +} + +ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(C), Instruction::Ret, + OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal, + InsertBefore) { + if (retVal) + Op<0>() = retVal; +} +ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(C), Instruction::Ret, + OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal, + InsertAtEnd) { + if (retVal) + Op<0>() = retVal; +} +ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret, + OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) { +} + +unsigned ReturnInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} + +/// Out-of-line ReturnInst method, put here so the C++ compiler can choose to +/// emit the vtable for the class in this translation unit. +void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) { + llvm_unreachable("ReturnInst has no successors!"); +} + +BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const { + llvm_unreachable("ReturnInst has no successors!"); +} + +ReturnInst::~ReturnInst() { +} + +//===----------------------------------------------------------------------===// +// ResumeInst Implementation +//===----------------------------------------------------------------------===// + +ResumeInst::ResumeInst(const ResumeInst &RI) + : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume, + OperandTraits<ResumeInst>::op_begin(this), 1) { + Op<0>() = RI.Op<0>(); +} + +ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume, + OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) { + Op<0>() = Exn; +} + +ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume, + OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) { + Op<0>() = Exn; +} + +unsigned ResumeInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} + +void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) { + llvm_unreachable("ResumeInst has no successors!"); +} + +BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const { + llvm_unreachable("ResumeInst has no successors!"); +} + +//===----------------------------------------------------------------------===// +// UnreachableInst Implementation +//===----------------------------------------------------------------------===// + +UnreachableInst::UnreachableInst(LLVMContext &Context, + Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable, + 0, 0, InsertBefore) { +} +UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable, + 0, 0, InsertAtEnd) { +} + +unsigned UnreachableInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} + +void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) { + llvm_unreachable("UnreachableInst has no successors!"); +} + +BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const { + llvm_unreachable("UnreachableInst has no successors!"); +} + +//===----------------------------------------------------------------------===// +// BranchInst Implementation +//===----------------------------------------------------------------------===// 
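// A minimal usage sketch (hypothetical blocks and condition; the Create
// factories are declared in llvm/IR/Instructions.h rather than defined here):
//   BranchInst::Create(LoopBB, EntryBB);               // br label %loop
//   BranchInst::Create(ThenBB, ElseBB, CondV, CurBB);  // br i1 %cond, label %then, label %else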
+ +void BranchInst::AssertOK() { + if (isConditional()) + assert(getCondition()->getType()->isIntegerTy(1) && + "May only branch on boolean predicates!"); +} + +BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br, + OperandTraits<BranchInst>::op_end(this) - 1, + 1, InsertBefore) { + assert(IfTrue != 0 && "Branch destination may not be null!"); + Op<-1>() = IfTrue; +} +BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, + Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br, + OperandTraits<BranchInst>::op_end(this) - 3, + 3, InsertBefore) { + Op<-1>() = IfTrue; + Op<-2>() = IfFalse; + Op<-3>() = Cond; +#ifndef NDEBUG + AssertOK(); +#endif +} + +BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br, + OperandTraits<BranchInst>::op_end(this) - 1, + 1, InsertAtEnd) { + assert(IfTrue != 0 && "Branch destination may not be null!"); + Op<-1>() = IfTrue; +} + +BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, + BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br, + OperandTraits<BranchInst>::op_end(this) - 3, + 3, InsertAtEnd) { + Op<-1>() = IfTrue; + Op<-2>() = IfFalse; + Op<-3>() = Cond; +#ifndef NDEBUG + AssertOK(); +#endif +} + + +BranchInst::BranchInst(const BranchInst &BI) : + TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br, + OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(), + BI.getNumOperands()) { + Op<-1>() = BI.Op<-1>(); + if (BI.getNumOperands() != 1) { + assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!"); + Op<-3>() = BI.Op<-3>(); + Op<-2>() = BI.Op<-2>(); + } + SubclassOptionalData = BI.SubclassOptionalData; +} + +void BranchInst::swapSuccessors() { + assert(isConditional() && + "Cannot swap successors of an unconditional branch"); + Op<-1>().swap(Op<-2>()); + + // Update profile metadata if present and it matches our structural + // expectations. + MDNode *ProfileData = getMetadata(LLVMContext::MD_prof); + if (!ProfileData || ProfileData->getNumOperands() != 3) + return; + + // The first operand is the name. Fetch them backwards and build a new one. + Value *Ops[] = { + ProfileData->getOperand(0), + ProfileData->getOperand(2), + ProfileData->getOperand(1) + }; + setMetadata(LLVMContext::MD_prof, + MDNode::get(ProfileData->getContext(), Ops)); +} + +BasicBlock *BranchInst::getSuccessorV(unsigned idx) const { + return getSuccessor(idx); +} +unsigned BranchInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} +void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) { + setSuccessor(idx, B); +} + + +//===----------------------------------------------------------------------===// +// AllocaInst Implementation +//===----------------------------------------------------------------------===// + +static Value *getAISize(LLVMContext &Context, Value *Amt) { + if (!Amt) + Amt = ConstantInt::get(Type::getInt32Ty(Context), 1); + else { + assert(!isa<BasicBlock>(Amt) && + "Passed basic block into allocation size parameter! 
Use other ctor"); + assert(Amt->getType()->isIntegerTy() && + "Allocation array size is not an integer!"); + } + return Amt; +} + +AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, + const Twine &Name, Instruction *InsertBefore) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), ArraySize), InsertBefore) { + setAlignment(0); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, + const Twine &Name, BasicBlock *InsertAtEnd) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), ArraySize), InsertAtEnd) { + setAlignment(0); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +AllocaInst::AllocaInst(Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), 0), InsertBefore) { + setAlignment(0); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +AllocaInst::AllocaInst(Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), 0), InsertAtEnd) { + setAlignment(0); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align, + const Twine &Name, Instruction *InsertBefore) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), ArraySize), InsertBefore) { + setAlignment(Align); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align, + const Twine &Name, BasicBlock *InsertAtEnd) + : UnaryInstruction(PointerType::getUnqual(Ty), Alloca, + getAISize(Ty->getContext(), ArraySize), InsertAtEnd) { + setAlignment(Align); + assert(!Ty->isVoidTy() && "Cannot allocate void!"); + setName(Name); +} + +// Out of line virtual method, so the vtable, etc has a home. +AllocaInst::~AllocaInst() { +} + +void AllocaInst::setAlignment(unsigned Align) { + assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); + assert(Align <= MaximumAlignment && + "Alignment is greater than MaximumAlignment!"); + setInstructionSubclassData(Log2_32(Align) + 1); + assert(getAlignment() == Align && "Alignment representation error!"); +} + +bool AllocaInst::isArrayAllocation() const { + if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0))) + return !CI->isOne(); + return true; +} + +Type *AllocaInst::getAllocatedType() const { + return getType()->getElementType(); +} + +/// isStaticAlloca - Return true if this alloca is in the entry block of the +/// function and is a constant size. If so, the code generator will fold it +/// into the prolog/epilog code, so it is basically free. +bool AllocaInst::isStaticAlloca() const { + // Must be constant size. + if (!isa<ConstantInt>(getArraySize())) return false; + + // Must be in the entry block. 
+ const BasicBlock *Parent = getParent(); + return Parent == &Parent->getParent()->front(); +} + +//===----------------------------------------------------------------------===// +// LoadInst Implementation +//===----------------------------------------------------------------------===// + +void LoadInst::AssertOK() { + assert(getOperand(0)->getType()->isPointerTy() && + "Ptr must have pointer type."); + assert(!(isAtomic() && getAlignment() == 0) && + "Alignment required for atomic load"); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(false); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(false); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + Instruction *InsertBef) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + unsigned Align, Instruction *InsertBef) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + unsigned Align, BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(NotAtomic); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + Instruction *InsertBef) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(Order, SynchScope); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(Order, SynchScope); + AssertOK(); + setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(false); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + if (Name && Name[0]) setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(false); + 
setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + if (Name && Name[0]) setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile, + Instruction *InsertBef) +: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertBef) { + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + if (Name && Name[0]) setName(Name); +} + +LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile, + BasicBlock *InsertAE) + : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(), + Load, Ptr, InsertAE) { + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); + if (Name && Name[0]) setName(Name); +} + +void LoadInst::setAlignment(unsigned Align) { + assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); + assert(Align <= MaximumAlignment && + "Alignment is greater than MaximumAlignment!"); + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | + ((Log2_32(Align)+1)<<1)); + assert(getAlignment() == Align && "Alignment representation error!"); +} + +//===----------------------------------------------------------------------===// +// StoreInst Implementation +//===----------------------------------------------------------------------===// + +void StoreInst::AssertOK() { + assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!"); + assert(getOperand(1)->getType()->isPointerTy() && + "Ptr must have pointer type!"); + assert(getOperand(0)->getType() == + cast<PointerType>(getOperand(1)->getType())->getElementType() + && "Ptr must be a pointer to Val type!"); + assert(!(isAtomic() && getAlignment() == 0) && + "Alignment required for atomic load"); +} + + +StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertBefore) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(false); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertAtEnd) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(false); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + Instruction *InsertBefore) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertBefore) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + unsigned Align, Instruction *InsertBefore) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertBefore) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + Instruction *InsertBefore) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + 
OperandTraits<StoreInst>::operands(this), + InsertBefore) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(Order, SynchScope); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertAtEnd) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(0); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + unsigned Align, BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertAtEnd) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(NotAtomic); + AssertOK(); +} + +StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(val->getContext()), Store, + OperandTraits<StoreInst>::op_begin(this), + OperandTraits<StoreInst>::operands(this), + InsertAtEnd) { + Op<0>() = val; + Op<1>() = addr; + setVolatile(isVolatile); + setAlignment(Align); + setAtomic(Order, SynchScope); + AssertOK(); +} + +void StoreInst::setAlignment(unsigned Align) { + assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); + assert(Align <= MaximumAlignment && + "Alignment is greater than MaximumAlignment!"); + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | + ((Log2_32(Align)+1) << 1)); + assert(getAlignment() == Align && "Alignment representation error!"); +} + +//===----------------------------------------------------------------------===// +// AtomicCmpXchgInst Implementation +//===----------------------------------------------------------------------===// + +void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, + AtomicOrdering Ordering, + SynchronizationScope SynchScope) { + Op<0>() = Ptr; + Op<1>() = Cmp; + Op<2>() = NewVal; + setOrdering(Ordering); + setSynchScope(SynchScope); + + assert(getOperand(0) && getOperand(1) && getOperand(2) && + "All operands must be non-null!"); + assert(getOperand(0)->getType()->isPointerTy() && + "Ptr must have pointer type!"); + assert(getOperand(1)->getType() == + cast<PointerType>(getOperand(0)->getType())->getElementType() + && "Ptr must be a pointer to Cmp type!"); + assert(getOperand(2)->getType() == + cast<PointerType>(getOperand(0)->getType())->getElementType() + && "Ptr must be a pointer to NewVal type!"); + assert(Ordering != NotAtomic && + "AtomicCmpXchg instructions must be atomic!"); +} + +AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, + AtomicOrdering Ordering, + SynchronizationScope SynchScope, + Instruction *InsertBefore) + : Instruction(Cmp->getType(), AtomicCmpXchg, + OperandTraits<AtomicCmpXchgInst>::op_begin(this), + OperandTraits<AtomicCmpXchgInst>::operands(this), + InsertBefore) { + Init(Ptr, Cmp, NewVal, Ordering, SynchScope); +} + +AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, + AtomicOrdering Ordering, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd) + : Instruction(Cmp->getType(), AtomicCmpXchg, + OperandTraits<AtomicCmpXchgInst>::op_begin(this), + OperandTraits<AtomicCmpXchgInst>::operands(this), + 
InsertAtEnd) { + Init(Ptr, Cmp, NewVal, Ordering, SynchScope); +} + +//===----------------------------------------------------------------------===// +// AtomicRMWInst Implementation +//===----------------------------------------------------------------------===// + +void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, + AtomicOrdering Ordering, + SynchronizationScope SynchScope) { + Op<0>() = Ptr; + Op<1>() = Val; + setOperation(Operation); + setOrdering(Ordering); + setSynchScope(SynchScope); + + assert(getOperand(0) && getOperand(1) && + "All operands must be non-null!"); + assert(getOperand(0)->getType()->isPointerTy() && + "Ptr must have pointer type!"); + assert(getOperand(1)->getType() == + cast<PointerType>(getOperand(0)->getType())->getElementType() + && "Ptr must be a pointer to Val type!"); + assert(Ordering != NotAtomic && + "AtomicRMW instructions must be atomic!"); +} + +AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, + AtomicOrdering Ordering, + SynchronizationScope SynchScope, + Instruction *InsertBefore) + : Instruction(Val->getType(), AtomicRMW, + OperandTraits<AtomicRMWInst>::op_begin(this), + OperandTraits<AtomicRMWInst>::operands(this), + InsertBefore) { + Init(Operation, Ptr, Val, Ordering, SynchScope); +} + +AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, + AtomicOrdering Ordering, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd) + : Instruction(Val->getType(), AtomicRMW, + OperandTraits<AtomicRMWInst>::op_begin(this), + OperandTraits<AtomicRMWInst>::operands(this), + InsertAtEnd) { + Init(Operation, Ptr, Val, Ordering, SynchScope); +} + +//===----------------------------------------------------------------------===// +// FenceInst Implementation +//===----------------------------------------------------------------------===// + +FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, + SynchronizationScope SynchScope, + Instruction *InsertBefore) + : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertBefore) { + setOrdering(Ordering); + setSynchScope(SynchScope); +} + +FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertAtEnd) { + setOrdering(Ordering); + setSynchScope(SynchScope); +} + +//===----------------------------------------------------------------------===// +// GetElementPtrInst Implementation +//===----------------------------------------------------------------------===// + +void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, + const Twine &Name) { + assert(NumOperands == 1 + IdxList.size() && "NumOperands not initialized?"); + OperandList[0] = Ptr; + std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1); + setName(Name); +} + +GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) + : Instruction(GEPI.getType(), GetElementPtr, + OperandTraits<GetElementPtrInst>::op_end(this) + - GEPI.getNumOperands(), + GEPI.getNumOperands()) { + std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); + SubclassOptionalData = GEPI.SubclassOptionalData; +} + +/// getIndexedType - Returns the type of the element that would be accessed with +/// a gep instruction with the specified parameters. +/// +/// The Idxs pointer should point to a continuous piece of memory containing the +/// indices, either as Value* or uint64_t. +/// +/// A null type is returned if the indices are invalid for the specified +/// pointer type. 
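// (Worked example with a hypothetical type: for Ptr == {i32, [4 x float]}* and
//  the index list {0, 1, 2}, the leading 0 steps over the pointer, 1 selects
//  the [4 x float] field, and 2 selects an element of that array, so the
//  indexed type is float.)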
+/// +template <typename IndexTy> +static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) { + PointerType *PTy = dyn_cast<PointerType>(Ptr->getScalarType()); + if (!PTy) return 0; // Type isn't a pointer type! + Type *Agg = PTy->getElementType(); + + // Handle the special case of the empty set index set, which is always valid. + if (IdxList.empty()) + return Agg; + + // If there is at least one index, the top level type must be sized, otherwise + // it cannot be 'stepped over'. + if (!Agg->isSized()) + return 0; + + unsigned CurIdx = 1; + for (; CurIdx != IdxList.size(); ++CurIdx) { + CompositeType *CT = dyn_cast<CompositeType>(Agg); + if (!CT || CT->isPointerTy()) return 0; + IndexTy Index = IdxList[CurIdx]; + if (!CT->indexValid(Index)) return 0; + Agg = CT->getTypeAtIndex(Index); + } + return CurIdx == IdxList.size() ? Agg : 0; +} + +Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<Value *> IdxList) { + return getIndexedTypeInternal(Ptr, IdxList); +} + +Type *GetElementPtrInst::getIndexedType(Type *Ptr, + ArrayRef<Constant *> IdxList) { + return getIndexedTypeInternal(Ptr, IdxList); +} + +Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) { + return getIndexedTypeInternal(Ptr, IdxList); +} + +/// hasAllZeroIndices - Return true if all of the indices of this GEP are +/// zeros. If so, the result pointer and the first operand have the same +/// value, just potentially different types. +bool GetElementPtrInst::hasAllZeroIndices() const { + for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { + if (!CI->isZero()) return false; + } else { + return false; + } + } + return true; +} + +/// hasAllConstantIndices - Return true if all of the indices of this GEP are +/// constant integers. If so, the result pointer and the first operand have +/// a constant offset between them. +bool GetElementPtrInst::hasAllConstantIndices() const { + for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { + if (!isa<ConstantInt>(getOperand(i))) + return false; + } + return true; +} + +void GetElementPtrInst::setIsInBounds(bool B) { + cast<GEPOperator>(this)->setIsInBounds(B); +} + +bool GetElementPtrInst::isInBounds() const { + return cast<GEPOperator>(this)->isInBounds(); +} + +bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, + APInt &Offset) const { + // Delegate to the generic GEPOperator implementation. 
+ return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); +} + +//===----------------------------------------------------------------------===// +// ExtractElementInst Implementation +//===----------------------------------------------------------------------===// + +ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, + const Twine &Name, + Instruction *InsertBef) + : Instruction(cast<VectorType>(Val->getType())->getElementType(), + ExtractElement, + OperandTraits<ExtractElementInst>::op_begin(this), + 2, InsertBef) { + assert(isValidOperands(Val, Index) && + "Invalid extractelement instruction operands!"); + Op<0>() = Val; + Op<1>() = Index; + setName(Name); +} + +ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, + const Twine &Name, + BasicBlock *InsertAE) + : Instruction(cast<VectorType>(Val->getType())->getElementType(), + ExtractElement, + OperandTraits<ExtractElementInst>::op_begin(this), + 2, InsertAE) { + assert(isValidOperands(Val, Index) && + "Invalid extractelement instruction operands!"); + + Op<0>() = Val; + Op<1>() = Index; + setName(Name); +} + + +bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { + if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy(32)) + return false; + return true; +} + + +//===----------------------------------------------------------------------===// +// InsertElementInst Implementation +//===----------------------------------------------------------------------===// + +InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, + const Twine &Name, + Instruction *InsertBef) + : Instruction(Vec->getType(), InsertElement, + OperandTraits<InsertElementInst>::op_begin(this), + 3, InsertBef) { + assert(isValidOperands(Vec, Elt, Index) && + "Invalid insertelement instruction operands!"); + Op<0>() = Vec; + Op<1>() = Elt; + Op<2>() = Index; + setName(Name); +} + +InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, + const Twine &Name, + BasicBlock *InsertAE) + : Instruction(Vec->getType(), InsertElement, + OperandTraits<InsertElementInst>::op_begin(this), + 3, InsertAE) { + assert(isValidOperands(Vec, Elt, Index) && + "Invalid insertelement instruction operands!"); + + Op<0>() = Vec; + Op<1>() = Elt; + Op<2>() = Index; + setName(Name); +} + +bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, + const Value *Index) { + if (!Vec->getType()->isVectorTy()) + return false; // First operand of insertelement must be vector type. + + if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) + return false;// Second operand of insertelement must be vector element type. + + if (!Index->getType()->isIntegerTy(32)) + return false; // Third operand of insertelement must be i32. 
+ return true; +} + + +//===----------------------------------------------------------------------===// +// ShuffleVectorInst Implementation +//===----------------------------------------------------------------------===// + +ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, + const Twine &Name, + Instruction *InsertBefore) +: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), + cast<VectorType>(Mask->getType())->getNumElements()), + ShuffleVector, + OperandTraits<ShuffleVectorInst>::op_begin(this), + OperandTraits<ShuffleVectorInst>::operands(this), + InsertBefore) { + assert(isValidOperands(V1, V2, Mask) && + "Invalid shuffle vector instruction operands!"); + Op<0>() = V1; + Op<1>() = V2; + Op<2>() = Mask; + setName(Name); +} + +ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, + const Twine &Name, + BasicBlock *InsertAtEnd) +: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), + cast<VectorType>(Mask->getType())->getNumElements()), + ShuffleVector, + OperandTraits<ShuffleVectorInst>::op_begin(this), + OperandTraits<ShuffleVectorInst>::operands(this), + InsertAtEnd) { + assert(isValidOperands(V1, V2, Mask) && + "Invalid shuffle vector instruction operands!"); + + Op<0>() = V1; + Op<1>() = V2; + Op<2>() = Mask; + setName(Name); +} + +bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, + const Value *Mask) { + // V1 and V2 must be vectors of the same type. + if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) + return false; + + // Mask must be vector of i32. + VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType()); + if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32)) + return false; + + // Check to see if Mask is valid. + if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) + return true; + + if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) { + unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); + for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(MV->getOperand(i))) { + if (CI->uge(V1Size*2)) + return false; + } else if (!isa<UndefValue>(MV->getOperand(i))) { + return false; + } + } + return true; + } + + if (const ConstantDataSequential *CDS = + dyn_cast<ConstantDataSequential>(Mask)) { + unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); + for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) + if (CDS->getElementAsInteger(i) >= V1Size*2) + return false; + return true; + } + + // The bitcode reader can create a place holder for a forward reference + // used as the shuffle mask. When this occurs, the shuffle mask will + // fall into this case and fail. To avoid this error, do this bit of + // ugliness to allow such a mask pass. + if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask)) + if (CE->getOpcode() == Instruction::UserOp1) + return true; + + return false; +} + +/// getMaskValue - Return the index from the shuffle mask for the specified +/// output result. This is either -1 if the element is undef or a number less +/// than 2*numelements. 
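// (Worked example with hypothetical operands: for two <4 x i32> inputs and the
//  mask <i32 0, i32 5, i32 undef, i32 3>, getMaskValue yields 0, 5, -1 and 3
//  for elements 0 through 3, and getShuffleMask fills Result with {0, 5, -1, 3};
//  the value 5 selects element 1 of the second input vector.)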
+int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) { + assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); + if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask)) + return CDS->getElementAsInteger(i); + Constant *C = Mask->getAggregateElement(i); + if (isa<UndefValue>(C)) + return -1; + return cast<ConstantInt>(C)->getZExtValue(); +} + +/// getShuffleMask - Return the full mask for this instruction, where each +/// element is the element number and undef's are returned as -1. +void ShuffleVectorInst::getShuffleMask(Constant *Mask, + SmallVectorImpl<int> &Result) { + unsigned NumElts = Mask->getType()->getVectorNumElements(); + + if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) { + for (unsigned i = 0; i != NumElts; ++i) + Result.push_back(CDS->getElementAsInteger(i)); + return; + } + for (unsigned i = 0; i != NumElts; ++i) { + Constant *C = Mask->getAggregateElement(i); + Result.push_back(isa<UndefValue>(C) ? -1 : + cast<ConstantInt>(C)->getZExtValue()); + } +} + + +//===----------------------------------------------------------------------===// +// InsertValueInst Class +//===----------------------------------------------------------------------===// + +void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, + const Twine &Name) { + assert(NumOperands == 2 && "NumOperands not initialized?"); + + // There's no fundamental reason why we require at least one index + // (other than weirdness with &*IdxBegin being invalid; see + // getelementptr's init routine for example). But there's no + // present need to support it. + assert(Idxs.size() > 0 && "InsertValueInst must have at least one index"); + + assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == + Val->getType() && "Inserted value must match indexed type!"); + Op<0>() = Agg; + Op<1>() = Val; + + Indices.append(Idxs.begin(), Idxs.end()); + setName(Name); +} + +InsertValueInst::InsertValueInst(const InsertValueInst &IVI) + : Instruction(IVI.getType(), InsertValue, + OperandTraits<InsertValueInst>::op_begin(this), 2), + Indices(IVI.Indices) { + Op<0>() = IVI.getOperand(0); + Op<1>() = IVI.getOperand(1); + SubclassOptionalData = IVI.SubclassOptionalData; +} + +//===----------------------------------------------------------------------===// +// ExtractValueInst Class +//===----------------------------------------------------------------------===// + +void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { + assert(NumOperands == 1 && "NumOperands not initialized?"); + + // There's no fundamental reason why we require at least one index. + // But there's no present need to support it. + assert(Idxs.size() > 0 && "ExtractValueInst must have at least one index"); + + Indices.append(Idxs.begin(), Idxs.end()); + setName(Name); +} + +ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) + : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), + Indices(EVI.Indices) { + SubclassOptionalData = EVI.SubclassOptionalData; +} + +// getIndexedType - Returns the type of the element that would be extracted +// with an extractvalue instruction with the specified parameters. +// +// A null type is returned if the indices are invalid for the specified +// pointer type. +// +Type *ExtractValueInst::getIndexedType(Type *Agg, + ArrayRef<unsigned> Idxs) { + for (unsigned CurIdx = 0; CurIdx != Idxs.size(); ++CurIdx) { + unsigned Index = Idxs[CurIdx]; + // We can't use CompositeType::indexValid(Index) here. 
+ // indexValid() always returns true for arrays because getelementptr allows + // out-of-bounds indices. Since we don't allow those for extractvalue and + // insertvalue we need to check array indexing manually. + // Since the only other types we can index into are struct types it's just + // as easy to check those manually as well. + if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { + if (Index >= AT->getNumElements()) + return 0; + } else if (StructType *ST = dyn_cast<StructType>(Agg)) { + if (Index >= ST->getNumElements()) + return 0; + } else { + // Not a valid type to index into. + return 0; + } + + Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); + } + return const_cast<Type*>(Agg); +} + +//===----------------------------------------------------------------------===// +// BinaryOperator Class +//===----------------------------------------------------------------------===// + +BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, + Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : Instruction(Ty, iType, + OperandTraits<BinaryOperator>::op_begin(this), + OperandTraits<BinaryOperator>::operands(this), + InsertBefore) { + Op<0>() = S1; + Op<1>() = S2; + init(iType); + setName(Name); +} + +BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, + Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : Instruction(Ty, iType, + OperandTraits<BinaryOperator>::op_begin(this), + OperandTraits<BinaryOperator>::operands(this), + InsertAtEnd) { + Op<0>() = S1; + Op<1>() = S2; + init(iType); + setName(Name); +} + + +void BinaryOperator::init(BinaryOps iType) { + Value *LHS = getOperand(0), *RHS = getOperand(1); + (void)LHS; (void)RHS; // Silence warnings. + assert(LHS->getType() == RHS->getType() && + "Binary operator operand types must match!"); +#ifndef NDEBUG + switch (iType) { + case Add: case Sub: + case Mul: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert(getType()->isIntOrIntVectorTy() && + "Tried to create an integer operation on a non-integer type!"); + break; + case FAdd: case FSub: + case FMul: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert(getType()->isFPOrFPVectorTy() && + "Tried to create a floating-point operation on a " + "non-floating-point type!"); + break; + case UDiv: + case SDiv: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert((getType()->isIntegerTy() || (getType()->isVectorTy() && + cast<VectorType>(getType())->getElementType()->isIntegerTy())) && + "Incorrect operand type (not integer) for S/UDIV"); + break; + case FDiv: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert(getType()->isFPOrFPVectorTy() && + "Incorrect operand type (not floating point) for FDIV"); + break; + case URem: + case SRem: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert((getType()->isIntegerTy() || (getType()->isVectorTy() && + cast<VectorType>(getType())->getElementType()->isIntegerTy())) && + "Incorrect operand type (not integer) for S/UREM"); + break; + case FRem: + assert(getType() == LHS->getType() && + "Arithmetic operation should return same type as operands!"); + assert(getType()->isFPOrFPVectorTy() && + "Incorrect operand type (not floating point) for FREM"); + break; + case Shl: + case LShr: + case AShr: + assert(getType() 
== LHS->getType() && + "Shift operation should return same type as operands!"); + assert((getType()->isIntegerTy() || + (getType()->isVectorTy() && + cast<VectorType>(getType())->getElementType()->isIntegerTy())) && + "Tried to create a shift operation on a non-integral type!"); + break; + case And: case Or: + case Xor: + assert(getType() == LHS->getType() && + "Logical operation should return same type as operands!"); + assert((getType()->isIntegerTy() || + (getType()->isVectorTy() && + cast<VectorType>(getType())->getElementType()->isIntegerTy())) && + "Tried to create a logical operation on a non-integral type!"); + break; + default: + break; + } +#endif +} + +BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, + const Twine &Name, + Instruction *InsertBefore) { + assert(S1->getType() == S2->getType() && + "Cannot create binary operator with two operands of differing type!"); + return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, + const Twine &Name, + BasicBlock *InsertAtEnd) { + BinaryOperator *Res = Create(Op, S1, S2, Name); + InsertAtEnd->getInstList().push_back(Res); + return Res; +} + +BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return new BinaryOperator(Instruction::Sub, + zero, Op, + Op->getType(), Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return new BinaryOperator(Instruction::Sub, + zero, Op, + Op->getType(), Name, InsertAtEnd); +} + +BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); +} + +BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); +} + +BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return new BinaryOperator(Instruction::FSub, zero, Op, + Op->getType(), Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return new BinaryOperator(Instruction::FSub, zero, Op, + Op->getType(), Name, InsertAtEnd); +} + +BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Constant *C = Constant::getAllOnesValue(Op->getType()); + return new BinaryOperator(Instruction::Xor, Op, C, + Op->getType(), Name, 
InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); + return new BinaryOperator(Instruction::Xor, Op, AllOnes, + Op->getType(), Name, InsertAtEnd); +} + + +// isConstantAllOnes - Helper function for several functions below +static inline bool isConstantAllOnes(const Value *V) { + if (const Constant *C = dyn_cast<Constant>(V)) + return C->isAllOnesValue(); + return false; +} + +bool BinaryOperator::isNeg(const Value *V) { + if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + if (Bop->getOpcode() == Instruction::Sub) + if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0))) + return C->isNegativeZeroValue(); + return false; +} + +bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) { + if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + if (Bop->getOpcode() == Instruction::FSub) + if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0))) { + if (!IgnoreZeroSign) + IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros(); + return !IgnoreZeroSign ? C->isNegativeZeroValue() : C->isZeroValue(); + } + return false; +} + +bool BinaryOperator::isNot(const Value *V) { + if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + return (Bop->getOpcode() == Instruction::Xor && + (isConstantAllOnes(Bop->getOperand(1)) || + isConstantAllOnes(Bop->getOperand(0)))); + return false; +} + +Value *BinaryOperator::getNegArgument(Value *BinOp) { + return cast<BinaryOperator>(BinOp)->getOperand(1); +} + +const Value *BinaryOperator::getNegArgument(const Value *BinOp) { + return getNegArgument(const_cast<Value*>(BinOp)); +} + +Value *BinaryOperator::getFNegArgument(Value *BinOp) { + return cast<BinaryOperator>(BinOp)->getOperand(1); +} + +const Value *BinaryOperator::getFNegArgument(const Value *BinOp) { + return getFNegArgument(const_cast<Value*>(BinOp)); +} + +Value *BinaryOperator::getNotArgument(Value *BinOp) { + assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!"); + BinaryOperator *BO = cast<BinaryOperator>(BinOp); + Value *Op0 = BO->getOperand(0); + Value *Op1 = BO->getOperand(1); + if (isConstantAllOnes(Op0)) return Op1; + + assert(isConstantAllOnes(Op1)); + return Op0; +} + +const Value *BinaryOperator::getNotArgument(const Value *BinOp) { + return getNotArgument(const_cast<Value*>(BinOp)); +} + + +// swapOperands - Exchange the two operands to this instruction. This +// instruction is safe to use on any binary instruction and does not +// modify the semantics of the instruction. If the instruction is +// order dependent (SetLT f.e.) the opcode is changed. 
+// +bool BinaryOperator::swapOperands() { + if (!isCommutative()) + return true; // Can't commute operands + Op<0>().swap(Op<1>()); + return false; +} + +void BinaryOperator::setHasNoUnsignedWrap(bool b) { + cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b); +} + +void BinaryOperator::setHasNoSignedWrap(bool b) { + cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b); +} + +void BinaryOperator::setIsExact(bool b) { + cast<PossiblyExactOperator>(this)->setIsExact(b); +} + +bool BinaryOperator::hasNoUnsignedWrap() const { + return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap(); +} + +bool BinaryOperator::hasNoSignedWrap() const { + return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap(); +} + +bool BinaryOperator::isExact() const { + return cast<PossiblyExactOperator>(this)->isExact(); +} + +//===----------------------------------------------------------------------===// +// FPMathOperator Class +//===----------------------------------------------------------------------===// + +/// getFPAccuracy - Get the maximum error permitted by this operation in ULPs. +/// An accuracy of 0.0 means that the operation should be performed with the +/// default precision. +float FPMathOperator::getFPAccuracy() const { + const MDNode *MD = + cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); + if (!MD) + return 0.0; + ConstantFP *Accuracy = cast<ConstantFP>(MD->getOperand(0)); + return Accuracy->getValueAPF().convertToFloat(); +} + + +//===----------------------------------------------------------------------===// +// CastInst Class +//===----------------------------------------------------------------------===// + +void CastInst::anchor() {} + +// Just determine if this cast only deals with integral->integral conversion. +bool CastInst::isIntegerCast() const { + switch (getOpcode()) { + default: return false; + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::Trunc: + return true; + case Instruction::BitCast: + return getOperand(0)->getType()->isIntegerTy() && + getType()->isIntegerTy(); + } +} + +bool CastInst::isLosslessCast() const { + // Only BitCast can be lossless, exit fast if we're not BitCast + if (getOpcode() != Instruction::BitCast) + return false; + + // Identity cast is always lossless + Type* SrcTy = getOperand(0)->getType(); + Type* DstTy = getType(); + if (SrcTy == DstTy) + return true; + + // Pointer to pointer is always lossless. + if (SrcTy->isPointerTy()) + return DstTy->isPointerTy(); + return false; // Other types have no identity values +} + +/// This function determines if the CastInst does not require any bits to be +/// changed in order to effect the cast. Essentially, it identifies cases where +/// no code gen is necessary for the cast, hence the name no-op cast. For +/// example, the following are all no-op casts: +/// # bitcast i32* %x to i8* +/// # bitcast <2 x i32> %x to <4 x i16> +/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only +/// @brief Determine if the described cast is a no-op. 
+bool CastInst::isNoopCast(Instruction::CastOps Opcode, + Type *SrcTy, + Type *DestTy, + Type *IntPtrTy) { + switch (Opcode) { + default: llvm_unreachable("Invalid CastOp"); + case Instruction::Trunc: + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPTrunc: + case Instruction::FPExt: + case Instruction::UIToFP: + case Instruction::SIToFP: + case Instruction::FPToUI: + case Instruction::FPToSI: + return false; // These always modify bits + case Instruction::BitCast: + return true; // BitCast never modifies bits. + case Instruction::PtrToInt: + return IntPtrTy->getScalarSizeInBits() == + DestTy->getScalarSizeInBits(); + case Instruction::IntToPtr: + return IntPtrTy->getScalarSizeInBits() == + SrcTy->getScalarSizeInBits(); + } +} + +/// @brief Determine if a cast is a no-op. +bool CastInst::isNoopCast(Type *IntPtrTy) const { + return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy); +} + +/// This function determines if a pair of casts can be eliminated and what +/// opcode should be used in the elimination. This assumes that there are two +/// instructions like this: +/// * %F = firstOpcode SrcTy %x to MidTy +/// * %S = secondOpcode MidTy %F to DstTy +/// The function returns a resultOpcode so these two casts can be replaced with: +/// * %Replacement = resultOpcode %SrcTy %x to DstTy +/// If no such cast is permited, the function returns 0. +unsigned CastInst::isEliminableCastPair( + Instruction::CastOps firstOp, Instruction::CastOps secondOp, + Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, + Type *DstIntPtrTy) { + // Define the 144 possibilities for these two cast instructions. The values + // in this matrix determine what to do in a given situation and select the + // case in the switch below. The rows correspond to firstOp, the columns + // correspond to secondOp. In looking at the table below, keep in mind + // the following cast properties: + // + // Size Compare Source Destination + // Operator Src ? Size Type Sign Type Sign + // -------- ------------ ------------------- --------------------- + // TRUNC > Integer Any Integral Any + // ZEXT < Integral Unsigned Integer Any + // SEXT < Integral Signed Integer Any + // FPTOUI n/a FloatPt n/a Integral Unsigned + // FPTOSI n/a FloatPt n/a Integral Signed + // UITOFP n/a Integral Unsigned FloatPt n/a + // SITOFP n/a Integral Signed FloatPt n/a + // FPTRUNC > FloatPt n/a FloatPt n/a + // FPEXT < FloatPt n/a FloatPt n/a + // PTRTOINT n/a Pointer n/a Integral Unsigned + // INTTOPTR n/a Integral Unsigned Pointer n/a + // BITCAST = FirstClass n/a FirstClass n/a + // + // NOTE: some transforms are safe, but we consider them to be non-profitable. + // For example, we could merge "fptoui double to i32" + "zext i32 to i64", + // into "fptoui double to i64", but this loses information about the range + // of the produced value (we no longer know the top-part is all zeros). + // Further this conversion is often much more expensive for typical hardware, + // and causes issues when building libgcc. We disallow fptosi+sext for the + // same reason. 
+ const unsigned numCastOps = + Instruction::CastOpsEnd - Instruction::CastOpsBegin; + static const uint8_t CastResults[numCastOps][numCastOps] = { + // T F F U S F F P I B -+ + // R Z S P P I I T P 2 N T | + // U E E 2 2 2 2 R E I T C +- secondOp + // N X X U S F F N X N 2 V | + // C T T I I P P C T T P T -+ + { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // Trunc -+ + { 8, 1, 9,99,99, 2, 0,99,99,99, 2, 3 }, // ZExt | + { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3 }, // SExt | + { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // FPToUI | + { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // FPToSI | + { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4 }, // UIToFP +- firstOp + { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4 }, // SIToFP | + { 99,99,99, 0, 0,99,99, 1, 0,99,99, 4 }, // FPTrunc | + { 99,99,99, 2, 2,99,99,10, 2,99,99, 4 }, // FPExt | + { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3 }, // PtrToInt | + { 99,99,99,99,99,99,99,99,99,13,99,12 }, // IntToPtr | + { 5, 5, 5, 6, 6, 5, 5, 6, 6,11, 5, 1 }, // BitCast -+ + }; + + // If either of the casts are a bitcast from scalar to vector, disallow the + // merging. However, bitcast of A->B->A are allowed. + bool isFirstBitcast = (firstOp == Instruction::BitCast); + bool isSecondBitcast = (secondOp == Instruction::BitCast); + bool chainedBitcast = (SrcTy == DstTy && isFirstBitcast && isSecondBitcast); + + // Check if any of the bitcasts convert scalars<->vectors. + if ((isFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || + (isSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) + // Unless we are bitcasing to the original type, disallow optimizations. + if (!chainedBitcast) return 0; + + int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] + [secondOp-Instruction::CastOpsBegin]; + switch (ElimCase) { + case 0: + // categorically disallowed + return 0; + case 1: + // allowed, use first cast's opcode + return firstOp; + case 2: + // allowed, use second cast's opcode + return secondOp; + case 3: + // no-op cast in second op implies firstOp as long as the DestTy + // is integer and we are not converting between a vector and a + // non vector type. + if (!SrcTy->isVectorTy() && DstTy->isIntegerTy()) + return firstOp; + return 0; + case 4: + // no-op cast in second op implies firstOp as long as the DestTy + // is floating point. + if (DstTy->isFloatingPointTy()) + return firstOp; + return 0; + case 5: + // no-op cast in first op implies secondOp as long as the SrcTy + // is an integer. + if (SrcTy->isIntegerTy()) + return secondOp; + return 0; + case 6: + // no-op cast in first op implies secondOp as long as the SrcTy + // is a floating point. 
+ if (SrcTy->isFloatingPointTy()) + return secondOp; + return 0; + case 7: { + // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size + if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) + return 0; + unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); + unsigned MidSize = MidTy->getScalarSizeInBits(); + if (MidSize >= PtrSize) + return Instruction::BitCast; + return 0; + } + case 8: { + // ext, trunc -> bitcast, if the SrcTy and DstTy are same size + // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy) + // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy) + unsigned SrcSize = SrcTy->getScalarSizeInBits(); + unsigned DstSize = DstTy->getScalarSizeInBits(); + if (SrcSize == DstSize) + return Instruction::BitCast; + else if (SrcSize < DstSize) + return firstOp; + return secondOp; + } + case 9: // zext, sext -> zext, because sext can't sign extend after zext + return Instruction::ZExt; + case 10: + // fpext followed by ftrunc is allowed if the bit size returned to is + // the same as the original, in which case its just a bitcast + if (SrcTy == DstTy) + return Instruction::BitCast; + return 0; // If the types are not the same we can't eliminate it. + case 11: + // bitcast followed by ptrtoint is allowed as long as the bitcast + // is a pointer to pointer cast. + if (SrcTy->isPointerTy() && MidTy->isPointerTy()) + return secondOp; + return 0; + case 12: + // inttoptr, bitcast -> intptr if bitcast is a ptr to ptr cast + if (MidTy->isPointerTy() && DstTy->isPointerTy()) + return firstOp; + return 0; + case 13: { + // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize + if (!MidIntPtrTy) + return 0; + unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); + unsigned SrcSize = SrcTy->getScalarSizeInBits(); + unsigned DstSize = DstTy->getScalarSizeInBits(); + if (SrcSize <= PtrSize && SrcSize == DstSize) + return Instruction::BitCast; + return 0; + } + case 99: + // cast combination can't happen (error in input). This is for all cases + // where the MidTy is not the same for the two cast instructions. 
+ llvm_unreachable("Invalid Cast Combination"); + default: + llvm_unreachable("Error in CastResults table!!!"); + } +} + +CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, + const Twine &Name, Instruction *InsertBefore) { + assert(castIsValid(op, S, Ty) && "Invalid cast!"); + // Construct and return the appropriate CastInst subclass + switch (op) { + case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); + case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); + case SExt: return new SExtInst (S, Ty, Name, InsertBefore); + case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); + case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); + case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); + case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); + case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); + case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); + case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); + case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); + case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); + default: llvm_unreachable("Invalid opcode provided"); + } +} + +CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, + const Twine &Name, BasicBlock *InsertAtEnd) { + assert(castIsValid(op, S, Ty) && "Invalid cast!"); + // Construct and return the appropriate CastInst subclass + switch (op) { + case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); + case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); + case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); + case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); + case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); + case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); + case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); + case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); + case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); + case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); + case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); + case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); + default: llvm_unreachable("Invalid opcode provided"); + } +} + +CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, + const Twine &Name, + Instruction *InsertBefore) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); + return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, + const Twine &Name, + BasicBlock *InsertAtEnd) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); + return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); +} + +CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, + const Twine &Name, + Instruction *InsertBefore) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); + return Create(Instruction::SExt, S, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, + const Twine &Name, + BasicBlock *InsertAtEnd) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); + return 
Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); +} + +CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, + const Twine &Name, + Instruction *InsertBefore) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); + return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, + const Twine &Name, + BasicBlock *InsertAtEnd) { + if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) + return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); + return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); +} + +CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, + const Twine &Name, + BasicBlock *InsertAtEnd) { + assert(S->getType()->isPointerTy() && "Invalid cast"); + assert((Ty->isIntegerTy() || Ty->isPointerTy()) && + "Invalid cast"); + + if (Ty->isIntegerTy()) + return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); + return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); +} + +/// @brief Create a BitCast or a PtrToInt cast instruction +CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, + const Twine &Name, + Instruction *InsertBefore) { + assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); + assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && + "Invalid cast"); + + if (Ty->isIntOrIntVectorTy()) + return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); + return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, + bool isSigned, const Twine &Name, + Instruction *InsertBefore) { + assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && + "Invalid integer cast"); + unsigned SrcBits = C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + Instruction::CastOps opcode = + (SrcBits == DstBits ? Instruction::BitCast : + (SrcBits > DstBits ? Instruction::Trunc : + (isSigned ? Instruction::SExt : Instruction::ZExt))); + return Create(opcode, C, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, + bool isSigned, const Twine &Name, + BasicBlock *InsertAtEnd) { + assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && + "Invalid cast"); + unsigned SrcBits = C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + Instruction::CastOps opcode = + (SrcBits == DstBits ? Instruction::BitCast : + (SrcBits > DstBits ? Instruction::Trunc : + (isSigned ? Instruction::SExt : Instruction::ZExt))); + return Create(opcode, C, Ty, Name, InsertAtEnd); +} + +CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, + const Twine &Name, + Instruction *InsertBefore) { + assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && + "Invalid cast"); + unsigned SrcBits = C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + Instruction::CastOps opcode = + (SrcBits == DstBits ? Instruction::BitCast : + (SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt)); + return Create(opcode, C, Ty, Name, InsertBefore); +} + +CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, + const Twine &Name, + BasicBlock *InsertAtEnd) { + assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && + "Invalid cast"); + unsigned SrcBits = C->getType()->getScalarSizeInBits(); + unsigned DstBits = Ty->getScalarSizeInBits(); + Instruction::CastOps opcode = + (SrcBits == DstBits ? Instruction::BitCast : + (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); + return Create(opcode, C, Ty, Name, InsertAtEnd); +} + +// Check whether it is valid to call getCastOpcode for these types. +// This routine must be kept in sync with getCastOpcode. +bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { + if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) + return false; + + if (SrcTy == DestTy) + return true; + + if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) + if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) + if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { + // An element by element cast. Valid if casting the elements is valid. + SrcTy = SrcVecTy->getElementType(); + DestTy = DestVecTy->getElementType(); + } + + // Get the bit sizes, we'll need these + unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr + unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr + + // Run through the possibilities ... + if (DestTy->isIntegerTy()) { // Casting to integral + if (SrcTy->isIntegerTy()) { // Casting from integral + return true; + } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt + return true; + } else if (SrcTy->isVectorTy()) { // Casting from vector + return DestBits == SrcBits; + } else { // Casting from something else + return SrcTy->isPointerTy(); + } + } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt + if (SrcTy->isIntegerTy()) { // Casting from integral + return true; + } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt + return true; + } else if (SrcTy->isVectorTy()) { // Casting from vector + return DestBits == SrcBits; + } else { // Casting from something else + return false; + } + } else if (DestTy->isVectorTy()) { // Casting to vector + return DestBits == SrcBits; + } else if (DestTy->isPointerTy()) { // Casting to pointer + if (SrcTy->isPointerTy()) { // Casting from pointer + return true; + } else if (SrcTy->isIntegerTy()) { // Casting from integral + return true; + } else { // Casting from something else + return false; + } + } else if (DestTy->isX86_MMXTy()) { + if (SrcTy->isVectorTy()) { + return DestBits == SrcBits; // 64-bit vector to MMX + } else { + return false; + } + } else { // Casting to something else + return false; + } +} + +// Provide a way to get a "cast" where the cast opcode is inferred from the +// types and size of the operand. This, basically, is a parallel of the +// logic in the castIsValid function below. This axiom should hold: +// castIsValid( getCastOpcode(Val, Ty), Val, Ty) +// should not assert in castIsValid. In other words, this produces a "correct" +// casting opcode for the arguments passed to it. +// This routine must be kept in sync with isCastable. 
+Instruction::CastOps +CastInst::getCastOpcode( + const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { + Type *SrcTy = Src->getType(); + + assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && + "Only first class types are castable!"); + + if (SrcTy == DestTy) + return BitCast; + + if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) + if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) + if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { + // An element by element cast. Find the appropriate opcode based on the + // element types. + SrcTy = SrcVecTy->getElementType(); + DestTy = DestVecTy->getElementType(); + } + + // Get the bit sizes, we'll need these + unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr + unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr + + // Run through the possibilities ... + if (DestTy->isIntegerTy()) { // Casting to integral + if (SrcTy->isIntegerTy()) { // Casting from integral + if (DestBits < SrcBits) + return Trunc; // int -> smaller int + else if (DestBits > SrcBits) { // its an extension + if (SrcIsSigned) + return SExt; // signed -> SEXT + else + return ZExt; // unsigned -> ZEXT + } else { + return BitCast; // Same size, No-op cast + } + } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt + if (DestIsSigned) + return FPToSI; // FP -> sint + else + return FPToUI; // FP -> uint + } else if (SrcTy->isVectorTy()) { + assert(DestBits == SrcBits && + "Casting vector to integer of different width"); + return BitCast; // Same size, no-op cast + } else { + assert(SrcTy->isPointerTy() && + "Casting from a value that is not first-class type"); + return PtrToInt; // ptr -> int + } + } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt + if (SrcTy->isIntegerTy()) { // Casting from integral + if (SrcIsSigned) + return SIToFP; // sint -> FP + else + return UIToFP; // uint -> FP + } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt + if (DestBits < SrcBits) { + return FPTrunc; // FP -> smaller FP + } else if (DestBits > SrcBits) { + return FPExt; // FP -> larger FP + } else { + return BitCast; // same size, no-op cast + } + } else if (SrcTy->isVectorTy()) { + assert(DestBits == SrcBits && + "Casting vector to floating point of different width"); + return BitCast; // same size, no-op cast + } + llvm_unreachable("Casting pointer or non-first class to float"); + } else if (DestTy->isVectorTy()) { + assert(DestBits == SrcBits && + "Illegal cast to vector (wrong type or size)"); + return BitCast; + } else if (DestTy->isPointerTy()) { + if (SrcTy->isPointerTy()) { + return BitCast; // ptr -> ptr + } else if (SrcTy->isIntegerTy()) { + return IntToPtr; // int -> ptr + } + llvm_unreachable("Casting pointer to other than pointer or int"); + } else if (DestTy->isX86_MMXTy()) { + if (SrcTy->isVectorTy()) { + assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); + return BitCast; // 64-bit vector to MMX + } + llvm_unreachable("Illegal cast to X86_MMX"); + } + llvm_unreachable("Casting to type that is not first-class"); +} + +//===----------------------------------------------------------------------===// +// CastInst SubClass Constructors +//===----------------------------------------------------------------------===// + +/// Check that the construction parameters for a CastInst are correct. 
This +/// could be broken out into the separate constructors but it is useful to have +/// it in one place and to eliminate the redundant code for getting the sizes +/// of the types involved. +bool +CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { + + // Check for type sanity on the arguments + Type *SrcTy = S->getType(); + + // If this is a cast to the same type then it's trivially true. + if (SrcTy == DstTy) + return true; + + if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || + SrcTy->isAggregateType() || DstTy->isAggregateType()) + return false; + + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DstBitSize = DstTy->getScalarSizeInBits(); + + // If these are vector types, get the lengths of the vectors (using zero for + // scalar types means that checking that vector lengths match also checks that + // scalars are not being converted to vectors or vectors to scalars). + unsigned SrcLength = SrcTy->isVectorTy() ? + cast<VectorType>(SrcTy)->getNumElements() : 0; + unsigned DstLength = DstTy->isVectorTy() ? + cast<VectorType>(DstTy)->getNumElements() : 0; + + // Switch on the opcode provided + switch (op) { + default: return false; // This is an input error + case Instruction::Trunc: + return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && + SrcLength == DstLength && SrcBitSize > DstBitSize; + case Instruction::ZExt: + return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && + SrcLength == DstLength && SrcBitSize < DstBitSize; + case Instruction::SExt: + return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && + SrcLength == DstLength && SrcBitSize < DstBitSize; + case Instruction::FPTrunc: + return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && + SrcLength == DstLength && SrcBitSize > DstBitSize; + case Instruction::FPExt: + return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && + SrcLength == DstLength && SrcBitSize < DstBitSize; + case Instruction::UIToFP: + case Instruction::SIToFP: + return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && + SrcLength == DstLength; + case Instruction::FPToUI: + case Instruction::FPToSI: + return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && + SrcLength == DstLength; + case Instruction::PtrToInt: + if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) + return false; + if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) + if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) + return false; + return SrcTy->getScalarType()->isPointerTy() && + DstTy->getScalarType()->isIntegerTy(); + case Instruction::IntToPtr: + if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) + return false; + if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) + if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) + return false; + return SrcTy->getScalarType()->isIntegerTy() && + DstTy->getScalarType()->isPointerTy(); + case Instruction::BitCast: + // BitCast implies a no-op cast of type only. No bits change. + // However, you can't cast pointers to anything but pointers. + if (SrcTy->isPointerTy() != DstTy->isPointerTy()) + return false; + + // Now we know we're not dealing with a pointer/non-pointer mismatch. In all + // these cases, the cast is okay if the source and destination bit widths + // are identical. 
+ return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); + } +} + +TruncInst::TruncInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, Trunc, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); +} + +TruncInst::TruncInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); +} + +ZExtInst::ZExtInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, ZExt, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); +} + +ZExtInst::ZExtInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); +} +SExtInst::SExtInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, SExt, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); +} + +SExtInst::SExtInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); +} + +FPTruncInst::FPTruncInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); +} + +FPTruncInst::FPTruncInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); +} + +FPExtInst::FPExtInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, FPExt, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); +} + +FPExtInst::FPExtInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); +} + +UIToFPInst::UIToFPInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); +} + +UIToFPInst::UIToFPInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); +} + +SIToFPInst::SIToFPInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); +} + +SIToFPInst::SIToFPInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); +} + +FPToUIInst::FPToUIInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); +} + +FPToUIInst::FPToUIInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); +} + +FPToSIInst::FPToSIInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { + 
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); +} + +FPToSIInst::FPToSIInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); +} + +PtrToIntInst::PtrToIntInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); +} + +PtrToIntInst::PtrToIntInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); +} + +IntToPtrInst::IntToPtrInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); +} + +IntToPtrInst::IntToPtrInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); +} + +BitCastInst::BitCastInst( + Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore +) : CastInst(Ty, BitCast, S, Name, InsertBefore) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); +} + +BitCastInst::BitCastInst( + Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd +) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { + assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); +} + +//===----------------------------------------------------------------------===// +// CmpInst Classes +//===----------------------------------------------------------------------===// + +void CmpInst::anchor() {} + +CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate, + Value *LHS, Value *RHS, const Twine &Name, + Instruction *InsertBefore) + : Instruction(ty, op, + OperandTraits<CmpInst>::op_begin(this), + OperandTraits<CmpInst>::operands(this), + InsertBefore) { + Op<0>() = LHS; + Op<1>() = RHS; + setPredicate((Predicate)predicate); + setName(Name); +} + +CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate, + Value *LHS, Value *RHS, const Twine &Name, + BasicBlock *InsertAtEnd) + : Instruction(ty, op, + OperandTraits<CmpInst>::op_begin(this), + OperandTraits<CmpInst>::operands(this), + InsertAtEnd) { + Op<0>() = LHS; + Op<1>() = RHS; + setPredicate((Predicate)predicate); + setName(Name); +} + +CmpInst * +CmpInst::Create(OtherOps Op, unsigned short predicate, + Value *S1, Value *S2, + const Twine &Name, Instruction *InsertBefore) { + if (Op == Instruction::ICmp) { + if (InsertBefore) + return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), + S1, S2, Name); + else + return new ICmpInst(CmpInst::Predicate(predicate), + S1, S2, Name); + } + + if (InsertBefore) + return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), + S1, S2, Name); + else + return new FCmpInst(CmpInst::Predicate(predicate), + S1, S2, Name); +} + +CmpInst * +CmpInst::Create(OtherOps Op, unsigned short predicate, Value *S1, Value *S2, + const Twine &Name, BasicBlock *InsertAtEnd) { + if (Op == Instruction::ICmp) { + return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), + S1, S2, Name); + } + return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), + S1, S2, Name); +} + +void CmpInst::swapOperands() { + if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) + IC->swapOperands(); + else + cast<FCmpInst>(this)->swapOperands(); +} + 
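A minimal sketch of the CmpInst::Create factory above (not part of this change; the helper name and insertion point are hypothetical): for an integer opcode it hands back an ICmpInst, so the two forms below are interchangeable.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: emit an unsigned "A < B" comparison immediately before InsertPt.
static Value *emitULT(Value *A, Value *B, Instruction *InsertPt) {
  // Equivalent to: new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, A, B, "cmp")
  return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
                         A, B, "cmp", InsertPt);
}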
+bool CmpInst::isCommutative() const { + if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) + return IC->isCommutative(); + return cast<FCmpInst>(this)->isCommutative(); +} + +bool CmpInst::isEquality() const { + if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) + return IC->isEquality(); + return cast<FCmpInst>(this)->isEquality(); +} + + +CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { + switch (pred) { + default: llvm_unreachable("Unknown cmp predicate!"); + case ICMP_EQ: return ICMP_NE; + case ICMP_NE: return ICMP_EQ; + case ICMP_UGT: return ICMP_ULE; + case ICMP_ULT: return ICMP_UGE; + case ICMP_UGE: return ICMP_ULT; + case ICMP_ULE: return ICMP_UGT; + case ICMP_SGT: return ICMP_SLE; + case ICMP_SLT: return ICMP_SGE; + case ICMP_SGE: return ICMP_SLT; + case ICMP_SLE: return ICMP_SGT; + + case FCMP_OEQ: return FCMP_UNE; + case FCMP_ONE: return FCMP_UEQ; + case FCMP_OGT: return FCMP_ULE; + case FCMP_OLT: return FCMP_UGE; + case FCMP_OGE: return FCMP_ULT; + case FCMP_OLE: return FCMP_UGT; + case FCMP_UEQ: return FCMP_ONE; + case FCMP_UNE: return FCMP_OEQ; + case FCMP_UGT: return FCMP_OLE; + case FCMP_ULT: return FCMP_OGE; + case FCMP_UGE: return FCMP_OLT; + case FCMP_ULE: return FCMP_OGT; + case FCMP_ORD: return FCMP_UNO; + case FCMP_UNO: return FCMP_ORD; + case FCMP_TRUE: return FCMP_FALSE; + case FCMP_FALSE: return FCMP_TRUE; + } +} + +ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { + switch (pred) { + default: llvm_unreachable("Unknown icmp predicate!"); + case ICMP_EQ: case ICMP_NE: + case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: + return pred; + case ICMP_UGT: return ICMP_SGT; + case ICMP_ULT: return ICMP_SLT; + case ICMP_UGE: return ICMP_SGE; + case ICMP_ULE: return ICMP_SLE; + } +} + +ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { + switch (pred) { + default: llvm_unreachable("Unknown icmp predicate!"); + case ICMP_EQ: case ICMP_NE: + case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: + return pred; + case ICMP_SGT: return ICMP_UGT; + case ICMP_SLT: return ICMP_ULT; + case ICMP_SGE: return ICMP_UGE; + case ICMP_SLE: return ICMP_ULE; + } +} + +/// Initialize a set of values that all satisfy the condition with C. +/// +ConstantRange +ICmpInst::makeConstantRange(Predicate pred, const APInt &C) { + APInt Lower(C); + APInt Upper(C); + uint32_t BitWidth = C.getBitWidth(); + switch (pred) { + default: llvm_unreachable("Invalid ICmp opcode to ConstantRange ctor!"); + case ICmpInst::ICMP_EQ: Upper++; break; + case ICmpInst::ICMP_NE: Lower++; break; + case ICmpInst::ICMP_ULT: + Lower = APInt::getMinValue(BitWidth); + // Check for an empty-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/false); + break; + case ICmpInst::ICMP_SLT: + Lower = APInt::getSignedMinValue(BitWidth); + // Check for an empty-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/false); + break; + case ICmpInst::ICMP_UGT: + Lower++; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) + // Check for an empty-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/false); + break; + case ICmpInst::ICMP_SGT: + Lower++; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) + // Check for an empty-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/false); + break; + case ICmpInst::ICMP_ULE: + Lower = APInt::getMinValue(BitWidth); Upper++; + // Check for a full-set condition. 
+ if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/true); + break; + case ICmpInst::ICMP_SLE: + Lower = APInt::getSignedMinValue(BitWidth); Upper++; + // Check for a full-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/true); + break; + case ICmpInst::ICMP_UGE: + Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) + // Check for a full-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/true); + break; + case ICmpInst::ICMP_SGE: + Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) + // Check for a full-set condition. + if (Lower == Upper) + return ConstantRange(BitWidth, /*isFullSet=*/true); + break; + } + return ConstantRange(Lower, Upper); +} + +CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { + switch (pred) { + default: llvm_unreachable("Unknown cmp predicate!"); + case ICMP_EQ: case ICMP_NE: + return pred; + case ICMP_SGT: return ICMP_SLT; + case ICMP_SLT: return ICMP_SGT; + case ICMP_SGE: return ICMP_SLE; + case ICMP_SLE: return ICMP_SGE; + case ICMP_UGT: return ICMP_ULT; + case ICMP_ULT: return ICMP_UGT; + case ICMP_UGE: return ICMP_ULE; + case ICMP_ULE: return ICMP_UGE; + + case FCMP_FALSE: case FCMP_TRUE: + case FCMP_OEQ: case FCMP_ONE: + case FCMP_UEQ: case FCMP_UNE: + case FCMP_ORD: case FCMP_UNO: + return pred; + case FCMP_OGT: return FCMP_OLT; + case FCMP_OLT: return FCMP_OGT; + case FCMP_OGE: return FCMP_OLE; + case FCMP_OLE: return FCMP_OGE; + case FCMP_UGT: return FCMP_ULT; + case FCMP_ULT: return FCMP_UGT; + case FCMP_UGE: return FCMP_ULE; + case FCMP_ULE: return FCMP_UGE; + } +} + +bool CmpInst::isUnsigned(unsigned short predicate) { + switch (predicate) { + default: return false; + case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: + case ICmpInst::ICMP_UGE: return true; + } +} + +bool CmpInst::isSigned(unsigned short predicate) { + switch (predicate) { + default: return false; + case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: + case ICmpInst::ICMP_SGE: return true; + } +} + +bool CmpInst::isOrdered(unsigned short predicate) { + switch (predicate) { + default: return false; + case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: + case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: + case FCmpInst::FCMP_ORD: return true; + } +} + +bool CmpInst::isUnordered(unsigned short predicate) { + switch (predicate) { + default: return false; + case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: + case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: + case FCmpInst::FCMP_UNO: return true; + } +} + +bool CmpInst::isTrueWhenEqual(unsigned short predicate) { + switch(predicate) { + default: return false; + case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: + case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; + } +} + +bool CmpInst::isFalseWhenEqual(unsigned short predicate) { + switch(predicate) { + case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT: + case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; + default: return false; + } +} + + +//===----------------------------------------------------------------------===// +// SwitchInst Implementation +//===----------------------------------------------------------------------===// + +void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { + assert(Value && Default && 
NumReserved); + ReservedSpace = NumReserved; + NumOperands = 2; + OperandList = allocHungoffUses(ReservedSpace); + + OperandList[0] = Value; + OperandList[1] = Default; +} + +/// SwitchInst ctor - Create a new switch instruction, specifying a value to +/// switch on and a default destination. The number of additional cases can +/// be specified here to make memory allocation more efficient. This +/// constructor can also autoinsert before another instruction. +SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, + Instruction *InsertBefore) + : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch, + 0, 0, InsertBefore) { + init(Value, Default, 2+NumCases*2); +} + +/// SwitchInst ctor - Create a new switch instruction, specifying a value to +/// switch on and a default destination. The number of additional cases can +/// be specified here to make memory allocation more efficient. This +/// constructor also autoinserts at the end of the specified BasicBlock. +SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, + BasicBlock *InsertAtEnd) + : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch, + 0, 0, InsertAtEnd) { + init(Value, Default, 2+NumCases*2); +} + +SwitchInst::SwitchInst(const SwitchInst &SI) + : TerminatorInst(SI.getType(), Instruction::Switch, 0, 0) { + init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); + NumOperands = SI.getNumOperands(); + Use *OL = OperandList, *InOL = SI.OperandList; + for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { + OL[i] = InOL[i]; + OL[i+1] = InOL[i+1]; + } + TheSubsets = SI.TheSubsets; + SubclassOptionalData = SI.SubclassOptionalData; +} + +SwitchInst::~SwitchInst() { + dropHungoffUses(); +} + + +/// addCase - Add an entry to the switch instruction... +/// +void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { + IntegersSubsetToBB Mapping; + + // FIXME: Currently we work with ConstantInt based cases. + // So inititalize IntItem container directly from ConstantInt. + Mapping.add(IntItem::fromConstantInt(OnVal)); + IntegersSubset CaseRanges = Mapping.getCase(); + addCase(CaseRanges, Dest); +} + +void SwitchInst::addCase(IntegersSubset& OnVal, BasicBlock *Dest) { + unsigned NewCaseIdx = getNumCases(); + unsigned OpNo = NumOperands; + if (OpNo+2 > ReservedSpace) + growOperands(); // Get more space! + // Initialize some new operands. + assert(OpNo+1 < ReservedSpace && "Growing didn't work!"); + NumOperands = OpNo+2; + + SubsetsIt TheSubsetsIt = TheSubsets.insert(TheSubsets.end(), OnVal); + + CaseIt Case(this, NewCaseIdx, TheSubsetsIt); + Case.updateCaseValueOperand(OnVal); + Case.setSuccessor(Dest); +} + +/// removeCase - This method removes the specified case and its successor +/// from the switch instruction. +void SwitchInst::removeCase(CaseIt& i) { + unsigned idx = i.getCaseIndex(); + + assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!"); + + unsigned NumOps = getNumOperands(); + Use *OL = OperandList; + + // Overwrite this case with the end of the list. + if (2 + (idx + 1) * 2 != NumOps) { + OL[2 + idx * 2] = OL[NumOps - 2]; + OL[2 + idx * 2 + 1] = OL[NumOps - 1]; + } + + // Nuke the last value. 
+ OL[NumOps-2].set(0); + OL[NumOps-2+1].set(0); + + // Do the same with TheCases collection: + if (i.SubsetIt != --TheSubsets.end()) { + *i.SubsetIt = TheSubsets.back(); + TheSubsets.pop_back(); + } else { + TheSubsets.pop_back(); + i.SubsetIt = TheSubsets.end(); + } + + NumOperands = NumOps-2; +} + +/// growOperands - grow operands - This grows the operand list in response +/// to a push_back style of operation. This grows the number of ops by 3 times. +/// +void SwitchInst::growOperands() { + unsigned e = getNumOperands(); + unsigned NumOps = e*3; + + ReservedSpace = NumOps; + Use *NewOps = allocHungoffUses(NumOps); + Use *OldOps = OperandList; + for (unsigned i = 0; i != e; ++i) { + NewOps[i] = OldOps[i]; + } + OperandList = NewOps; + Use::zap(OldOps, OldOps + e, true); +} + + +BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const { + return getSuccessor(idx); +} +unsigned SwitchInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} +void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) { + setSuccessor(idx, B); +} + +//===----------------------------------------------------------------------===// +// IndirectBrInst Implementation +//===----------------------------------------------------------------------===// + +void IndirectBrInst::init(Value *Address, unsigned NumDests) { + assert(Address && Address->getType()->isPointerTy() && + "Address of indirectbr must be a pointer"); + ReservedSpace = 1+NumDests; + NumOperands = 1; + OperandList = allocHungoffUses(ReservedSpace); + + OperandList[0] = Address; +} + + +/// growOperands - grow operands - This grows the operand list in response +/// to a push_back style of operation. This grows the number of ops by 2 times. +/// +void IndirectBrInst::growOperands() { + unsigned e = getNumOperands(); + unsigned NumOps = e*2; + + ReservedSpace = NumOps; + Use *NewOps = allocHungoffUses(NumOps); + Use *OldOps = OperandList; + for (unsigned i = 0; i != e; ++i) + NewOps[i] = OldOps[i]; + OperandList = NewOps; + Use::zap(OldOps, OldOps + e, true); +} + +IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, + Instruction *InsertBefore) +: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr, + 0, 0, InsertBefore) { + init(Address, NumCases); +} + +IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, + BasicBlock *InsertAtEnd) +: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr, + 0, 0, InsertAtEnd) { + init(Address, NumCases); +} + +IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) + : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, + allocHungoffUses(IBI.getNumOperands()), + IBI.getNumOperands()) { + Use *OL = OperandList, *InOL = IBI.OperandList; + for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) + OL[i] = InOL[i]; + SubclassOptionalData = IBI.SubclassOptionalData; +} + +IndirectBrInst::~IndirectBrInst() { + dropHungoffUses(); +} + +/// addDestination - Add a destination. +/// +void IndirectBrInst::addDestination(BasicBlock *DestBB) { + unsigned OpNo = NumOperands; + if (OpNo+1 > ReservedSpace) + growOperands(); // Get more space! + // Initialize some new operands. + assert(OpNo < ReservedSpace && "Growing didn't work!"); + NumOperands = OpNo+1; + OperandList[OpNo] = DestBB; +} + +/// removeDestination - This method removes the specified successor from the +/// indirectbr instruction. 
+void IndirectBrInst::removeDestination(unsigned idx) { + assert(idx < getNumOperands()-1 && "Successor index out of range!"); + + unsigned NumOps = getNumOperands(); + Use *OL = OperandList; + + // Replace this value with the last one. + OL[idx+1] = OL[NumOps-1]; + + // Nuke the last value. + OL[NumOps-1].set(0); + NumOperands = NumOps-1; +} + +BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const { + return getSuccessor(idx); +} +unsigned IndirectBrInst::getNumSuccessorsV() const { + return getNumSuccessors(); +} +void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) { + setSuccessor(idx, B); +} + +//===----------------------------------------------------------------------===// +// clone_impl() implementations +//===----------------------------------------------------------------------===// + +// Define these methods here so vtables don't get emitted into every translation +// unit that uses these classes. + +GetElementPtrInst *GetElementPtrInst::clone_impl() const { + return new (getNumOperands()) GetElementPtrInst(*this); +} + +BinaryOperator *BinaryOperator::clone_impl() const { + return Create(getOpcode(), Op<0>(), Op<1>()); +} + +FCmpInst* FCmpInst::clone_impl() const { + return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); +} + +ICmpInst* ICmpInst::clone_impl() const { + return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); +} + +ExtractValueInst *ExtractValueInst::clone_impl() const { + return new ExtractValueInst(*this); +} + +InsertValueInst *InsertValueInst::clone_impl() const { + return new InsertValueInst(*this); +} + +AllocaInst *AllocaInst::clone_impl() const { + return new AllocaInst(getAllocatedType(), + (Value*)getOperand(0), + getAlignment()); +} + +LoadInst *LoadInst::clone_impl() const { + return new LoadInst(getOperand(0), Twine(), isVolatile(), + getAlignment(), getOrdering(), getSynchScope()); +} + +StoreInst *StoreInst::clone_impl() const { + return new StoreInst(getOperand(0), getOperand(1), isVolatile(), + getAlignment(), getOrdering(), getSynchScope()); + +} + +AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const { + AtomicCmpXchgInst *Result = + new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), + getOrdering(), getSynchScope()); + Result->setVolatile(isVolatile()); + return Result; +} + +AtomicRMWInst *AtomicRMWInst::clone_impl() const { + AtomicRMWInst *Result = + new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1), + getOrdering(), getSynchScope()); + Result->setVolatile(isVolatile()); + return Result; +} + +FenceInst *FenceInst::clone_impl() const { + return new FenceInst(getContext(), getOrdering(), getSynchScope()); +} + +TruncInst *TruncInst::clone_impl() const { + return new TruncInst(getOperand(0), getType()); +} + +ZExtInst *ZExtInst::clone_impl() const { + return new ZExtInst(getOperand(0), getType()); +} + +SExtInst *SExtInst::clone_impl() const { + return new SExtInst(getOperand(0), getType()); +} + +FPTruncInst *FPTruncInst::clone_impl() const { + return new FPTruncInst(getOperand(0), getType()); +} + +FPExtInst *FPExtInst::clone_impl() const { + return new FPExtInst(getOperand(0), getType()); +} + +UIToFPInst *UIToFPInst::clone_impl() const { + return new UIToFPInst(getOperand(0), getType()); +} + +SIToFPInst *SIToFPInst::clone_impl() const { + return new SIToFPInst(getOperand(0), getType()); +} + +FPToUIInst *FPToUIInst::clone_impl() const { + return new FPToUIInst(getOperand(0), getType()); +} + +FPToSIInst *FPToSIInst::clone_impl() const { + return new FPToSIInst(getOperand(0), getType()); +} 
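These clone_impl overrides back the public Instruction::clone() entry point. A minimal sketch of how a transform typically drives it (the helper name is hypothetical and not part of this change): clone() produces a detached copy, so the caller is responsible for inserting and, if desired, naming it.

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Sketch: duplicate an instruction and place the copy right after it.
static Instruction *duplicateAfter(Instruction *I) {
  Instruction *Copy = I->clone();   // dispatches to a clone_impl above
  Copy->insertAfter(I);             // clone() leaves the copy parentless
  if (!Copy->getType()->isVoidTy())
    Copy->setName(I->getName() + ".dup");
  return Copy;
}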
+ +PtrToIntInst *PtrToIntInst::clone_impl() const { + return new PtrToIntInst(getOperand(0), getType()); +} + +IntToPtrInst *IntToPtrInst::clone_impl() const { + return new IntToPtrInst(getOperand(0), getType()); +} + +BitCastInst *BitCastInst::clone_impl() const { + return new BitCastInst(getOperand(0), getType()); +} + +CallInst *CallInst::clone_impl() const { + return new(getNumOperands()) CallInst(*this); +} + +SelectInst *SelectInst::clone_impl() const { + return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); +} + +VAArgInst *VAArgInst::clone_impl() const { + return new VAArgInst(getOperand(0), getType()); +} + +ExtractElementInst *ExtractElementInst::clone_impl() const { + return ExtractElementInst::Create(getOperand(0), getOperand(1)); +} + +InsertElementInst *InsertElementInst::clone_impl() const { + return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); +} + +ShuffleVectorInst *ShuffleVectorInst::clone_impl() const { + return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); +} + +PHINode *PHINode::clone_impl() const { + return new PHINode(*this); +} + +LandingPadInst *LandingPadInst::clone_impl() const { + return new LandingPadInst(*this); +} + +ReturnInst *ReturnInst::clone_impl() const { + return new(getNumOperands()) ReturnInst(*this); +} + +BranchInst *BranchInst::clone_impl() const { + return new(getNumOperands()) BranchInst(*this); +} + +SwitchInst *SwitchInst::clone_impl() const { + return new SwitchInst(*this); +} + +IndirectBrInst *IndirectBrInst::clone_impl() const { + return new IndirectBrInst(*this); +} + + +InvokeInst *InvokeInst::clone_impl() const { + return new(getNumOperands()) InvokeInst(*this); +} + +ResumeInst *ResumeInst::clone_impl() const { + return new(1) ResumeInst(*this); +} + +UnreachableInst *UnreachableInst::clone_impl() const { + LLVMContext &Context = getContext(); + return new UnreachableInst(Context); +} diff --git a/lib/IR/IntrinsicInst.cpp b/lib/IR/IntrinsicInst.cpp new file mode 100644 index 0000000000..51f88d2e6f --- /dev/null +++ b/lib/IR/IntrinsicInst.cpp @@ -0,0 +1,73 @@ +//===-- InstrinsicInst.cpp - Intrinsic Instruction Wrappers -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements methods that make it really easy to deal with intrinsic +// functions. +// +// All intrinsic function calls are instances of the call instruction, so these +// are all subclasses of the CallInst class. Note that none of these classes +// has state or virtual methods, which is an important part of this gross/neat +// hack working. +// +// In some cases, arguments to intrinsics need to be generic and are defined as +// type pointer to empty struct { }*. To access the real item of interest the +// cast instruction needs to be stripped away. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Metadata.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +/// DbgInfoIntrinsic - This is the common base class for debug info intrinsics +/// + +static Value *CastOperand(Value *C) { + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) + if (CE->isCast()) + return CE->getOperand(0); + return NULL; +} + +Value *DbgInfoIntrinsic::StripCast(Value *C) { + if (Value *CO = CastOperand(C)) { + C = StripCast(CO); + } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { + if (GV->hasInitializer()) + if (Value *CO = CastOperand(GV->getInitializer())) + C = StripCast(CO); + } + return dyn_cast<GlobalVariable>(C); +} + +//===----------------------------------------------------------------------===// +/// DbgDeclareInst - This represents the llvm.dbg.declare instruction. +/// + +Value *DbgDeclareInst::getAddress() const { + if (MDNode* MD = cast_or_null<MDNode>(getArgOperand(0))) + return MD->getOperand(0); + else + return NULL; +} + +//===----------------------------------------------------------------------===// +/// DbgValueInst - This represents the llvm.dbg.value instruction. +/// + +const Value *DbgValueInst::getValue() const { + return cast<MDNode>(getArgOperand(0))->getOperand(0); +} + +Value *DbgValueInst::getValue() { + return cast<MDNode>(getArgOperand(0))->getOperand(0); +} diff --git a/lib/IR/LLVMBuild.txt b/lib/IR/LLVMBuild.txt new file mode 100644 index 0000000000..cd90ef5b16 --- /dev/null +++ b/lib/IR/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/IR/LLVMBuild.txt -----------------------------------*- Conf -*--===; +; +; The LLVM Compiler Infrastructure +; +; This file is distributed under the University of Illinois Open Source +; License. See LICENSE.TXT for details. +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = Core +parent = Libraries +required_libraries = Support diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp new file mode 100644 index 0000000000..883bb9878f --- /dev/null +++ b/lib/IR/LLVMContext.cpp @@ -0,0 +1,168 @@ +//===-- LLVMContext.cpp - Implement LLVMContext ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements LLVMContext, as a wrapper around the opaque +// class LLVMContextImpl. 
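// (Usage sketch, editorial: the owning relationship implemented below, where a
//  context tracks the Modules created within it, is typically set up like
//  this; names are illustrative.)
//
//    LLVMContext &Ctx = getGlobalContext();
//    Module *M = new Module("demo", Ctx);   // registers itself via addModule()
//    delete M;                              // unregisters via removeModule()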
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/LLVMContext.h" +#include "LLVMContextImpl.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Metadata.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/SourceMgr.h" +#include <cctype> +using namespace llvm; + +static ManagedStatic<LLVMContext> GlobalContext; + +LLVMContext& llvm::getGlobalContext() { + return *GlobalContext; +} + +LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { + // Create the fixed metadata kinds. This is done in the same order as the + // MD_* enum values so that they correspond. + + // Create the 'dbg' metadata kind. + unsigned DbgID = getMDKindID("dbg"); + assert(DbgID == MD_dbg && "dbg kind id drifted"); (void)DbgID; + + // Create the 'tbaa' metadata kind. + unsigned TBAAID = getMDKindID("tbaa"); + assert(TBAAID == MD_tbaa && "tbaa kind id drifted"); (void)TBAAID; + + // Create the 'prof' metadata kind. + unsigned ProfID = getMDKindID("prof"); + assert(ProfID == MD_prof && "prof kind id drifted"); (void)ProfID; + + // Create the 'fpmath' metadata kind. + unsigned FPAccuracyID = getMDKindID("fpmath"); + assert(FPAccuracyID == MD_fpmath && "fpmath kind id drifted"); + (void)FPAccuracyID; + + // Create the 'range' metadata kind. + unsigned RangeID = getMDKindID("range"); + assert(RangeID == MD_range && "range kind id drifted"); + (void)RangeID; + + // Create the 'tbaa.struct' metadata kind. + unsigned TBAAStructID = getMDKindID("tbaa.struct"); + assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted"); + (void)TBAAStructID; + + // Create the 'invariant.load' metadata kind. + unsigned InvariantLdId = getMDKindID("invariant.load"); + assert(InvariantLdId == MD_invariant_load && "invariant.load kind id drifted"); + (void)InvariantLdId; +} +LLVMContext::~LLVMContext() { delete pImpl; } + +void LLVMContext::addModule(Module *M) { + pImpl->OwnedModules.insert(M); +} + +void LLVMContext::removeModule(Module *M) { + pImpl->OwnedModules.erase(M); +} + +//===----------------------------------------------------------------------===// +// Recoverable Backend Errors +//===----------------------------------------------------------------------===// + +void LLVMContext:: +setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler, + void *DiagContext) { + pImpl->InlineAsmDiagHandler = DiagHandler; + pImpl->InlineAsmDiagContext = DiagContext; +} + +/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by +/// setInlineAsmDiagnosticHandler. +LLVMContext::InlineAsmDiagHandlerTy +LLVMContext::getInlineAsmDiagnosticHandler() const { + return pImpl->InlineAsmDiagHandler; +} + +/// getInlineAsmDiagnosticContext - Return the diagnostic context set by +/// setInlineAsmDiagnosticHandler. +void *LLVMContext::getInlineAsmDiagnosticContext() const { + return pImpl->InlineAsmDiagContext; +} + +void LLVMContext::emitError(const Twine &ErrorStr) { + emitError(0U, ErrorStr); +} + +void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) { + unsigned LocCookie = 0; + if (const MDNode *SrcLoc = I->getMetadata("srcloc")) { + if (SrcLoc->getNumOperands() != 0) + if (const ConstantInt *CI = dyn_cast<ConstantInt>(SrcLoc->getOperand(0))) + LocCookie = CI->getZExtValue(); + } + return emitError(LocCookie, ErrorStr); +} + +void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) { + // If there is no error handler installed, just print the error and exit. 
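  // (Editorial sketch: a client that wants to survive backend errors can
  //  install a handler before codegen; the handler signature matches the call
  //  made a few lines below, and the names here are hypothetical.)
  //
  //    static void MyHandler(const SMDiagnostic &D, void *Ctx, unsigned Cookie) {
  //      // record D; map Cookie back to a source location
  //    }
  //    TheContext.setInlineAsmDiagnosticHandler(MyHandler, &MyState);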
+  if (pImpl->InlineAsmDiagHandler == 0) {
+    errs() << "error: " << ErrorStr << "\n";
+    exit(1);
+  }
+
+  // If we do have an error handler, we can report the error and keep going.
+  SMDiagnostic Diag("", SourceMgr::DK_Error, ErrorStr.str());
+
+  pImpl->InlineAsmDiagHandler(Diag, pImpl->InlineAsmDiagContext, LocCookie);
+}
+
+//===----------------------------------------------------------------------===//
+// Metadata Kind Uniquing
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+/// isValidName - Return true if Name is a valid custom metadata handler name.
+static bool isValidName(StringRef MDName) {
+  if (MDName.empty())
+    return false;
+
+  if (!std::isalpha(static_cast<unsigned char>(MDName[0])))
+    return false;
+
+  for (StringRef::iterator I = MDName.begin() + 1, E = MDName.end(); I != E;
+       ++I) {
+    if (!std::isalnum(static_cast<unsigned char>(*I)) && *I != '_' &&
+        *I != '-' && *I != '.')
+      return false;
+  }
+  return true;
+}
+#endif
+
+/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+unsigned LLVMContext::getMDKindID(StringRef Name) const {
+  assert(isValidName(Name) && "Invalid MDNode name");
+
+  // If this is new, assign it its ID.
+  return
+    pImpl->CustomMDKindNames.GetOrCreateValue(
+      Name, pImpl->CustomMDKindNames.size()).second;
+}
+
+/// getMDKindNames - Populate the client-supplied SmallVector with the custom
+/// metadata names and their IDs.
+void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
+  Names.resize(pImpl->CustomMDKindNames.size());
+  for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
+       E = pImpl->CustomMDKindNames.end(); I != E; ++I)
+    Names[I->second] = I->first();
+}
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
new file mode 100644
index 0000000000..6a6a4d6801
--- /dev/null
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -0,0 +1,156 @@
+//===-- LLVMContextImpl.cpp - Implement LLVMContextImpl -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the opaque LLVMContextImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Module.h"
+#include <algorithm>
+using namespace llvm;
+
+LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
+  : TheTrueVal(0), TheFalseVal(0),
+    VoidTy(C, Type::VoidTyID),
+    LabelTy(C, Type::LabelTyID),
+    HalfTy(C, Type::HalfTyID),
+    FloatTy(C, Type::FloatTyID),
+    DoubleTy(C, Type::DoubleTyID),
+    MetadataTy(C, Type::MetadataTyID),
+    X86_FP80Ty(C, Type::X86_FP80TyID),
+    FP128Ty(C, Type::FP128TyID),
+    PPC_FP128Ty(C, Type::PPC_FP128TyID),
+    X86_MMXTy(C, Type::X86_MMXTyID),
+    Int1Ty(C, 1),
+    Int8Ty(C, 8),
+    Int16Ty(C, 16),
+    Int32Ty(C, 32),
+    Int64Ty(C, 64) {
+  InlineAsmDiagHandler = 0;
+  InlineAsmDiagContext = 0;
+  NamedStructTypesUniqueID = 0;
+}
+
+namespace {
+struct DropReferences {
+  // Takes the value_type of a ConstantUniqueMap's internal map, whose 'second'
+  // is a Constant*.
+  template<typename PairT>
+  void operator()(const PairT &P) {
+    P.second->dropAllReferences();
+  }
+};
+
+// Temporary - drops pair.first instead of second.
+struct DropFirst { + // Takes the value_type of a ConstantUniqueMap's internal map, whose 'second' + // is a Constant*. + template<typename PairT> + void operator()(const PairT &P) { + P.first->dropAllReferences(); + } +}; +} + +LLVMContextImpl::~LLVMContextImpl() { + // NOTE: We need to delete the contents of OwnedModules, but we have to + // duplicate it into a temporary vector, because the destructor of Module + // will try to remove itself from OwnedModules set. This would cause + // iterator invalidation if we iterated on the set directly. + std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end()); + DeleteContainerPointers(Modules); + + // Free the constants. This is important to do here to ensure that they are + // freed before the LeakDetector is torn down. + std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(), + DropReferences()); + std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(), + DropFirst()); + std::for_each(StructConstants.map_begin(), StructConstants.map_end(), + DropFirst()); + std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(), + DropFirst()); + ExprConstants.freeConstants(); + ArrayConstants.freeConstants(); + StructConstants.freeConstants(); + VectorConstants.freeConstants(); + DeleteContainerSeconds(CAZConstants); + DeleteContainerSeconds(CPNConstants); + DeleteContainerSeconds(UVConstants); + InlineAsms.freeConstants(); + DeleteContainerSeconds(IntConstants); + DeleteContainerSeconds(FPConstants); + + for (StringMap<ConstantDataSequential*>::iterator I = CDSConstants.begin(), + E = CDSConstants.end(); I != E; ++I) + delete I->second; + CDSConstants.clear(); + + // Destroy attributes. + for (FoldingSetIterator<AttributeImpl> I = AttrsSet.begin(), + E = AttrsSet.end(); I != E; ) { + FoldingSetIterator<AttributeImpl> Elem = I++; + delete &*Elem; + } + + // Destroy attribute lists. + for (FoldingSetIterator<AttributeSetImpl> I = AttrsLists.begin(), + E = AttrsLists.end(); I != E; ) { + FoldingSetIterator<AttributeSetImpl> Elem = I++; + delete &*Elem; + } + + // Destroy attribute node lists. + for (FoldingSetIterator<AttributeSetNode> I = AttrsSetNodes.begin(), + E = AttrsSetNodes.end(); I != E; ) { + FoldingSetIterator<AttributeSetNode> Elem = I++; + delete &*Elem; + } + + // Destroy MDNodes. ~MDNode can move and remove nodes between the MDNodeSet + // and the NonUniquedMDNodes sets, so copy the values out first. + SmallVector<MDNode*, 8> MDNodes; + MDNodes.reserve(MDNodeSet.size() + NonUniquedMDNodes.size()); + for (FoldingSetIterator<MDNode> I = MDNodeSet.begin(), E = MDNodeSet.end(); + I != E; ++I) + MDNodes.push_back(&*I); + MDNodes.append(NonUniquedMDNodes.begin(), NonUniquedMDNodes.end()); + for (SmallVectorImpl<MDNode *>::iterator I = MDNodes.begin(), + E = MDNodes.end(); I != E; ++I) + (*I)->destroy(); + assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() && + "Destroying all MDNodes didn't empty the Context's sets."); + + // Destroy MDStrings. 
+ DeleteContainerSeconds(MDStringCache); +} + +// ConstantsContext anchors +void UnaryConstantExpr::anchor() { } + +void BinaryConstantExpr::anchor() { } + +void SelectConstantExpr::anchor() { } + +void ExtractElementConstantExpr::anchor() { } + +void InsertElementConstantExpr::anchor() { } + +void ShuffleVectorConstantExpr::anchor() { } + +void ExtractValueConstantExpr::anchor() { } + +void InsertValueConstantExpr::anchor() { } + +void GetElementPtrConstantExpr::anchor() { } + +void CompareConstantExpr::anchor() { } diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h new file mode 100644 index 0000000000..0c659b81b7 --- /dev/null +++ b/lib/IR/LLVMContextImpl.h @@ -0,0 +1,367 @@ +//===-- LLVMContextImpl.h - The LLVMContextImpl opaque class ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares LLVMContextImpl, the opaque implementation +// of LLVMContext. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LLVMCONTEXT_IMPL_H +#define LLVM_LLVMCONTEXT_IMPL_H + +#include "AttributeImpl.h" +#include "ConstantsContext.h" +#include "LeaksContext.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/Support/ValueHandle.h" +#include <vector> + +namespace llvm { + +class ConstantInt; +class ConstantFP; +class LLVMContext; +class Type; +class Value; + +struct DenseMapAPIntKeyInfo { + struct KeyTy { + APInt val; + Type* type; + KeyTy(const APInt& V, Type* Ty) : val(V), type(Ty) {} + bool operator==(const KeyTy& that) const { + return type == that.type && this->val == that.val; + } + bool operator!=(const KeyTy& that) const { + return !this->operator==(that); + } + friend hash_code hash_value(const KeyTy &Key) { + return hash_combine(Key.type, Key.val); + } + }; + static inline KeyTy getEmptyKey() { return KeyTy(APInt(1,0), 0); } + static inline KeyTy getTombstoneKey() { return KeyTy(APInt(1,1), 0); } + static unsigned getHashValue(const KeyTy &Key) { + return static_cast<unsigned>(hash_value(Key)); + } + static bool isEqual(const KeyTy &LHS, const KeyTy &RHS) { + return LHS == RHS; + } +}; + +struct DenseMapAPFloatKeyInfo { + struct KeyTy { + APFloat val; + KeyTy(const APFloat& V) : val(V){} + bool operator==(const KeyTy& that) const { + return this->val.bitwiseIsEqual(that.val); + } + bool operator!=(const KeyTy& that) const { + return !this->operator==(that); + } + friend hash_code hash_value(const KeyTy &Key) { + return hash_combine(Key.val); + } + }; + static inline KeyTy getEmptyKey() { + return KeyTy(APFloat(APFloat::Bogus,1)); + } + static inline KeyTy getTombstoneKey() { + return KeyTy(APFloat(APFloat::Bogus,2)); + } + static unsigned getHashValue(const KeyTy &Key) { + return static_cast<unsigned>(hash_value(Key)); + } + static bool isEqual(const KeyTy &LHS, const KeyTy &RHS) { + return LHS == RHS; + } +}; + +struct AnonStructTypeKeyInfo { + struct KeyTy { + ArrayRef<Type*> ETypes; + bool isPacked; + KeyTy(const ArrayRef<Type*>& E, bool P) : + 
ETypes(E), isPacked(P) {} + KeyTy(const StructType* ST) : + ETypes(ArrayRef<Type*>(ST->element_begin(), ST->element_end())), + isPacked(ST->isPacked()) {} + bool operator==(const KeyTy& that) const { + if (isPacked != that.isPacked) + return false; + if (ETypes != that.ETypes) + return false; + return true; + } + bool operator!=(const KeyTy& that) const { + return !this->operator==(that); + } + }; + static inline StructType* getEmptyKey() { + return DenseMapInfo<StructType*>::getEmptyKey(); + } + static inline StructType* getTombstoneKey() { + return DenseMapInfo<StructType*>::getTombstoneKey(); + } + static unsigned getHashValue(const KeyTy& Key) { + return hash_combine(hash_combine_range(Key.ETypes.begin(), + Key.ETypes.end()), + Key.isPacked); + } + static unsigned getHashValue(const StructType *ST) { + return getHashValue(KeyTy(ST)); + } + static bool isEqual(const KeyTy& LHS, const StructType *RHS) { + if (RHS == getEmptyKey() || RHS == getTombstoneKey()) + return false; + return LHS == KeyTy(RHS); + } + static bool isEqual(const StructType *LHS, const StructType *RHS) { + return LHS == RHS; + } +}; + +struct FunctionTypeKeyInfo { + struct KeyTy { + const Type *ReturnType; + ArrayRef<Type*> Params; + bool isVarArg; + KeyTy(const Type* R, const ArrayRef<Type*>& P, bool V) : + ReturnType(R), Params(P), isVarArg(V) {} + KeyTy(const FunctionType* FT) : + ReturnType(FT->getReturnType()), + Params(ArrayRef<Type*>(FT->param_begin(), FT->param_end())), + isVarArg(FT->isVarArg()) {} + bool operator==(const KeyTy& that) const { + if (ReturnType != that.ReturnType) + return false; + if (isVarArg != that.isVarArg) + return false; + if (Params != that.Params) + return false; + return true; + } + bool operator!=(const KeyTy& that) const { + return !this->operator==(that); + } + }; + static inline FunctionType* getEmptyKey() { + return DenseMapInfo<FunctionType*>::getEmptyKey(); + } + static inline FunctionType* getTombstoneKey() { + return DenseMapInfo<FunctionType*>::getTombstoneKey(); + } + static unsigned getHashValue(const KeyTy& Key) { + return hash_combine(Key.ReturnType, + hash_combine_range(Key.Params.begin(), + Key.Params.end()), + Key.isVarArg); + } + static unsigned getHashValue(const FunctionType *FT) { + return getHashValue(KeyTy(FT)); + } + static bool isEqual(const KeyTy& LHS, const FunctionType *RHS) { + if (RHS == getEmptyKey() || RHS == getTombstoneKey()) + return false; + return LHS == KeyTy(RHS); + } + static bool isEqual(const FunctionType *LHS, const FunctionType *RHS) { + return LHS == RHS; + } +}; + +// Provide a FoldingSetTrait::Equals specialization for MDNode that can use a +// shortcut to avoid comparing all operands. +template<> struct FoldingSetTrait<MDNode> : DefaultFoldingSetTrait<MDNode> { + static bool Equals(const MDNode &X, const FoldingSetNodeID &ID, + unsigned IDHash, FoldingSetNodeID &TempID) { + assert(!X.isNotUniqued() && "Non-uniqued MDNode in FoldingSet?"); + // First, check if the cached hashes match. If they don't we can skip the + // expensive operand walk. + if (X.Hash != IDHash) + return false; + + // If they match we have to compare the operands. + X.Profile(TempID); + return TempID == ID; + } + static unsigned ComputeHash(const MDNode &X, FoldingSetNodeID &) { + return X.Hash; // Return cached hash. + } +}; + +/// DebugRecVH - This is a CallbackVH used to keep the Scope -> index maps +/// up to date as MDNodes mutate. This class is implemented in DebugLoc.cpp. 
+class DebugRecVH : public CallbackVH { + /// Ctx - This is the LLVM Context being referenced. + LLVMContextImpl *Ctx; + + /// Idx - The index into either ScopeRecordIdx or ScopeInlinedAtRecords that + /// this reference lives in. If this is zero, then it represents a + /// non-canonical entry that has no DenseMap value. This can happen due to + /// RAUW. + int Idx; +public: + DebugRecVH(MDNode *n, LLVMContextImpl *ctx, int idx) + : CallbackVH(n), Ctx(ctx), Idx(idx) {} + + MDNode *get() const { + return cast_or_null<MDNode>(getValPtr()); + } + + virtual void deleted(); + virtual void allUsesReplacedWith(Value *VNew); +}; + +class LLVMContextImpl { +public: + /// OwnedModules - The set of modules instantiated in this context, and which + /// will be automatically deleted if this context is deleted. + SmallPtrSet<Module*, 4> OwnedModules; + + LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler; + void *InlineAsmDiagContext; + + typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt*, + DenseMapAPIntKeyInfo> IntMapTy; + IntMapTy IntConstants; + + typedef DenseMap<DenseMapAPFloatKeyInfo::KeyTy, ConstantFP*, + DenseMapAPFloatKeyInfo> FPMapTy; + FPMapTy FPConstants; + + FoldingSet<AttributeImpl> AttrsSet; + FoldingSet<AttributeSetImpl> AttrsLists; + FoldingSet<AttributeSetNode> AttrsSetNodes; + + StringMap<Value*> MDStringCache; + + FoldingSet<MDNode> MDNodeSet; + + // MDNodes may be uniqued or not uniqued. When they're not uniqued, they + // aren't in the MDNodeSet, but they're still shared between objects, so no + // one object can destroy them. This set allows us to at least destroy them + // on Context destruction. + SmallPtrSet<MDNode*, 1> NonUniquedMDNodes; + + DenseMap<Type*, ConstantAggregateZero*> CAZConstants; + + typedef ConstantAggrUniqueMap<ArrayType, ConstantArray> ArrayConstantsTy; + ArrayConstantsTy ArrayConstants; + + typedef ConstantAggrUniqueMap<StructType, ConstantStruct> StructConstantsTy; + StructConstantsTy StructConstants; + + typedef ConstantAggrUniqueMap<VectorType, ConstantVector> VectorConstantsTy; + VectorConstantsTy VectorConstants; + + DenseMap<PointerType*, ConstantPointerNull*> CPNConstants; + + DenseMap<Type*, UndefValue*> UVConstants; + + StringMap<ConstantDataSequential*> CDSConstants; + + + DenseMap<std::pair<Function*, BasicBlock*> , BlockAddress*> BlockAddresses; + ConstantUniqueMap<ExprMapKeyType, const ExprMapKeyType&, Type, ConstantExpr> + ExprConstants; + + ConstantUniqueMap<InlineAsmKeyType, const InlineAsmKeyType&, PointerType, + InlineAsm> InlineAsms; + + ConstantInt *TheTrueVal; + ConstantInt *TheFalseVal; + + LeakDetectorImpl<Value> LLVMObjects; + + // Basic type instances. + Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy; + Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy; + IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty; + + + /// TypeAllocator - All dynamically allocated types are allocated from this. + /// They live forever until the context is torn down. 
+ BumpPtrAllocator TypeAllocator; + + DenseMap<unsigned, IntegerType*> IntegerTypes; + + typedef DenseMap<FunctionType*, bool, FunctionTypeKeyInfo> FunctionTypeMap; + FunctionTypeMap FunctionTypes; + typedef DenseMap<StructType*, bool, AnonStructTypeKeyInfo> StructTypeMap; + StructTypeMap AnonStructTypes; + StringMap<StructType*> NamedStructTypes; + unsigned NamedStructTypesUniqueID; + + DenseMap<std::pair<Type *, uint64_t>, ArrayType*> ArrayTypes; + DenseMap<std::pair<Type *, unsigned>, VectorType*> VectorTypes; + DenseMap<Type*, PointerType*> PointerTypes; // Pointers in AddrSpace = 0 + DenseMap<std::pair<Type*, unsigned>, PointerType*> ASPointerTypes; + + + /// ValueHandles - This map keeps track of all of the value handles that are + /// watching a Value*. The Value::HasValueHandle bit is used to know + /// whether or not a value has an entry in this map. + typedef DenseMap<Value*, ValueHandleBase*> ValueHandlesTy; + ValueHandlesTy ValueHandles; + + /// CustomMDKindNames - Map to hold the metadata string to ID mapping. + StringMap<unsigned> CustomMDKindNames; + + typedef std::pair<unsigned, TrackingVH<MDNode> > MDPairTy; + typedef SmallVector<MDPairTy, 2> MDMapTy; + + /// MetadataStore - Collection of per-instruction metadata used in this + /// context. + DenseMap<const Instruction *, MDMapTy> MetadataStore; + + /// ScopeRecordIdx - This is the index in ScopeRecords for an MDNode scope + /// entry with no "inlined at" element. + DenseMap<MDNode*, int> ScopeRecordIdx; + + /// ScopeRecords - These are the actual mdnodes (in a value handle) for an + /// index. The ValueHandle ensures that ScopeRecordIdx stays up to date if + /// the MDNode is RAUW'd. + std::vector<DebugRecVH> ScopeRecords; + + /// ScopeInlinedAtIdx - This is the index in ScopeInlinedAtRecords for an + /// scope/inlined-at pair. + DenseMap<std::pair<MDNode*, MDNode*>, int> ScopeInlinedAtIdx; + + /// ScopeInlinedAtRecords - These are the actual mdnodes (in value handles) + /// for an index. The ValueHandle ensures that ScopeINlinedAtIdx stays up + /// to date. + std::vector<std::pair<DebugRecVH, DebugRecVH> > ScopeInlinedAtRecords; + + /// IntrinsicIDCache - Cache of intrinsic name (string) to numeric ID mappings + /// requested in this context + typedef DenseMap<const Function*, unsigned> IntrinsicIDCacheTy; + IntrinsicIDCacheTy IntrinsicIDCache; + + int getOrAddScopeRecordIdxEntry(MDNode *N, int ExistingIdx); + int getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,int ExistingIdx); + + LLVMContextImpl(LLVMContext &C); + ~LLVMContextImpl(); +}; + +} + +#endif diff --git a/lib/IR/LeakDetector.cpp b/lib/IR/LeakDetector.cpp new file mode 100644 index 0000000000..835e5e61cd --- /dev/null +++ b/lib/IR/LeakDetector.cpp @@ -0,0 +1,69 @@ +//===-- LeakDetector.cpp - Implement LeakDetector interface ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LeakDetector class. 
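// (Editorial note: the typical pattern served by this file can be seen in
//  MDNode::getTemporary / deleteTemporary later in this commit, which register
//  a free-standing object and unregister it once it gains an owner or dies:)
//
//    LeakDetector::addGarbageObject(N);      // N is not owned by anything yet
//    ...
//    LeakDetector::removeGarbageObject(N);   // N is about to be owned or destroyed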
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/LeakDetector.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/IR/Value.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/Mutex.h" +#include "llvm/Support/Threading.h" +using namespace llvm; + +static ManagedStatic<sys::SmartMutex<true> > ObjectsLock; +static ManagedStatic<LeakDetectorImpl<void> > Objects; + +static void clearGarbage(LLVMContext &Context) { + Objects->clear(); + Context.pImpl->LLVMObjects.clear(); +} + +void LeakDetector::addGarbageObjectImpl(void *Object) { + sys::SmartScopedLock<true> Lock(*ObjectsLock); + Objects->addGarbage(Object); +} + +void LeakDetector::addGarbageObjectImpl(const Value *Object) { + LLVMContextImpl *pImpl = Object->getContext().pImpl; + pImpl->LLVMObjects.addGarbage(Object); +} + +void LeakDetector::removeGarbageObjectImpl(void *Object) { + sys::SmartScopedLock<true> Lock(*ObjectsLock); + Objects->removeGarbage(Object); +} + +void LeakDetector::removeGarbageObjectImpl(const Value *Object) { + LLVMContextImpl *pImpl = Object->getContext().pImpl; + pImpl->LLVMObjects.removeGarbage(Object); +} + +void LeakDetector::checkForGarbageImpl(LLVMContext &Context, + const std::string &Message) { + LLVMContextImpl *pImpl = Context.pImpl; + sys::SmartScopedLock<true> Lock(*ObjectsLock); + + Objects->setName("GENERIC"); + pImpl->LLVMObjects.setName("LLVM"); + + // use non-short-circuit version so that both checks are performed + if (Objects->hasGarbage(Message) | + pImpl->LLVMObjects.hasGarbage(Message)) + errs() << "\nThis is probably because you removed an object, but didn't " + << "delete it. Please check your code for memory leaks.\n"; + + // Clear out results so we don't get duplicate warnings on + // next call... + clearGarbage(Context); +} diff --git a/lib/IR/LeaksContext.h b/lib/IR/LeaksContext.h new file mode 100644 index 0000000000..5038dc9d6d --- /dev/null +++ b/lib/IR/LeaksContext.h @@ -0,0 +1,92 @@ +//===- LeaksContext.h - LeadDetector Implementation ------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various helper methods and classes used by +// LLVMContextImpl for leaks detectors. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/IR/Value.h" + +namespace llvm { + +template <class T> +struct PrinterTrait { + static void print(const T* P) { errs() << P; } +}; + +template<> +struct PrinterTrait<Value> { + static void print(const Value* P) { errs() << *P; } +}; + +template <typename T> +struct LeakDetectorImpl { + explicit LeakDetectorImpl(const char* const name = "") : + Cache(0), Name(name) { } + + void clear() { + Cache = 0; + Ts.clear(); + } + + void setName(const char* n) { + Name = n; + } + + // Because the most common usage pattern, by far, is to add a + // garbage object, then remove it immediately, we optimize this + // case. When an object is added, it is not added to the set + // immediately, it is added to the CachedValue Value. If it is + // immediately removed, no set search need be performed. 
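  // (Editorial example of the fast path described above: for a transient
  //  object P, the sequence
  //     addGarbage(P);     // only sets Cache = P
  //     removeGarbage(P);  // cache hit: clears Cache, never touches Ts
  //  performs no hashing or set insertion at all.)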
+ void addGarbage(const T* o) { + assert(Ts.count(o) == 0 && "Object already in set!"); + if (Cache) { + assert(Cache != o && "Object already in set!"); + Ts.insert(Cache); + } + Cache = o; + } + + void removeGarbage(const T* o) { + if (o == Cache) + Cache = 0; // Cache hit + else + Ts.erase(o); + } + + bool hasGarbage(const std::string& Message) { + addGarbage(0); // Flush the Cache + + assert(Cache == 0 && "No value should be cached anymore!"); + + if (!Ts.empty()) { + errs() << "Leaked " << Name << " objects found: " << Message << ":\n"; + for (typename SmallPtrSet<const T*, 8>::iterator I = Ts.begin(), + E = Ts.end(); I != E; ++I) { + errs() << '\t'; + PrinterTrait<T>::print(*I); + errs() << '\n'; + } + errs() << '\n'; + + return true; + } + + return false; + } + +private: + SmallPtrSet<const T*, 8> Ts; + const T* Cache; + const char* Name; +}; + +} diff --git a/lib/IR/Makefile b/lib/IR/Makefile new file mode 100644 index 0000000000..cc403f38dd --- /dev/null +++ b/lib/IR/Makefile @@ -0,0 +1,33 @@ +##===- lib/IR/Makefile -------------------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +LEVEL = ../.. +LIBRARYNAME = LLVMCore +BUILD_ARCHIVE = 1 + +BUILT_SOURCES = $(PROJ_OBJ_ROOT)/include/llvm/IR/Intrinsics.gen + +include $(LEVEL)/Makefile.common + +GENFILE:=$(PROJ_OBJ_ROOT)/include/llvm/IR/Intrinsics.gen + +INTRINSICTD := $(PROJ_SRC_ROOT)/include/llvm/IR/Intrinsics.td +INTRINSICTDS := $(wildcard $(PROJ_SRC_ROOT)/include/llvm/IR/Intrinsics*.td) + +$(ObjDir)/Intrinsics.gen.tmp: $(ObjDir)/.dir $(INTRINSICTDS) $(LLVM_TBLGEN) + $(Echo) Building Intrinsics.gen.tmp from Intrinsics.td + $(Verb) $(LLVMTableGen) $(call SYSPATH, $(INTRINSICTD)) -o $(call SYSPATH, $@) -gen-intrinsic + +$(GENFILE): $(ObjDir)/Intrinsics.gen.tmp $(PROJ_OBJ_ROOT)/include/llvm/IR/.dir + $(Verb) $(CMP) -s $@ $< || ( $(CP) $< $@ && \ + $(EchoCmd) Updated Intrinsics.gen because Intrinsics.gen.tmp \ + changed significantly. ) + +install-local:: $(GENFILE) + $(Echo) Installing $(DESTDIR)$(PROJ_includedir)/llvm/IR/Intrinsics.gen + $(Verb) $(DataInstall) $(GENFILE) $(DESTDIR)$(PROJ_includedir)/llvm/IR/Intrinsics.gen diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp new file mode 100644 index 0000000000..0228aeb31f --- /dev/null +++ b/lib/IR/Metadata.cpp @@ -0,0 +1,745 @@ +//===-- Metadata.cpp - Implement Metadata classes -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Metadata classes. 
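// (Usage sketch, editorial: a typical way the classes below are consumed,
//  relying only on APIs defined in this file and on LLVMContext/Instruction
//  methods added elsewhere in this commit; the kind name "my.note" and the
//  variable names are made up.)
//
//    Value *Elts[] = { MDString::get(Ctx, "some note") };
//    MDNode *N = MDNode::get(Ctx, Elts);          // uniqued in this context
//    Inst->setMetadata("my.note", N);             // attach to an instruction
//    MDNode *Back = Inst->getMetadata("my.note"); // query it again by name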
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Metadata.h" +#include "LLVMContextImpl.h" +#include "SymbolTableListTraitsImpl.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/ConstantRange.h" +#include "llvm/Support/LeakDetector.h" +#include "llvm/Support/ValueHandle.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// MDString implementation. +// + +void MDString::anchor() { } + +MDString::MDString(LLVMContext &C) + : Value(Type::getMetadataTy(C), Value::MDStringVal) {} + +MDString *MDString::get(LLVMContext &Context, StringRef Str) { + LLVMContextImpl *pImpl = Context.pImpl; + StringMapEntry<Value*> &Entry = + pImpl->MDStringCache.GetOrCreateValue(Str); + Value *&S = Entry.getValue(); + if (!S) S = new MDString(Context); + S->setValueName(&Entry); + return cast<MDString>(S); +} + +//===----------------------------------------------------------------------===// +// MDNodeOperand implementation. +// + +// Use CallbackVH to hold MDNode operands. +namespace llvm { +class MDNodeOperand : public CallbackVH { + MDNode *getParent() { + MDNodeOperand *Cur = this; + + while (Cur->getValPtrInt() != 1) + --Cur; + + assert(Cur->getValPtrInt() == 1 && + "Couldn't find the beginning of the operand list!"); + return reinterpret_cast<MDNode*>(Cur) - 1; + } + +public: + MDNodeOperand(Value *V) : CallbackVH(V) {} + ~MDNodeOperand() {} + + void set(Value *V) { + unsigned IsFirst = this->getValPtrInt(); + this->setValPtr(V); + this->setAsFirstOperand(IsFirst); + } + + /// setAsFirstOperand - Accessor method to mark the operand as the first in + /// the list. + void setAsFirstOperand(unsigned V) { this->setValPtrInt(V); } + + virtual void deleted(); + virtual void allUsesReplacedWith(Value *NV); +}; +} // end namespace llvm. + + +void MDNodeOperand::deleted() { + getParent()->replaceOperand(this, 0); +} + +void MDNodeOperand::allUsesReplacedWith(Value *NV) { + getParent()->replaceOperand(this, NV); +} + +//===----------------------------------------------------------------------===// +// MDNode implementation. +// + +/// getOperandPtr - Helper function to get the MDNodeOperand's coallocated on +/// the end of the MDNode. +static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) { + // Use <= instead of < to permit a one-past-the-end address. + assert(Op <= N->getNumOperands() && "Invalid operand number"); + return reinterpret_cast<MDNodeOperand*>(N + 1) + Op; +} + +void MDNode::replaceOperandWith(unsigned i, Value *Val) { + MDNodeOperand *Op = getOperandPtr(this, i); + replaceOperand(Op, Val); +} + +MDNode::MDNode(LLVMContext &C, ArrayRef<Value*> Vals, bool isFunctionLocal) +: Value(Type::getMetadataTy(C), Value::MDNodeVal) { + NumOperands = Vals.size(); + + if (isFunctionLocal) + setValueSubclassData(getSubclassDataFromValue() | FunctionLocalBit); + + // Initialize the operand list, which is co-allocated on the end of the node. + unsigned i = 0; + for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands; + Op != E; ++Op, ++i) { + new (Op) MDNodeOperand(Vals[i]); + + // Mark the first MDNodeOperand as being the first in the list of operands. + if (i == 0) + Op->setAsFirstOperand(1); + } +} + +/// ~MDNode - Destroy MDNode. 
+MDNode::~MDNode() { + assert((getSubclassDataFromValue() & DestroyFlag) != 0 && + "Not being destroyed through destroy()?"); + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + if (isNotUniqued()) { + pImpl->NonUniquedMDNodes.erase(this); + } else { + pImpl->MDNodeSet.RemoveNode(this); + } + + // Destroy the operands. + for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands; + Op != E; ++Op) + Op->~MDNodeOperand(); +} + +static const Function *getFunctionForValue(Value *V) { + if (!V) return NULL; + if (Instruction *I = dyn_cast<Instruction>(V)) { + BasicBlock *BB = I->getParent(); + return BB ? BB->getParent() : 0; + } + if (Argument *A = dyn_cast<Argument>(V)) + return A->getParent(); + if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) + return BB->getParent(); + if (MDNode *MD = dyn_cast<MDNode>(V)) + return MD->getFunction(); + return NULL; +} + +#ifndef NDEBUG +static const Function *assertLocalFunction(const MDNode *N) { + if (!N->isFunctionLocal()) return 0; + + // FIXME: This does not handle cyclic function local metadata. + const Function *F = 0, *NewF = 0; + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) { + if (MDNode *MD = dyn_cast<MDNode>(V)) + NewF = assertLocalFunction(MD); + else + NewF = getFunctionForValue(V); + } + if (F == 0) + F = NewF; + else + assert((NewF == 0 || F == NewF) &&"inconsistent function-local metadata"); + } + return F; +} +#endif + +// getFunction - If this metadata is function-local and recursively has a +// function-local operand, return the first such operand's parent function. +// Otherwise, return null. getFunction() should not be used for performance- +// critical code because it recursively visits all the MDNode's operands. +const Function *MDNode::getFunction() const { +#ifndef NDEBUG + return assertLocalFunction(this); +#else + if (!isFunctionLocal()) return NULL; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (const Function *F = getFunctionForValue(getOperand(i))) + return F; + return NULL; +#endif +} + +// destroy - Delete this node. Only when there are no uses. +void MDNode::destroy() { + setValueSubclassData(getSubclassDataFromValue() | DestroyFlag); + // Placement delete, then free the memory. + this->~MDNode(); + free(this); +} + +/// isFunctionLocalValue - Return true if this is a value that would require a +/// function-local MDNode. +static bool isFunctionLocalValue(Value *V) { + return isa<Instruction>(V) || isa<Argument>(V) || isa<BasicBlock>(V) || + (isa<MDNode>(V) && cast<MDNode>(V)->isFunctionLocal()); +} + +MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals, + FunctionLocalness FL, bool Insert) { + LLVMContextImpl *pImpl = Context.pImpl; + + // Add all the operand pointers. Note that we don't have to add the + // isFunctionLocal bit because that's implied by the operands. + // Note that if the operands are later nulled out, the node will be + // removed from the uniquing map. 
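  // (Editorial note on the uniquing scheme used below: the candidate node is
  //  profiled into a FoldingSetNodeID from its operand pointers;
  //  FindNodeOrInsertPos either returns an existing identical node or hands
  //  back an insertion point, and only in the latter case is a new MDNode
  //  co-allocated with its operand array and inserted at that position.)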
+ FoldingSetNodeID ID; + for (unsigned i = 0; i != Vals.size(); ++i) + ID.AddPointer(Vals[i]); + + void *InsertPoint; + MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (N || !Insert) + return N; + + bool isFunctionLocal = false; + switch (FL) { + case FL_Unknown: + for (unsigned i = 0; i != Vals.size(); ++i) { + Value *V = Vals[i]; + if (!V) continue; + if (isFunctionLocalValue(V)) { + isFunctionLocal = true; + break; + } + } + break; + case FL_No: + isFunctionLocal = false; + break; + case FL_Yes: + isFunctionLocal = true; + break; + } + + // Coallocate space for the node and Operands together, then placement new. + void *Ptr = malloc(sizeof(MDNode) + Vals.size() * sizeof(MDNodeOperand)); + N = new (Ptr) MDNode(Context, Vals, isFunctionLocal); + + // Cache the operand hash. + N->Hash = ID.ComputeHash(); + + // InsertPoint will have been set by the FindNodeOrInsertPos call. + pImpl->MDNodeSet.InsertNode(N, InsertPoint); + + return N; +} + +MDNode *MDNode::get(LLVMContext &Context, ArrayRef<Value*> Vals) { + return getMDNode(Context, Vals, FL_Unknown); +} + +MDNode *MDNode::getWhenValsUnresolved(LLVMContext &Context, + ArrayRef<Value*> Vals, + bool isFunctionLocal) { + return getMDNode(Context, Vals, isFunctionLocal ? FL_Yes : FL_No); +} + +MDNode *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Value*> Vals) { + return getMDNode(Context, Vals, FL_Unknown, false); +} + +MDNode *MDNode::getTemporary(LLVMContext &Context, ArrayRef<Value*> Vals) { + MDNode *N = + (MDNode *)malloc(sizeof(MDNode) + Vals.size() * sizeof(MDNodeOperand)); + N = new (N) MDNode(Context, Vals, FL_No); + N->setValueSubclassData(N->getSubclassDataFromValue() | + NotUniquedBit); + LeakDetector::addGarbageObject(N); + return N; +} + +void MDNode::deleteTemporary(MDNode *N) { + assert(N->use_empty() && "Temporary MDNode has uses!"); + assert(!N->getContext().pImpl->MDNodeSet.RemoveNode(N) && + "Deleting a non-temporary uniqued node!"); + assert(!N->getContext().pImpl->NonUniquedMDNodes.erase(N) && + "Deleting a non-temporary non-uniqued node!"); + assert((N->getSubclassDataFromValue() & NotUniquedBit) && + "Temporary MDNode does not have NotUniquedBit set!"); + assert((N->getSubclassDataFromValue() & DestroyFlag) == 0 && + "Temporary MDNode has DestroyFlag set!"); + LeakDetector::removeGarbageObject(N); + N->destroy(); +} + +/// getOperand - Return specified operand. +Value *MDNode::getOperand(unsigned i) const { + assert(i < getNumOperands() && "Invalid operand number"); + return *getOperandPtr(const_cast<MDNode*>(this), i); +} + +void MDNode::Profile(FoldingSetNodeID &ID) const { + // Add all the operand pointers. Note that we don't have to add the + // isFunctionLocal bit because that's implied by the operands. + // Note that if the operands are later nulled out, the node will be + // removed from the uniquing map. + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + ID.AddPointer(getOperand(i)); +} + +void MDNode::setIsNotUniqued() { + setValueSubclassData(getSubclassDataFromValue() | NotUniquedBit); + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + pImpl->NonUniquedMDNodes.insert(this); +} + +// Replace value from this node's operand list. +void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) { + Value *From = *Op; + + // If is possible that someone did GV->RAUW(inst), replacing a global variable + // with an instruction or some other function-local object. If this is a + // non-function-local MDNode, it can't point to a function-local object. 
+ // Handle this case by implicitly dropping the MDNode reference to null. + // Likewise if the MDNode is function-local but for a different function. + if (To && isFunctionLocalValue(To)) { + if (!isFunctionLocal()) + To = 0; + else { + const Function *F = getFunction(); + const Function *FV = getFunctionForValue(To); + // Metadata can be function-local without having an associated function. + // So only consider functions to have changed if non-null. + if (F && FV && F != FV) + To = 0; + } + } + + if (From == To) + return; + + // Update the operand. + Op->set(To); + + // If this node is already not being uniqued (because one of the operands + // already went to null), then there is nothing else to do here. + if (isNotUniqued()) return; + + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + + // Remove "this" from the context map. FoldingSet doesn't have to reprofile + // this node to remove it, so we don't care what state the operands are in. + pImpl->MDNodeSet.RemoveNode(this); + + // If we are dropping an argument to null, we choose to not unique the MDNode + // anymore. This commonly occurs during destruction, and uniquing these + // brings little reuse. Also, this means we don't need to include + // isFunctionLocal bits in FoldingSetNodeIDs for MDNodes. + if (To == 0) { + setIsNotUniqued(); + return; + } + + // Now that the node is out of the folding set, get ready to reinsert it. + // First, check to see if another node with the same operands already exists + // in the set. If so, then this node is redundant. + FoldingSetNodeID ID; + Profile(ID); + void *InsertPoint; + if (MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)) { + replaceAllUsesWith(N); + destroy(); + return; + } + + // Cache the operand hash. + Hash = ID.ComputeHash(); + // InsertPoint will have been set by the FindNodeOrInsertPos call. + pImpl->MDNodeSet.InsertNode(this, InsertPoint); + + // If this MDValue was previously function-local but no longer is, clear + // its function-local flag. + if (isFunctionLocal() && !isFunctionLocalValue(To)) { + bool isStillFunctionLocal = false; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { + Value *V = getOperand(i); + if (!V) continue; + if (isFunctionLocalValue(V)) { + isStillFunctionLocal = true; + break; + } + } + if (!isStillFunctionLocal) + setValueSubclassData(getSubclassDataFromValue() & ~FunctionLocalBit); + } +} + +MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) { + if (!A || !B) + return NULL; + + if (A == B) + return A; + + SmallVector<MDNode *, 4> PathA; + MDNode *T = A; + while (T) { + PathA.push_back(T); + T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0; + } + + SmallVector<MDNode *, 4> PathB; + T = B; + while (T) { + PathB.push_back(T); + T = T->getNumOperands() >= 2 ? 
cast_or_null<MDNode>(T->getOperand(1)) : 0;
+  }
+
+  int IA = PathA.size() - 1;
+  int IB = PathB.size() - 1;
+
+  MDNode *Ret = 0;
+  while (IA >= 0 && IB >=0) {
+    if (PathA[IA] == PathB[IB])
+      Ret = PathA[IA];
+    else
+      break;
+    --IA;
+    --IB;
+  }
+  return Ret;
+}
+
+MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
+  if (!A || !B)
+    return NULL;
+
+  APFloat AVal = cast<ConstantFP>(A->getOperand(0))->getValueAPF();
+  APFloat BVal = cast<ConstantFP>(B->getOperand(0))->getValueAPF();
+  if (AVal.compare(BVal) == APFloat::cmpLessThan)
+    return A;
+  return B;
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
+  return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
+}
+
+static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+                          ConstantInt *High) {
+  ConstantRange NewRange(Low->getValue(), High->getValue());
+  unsigned Size = EndPoints.size();
+  APInt LB = cast<ConstantInt>(EndPoints[Size - 2])->getValue();
+  APInt LE = cast<ConstantInt>(EndPoints[Size - 1])->getValue();
+  ConstantRange LastRange(LB, LE);
+  if (canBeMerged(NewRange, LastRange)) {
+    ConstantRange Union = LastRange.unionWith(NewRange);
+    Type *Ty = High->getType();
+    EndPoints[Size - 2] = ConstantInt::get(Ty, Union.getLower());
+    EndPoints[Size - 1] = ConstantInt::get(Ty, Union.getUpper());
+    return true;
+  }
+  return false;
+}
+
+static void addRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+                     ConstantInt *High) {
+  if (!EndPoints.empty())
+    if (tryMergeRange(EndPoints, Low, High))
+      return;
+
+  EndPoints.push_back(Low);
+  EndPoints.push_back(High);
+}
+
+MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
+  // Given two ranges, we want to compute the union of the ranges. This
+  // is slightly complicated by having to combine the intervals and merge
+  // the ones that overlap.
+
+  if (!A || !B)
+    return NULL;
+
+  if (A == B)
+    return A;
+
+  // First, walk both lists in order of the lower boundary of each interval.
+  // At each step, try to merge the new interval with the last one we added.
+  SmallVector<Value*, 4> EndPoints;
+  int AI = 0;
+  int BI = 0;
+  int AN = A->getNumOperands() / 2;
+  int BN = B->getNumOperands() / 2;
+  while (AI < AN && BI < BN) {
+    ConstantInt *ALow = cast<ConstantInt>(A->getOperand(2 * AI));
+    ConstantInt *BLow = cast<ConstantInt>(B->getOperand(2 * BI));
+
+    if (ALow->getValue().slt(BLow->getValue())) {
+      addRange(EndPoints, ALow, cast<ConstantInt>(A->getOperand(2 * AI + 1)));
+      ++AI;
+    } else {
+      addRange(EndPoints, BLow, cast<ConstantInt>(B->getOperand(2 * BI + 1)));
+      ++BI;
+    }
+  }
+  while (AI < AN) {
+    addRange(EndPoints, cast<ConstantInt>(A->getOperand(2 * AI)),
+             cast<ConstantInt>(A->getOperand(2 * AI + 1)));
+    ++AI;
+  }
+  while (BI < BN) {
+    addRange(EndPoints, cast<ConstantInt>(B->getOperand(2 * BI)),
+             cast<ConstantInt>(B->getOperand(2 * BI + 1)));
+    ++BI;
+  }
+
+  // If we have more than 2 ranges (4 endpoints) we have to try to merge
+  // the last and first ones.
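  // (Editorial worked example: if the walk above produced the endpoint list
  //  { 5, 10, 15, 20, 30, 5 }, i.e. the ranges [5,10), [15,20) and the
  //  wrapping [30,5), then the wrapping last range is contiguous with the
  //  first one's lower bound (5 == 5), so the code below folds them into
  //  [30,10), leaving { 15, 20, 30, 10 }.)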
+ unsigned Size = EndPoints.size(); + if (Size > 4) { + ConstantInt *FB = cast<ConstantInt>(EndPoints[0]); + ConstantInt *FE = cast<ConstantInt>(EndPoints[1]); + if (tryMergeRange(EndPoints, FB, FE)) { + for (unsigned i = 0; i < Size - 2; ++i) { + EndPoints[i] = EndPoints[i + 2]; + } + EndPoints.resize(Size - 2); + } + } + + // If in the end we have a single range, it is possible that it is now the + // full range. Just drop the metadata in that case. + if (EndPoints.size() == 2) { + ConstantRange Range(cast<ConstantInt>(EndPoints[0])->getValue(), + cast<ConstantInt>(EndPoints[1])->getValue()); + if (Range.isFullSet()) + return NULL; + } + + return MDNode::get(A->getContext(), EndPoints); +} + +//===----------------------------------------------------------------------===// +// NamedMDNode implementation. +// + +static SmallVector<TrackingVH<MDNode>, 4> &getNMDOps(void *Operands) { + return *(SmallVector<TrackingVH<MDNode>, 4>*)Operands; +} + +NamedMDNode::NamedMDNode(const Twine &N) + : Name(N.str()), Parent(0), + Operands(new SmallVector<TrackingVH<MDNode>, 4>()) { +} + +NamedMDNode::~NamedMDNode() { + dropAllReferences(); + delete &getNMDOps(Operands); +} + +/// getNumOperands - Return number of NamedMDNode operands. +unsigned NamedMDNode::getNumOperands() const { + return (unsigned)getNMDOps(Operands).size(); +} + +/// getOperand - Return specified operand. +MDNode *NamedMDNode::getOperand(unsigned i) const { + assert(i < getNumOperands() && "Invalid Operand number!"); + return dyn_cast<MDNode>(&*getNMDOps(Operands)[i]); +} + +/// addOperand - Add metadata Operand. +void NamedMDNode::addOperand(MDNode *M) { + assert(!M->isFunctionLocal() && + "NamedMDNode operands must not be function-local!"); + getNMDOps(Operands).push_back(TrackingVH<MDNode>(M)); +} + +/// eraseFromParent - Drop all references and remove the node from parent +/// module. +void NamedMDNode::eraseFromParent() { + getParent()->eraseNamedMetadata(this); +} + +/// dropAllReferences - Remove all uses and clear node vector. +void NamedMDNode::dropAllReferences() { + getNMDOps(Operands).clear(); +} + +/// getName - Return a constant reference to this named metadata's name. +StringRef NamedMDNode::getName() const { + return StringRef(Name); +} + +//===----------------------------------------------------------------------===// +// Instruction Metadata method implementations. +// + +void Instruction::setMetadata(StringRef Kind, MDNode *Node) { + if (Node == 0 && !hasMetadata()) return; + setMetadata(getContext().getMDKindID(Kind), Node); +} + +MDNode *Instruction::getMetadataImpl(StringRef Kind) const { + return getMetadataImpl(getContext().getMDKindID(Kind)); +} + +/// setMetadata - Set the metadata of of the specified kind to the specified +/// node. This updates/replaces metadata if already present, or removes it if +/// Node is null. +void Instruction::setMetadata(unsigned KindID, MDNode *Node) { + if (Node == 0 && !hasMetadata()) return; + + // Handle 'dbg' as a special case since it is not stored in the hash table. + if (KindID == LLVMContext::MD_dbg) { + DbgLoc = DebugLoc::getFromDILocation(Node); + return; + } + + // Handle the case when we're adding/updating metadata on an instruction. + if (Node) { + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + assert(!Info.empty() == hasMetadataHashEntry() && + "HasMetadata bit is wonked"); + if (Info.empty()) { + setHasMetadataHashEntry(true); + } else { + // Handle replacement of an existing value. 
+ for (unsigned i = 0, e = Info.size(); i != e; ++i) + if (Info[i].first == KindID) { + Info[i].second = Node; + return; + } + } + + // No replacement, just add it to the list. + Info.push_back(std::make_pair(KindID, Node)); + return; + } + + // Otherwise, we're removing metadata from an instruction. + assert((hasMetadataHashEntry() == + getContext().pImpl->MetadataStore.count(this)) && + "HasMetadata bit out of date!"); + if (!hasMetadataHashEntry()) + return; // Nothing to remove! + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + + // Common case is removing the only entry. + if (Info.size() == 1 && Info[0].first == KindID) { + getContext().pImpl->MetadataStore.erase(this); + setHasMetadataHashEntry(false); + return; + } + + // Handle removal of an existing value. + for (unsigned i = 0, e = Info.size(); i != e; ++i) + if (Info[i].first == KindID) { + Info[i] = Info.back(); + Info.pop_back(); + assert(!Info.empty() && "Removing last entry should be handled above"); + return; + } + // Otherwise, removing an entry that doesn't exist on the instruction. +} + +MDNode *Instruction::getMetadataImpl(unsigned KindID) const { + // Handle 'dbg' as a special case since it is not stored in the hash table. + if (KindID == LLVMContext::MD_dbg) + return DbgLoc.getAsMDNode(getContext()); + + if (!hasMetadataHashEntry()) return 0; + + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + assert(!Info.empty() && "bit out of sync with hash table"); + + for (LLVMContextImpl::MDMapTy::iterator I = Info.begin(), E = Info.end(); + I != E; ++I) + if (I->first == KindID) + return I->second; + return 0; +} + +void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, + MDNode*> > &Result) const { + Result.clear(); + + // Handle 'dbg' as a special case since it is not stored in the hash table. + if (!DbgLoc.isUnknown()) { + Result.push_back(std::make_pair((unsigned)LLVMContext::MD_dbg, + DbgLoc.getAsMDNode(getContext()))); + if (!hasMetadataHashEntry()) return; + } + + assert(hasMetadataHashEntry() && + getContext().pImpl->MetadataStore.count(this) && + "Shouldn't have called this"); + const LLVMContextImpl::MDMapTy &Info = + getContext().pImpl->MetadataStore.find(this)->second; + assert(!Info.empty() && "Shouldn't have called this"); + + Result.append(Info.begin(), Info.end()); + + // Sort the resulting array so it is stable. + if (Result.size() > 1) + array_pod_sort(Result.begin(), Result.end()); +} + +void Instruction:: +getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned, + MDNode*> > &Result) const { + Result.clear(); + assert(hasMetadataHashEntry() && + getContext().pImpl->MetadataStore.count(this) && + "Shouldn't have called this"); + const LLVMContextImpl::MDMapTy &Info = + getContext().pImpl->MetadataStore.find(this)->second; + assert(!Info.empty() && "Shouldn't have called this"); + Result.append(Info.begin(), Info.end()); + + // Sort the resulting array so it is stable. + if (Result.size() > 1) + array_pod_sort(Result.begin(), Result.end()); +} + +/// clearMetadataHashEntries - Clear all hashtable-based metadata from +/// this instruction. 
+void Instruction::clearMetadataHashEntries() { + assert(hasMetadataHashEntry() && "Caller should check"); + getContext().pImpl->MetadataStore.erase(this); + setHasMetadataHashEntry(false); +} + diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp new file mode 100644 index 0000000000..8affcc9469 --- /dev/null +++ b/lib/IR/Module.cpp @@ -0,0 +1,451 @@ +//===-- Module.cpp - Implement the Module class ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Module class for the IR library. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Module.h" +#include "SymbolTableListTraitsImpl.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/GVMaterializer.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/Support/LeakDetector.h" +#include <algorithm> +#include <cstdarg> +#include <cstdlib> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Methods to implement the globals and functions lists. +// + +// Explicit instantiations of SymbolTableListTraits since some of the methods +// are not in the public header file. +template class llvm::SymbolTableListTraits<Function, Module>; +template class llvm::SymbolTableListTraits<GlobalVariable, Module>; +template class llvm::SymbolTableListTraits<GlobalAlias, Module>; + +//===----------------------------------------------------------------------===// +// Primitive Module methods. +// + +Module::Module(StringRef MID, LLVMContext& C) + : Context(C), Materializer(NULL), ModuleID(MID) { + ValSymTab = new ValueSymbolTable(); + NamedMDSymTab = new StringMap<NamedMDNode *>(); + Context.addModule(this); +} + +Module::~Module() { + Context.removeModule(this); + dropAllReferences(); + GlobalList.clear(); + FunctionList.clear(); + AliasList.clear(); + NamedMDList.clear(); + delete ValSymTab; + delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab); +} + +/// Target endian information. +Module::Endianness Module::getEndianness() const { + StringRef temp = DataLayout; + Module::Endianness ret = AnyEndianness; + + while (!temp.empty()) { + std::pair<StringRef, StringRef> P = getToken(temp, "-"); + + StringRef token = P.first; + temp = P.second; + + if (token[0] == 'e') { + ret = LittleEndian; + } else if (token[0] == 'E') { + ret = BigEndian; + } + } + + return ret; +} + +/// Target Pointer Size information. +Module::PointerSize Module::getPointerSize() const { + StringRef temp = DataLayout; + Module::PointerSize ret = AnyPointerSize; + + while (!temp.empty()) { + std::pair<StringRef, StringRef> TmpP = getToken(temp, "-"); + temp = TmpP.second; + TmpP = getToken(TmpP.first, ":"); + StringRef token = TmpP.second, signalToken = TmpP.first; + + if (signalToken[0] == 'p') { + int size = 0; + getToken(token, ":").first.getAsInteger(10, size); + if (size == 32) + ret = Pointer32; + else if (size == 64) + ret = Pointer64; + } + } + + return ret; +} + +/// getNamedValue - Return the first global value in the module with +/// the specified name, of arbitrary type. 
This method returns null +/// if a global with the specified name is not found. +GlobalValue *Module::getNamedValue(StringRef Name) const { + return cast_or_null<GlobalValue>(getValueSymbolTable().lookup(Name)); +} + +/// getMDKindID - Return a unique non-zero ID for the specified metadata kind. +/// This ID is uniqued across modules in the current LLVMContext. +unsigned Module::getMDKindID(StringRef Name) const { + return Context.getMDKindID(Name); +} + +/// getMDKindNames - Populate client supplied SmallVector with the name for +/// custom metadata IDs registered in this LLVMContext. ID #0 is not used, +/// so it is filled in as an empty string. +void Module::getMDKindNames(SmallVectorImpl<StringRef> &Result) const { + return Context.getMDKindNames(Result); +} + + +//===----------------------------------------------------------------------===// +// Methods for easy access to the functions in the module. +// + +// getOrInsertFunction - Look up the specified function in the module symbol +// table. If it does not exist, add a prototype for the function and return +// it. This is nice because it allows most passes to get away with not handling +// the symbol table directly for this common task. +// +Constant *Module::getOrInsertFunction(StringRef Name, + FunctionType *Ty, + AttributeSet AttributeList) { + // See if we have a definition for the specified function already. + GlobalValue *F = getNamedValue(Name); + if (F == 0) { + // Nope, add it + Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage, Name); + if (!New->isIntrinsic()) // Intrinsics get attrs set on construction + New->setAttributes(AttributeList); + FunctionList.push_back(New); + return New; // Return the new prototype. + } + + // Okay, the function exists. Does it have externally visible linkage? + if (F->hasLocalLinkage()) { + // Clear the function's name. + F->setName(""); + // Retry, now there won't be a conflict. + Constant *NewF = getOrInsertFunction(Name, Ty); + F->setName(Name); + return NewF; + } + + // If the function exists but has the wrong type, return a bitcast to the + // right type. + if (F->getType() != PointerType::getUnqual(Ty)) + return ConstantExpr::getBitCast(F, PointerType::getUnqual(Ty)); + + // Otherwise, we just found the existing function or a prototype. + return F; +} + +Constant *Module::getOrInsertTargetIntrinsic(StringRef Name, + FunctionType *Ty, + AttributeSet AttributeList) { + // See if we have a definition for the specified function already. + GlobalValue *F = getNamedValue(Name); + if (F == 0) { + // Nope, add it + Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage, Name); + New->setAttributes(AttributeList); + FunctionList.push_back(New); + return New; // Return the new prototype. + } + + // Otherwise, we just found the existing function or a prototype. + return F; +} + +Constant *Module::getOrInsertFunction(StringRef Name, + FunctionType *Ty) { + return getOrInsertFunction(Name, Ty, AttributeSet()); +} + +// getOrInsertFunction - Look up the specified function in the module symbol +// table. If it does not exist, add a prototype for the function and return it. +// This version of the method takes a null terminated list of function +// arguments, which makes it easier for clients to use. +// +Constant *Module::getOrInsertFunction(StringRef Name, + AttributeSet AttributeList, + Type *RetTy, ...) { + va_list Args; + va_start(Args, RetTy); + + // Build the list of argument types... 
+ std::vector<Type*> ArgTys; + while (Type *ArgTy = va_arg(Args, Type*)) + ArgTys.push_back(ArgTy); + + va_end(Args); + + // Build the function type and chain to the other getOrInsertFunction... + return getOrInsertFunction(Name, + FunctionType::get(RetTy, ArgTys, false), + AttributeList); +} + +Constant *Module::getOrInsertFunction(StringRef Name, + Type *RetTy, ...) { + va_list Args; + va_start(Args, RetTy); + + // Build the list of argument types... + std::vector<Type*> ArgTys; + while (Type *ArgTy = va_arg(Args, Type*)) + ArgTys.push_back(ArgTy); + + va_end(Args); + + // Build the function type and chain to the other getOrInsertFunction... + return getOrInsertFunction(Name, + FunctionType::get(RetTy, ArgTys, false), + AttributeSet()); +} + +// getFunction - Look up the specified function in the module symbol table. +// If it does not exist, return null. +// +Function *Module::getFunction(StringRef Name) const { + return dyn_cast_or_null<Function>(getNamedValue(Name)); +} + +//===----------------------------------------------------------------------===// +// Methods for easy access to the global variables in the module. +// + +/// getGlobalVariable - Look up the specified global variable in the module +/// symbol table. If it does not exist, return null. The type argument +/// should be the underlying type of the global, i.e., it should not have +/// the top-level PointerType, which represents the address of the global. +/// If AllowLocal is set to true, this function will return types that +/// have an local. By default, these types are not returned. +/// +GlobalVariable *Module::getGlobalVariable(StringRef Name, + bool AllowLocal) const { + if (GlobalVariable *Result = + dyn_cast_or_null<GlobalVariable>(getNamedValue(Name))) + if (AllowLocal || !Result->hasLocalLinkage()) + return Result; + return 0; +} + +/// getOrInsertGlobal - Look up the specified global in the module symbol table. +/// 1. If it does not exist, add a declaration of the global and return it. +/// 2. Else, the global exists but has the wrong type: return the function +/// with a constantexpr cast to the right type. +/// 3. Finally, if the existing global is the correct delclaration, return the +/// existing global. +Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) { + // See if we have a definition for the specified global already. + GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)); + if (GV == 0) { + // Nope, add it + GlobalVariable *New = + new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage, + 0, Name); + return New; // Return the new declaration. + } + + // If the variable exists but has the wrong type, return a bitcast to the + // right type. + if (GV->getType() != PointerType::getUnqual(Ty)) + return ConstantExpr::getBitCast(GV, PointerType::getUnqual(Ty)); + + // Otherwise, we just found the existing function or a prototype. + return GV; +} + +//===----------------------------------------------------------------------===// +// Methods for easy access to the global variables in the module. +// + +// getNamedAlias - Look up the specified global in the module symbol table. +// If it does not exist, return null. +// +GlobalAlias *Module::getNamedAlias(StringRef Name) const { + return dyn_cast_or_null<GlobalAlias>(getNamedValue(Name)); +} + +/// getNamedMetadata - Return the first NamedMDNode in the module with the +/// specified name. This method returns null if a NamedMDNode with the +/// specified name is not found. 
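Taken together, these helpers are what most clients use instead of touching the symbol table directly. As a hedged usage sketch (the symbol names "my_helper" and "my_counter" are hypothetical, not part of this commit), declaring a function and a global on demand and then looking up named metadata, whose accessor is defined just below, reads roughly like this:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void declareHelpers(Module &M) {
      Type *I32 = Type::getInt32Ty(M.getContext());

      // Declare (or find) "my_helper": i32 (i32).
      Type *ArgTys[] = { I32 };
      Constant *Helper =
          M.getOrInsertFunction("my_helper", FunctionType::get(I32, ArgTys, false));

      // Declare (or find) an external i32 global named "my_counter".
      Constant *Counter = M.getOrInsertGlobal("my_counter", I32);

      // Named-metadata lookup; returns null if the node does not exist yet.
      NamedMDNode *Flags = M.getNamedMetadata("llvm.module.flags");
      (void)Helper; (void)Counter; (void)Flags;
    }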
+NamedMDNode *Module::getNamedMetadata(const Twine &Name) const { + SmallString<256> NameData; + StringRef NameRef = Name.toStringRef(NameData); + return static_cast<StringMap<NamedMDNode*> *>(NamedMDSymTab)->lookup(NameRef); +} + +/// getOrInsertNamedMetadata - Return the first named MDNode in the module +/// with the specified name. This method returns a new NamedMDNode if a +/// NamedMDNode with the specified name is not found. +NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) { + NamedMDNode *&NMD = + (*static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab))[Name]; + if (!NMD) { + NMD = new NamedMDNode(Name); + NMD->setParent(this); + NamedMDList.push_back(NMD); + } + return NMD; +} + +/// eraseNamedMetadata - Remove the given NamedMDNode from this module and +/// delete it. +void Module::eraseNamedMetadata(NamedMDNode *NMD) { + static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab)->erase(NMD->getName()); + NamedMDList.erase(NMD); +} + +/// getModuleFlagsMetadata - Returns the module flags in the provided vector. +void Module:: +getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const { + const NamedMDNode *ModFlags = getModuleFlagsMetadata(); + if (!ModFlags) return; + + for (unsigned i = 0, e = ModFlags->getNumOperands(); i != e; ++i) { + MDNode *Flag = ModFlags->getOperand(i); + ConstantInt *Behavior = cast<ConstantInt>(Flag->getOperand(0)); + MDString *Key = cast<MDString>(Flag->getOperand(1)); + Value *Val = Flag->getOperand(2); + Flags.push_back(ModuleFlagEntry(ModFlagBehavior(Behavior->getZExtValue()), + Key, Val)); + } +} + +/// getModuleFlagsMetadata - Returns the NamedMDNode in the module that +/// represents module-level flags. This method returns null if there are no +/// module-level flags. +NamedMDNode *Module::getModuleFlagsMetadata() const { + return getNamedMetadata("llvm.module.flags"); +} + +/// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module that +/// represents module-level flags. If module-level flags aren't found, it +/// creates the named metadata that contains them. +NamedMDNode *Module::getOrInsertModuleFlagsMetadata() { + return getOrInsertNamedMetadata("llvm.module.flags"); +} + +/// addModuleFlag - Add a module-level flag to the module-level flags +/// metadata. It will create the module-level flags named metadata if it doesn't +/// already exist. +void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key, + Value *Val) { + Type *Int32Ty = Type::getInt32Ty(Context); + Value *Ops[3] = { + ConstantInt::get(Int32Ty, Behavior), MDString::get(Context, Key), Val + }; + getOrInsertModuleFlagsMetadata()->addOperand(MDNode::get(Context, Ops)); +} +void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key, + uint32_t Val) { + Type *Int32Ty = Type::getInt32Ty(Context); + addModuleFlag(Behavior, Key, ConstantInt::get(Int32Ty, Val)); +} +void Module::addModuleFlag(MDNode *Node) { + assert(Node->getNumOperands() == 3 && + "Invalid number of operands for module flag!"); + assert(isa<ConstantInt>(Node->getOperand(0)) && + isa<MDString>(Node->getOperand(1)) && + "Invalid operand types for module flag!"); + getOrInsertModuleFlagsMetadata()->addOperand(Node); +} + +//===----------------------------------------------------------------------===// +// Methods to control the materialization of GlobalValues in the Module. +// +void Module::setMaterializer(GVMaterializer *GVM) { + assert(!Materializer && + "Module already has a GVMaterializer. 
Call MaterializeAllPermanently" + " to clear it out before setting another one."); + Materializer.reset(GVM); +} + +bool Module::isMaterializable(const GlobalValue *GV) const { + if (Materializer) + return Materializer->isMaterializable(GV); + return false; +} + +bool Module::isDematerializable(const GlobalValue *GV) const { + if (Materializer) + return Materializer->isDematerializable(GV); + return false; +} + +bool Module::Materialize(GlobalValue *GV, std::string *ErrInfo) { + if (Materializer) + return Materializer->Materialize(GV, ErrInfo); + return false; +} + +void Module::Dematerialize(GlobalValue *GV) { + if (Materializer) + return Materializer->Dematerialize(GV); +} + +bool Module::MaterializeAll(std::string *ErrInfo) { + if (!Materializer) + return false; + return Materializer->MaterializeModule(this, ErrInfo); +} + +bool Module::MaterializeAllPermanently(std::string *ErrInfo) { + if (MaterializeAll(ErrInfo)) + return true; + Materializer.reset(); + return false; +} + +//===----------------------------------------------------------------------===// +// Other module related stuff. +// + + +// dropAllReferences() - This function causes all the subelements to "let go" +// of all references that they are maintaining. This allows one to 'delete' a +// whole module at a time, even though there may be circular references... first +// all references are dropped, and all use counts go to zero. Then everything +// is deleted for real. Note that no operations are valid on an object that +// has "dropped all references", except operator delete. +// +void Module::dropAllReferences() { + for(Module::iterator I = begin(), E = end(); I != E; ++I) + I->dropAllReferences(); + + for(Module::global_iterator I = global_begin(), E = global_end(); I != E; ++I) + I->dropAllReferences(); + + for(Module::alias_iterator I = alias_begin(), E = alias_end(); I != E; ++I) + I->dropAllReferences(); +} diff --git a/lib/IR/Pass.cpp b/lib/IR/Pass.cpp new file mode 100644 index 0000000000..7fc4828238 --- /dev/null +++ b/lib/IR/Pass.cpp @@ -0,0 +1,276 @@ +//===- Pass.cpp - LLVM Pass Infrastructure Implementation -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LLVM Pass infrastructure. It is primarily +// responsible with ensuring that passes are executed and batched together +// optimally. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Pass.h" +#include "llvm/Assembly/PrintModulePass.h" +#include "llvm/PassRegistry.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/PassNameParser.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Pass Implementation +// + +// Force out-of-line virtual method. +Pass::~Pass() { + delete Resolver; +} + +// Force out-of-line virtual method. 
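Before moving on from Module.cpp, the module-flag helpers implemented above are typically driven as in the following sketch (illustrative only; the key "my-flag" is hypothetical, and the available ModFlagBehavior values are whatever llvm/IR/Module.h defines at this revision):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void recordFlag(Module &M) {
      // Creates the llvm.module.flags named metadata on first use.
      M.addModuleFlag(Module::Warning, "my-flag", 1);

      // Read every flag back through the helper shown earlier.
      SmallVector<Module::ModuleFlagEntry, 4> Flags;
      M.getModuleFlagsMetadata(Flags);
      for (unsigned i = 0, e = Flags.size(); i != e; ++i)
        (void)Flags[i].Key;   // MDString* key; Val and Behavior are also available
    }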
+ModulePass::~ModulePass() { } + +Pass *ModulePass::createPrinterPass(raw_ostream &O, + const std::string &Banner) const { + return createPrintModulePass(&O, false, Banner); +} + +PassManagerType ModulePass::getPotentialPassManagerType() const { + return PMT_ModulePassManager; +} + +bool Pass::mustPreserveAnalysisID(char &AID) const { + return Resolver->getAnalysisIfAvailable(&AID, true) != 0; +} + +// dumpPassStructure - Implement the -debug-pass=Structure option +void Pass::dumpPassStructure(unsigned Offset) { + dbgs().indent(Offset*2) << getPassName() << "\n"; +} + +/// getPassName - Return a nice clean name for a pass. This usually +/// implemented in terms of the name that is registered by one of the +/// Registration templates, but can be overloaded directly. +/// +const char *Pass::getPassName() const { + AnalysisID AID = getPassID(); + const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(AID); + if (PI) + return PI->getPassName(); + return "Unnamed pass: implement Pass::getPassName()"; +} + +void Pass::preparePassManager(PMStack &) { + // By default, don't do anything. +} + +PassManagerType Pass::getPotentialPassManagerType() const { + // Default implementation. + return PMT_Unknown; +} + +void Pass::getAnalysisUsage(AnalysisUsage &) const { + // By default, no analysis results are used, all are invalidated. +} + +void Pass::releaseMemory() { + // By default, don't do anything. +} + +void Pass::verifyAnalysis() const { + // By default, don't do anything. +} + +void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) { + return this; +} + +ImmutablePass *Pass::getAsImmutablePass() { + return 0; +} + +PMDataManager *Pass::getAsPMDataManager() { + return 0; +} + +void Pass::setResolver(AnalysisResolver *AR) { + assert(!Resolver && "Resolver is already set"); + Resolver = AR; +} + +// print - Print out the internal state of the pass. This is called by Analyze +// to print out the contents of an analysis. Otherwise it is not necessary to +// implement this method. +// +void Pass::print(raw_ostream &O,const Module*) const { + O << "Pass::print not implemented for pass: '" << getPassName() << "'!\n"; +} + +// dump - call print(cerr); +void Pass::dump() const { + print(dbgs(), 0); +} + +//===----------------------------------------------------------------------===// +// ImmutablePass Implementation +// +// Force out-of-line virtual method. +ImmutablePass::~ImmutablePass() { } + +void ImmutablePass::initializePass() { + // By default, don't do anything. +} + +//===----------------------------------------------------------------------===// +// FunctionPass Implementation +// + +Pass *FunctionPass::createPrinterPass(raw_ostream &O, + const std::string &Banner) const { + return createPrintFunctionPass(Banner, &O); +} + +PassManagerType FunctionPass::getPotentialPassManagerType() const { + return PMT_FunctionPassManager; +} + +//===----------------------------------------------------------------------===// +// BasicBlockPass Implementation +// + +Pass *BasicBlockPass::createPrinterPass(raw_ostream &O, + const std::string &Banner) const { + return createPrintBasicBlockPass(&O, false, Banner); +} + +bool BasicBlockPass::doInitialization(Function &) { + // By default, don't do anything. + return false; +} + +bool BasicBlockPass::doFinalization(Function &) { + // By default, don't do anything. 
+ return false; +} + +PassManagerType BasicBlockPass::getPotentialPassManagerType() const { + return PMT_BasicBlockPassManager; +} + +const PassInfo *Pass::lookupPassInfo(const void *TI) { + return PassRegistry::getPassRegistry()->getPassInfo(TI); +} + +const PassInfo *Pass::lookupPassInfo(StringRef Arg) { + return PassRegistry::getPassRegistry()->getPassInfo(Arg); +} + +Pass *Pass::createPass(AnalysisID ID) { + const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID); + if (!PI) + return NULL; + return PI->createPass(); +} + +Pass *PassInfo::createPass() const { + assert((!isAnalysisGroup() || NormalCtor) && + "No default implementation found for analysis group!"); + assert(NormalCtor && + "Cannot call createPass on PassInfo without default ctor!"); + return NormalCtor(); +} + +//===----------------------------------------------------------------------===// +// Analysis Group Implementation Code +//===----------------------------------------------------------------------===// + +// RegisterAGBase implementation +// +RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID, + const void *PassID, bool isDefault) + : PassInfo(Name, InterfaceID) { + PassRegistry::getPassRegistry()->registerAnalysisGroup(InterfaceID, PassID, + *this, isDefault); +} + +//===----------------------------------------------------------------------===// +// PassRegistrationListener implementation +// + +// PassRegistrationListener ctor - Add the current object to the list of +// PassRegistrationListeners... +PassRegistrationListener::PassRegistrationListener() { + PassRegistry::getPassRegistry()->addRegistrationListener(this); +} + +// dtor - Remove object from list of listeners... +PassRegistrationListener::~PassRegistrationListener() { + PassRegistry::getPassRegistry()->removeRegistrationListener(this); +} + +// enumeratePasses - Iterate over the registered passes, calling the +// passEnumerate callback on each PassInfo object. +// +void PassRegistrationListener::enumeratePasses() { + PassRegistry::getPassRegistry()->enumerateWith(this); +} + +PassNameParser::~PassNameParser() {} + +//===----------------------------------------------------------------------===// +// AnalysisUsage Class Implementation +// + +namespace { + struct GetCFGOnlyPasses : public PassRegistrationListener { + typedef AnalysisUsage::VectorType VectorType; + VectorType &CFGOnlyList; + GetCFGOnlyPasses(VectorType &L) : CFGOnlyList(L) {} + + void passEnumerate(const PassInfo *P) { + if (P->isCFGOnlyPass()) + CFGOnlyList.push_back(P->getTypeInfo()); + } + }; +} + +// setPreservesCFG - This function should be called to by the pass, iff they do +// not: +// +// 1. Add or remove basic blocks from the function +// 2. Modify terminator instructions in any way. +// +// This function annotates the AnalysisUsage info object to say that analyses +// that only depend on the CFG are preserved by this pass. +// +void AnalysisUsage::setPreservesCFG() { + // Since this transformation doesn't modify the CFG, it preserves all analyses + // that only depend on the CFG (like dominators, loop info, etc...) + GetCFGOnlyPasses(Preserved).enumeratePasses(); +} + +AnalysisUsage &AnalysisUsage::addPreserved(StringRef Arg) { + const PassInfo *PI = Pass::lookupPassInfo(Arg); + // If the pass exists, preserve it. Otherwise silently do nothing. 
+ if (PI) Preserved.push_back(PI->getTypeInfo()); + return *this; +} + +AnalysisUsage &AnalysisUsage::addRequiredID(const void *ID) { + Required.push_back(ID); + return *this; +} + +AnalysisUsage &AnalysisUsage::addRequiredID(char &ID) { + Required.push_back(&ID); + return *this; +} + +AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(char &ID) { + Required.push_back(&ID); + RequiredTransitive.push_back(&ID); + return *this; +} diff --git a/lib/IR/PassManager.cpp b/lib/IR/PassManager.cpp new file mode 100644 index 0000000000..5a8df703db --- /dev/null +++ b/lib/IR/PassManager.cpp @@ -0,0 +1,1914 @@ +//===- PassManager.cpp - LLVM Pass Infrastructure Implementation ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LLVM Pass Manager infrastructure. +// +//===----------------------------------------------------------------------===// + + +#include "llvm/PassManagers.h" +#include "llvm/Assembly/PrintModulePass.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/IR/Module.h" +#include "llvm/PassManager.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/Mutex.h" +#include "llvm/Support/PassNameParser.h" +#include "llvm/Support/Timer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <map> +using namespace llvm; + +// See PassManagers.h for Pass Manager infrastructure overview. + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Pass debugging information. Often it is useful to find out what pass is +// running when a crash occurs in a utility. When this library is compiled with +// debugging on, a command line option (--debug-pass) is enabled that causes the +// pass name to be printed before it executes. +// + +// Different debug levels that can be enabled... +enum PassDebugLevel { + None, Arguments, Structure, Executions, Details +}; + +static cl::opt<enum PassDebugLevel> +PassDebugging("debug-pass", cl::Hidden, + cl::desc("Print PassManager debugging information"), + cl::values( + clEnumVal(None , "disable debug output"), + clEnumVal(Arguments , "print pass arguments to pass to 'opt'"), + clEnumVal(Structure , "print pass structure before run()"), + clEnumVal(Executions, "print pass name before it is executed"), + clEnumVal(Details , "print pass details when it is executed"), + clEnumValEnd)); + +typedef llvm::cl::list<const llvm::PassInfo *, bool, PassNameParser> +PassOptionList; + +// Print IR out before/after specified passes. +static PassOptionList +PrintBefore("print-before", + llvm::cl::desc("Print IR before specified passes"), + cl::Hidden); + +static PassOptionList +PrintAfter("print-after", + llvm::cl::desc("Print IR after specified passes"), + cl::Hidden); + +static cl::opt<bool> +PrintBeforeAll("print-before-all", + llvm::cl::desc("Print IR before each pass"), + cl::init(false)); +static cl::opt<bool> +PrintAfterAll("print-after-all", + llvm::cl::desc("Print IR after each pass"), + cl::init(false)); + +/// This is a helper to determine whether to print IR before or +/// after a pass. 
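The printing options above key off the argument string a pass registers with the PassRegistry. For orientation, a minimal function pass built on the default Pass hooks implemented earlier might look like the sketch below (hypothetical, not part of this commit); once registered, its argument can be named in -print-before=/-print-after=:

    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
      struct CountBlocks : public FunctionPass {
        static char ID;
        CountBlocks() : FunctionPass(ID) {}

        virtual bool runOnFunction(Function &F) {
          (void)F.size();   // number of basic blocks; the IR is not modified
          return false;     // report "no modification"
        }

        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.setPreservesAll();   // or setPreservesCFG() for CFG-only analyses
        }
      };
    }
    char CountBlocks::ID = 0;
    static RegisterPass<CountBlocks> X("count-blocks", "Count basic blocks (example)");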
+ +static bool ShouldPrintBeforeOrAfterPass(const PassInfo *PI, + PassOptionList &PassesToPrint) { + for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) { + const llvm::PassInfo *PassInf = PassesToPrint[i]; + if (PassInf) + if (PassInf->getPassArgument() == PI->getPassArgument()) { + return true; + } + } + return false; +} + +/// This is a utility to check whether a pass should have IR dumped +/// before it. +static bool ShouldPrintBeforePass(const PassInfo *PI) { + return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PI, PrintBefore); +} + +/// This is a utility to check whether a pass should have IR dumped +/// after it. +static bool ShouldPrintAfterPass(const PassInfo *PI) { + return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PI, PrintAfter); +} + +} // End of llvm namespace + +/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions +/// or higher is specified. +bool PMDataManager::isPassDebuggingExecutionsOrMore() const { + return PassDebugging >= Executions; +} + + + + +void PassManagerPrettyStackEntry::print(raw_ostream &OS) const { + if (V == 0 && M == 0) + OS << "Releasing pass '"; + else + OS << "Running pass '"; + + OS << P->getPassName() << "'"; + + if (M) { + OS << " on module '" << M->getModuleIdentifier() << "'.\n"; + return; + } + if (V == 0) { + OS << '\n'; + return; + } + + OS << " on "; + if (isa<Function>(V)) + OS << "function"; + else if (isa<BasicBlock>(V)) + OS << "basic block"; + else + OS << "value"; + + OS << " '"; + WriteAsOperand(OS, V, /*PrintTy=*/false, M); + OS << "'\n"; +} + + +namespace { + +//===----------------------------------------------------------------------===// +// BBPassManager +// +/// BBPassManager manages BasicBlockPass. It batches all the +/// pass together and sequence them to process one basic block before +/// processing next basic block. +class BBPassManager : public PMDataManager, public FunctionPass { + +public: + static char ID; + explicit BBPassManager() + : PMDataManager(), FunctionPass(ID) {} + + /// Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the function, and if so, return true. + bool runOnFunction(Function &F); + + /// Pass Manager itself does not invalidate any analysis info. 
+ void getAnalysisUsage(AnalysisUsage &Info) const { + Info.setPreservesAll(); + } + + bool doInitialization(Module &M); + bool doInitialization(Function &F); + bool doFinalization(Module &M); + bool doFinalization(Function &F); + + virtual PMDataManager *getAsPMDataManager() { return this; } + virtual Pass *getAsPass() { return this; } + + virtual const char *getPassName() const { + return "BasicBlock Pass Manager"; + } + + // Print passes managed by this manager + void dumpPassStructure(unsigned Offset) { + llvm::dbgs().indent(Offset*2) << "BasicBlockPass Manager\n"; + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + BasicBlockPass *BP = getContainedPass(Index); + BP->dumpPassStructure(Offset + 1); + dumpLastUses(BP, Offset+1); + } + } + + BasicBlockPass *getContainedPass(unsigned N) { + assert(N < PassVector.size() && "Pass number out of range!"); + BasicBlockPass *BP = static_cast<BasicBlockPass *>(PassVector[N]); + return BP; + } + + virtual PassManagerType getPassManagerType() const { + return PMT_BasicBlockPassManager; + } +}; + +char BBPassManager::ID = 0; +} + +namespace llvm { + +//===----------------------------------------------------------------------===// +// FunctionPassManagerImpl +// +/// FunctionPassManagerImpl manages FPPassManagers +class FunctionPassManagerImpl : public Pass, + public PMDataManager, + public PMTopLevelManager { + virtual void anchor(); +private: + bool wasRun; +public: + static char ID; + explicit FunctionPassManagerImpl() : + Pass(PT_PassManager, ID), PMDataManager(), + PMTopLevelManager(new FPPassManager()), wasRun(false) {} + + /// add - Add a pass to the queue of passes to run. This passes ownership of + /// the Pass to the PassManager. When the PassManager is destroyed, the pass + /// will be destroyed as well, so there is no need to delete the pass. This + /// implies that all passes MUST be allocated with 'new'. + void add(Pass *P) { + schedulePass(P); + } + + /// createPrinterPass - Get a function printer pass. + Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const { + return createPrintFunctionPass(Banner, &O); + } + + // Prepare for running an on the fly pass, freeing memory if needed + // from a previous run. + void releaseMemoryOnTheFly(); + + /// run - Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the module, and if so, return true. + bool run(Function &F); + + /// doInitialization - Run all of the initializers for the function passes. + /// + bool doInitialization(Module &M); + + /// doFinalization - Run all of the finalizers for the function passes. + /// + bool doFinalization(Module &M); + + + virtual PMDataManager *getAsPMDataManager() { return this; } + virtual Pass *getAsPass() { return this; } + virtual PassManagerType getTopLevelPassManagerType() { + return PMT_FunctionPassManager; + } + + /// Pass Manager itself does not invalidate any analysis info. + void getAnalysisUsage(AnalysisUsage &Info) const { + Info.setPreservesAll(); + } + + FPPassManager *getContainedManager(unsigned N) { + assert(N < PassManagers.size() && "Pass number out of range!"); + FPPassManager *FP = static_cast<FPPassManager *>(PassManagers[N]); + return FP; + } +}; + +void FunctionPassManagerImpl::anchor() {} + +char FunctionPassManagerImpl::ID = 0; + +//===----------------------------------------------------------------------===// +// MPPassManager +// +/// MPPassManager manages ModulePasses and function pass managers. 
+/// It batches all Module passes and function pass managers together and +/// sequences them to process one module. +class MPPassManager : public Pass, public PMDataManager { +public: + static char ID; + explicit MPPassManager() : + Pass(PT_PassManager, ID), PMDataManager() { } + + // Delete on the fly managers. + virtual ~MPPassManager() { + for (std::map<Pass *, FunctionPassManagerImpl *>::iterator + I = OnTheFlyManagers.begin(), E = OnTheFlyManagers.end(); + I != E; ++I) { + FunctionPassManagerImpl *FPP = I->second; + delete FPP; + } + } + + /// createPrinterPass - Get a module printer pass. + Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const { + return createPrintModulePass(&O, false, Banner); + } + + /// run - Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the module, and if so, return true. + bool runOnModule(Module &M); + + using llvm::Pass::doInitialization; + using llvm::Pass::doFinalization; + + /// doInitialization - Run all of the initializers for the module passes. + /// + bool doInitialization(); + + /// doFinalization - Run all of the finalizers for the module passes. + /// + bool doFinalization(); + + /// Pass Manager itself does not invalidate any analysis info. + void getAnalysisUsage(AnalysisUsage &Info) const { + Info.setPreservesAll(); + } + + /// Add RequiredPass into list of lower level passes required by pass P. + /// RequiredPass is run on the fly by Pass Manager when P requests it + /// through getAnalysis interface. + virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass); + + /// Return function pass corresponding to PassInfo PI, that is + /// required by module pass MP. Instantiate analysis pass, by using + /// its runOnFunction() for function F. + virtual Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F); + + virtual const char *getPassName() const { + return "Module Pass Manager"; + } + + virtual PMDataManager *getAsPMDataManager() { return this; } + virtual Pass *getAsPass() { return this; } + + // Print passes managed by this manager + void dumpPassStructure(unsigned Offset) { + llvm::dbgs().indent(Offset*2) << "ModulePass Manager\n"; + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + ModulePass *MP = getContainedPass(Index); + MP->dumpPassStructure(Offset + 1); + std::map<Pass *, FunctionPassManagerImpl *>::const_iterator I = + OnTheFlyManagers.find(MP); + if (I != OnTheFlyManagers.end()) + I->second->dumpPassStructure(Offset + 2); + dumpLastUses(MP, Offset+1); + } + } + + ModulePass *getContainedPass(unsigned N) { + assert(N < PassVector.size() && "Pass number out of range!"); + return static_cast<ModulePass *>(PassVector[N]); + } + + virtual PassManagerType getPassManagerType() const { + return PMT_ModulePassManager; + } + + private: + /// Collection of on the fly FPPassManagers. These managers manage + /// function passes that are required by module passes. + std::map<Pass *, FunctionPassManagerImpl *> OnTheFlyManagers; +}; + +char MPPassManager::ID = 0; +//===----------------------------------------------------------------------===// +// PassManagerImpl +// + +/// PassManagerImpl manages MPPassManagers +class PassManagerImpl : public Pass, + public PMDataManager, + public PMTopLevelManager { + virtual void anchor(); + +public: + static char ID; + explicit PassManagerImpl() : + Pass(PT_PassManager, ID), PMDataManager(), + PMTopLevelManager(new MPPassManager()) {} + + /// add - Add a pass to the queue of passes to run. 
This passes ownership of + /// the Pass to the PassManager. When the PassManager is destroyed, the pass + /// will be destroyed as well, so there is no need to delete the pass. This + /// implies that all passes MUST be allocated with 'new'. + void add(Pass *P) { + schedulePass(P); + } + + /// createPrinterPass - Get a module printer pass. + Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const { + return createPrintModulePass(&O, false, Banner); + } + + /// run - Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the module, and if so, return true. + bool run(Module &M); + + using llvm::Pass::doInitialization; + using llvm::Pass::doFinalization; + + /// doInitialization - Run all of the initializers for the module passes. + /// + bool doInitialization(); + + /// doFinalization - Run all of the finalizers for the module passes. + /// + bool doFinalization(); + + /// Pass Manager itself does not invalidate any analysis info. + void getAnalysisUsage(AnalysisUsage &Info) const { + Info.setPreservesAll(); + } + + virtual PMDataManager *getAsPMDataManager() { return this; } + virtual Pass *getAsPass() { return this; } + virtual PassManagerType getTopLevelPassManagerType() { + return PMT_ModulePassManager; + } + + MPPassManager *getContainedManager(unsigned N) { + assert(N < PassManagers.size() && "Pass number out of range!"); + MPPassManager *MP = static_cast<MPPassManager *>(PassManagers[N]); + return MP; + } +}; + +void PassManagerImpl::anchor() {} + +char PassManagerImpl::ID = 0; +} // End of llvm namespace + +namespace { + +//===----------------------------------------------------------------------===// +/// TimingInfo Class - This class is used to calculate information about the +/// amount of time each pass takes to execute. This only happens when +/// -time-passes is enabled on the command line. +/// + +static ManagedStatic<sys::SmartMutex<true> > TimingInfoMutex; + +class TimingInfo { + DenseMap<Pass*, Timer*> TimingData; + TimerGroup TG; +public: + // Use 'create' member to get this. + TimingInfo() : TG("... Pass execution timing report ...") {} + + // TimingDtor - Print out information about timing information + ~TimingInfo() { + // Delete all of the timers, which accumulate their info into the + // TimerGroup. + for (DenseMap<Pass*, Timer*>::iterator I = TimingData.begin(), + E = TimingData.end(); I != E; ++I) + delete I->second; + // TimerGroup is deleted next, printing the report. + } + + // createTheTimeInfo - This method either initializes the TheTimeInfo pointer + // to a non null value (if the -time-passes option is enabled) or it leaves it + // null. It may be called multiple times. + static void createTheTimeInfo(); + + /// getPassTimer - Return the timer for the specified pass if it exists. + Timer *getPassTimer(Pass *P) { + if (P->getAsPMDataManager()) + return 0; + + sys::SmartScopedLock<true> Lock(*TimingInfoMutex); + Timer *&T = TimingData[P]; + if (T == 0) + T = new Timer(P->getPassName(), TG); + return T; + } +}; + +} // End of anon namespace + +static TimingInfo *TheTimeInfo; + +//===----------------------------------------------------------------------===// +// PMTopLevelManager implementation + +/// Initialize top level manager. Create first pass manager. +PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) { + PMDM->setTopLevelManager(this); + addPassManager(PMDM); + activeStack.push(PMDM); +} + +/// Set pass P as the last user of the given analysis passes. 
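From a client's point of view, all of the machinery in this file sits behind the small PassManager facade declared in llvm/PassManager.h: passes are handed over with add(), which transfers ownership as described above, and run() drives scheduling and execution. A hedged driver sketch (the wrapper function is hypothetical):

    #include "llvm/IR/Module.h"
    #include "llvm/Pass.h"
    #include "llvm/PassManager.h"
    using namespace llvm;

    static bool runPasses(Module &M, Pass *P) {
      PassManager PM;   // top-level manager implemented in this file
      PM.add(P);        // P must be heap-allocated; PM now owns and will delete it
      return PM.run(M); // true if any pass modified the module
    }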
+void +PMTopLevelManager::setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P) { + unsigned PDepth = 0; + if (P->getResolver()) + PDepth = P->getResolver()->getPMDataManager().getDepth(); + + for (SmallVectorImpl<Pass *>::const_iterator I = AnalysisPasses.begin(), + E = AnalysisPasses.end(); I != E; ++I) { + Pass *AP = *I; + LastUser[AP] = P; + + if (P == AP) + continue; + + // Update the last users of passes that are required transitive by AP. + AnalysisUsage *AnUsage = findAnalysisUsage(AP); + const AnalysisUsage::VectorType &IDs = AnUsage->getRequiredTransitiveSet(); + SmallVector<Pass *, 12> LastUses; + SmallVector<Pass *, 12> LastPMUses; + for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(), + E = IDs.end(); I != E; ++I) { + Pass *AnalysisPass = findAnalysisPass(*I); + assert(AnalysisPass && "Expected analysis pass to exist."); + AnalysisResolver *AR = AnalysisPass->getResolver(); + assert(AR && "Expected analysis resolver to exist."); + unsigned APDepth = AR->getPMDataManager().getDepth(); + + if (PDepth == APDepth) + LastUses.push_back(AnalysisPass); + else if (PDepth > APDepth) + LastPMUses.push_back(AnalysisPass); + } + + setLastUser(LastUses, P); + + // If this pass has a corresponding pass manager, push higher level + // analysis to this pass manager. + if (P->getResolver()) + setLastUser(LastPMUses, P->getResolver()->getPMDataManager().getAsPass()); + + + // If AP is the last user of other passes then make P last user of + // such passes. + for (DenseMap<Pass *, Pass *>::iterator LUI = LastUser.begin(), + LUE = LastUser.end(); LUI != LUE; ++LUI) { + if (LUI->second == AP) + // DenseMap iterator is not invalidated here because + // this is just updating existing entries. + LastUser[LUI->first] = P; + } + } +} + +/// Collect passes whose last user is P +void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses, + Pass *P) { + DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI = + InversedLastUser.find(P); + if (DMI == InversedLastUser.end()) + return; + + SmallPtrSet<Pass *, 8> &LU = DMI->second; + for (SmallPtrSet<Pass *, 8>::iterator I = LU.begin(), + E = LU.end(); I != E; ++I) { + LastUses.push_back(*I); + } + +} + +AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) { + AnalysisUsage *AnUsage = NULL; + DenseMap<Pass *, AnalysisUsage *>::iterator DMI = AnUsageMap.find(P); + if (DMI != AnUsageMap.end()) + AnUsage = DMI->second; + else { + AnUsage = new AnalysisUsage(); + P->getAnalysisUsage(*AnUsage); + AnUsageMap[P] = AnUsage; + } + return AnUsage; +} + +/// Schedule pass P for execution. Make sure that passes required by +/// P are run before P is run. Update analysis info maintained by +/// the manager. Remove dead passes. This is a recursive function. +void PMTopLevelManager::schedulePass(Pass *P) { + + // TODO : Allocate function manager for this pass, other wise required set + // may be inserted into previous function manager + + // Give pass a chance to prepare the stage. + P->preparePassManager(activeStack); + + // If P is an analysis pass and it is available then do not + // generate the analysis again. Stale analysis info should not be + // available at this point. 
+ const PassInfo *PI = + PassRegistry::getPassRegistry()->getPassInfo(P->getPassID()); + if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) { + delete P; + return; + } + + AnalysisUsage *AnUsage = findAnalysisUsage(P); + + bool checkAnalysis = true; + while (checkAnalysis) { + checkAnalysis = false; + + const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet(); + for (AnalysisUsage::VectorType::const_iterator I = RequiredSet.begin(), + E = RequiredSet.end(); I != E; ++I) { + + Pass *AnalysisPass = findAnalysisPass(*I); + if (!AnalysisPass) { + const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I); + + if (PI == NULL) { + // Pass P is not in the global PassRegistry + dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n"; + dbgs() << "Verify if there is a pass dependency cycle." << "\n"; + dbgs() << "Required Passes:" << "\n"; + for (AnalysisUsage::VectorType::const_iterator I2 = RequiredSet.begin(), + E = RequiredSet.end(); I2 != E && I2 != I; ++I2) { + Pass *AnalysisPass2 = findAnalysisPass(*I2); + if (AnalysisPass2) { + dbgs() << "\t" << AnalysisPass2->getPassName() << "\n"; + } else { + dbgs() << "\t" << "Error: Required pass not found! Possible causes:" << "\n"; + dbgs() << "\t\t" << "- Pass misconfiguration (e.g.: missing macros)" << "\n"; + dbgs() << "\t\t" << "- Corruption of the global PassRegistry" << "\n"; + } + } + } + + assert(PI && "Expected required passes to be initialized"); + AnalysisPass = PI->createPass(); + if (P->getPotentialPassManagerType () == + AnalysisPass->getPotentialPassManagerType()) + // Schedule analysis pass that is managed by the same pass manager. + schedulePass(AnalysisPass); + else if (P->getPotentialPassManagerType () > + AnalysisPass->getPotentialPassManagerType()) { + // Schedule analysis pass that is managed by a new manager. + schedulePass(AnalysisPass); + // Recheck analysis passes to ensure that required analyses that + // are already checked are still available. + checkAnalysis = true; + } else + // Do not schedule this analysis. Lower level analsyis + // passes are run on the fly. + delete AnalysisPass; + } + } + } + + // Now all required passes are available. + if (ImmutablePass *IP = P->getAsImmutablePass()) { + // P is a immutable pass and it will be managed by this + // top level manager. Set up analysis resolver to connect them. + PMDataManager *DM = getAsPMDataManager(); + AnalysisResolver *AR = new AnalysisResolver(*DM); + P->setResolver(AR); + DM->initializeAnalysisImpl(P); + addImmutablePass(IP); + DM->recordAvailableAnalysis(IP); + return; + } + + if (PI && !PI->isAnalysis() && ShouldPrintBeforePass(PI)) { + Pass *PP = P->createPrinterPass( + dbgs(), std::string("*** IR Dump Before ") + P->getPassName() + " ***"); + PP->assignPassManager(activeStack, getTopLevelPassManagerType()); + } + + // Add the requested pass to the best available pass manager. + P->assignPassManager(activeStack, getTopLevelPassManagerType()); + + if (PI && !PI->isAnalysis() && ShouldPrintAfterPass(PI)) { + Pass *PP = P->createPrinterPass( + dbgs(), std::string("*** IR Dump After ") + P->getPassName() + " ***"); + PP->assignPassManager(activeStack, getTopLevelPassManagerType()); + } +} + +/// Find the pass that implements Analysis AID. Search immutable +/// passes and all pass managers. If desired pass is not found +/// then return NULL. 
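schedulePass above is what turns a declared requirement into a concrete, initialized analysis instance; on the pass author's side the whole contract is addRequired<> plus getAnalysis<>. A hypothetical sketch (registration boilerplate omitted; DominatorTree lives in llvm/Analysis/Dominators.h at this revision):

    #include "llvm/Analysis/Dominators.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
      struct UsesDomTree : public FunctionPass {
        static char ID;
        UsesDomTree() : FunctionPass(ID) {}

        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.addRequired<DominatorTree>();  // scheduled for us before runOnFunction
          AU.setPreservesAll();
        }

        virtual bool runOnFunction(Function &F) {
          DominatorTree &DT = getAnalysis<DominatorTree>();
          (void)DT;       // consult the dominator tree; no IR changes are made
          return false;
        }
      };
    }
    char UsesDomTree::ID = 0;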
+Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) { + + // Check pass managers + for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(), + E = PassManagers.end(); I != E; ++I) + if (Pass *P = (*I)->findAnalysisPass(AID, false)) + return P; + + // Check other pass managers + for (SmallVectorImpl<PMDataManager *>::iterator + I = IndirectPassManagers.begin(), + E = IndirectPassManagers.end(); I != E; ++I) + if (Pass *P = (*I)->findAnalysisPass(AID, false)) + return P; + + // Check the immutable passes. Iterate in reverse order so that we find + // the most recently registered passes first. + for (SmallVector<ImmutablePass *, 8>::reverse_iterator I = + ImmutablePasses.rbegin(), E = ImmutablePasses.rend(); I != E; ++I) { + AnalysisID PI = (*I)->getPassID(); + if (PI == AID) + return *I; + + // If Pass not found then check the interfaces implemented by Immutable Pass + const PassInfo *PassInf = + PassRegistry::getPassRegistry()->getPassInfo(PI); + assert(PassInf && "Expected all immutable passes to be initialized"); + const std::vector<const PassInfo*> &ImmPI = + PassInf->getInterfacesImplemented(); + for (std::vector<const PassInfo*>::const_iterator II = ImmPI.begin(), + EE = ImmPI.end(); II != EE; ++II) { + if ((*II)->getTypeInfo() == AID) + return *I; + } + } + + return 0; +} + +// Print passes managed by this top level manager. +void PMTopLevelManager::dumpPasses() const { + + if (PassDebugging < Structure) + return; + + // Print out the immutable passes + for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) { + ImmutablePasses[i]->dumpPassStructure(0); + } + + // Every class that derives from PMDataManager also derives from Pass + // (sometimes indirectly), but there's no inheritance relationship + // between PMDataManager and Pass, so we have to getAsPass to get + // from a PMDataManager* to a Pass*. 
+ for (SmallVector<PMDataManager *, 8>::const_iterator I = PassManagers.begin(), + E = PassManagers.end(); I != E; ++I) + (*I)->getAsPass()->dumpPassStructure(1); +} + +void PMTopLevelManager::dumpArguments() const { + + if (PassDebugging < Arguments) + return; + + dbgs() << "Pass Arguments: "; + for (SmallVector<ImmutablePass *, 8>::const_iterator I = + ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I) + if (const PassInfo *PI = + PassRegistry::getPassRegistry()->getPassInfo((*I)->getPassID())) { + assert(PI && "Expected all immutable passes to be initialized"); + if (!PI->isAnalysisGroup()) + dbgs() << " -" << PI->getPassArgument(); + } + for (SmallVector<PMDataManager *, 8>::const_iterator I = PassManagers.begin(), + E = PassManagers.end(); I != E; ++I) + (*I)->dumpPassArguments(); + dbgs() << "\n"; +} + +void PMTopLevelManager::initializeAllAnalysisInfo() { + for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(), + E = PassManagers.end(); I != E; ++I) + (*I)->initializeAnalysisInfo(); + + // Initailize other pass managers + for (SmallVectorImpl<PMDataManager *>::iterator + I = IndirectPassManagers.begin(), E = IndirectPassManagers.end(); + I != E; ++I) + (*I)->initializeAnalysisInfo(); + + for (DenseMap<Pass *, Pass *>::iterator DMI = LastUser.begin(), + DME = LastUser.end(); DMI != DME; ++DMI) { + DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator InvDMI = + InversedLastUser.find(DMI->second); + if (InvDMI != InversedLastUser.end()) { + SmallPtrSet<Pass *, 8> &L = InvDMI->second; + L.insert(DMI->first); + } else { + SmallPtrSet<Pass *, 8> L; L.insert(DMI->first); + InversedLastUser[DMI->second] = L; + } + } +} + +/// Destructor +PMTopLevelManager::~PMTopLevelManager() { + for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(), + E = PassManagers.end(); I != E; ++I) + delete *I; + + for (SmallVectorImpl<ImmutablePass *>::iterator + I = ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I) + delete *I; + + for (DenseMap<Pass *, AnalysisUsage *>::iterator DMI = AnUsageMap.begin(), + DME = AnUsageMap.end(); DMI != DME; ++DMI) + delete DMI->second; +} + +//===----------------------------------------------------------------------===// +// PMDataManager implementation + +/// Augement AvailableAnalysis by adding analysis made available by pass P. +void PMDataManager::recordAvailableAnalysis(Pass *P) { + AnalysisID PI = P->getPassID(); + + AvailableAnalysis[PI] = P; + + assert(!AvailableAnalysis.empty()); + + // This pass is the current implementation of all of the interfaces it + // implements as well. 
+ const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI); + if (PInf == 0) return; + const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented(); + for (unsigned i = 0, e = II.size(); i != e; ++i) + AvailableAnalysis[II[i]->getTypeInfo()] = P; +} + +// Return true if P preserves high level analysis used by other +// passes managed by this manager +bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) { + AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P); + if (AnUsage->getPreservesAll()) + return true; + + const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet(); + for (SmallVectorImpl<Pass *>::iterator I = HigherLevelAnalysis.begin(), + E = HigherLevelAnalysis.end(); I != E; ++I) { + Pass *P1 = *I; + if (P1->getAsImmutablePass() == 0 && + std::find(PreservedSet.begin(), PreservedSet.end(), + P1->getPassID()) == + PreservedSet.end()) + return false; + } + + return true; +} + +/// verifyPreservedAnalysis -- Verify analysis preserved by pass P. +void PMDataManager::verifyPreservedAnalysis(Pass *P) { + // Don't do this unless assertions are enabled. +#ifdef NDEBUG + return; +#endif + AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P); + const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet(); + + // Verify preserved analysis + for (AnalysisUsage::VectorType::const_iterator I = PreservedSet.begin(), + E = PreservedSet.end(); I != E; ++I) { + AnalysisID AID = *I; + if (Pass *AP = findAnalysisPass(AID, true)) { + TimeRegion PassTimer(getPassTimer(AP)); + AP->verifyAnalysis(); + } + } +} + +/// Remove Analysis not preserved by Pass P +void PMDataManager::removeNotPreservedAnalysis(Pass *P) { + AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P); + if (AnUsage->getPreservesAll()) + return; + + const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet(); + for (DenseMap<AnalysisID, Pass*>::iterator I = AvailableAnalysis.begin(), + E = AvailableAnalysis.end(); I != E; ) { + DenseMap<AnalysisID, Pass*>::iterator Info = I++; + if (Info->second->getAsImmutablePass() == 0 && + std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) == + PreservedSet.end()) { + // Remove this analysis + if (PassDebugging >= Details) { + Pass *S = Info->second; + dbgs() << " -- '" << P->getPassName() << "' is not preserving '"; + dbgs() << S->getPassName() << "'\n"; + } + AvailableAnalysis.erase(Info); + } + } + + // Check inherited analysis also. If P is not preserving analysis + // provided by parent manager then remove it here. + for (unsigned Index = 0; Index < PMT_Last; ++Index) { + + if (!InheritedAnalysis[Index]) + continue; + + for (DenseMap<AnalysisID, Pass*>::iterator + I = InheritedAnalysis[Index]->begin(), + E = InheritedAnalysis[Index]->end(); I != E; ) { + DenseMap<AnalysisID, Pass *>::iterator Info = I++; + if (Info->second->getAsImmutablePass() == 0 && + std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) == + PreservedSet.end()) { + // Remove this analysis + if (PassDebugging >= Details) { + Pass *S = Info->second; + dbgs() << " -- '" << P->getPassName() << "' is not preserving '"; + dbgs() << S->getPassName() << "'\n"; + } + InheritedAnalysis[Index]->erase(Info); + } + } + } +} + +/// Remove analysis passes that are not used any longer +void PMDataManager::removeDeadPasses(Pass *P, StringRef Msg, + enum PassDebuggingString DBG_STR) { + + SmallVector<Pass *, 12> DeadPasses; + + // If this is a on the fly manager then it does not have TPM. 
+ if (!TPM) + return; + + TPM->collectLastUses(DeadPasses, P); + + if (PassDebugging >= Details && !DeadPasses.empty()) { + dbgs() << " -*- '" << P->getPassName(); + dbgs() << "' is the last user of following pass instances."; + dbgs() << " Free these instances\n"; + } + + for (SmallVectorImpl<Pass *>::iterator I = DeadPasses.begin(), + E = DeadPasses.end(); I != E; ++I) + freePass(*I, Msg, DBG_STR); +} + +void PMDataManager::freePass(Pass *P, StringRef Msg, + enum PassDebuggingString DBG_STR) { + dumpPassInfo(P, FREEING_MSG, DBG_STR, Msg); + + { + // If the pass crashes releasing memory, remember this. + PassManagerPrettyStackEntry X(P); + TimeRegion PassTimer(getPassTimer(P)); + + P->releaseMemory(); + } + + AnalysisID PI = P->getPassID(); + if (const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI)) { + // Remove the pass itself (if it is not already removed). + AvailableAnalysis.erase(PI); + + // Remove all interfaces this pass implements, for which it is also + // listed as the available implementation. + const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented(); + for (unsigned i = 0, e = II.size(); i != e; ++i) { + DenseMap<AnalysisID, Pass*>::iterator Pos = + AvailableAnalysis.find(II[i]->getTypeInfo()); + if (Pos != AvailableAnalysis.end() && Pos->second == P) + AvailableAnalysis.erase(Pos); + } + } +} + +/// Add pass P into the PassVector. Update +/// AvailableAnalysis appropriately if ProcessAnalysis is true. +void PMDataManager::add(Pass *P, bool ProcessAnalysis) { + // This manager is going to manage pass P. Set up analysis resolver + // to connect them. + AnalysisResolver *AR = new AnalysisResolver(*this); + P->setResolver(AR); + + // If a FunctionPass F is the last user of ModulePass info M + // then the F's manager, not F, records itself as a last user of M. + SmallVector<Pass *, 12> TransferLastUses; + + if (!ProcessAnalysis) { + // Add pass + PassVector.push_back(P); + return; + } + + // At the moment, this pass is the last user of all required passes. + SmallVector<Pass *, 12> LastUses; + SmallVector<Pass *, 8> RequiredPasses; + SmallVector<AnalysisID, 8> ReqAnalysisNotAvailable; + + unsigned PDepth = this->getDepth(); + + collectRequiredAnalysis(RequiredPasses, + ReqAnalysisNotAvailable, P); + for (SmallVectorImpl<Pass *>::iterator I = RequiredPasses.begin(), + E = RequiredPasses.end(); I != E; ++I) { + Pass *PRequired = *I; + unsigned RDepth = 0; + + assert(PRequired->getResolver() && "Analysis Resolver is not set"); + PMDataManager &DM = PRequired->getResolver()->getPMDataManager(); + RDepth = DM.getDepth(); + + if (PDepth == RDepth) + LastUses.push_back(PRequired); + else if (PDepth > RDepth) { + // Let the parent claim responsibility of last use + TransferLastUses.push_back(PRequired); + // Keep track of higher level analysis used by this manager. + HigherLevelAnalysis.push_back(PRequired); + } else + llvm_unreachable("Unable to accommodate Required Pass"); + } + + // Set P as P's last user until someone starts using P. + // However, if P is a Pass Manager then it does not need + // to record its last user. + if (P->getAsPMDataManager() == 0) + LastUses.push_back(P); + TPM->setLastUser(LastUses, P); + + if (!TransferLastUses.empty()) { + Pass *My_PM = getAsPass(); + TPM->setLastUser(TransferLastUses, My_PM); + TransferLastUses.clear(); + } + + // Now, take care of required analyses that are not available. 
+ for (SmallVectorImpl<AnalysisID>::iterator + I = ReqAnalysisNotAvailable.begin(), + E = ReqAnalysisNotAvailable.end() ;I != E; ++I) { + const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I); + Pass *AnalysisPass = PI->createPass(); + this->addLowerLevelRequiredPass(P, AnalysisPass); + } + + // Take a note of analysis required and made available by this pass. + // Remove the analysis not preserved by this pass + removeNotPreservedAnalysis(P); + recordAvailableAnalysis(P); + + // Add pass + PassVector.push_back(P); +} + + +/// Populate RP with analysis pass that are required by +/// pass P and are available. Populate RP_NotAvail with analysis +/// pass that are required by pass P but are not available. +void PMDataManager::collectRequiredAnalysis(SmallVectorImpl<Pass *> &RP, + SmallVectorImpl<AnalysisID> &RP_NotAvail, + Pass *P) { + AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P); + const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet(); + for (AnalysisUsage::VectorType::const_iterator + I = RequiredSet.begin(), E = RequiredSet.end(); I != E; ++I) { + if (Pass *AnalysisPass = findAnalysisPass(*I, true)) + RP.push_back(AnalysisPass); + else + RP_NotAvail.push_back(*I); + } + + const AnalysisUsage::VectorType &IDs = AnUsage->getRequiredTransitiveSet(); + for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(), + E = IDs.end(); I != E; ++I) { + if (Pass *AnalysisPass = findAnalysisPass(*I, true)) + RP.push_back(AnalysisPass); + else + RP_NotAvail.push_back(*I); + } +} + +// All Required analyses should be available to the pass as it runs! Here +// we fill in the AnalysisImpls member of the pass so that it can +// successfully use the getAnalysis() method to retrieve the +// implementations it needs. +// +void PMDataManager::initializeAnalysisImpl(Pass *P) { + AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P); + + for (AnalysisUsage::VectorType::const_iterator + I = AnUsage->getRequiredSet().begin(), + E = AnUsage->getRequiredSet().end(); I != E; ++I) { + Pass *Impl = findAnalysisPass(*I, true); + if (Impl == 0) + // This may be analysis pass that is initialized on the fly. + // If that is not the case then it will raise an assert when it is used. + continue; + AnalysisResolver *AR = P->getResolver(); + assert(AR && "Analysis Resolver is not set"); + AR->addAnalysisImplsPair(*I, Impl); + } +} + +/// Find the pass that implements Analysis AID. If desired pass is not found +/// then return NULL. +Pass *PMDataManager::findAnalysisPass(AnalysisID AID, bool SearchParent) { + + // Check if AvailableAnalysis map has one entry. + DenseMap<AnalysisID, Pass*>::const_iterator I = AvailableAnalysis.find(AID); + + if (I != AvailableAnalysis.end()) + return I->second; + + // Search Parents through TopLevelManager + if (SearchParent) + return TPM->findAnalysisPass(AID); + + return NULL; +} + +// Print list of passes that are last used by P. +void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{ + + SmallVector<Pass *, 12> LUses; + + // If this is a on the fly manager then it does not have TPM. 
+ if (!TPM) + return; + + TPM->collectLastUses(LUses, P); + + for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(), + E = LUses.end(); I != E; ++I) { + llvm::dbgs() << "--" << std::string(Offset*2, ' '); + (*I)->dumpPassStructure(0); + } +} + +void PMDataManager::dumpPassArguments() const { + for (SmallVectorImpl<Pass *>::const_iterator I = PassVector.begin(), + E = PassVector.end(); I != E; ++I) { + if (PMDataManager *PMD = (*I)->getAsPMDataManager()) + PMD->dumpPassArguments(); + else + if (const PassInfo *PI = + PassRegistry::getPassRegistry()->getPassInfo((*I)->getPassID())) + if (!PI->isAnalysisGroup()) + dbgs() << " -" << PI->getPassArgument(); + } +} + +void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1, + enum PassDebuggingString S2, + StringRef Msg) { + if (PassDebugging < Executions) + return; + dbgs() << (void*)this << std::string(getDepth()*2+1, ' '); + switch (S1) { + case EXECUTION_MSG: + dbgs() << "Executing Pass '" << P->getPassName(); + break; + case MODIFICATION_MSG: + dbgs() << "Made Modification '" << P->getPassName(); + break; + case FREEING_MSG: + dbgs() << " Freeing Pass '" << P->getPassName(); + break; + default: + break; + } + switch (S2) { + case ON_BASICBLOCK_MSG: + dbgs() << "' on BasicBlock '" << Msg << "'...\n"; + break; + case ON_FUNCTION_MSG: + dbgs() << "' on Function '" << Msg << "'...\n"; + break; + case ON_MODULE_MSG: + dbgs() << "' on Module '" << Msg << "'...\n"; + break; + case ON_REGION_MSG: + dbgs() << "' on Region '" << Msg << "'...\n"; + break; + case ON_LOOP_MSG: + dbgs() << "' on Loop '" << Msg << "'...\n"; + break; + case ON_CG_MSG: + dbgs() << "' on Call Graph Nodes '" << Msg << "'...\n"; + break; + default: + break; + } +} + +void PMDataManager::dumpRequiredSet(const Pass *P) const { + if (PassDebugging < Details) + return; + + AnalysisUsage analysisUsage; + P->getAnalysisUsage(analysisUsage); + dumpAnalysisUsage("Required", P, analysisUsage.getRequiredSet()); +} + +void PMDataManager::dumpPreservedSet(const Pass *P) const { + if (PassDebugging < Details) + return; + + AnalysisUsage analysisUsage; + P->getAnalysisUsage(analysisUsage); + dumpAnalysisUsage("Preserved", P, analysisUsage.getPreservedSet()); +} + +void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P, + const AnalysisUsage::VectorType &Set) const { + assert(PassDebugging >= Details); + if (Set.empty()) + return; + dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:"; + for (unsigned i = 0; i != Set.size(); ++i) { + if (i) dbgs() << ','; + const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]); + if (!PInf) { + // Some preserved passes, such as AliasAnalysis, may not be initialized by + // all drivers. + dbgs() << " Uninitialized Pass"; + continue; + } + dbgs() << ' ' << PInf->getPassName(); + } + dbgs() << '\n'; +} + +/// Add RequiredPass into list of lower level passes required by pass P. +/// RequiredPass is run on the fly by Pass Manager when P requests it +/// through getAnalysis interface. +/// This should be handled by specific pass manager. +void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) { + if (TPM) { + TPM->dumpArguments(); + TPM->dumpPasses(); + } + + // Module Level pass may required Function Level analysis info + // (e.g. dominator info). Pass manager uses on the fly function pass manager + // to provide this on demand. 
In that case, in Pass manager terminology, + // module level pass is requiring lower level analysis info managed by + // lower level pass manager. + + // When Pass manager is not able to order required analysis info, Pass manager + // checks whether any lower level manager will be able to provide this + // analysis info on demand or not. +#ifndef NDEBUG + dbgs() << "Unable to schedule '" << RequiredPass->getPassName(); + dbgs() << "' required by '" << P->getPassName() << "'\n"; +#endif + llvm_unreachable("Unable to schedule pass"); +} + +Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) { + llvm_unreachable("Unable to find on the fly pass"); +} + +// Destructor +PMDataManager::~PMDataManager() { + for (SmallVectorImpl<Pass *>::iterator I = PassVector.begin(), + E = PassVector.end(); I != E; ++I) + delete *I; +} + +//===----------------------------------------------------------------------===// +// NOTE: Is this the right place to define this method ? +// getAnalysisIfAvailable - Return analysis result or null if it doesn't exist. +Pass *AnalysisResolver::getAnalysisIfAvailable(AnalysisID ID, bool dir) const { + return PM.findAnalysisPass(ID, dir); +} + +Pass *AnalysisResolver::findImplPass(Pass *P, AnalysisID AnalysisPI, + Function &F) { + return PM.getOnTheFlyPass(P, AnalysisPI, F); +} + +//===----------------------------------------------------------------------===// +// BBPassManager implementation + +/// Execute all of the passes scheduled for execution by invoking +/// runOnBasicBlock method. Keep track of whether any of the passes modifies +/// the function, and if so, return true. +bool BBPassManager::runOnFunction(Function &F) { + if (F.isDeclaration()) + return false; + + bool Changed = doInitialization(F); + + for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + BasicBlockPass *BP = getContainedPass(Index); + bool LocalChanged = false; + + dumpPassInfo(BP, EXECUTION_MSG, ON_BASICBLOCK_MSG, I->getName()); + dumpRequiredSet(BP); + + initializeAnalysisImpl(BP); + + { + // If the pass crashes, remember this. 
+ PassManagerPrettyStackEntry X(BP, *I); + TimeRegion PassTimer(getPassTimer(BP)); + + LocalChanged |= BP->runOnBasicBlock(*I); + } + + Changed |= LocalChanged; + if (LocalChanged) + dumpPassInfo(BP, MODIFICATION_MSG, ON_BASICBLOCK_MSG, + I->getName()); + dumpPreservedSet(BP); + + verifyPreservedAnalysis(BP); + removeNotPreservedAnalysis(BP); + recordAvailableAnalysis(BP); + removeDeadPasses(BP, I->getName(), ON_BASICBLOCK_MSG); + } + + return doFinalization(F) || Changed; +} + +// Implement doInitialization and doFinalization +bool BBPassManager::doInitialization(Module &M) { + bool Changed = false; + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) + Changed |= getContainedPass(Index)->doInitialization(M); + + return Changed; +} + +bool BBPassManager::doFinalization(Module &M) { + bool Changed = false; + + for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index) + Changed |= getContainedPass(Index)->doFinalization(M); + + return Changed; +} + +bool BBPassManager::doInitialization(Function &F) { + bool Changed = false; + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + BasicBlockPass *BP = getContainedPass(Index); + Changed |= BP->doInitialization(F); + } + + return Changed; +} + +bool BBPassManager::doFinalization(Function &F) { + bool Changed = false; + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + BasicBlockPass *BP = getContainedPass(Index); + Changed |= BP->doFinalization(F); + } + + return Changed; +} + + +//===----------------------------------------------------------------------===// +// FunctionPassManager implementation + +/// Create new Function pass manager +FunctionPassManager::FunctionPassManager(Module *m) : M(m) { + FPM = new FunctionPassManagerImpl(); + // FPM is the top level manager. + FPM->setTopLevelManager(FPM); + + AnalysisResolver *AR = new AnalysisResolver(*FPM); + FPM->setResolver(AR); +} + +FunctionPassManager::~FunctionPassManager() { + delete FPM; +} + +/// add - Add a pass to the queue of passes to run. This passes +/// ownership of the Pass to the PassManager. When the +/// PassManager_X is destroyed, the pass will be destroyed as well, so +/// there is no need to delete the pass. (TODO delete passes.) +/// This implies that all passes MUST be allocated with 'new'. +void FunctionPassManager::add(Pass *P) { + FPM->add(P); +} + +/// run - Execute all of the passes scheduled for execution. Keep +/// track of whether any of the passes modifies the function, and if +/// so, return true. +/// +bool FunctionPassManager::run(Function &F) { + if (F.isMaterializable()) { + std::string errstr; + if (F.Materialize(&errstr)) + report_fatal_error("Error reading bitcode file: " + Twine(errstr)); + } + return FPM->run(F); +} + + +/// doInitialization - Run all of the initializers for the function passes. +/// +bool FunctionPassManager::doInitialization() { + return FPM->doInitialization(*M); +} + +/// doFinalization - Run all of the finalizers for the function passes. 
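//===----------------------------------------------------------------------===//
// [Editor's aside -- illustrative sketch, not part of this commit.] The
// FunctionPassManager above is the per-function driver that clients (for
// example a JIT) feed one Function at a time. A minimal, hypothetical use;
// the helper name `runFunctionPasses` and the banner string are invented,
// and the printer pass is the createPrintFunctionPass added later in this
// commit:

#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

static bool runFunctionPasses(llvm::Module &M) {
  llvm::FunctionPassManager FPM(&M);
  FPM.add(llvm::createPrintFunctionPass("; after function passes:\n",
                                        &llvm::errs(),
                                        /*DeleteStream=*/false));
  bool Changed = FPM.doInitialization();
  for (llvm::Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    Changed |= FPM.run(*I);              // runs every contained FunctionPass
  Changed |= FPM.doFinalization();
  return Changed;
}
//===----------------------------------------------------------------------===//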
+/// +bool FunctionPassManager::doFinalization() { + return FPM->doFinalization(*M); +} + +//===----------------------------------------------------------------------===// +// FunctionPassManagerImpl implementation +// +bool FunctionPassManagerImpl::doInitialization(Module &M) { + bool Changed = false; + + dumpArguments(); + dumpPasses(); + + SmallVectorImpl<ImmutablePass *>& IPV = getImmutablePasses(); + for (SmallVectorImpl<ImmutablePass *>::const_iterator I = IPV.begin(), + E = IPV.end(); I != E; ++I) { + Changed |= (*I)->doInitialization(M); + } + + for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) + Changed |= getContainedManager(Index)->doInitialization(M); + + return Changed; +} + +bool FunctionPassManagerImpl::doFinalization(Module &M) { + bool Changed = false; + + for (int Index = getNumContainedManagers() - 1; Index >= 0; --Index) + Changed |= getContainedManager(Index)->doFinalization(M); + + SmallVectorImpl<ImmutablePass *>& IPV = getImmutablePasses(); + for (SmallVectorImpl<ImmutablePass *>::const_iterator I = IPV.begin(), + E = IPV.end(); I != E; ++I) { + Changed |= (*I)->doFinalization(M); + } + + return Changed; +} + +/// cleanup - After running all passes, clean up pass manager cache. +void FPPassManager::cleanup() { + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + FunctionPass *FP = getContainedPass(Index); + AnalysisResolver *AR = FP->getResolver(); + assert(AR && "Analysis Resolver is not set"); + AR->clearAnalysisImpls(); + } +} + +void FunctionPassManagerImpl::releaseMemoryOnTheFly() { + if (!wasRun) + return; + for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) { + FPPassManager *FPPM = getContainedManager(Index); + for (unsigned Index = 0; Index < FPPM->getNumContainedPasses(); ++Index) { + FPPM->getContainedPass(Index)->releaseMemory(); + } + } + wasRun = false; +} + +// Execute all the passes managed by this top level manager. +// Return true if any function is modified by a pass. +bool FunctionPassManagerImpl::run(Function &F) { + bool Changed = false; + TimingInfo::createTheTimeInfo(); + + initializeAllAnalysisInfo(); + for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) + Changed |= getContainedManager(Index)->runOnFunction(F); + + for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) + getContainedManager(Index)->cleanup(); + + wasRun = true; + return Changed; +} + +//===----------------------------------------------------------------------===// +// FPPassManager implementation + +char FPPassManager::ID = 0; +/// Print passes managed by this manager +void FPPassManager::dumpPassStructure(unsigned Offset) { + dbgs().indent(Offset*2) << "FunctionPass Manager\n"; + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + FunctionPass *FP = getContainedPass(Index); + FP->dumpPassStructure(Offset + 1); + dumpLastUses(FP, Offset+1); + } +} + + +/// Execute all of the passes scheduled for execution by invoking +/// runOnFunction method. Keep track of whether any of the passes modifies +/// the function, and if so, return true. +bool FPPassManager::runOnFunction(Function &F) { + if (F.isDeclaration()) + return false; + + bool Changed = false; + + // Collect inherited analysis from Module level pass manager. 
+ populateInheritedAnalysis(TPM->activeStack); + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + FunctionPass *FP = getContainedPass(Index); + bool LocalChanged = false; + + dumpPassInfo(FP, EXECUTION_MSG, ON_FUNCTION_MSG, F.getName()); + dumpRequiredSet(FP); + + initializeAnalysisImpl(FP); + + { + PassManagerPrettyStackEntry X(FP, F); + TimeRegion PassTimer(getPassTimer(FP)); + + LocalChanged |= FP->runOnFunction(F); + } + + Changed |= LocalChanged; + if (LocalChanged) + dumpPassInfo(FP, MODIFICATION_MSG, ON_FUNCTION_MSG, F.getName()); + dumpPreservedSet(FP); + + verifyPreservedAnalysis(FP); + removeNotPreservedAnalysis(FP); + recordAvailableAnalysis(FP); + removeDeadPasses(FP, F.getName(), ON_FUNCTION_MSG); + } + return Changed; +} + +bool FPPassManager::runOnModule(Module &M) { + bool Changed = false; + + for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) + Changed |= runOnFunction(*I); + + return Changed; +} + +bool FPPassManager::doInitialization(Module &M) { + bool Changed = false; + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) + Changed |= getContainedPass(Index)->doInitialization(M); + + return Changed; +} + +bool FPPassManager::doFinalization(Module &M) { + bool Changed = false; + + for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index) + Changed |= getContainedPass(Index)->doFinalization(M); + + return Changed; +} + +//===----------------------------------------------------------------------===// +// MPPassManager implementation + +/// Execute all of the passes scheduled for execution by invoking +/// runOnModule method. Keep track of whether any of the passes modifies +/// the module, and if so, return true. +bool +MPPassManager::runOnModule(Module &M) { + bool Changed = false; + + // Initialize on-the-fly passes + for (std::map<Pass *, FunctionPassManagerImpl *>::iterator + I = OnTheFlyManagers.begin(), E = OnTheFlyManagers.end(); + I != E; ++I) { + FunctionPassManagerImpl *FPP = I->second; + Changed |= FPP->doInitialization(M); + } + + // Initialize module passes + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) + Changed |= getContainedPass(Index)->doInitialization(M); + + for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { + ModulePass *MP = getContainedPass(Index); + bool LocalChanged = false; + + dumpPassInfo(MP, EXECUTION_MSG, ON_MODULE_MSG, M.getModuleIdentifier()); + dumpRequiredSet(MP); + + initializeAnalysisImpl(MP); + + { + PassManagerPrettyStackEntry X(MP, M); + TimeRegion PassTimer(getPassTimer(MP)); + + LocalChanged |= MP->runOnModule(M); + } + + Changed |= LocalChanged; + if (LocalChanged) + dumpPassInfo(MP, MODIFICATION_MSG, ON_MODULE_MSG, + M.getModuleIdentifier()); + dumpPreservedSet(MP); + + verifyPreservedAnalysis(MP); + removeNotPreservedAnalysis(MP); + recordAvailableAnalysis(MP); + removeDeadPasses(MP, M.getModuleIdentifier(), ON_MODULE_MSG); + } + + // Finalize module passes + for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index) + Changed |= getContainedPass(Index)->doFinalization(M); + + // Finalize on-the-fly passes + for (std::map<Pass *, FunctionPassManagerImpl *>::iterator + I = OnTheFlyManagers.begin(), E = OnTheFlyManagers.end(); + I != E; ++I) { + FunctionPassManagerImpl *FPP = I->second; + // We don't know when is the last time an on-the-fly pass is run, + // so we need to releaseMemory / finalize here + FPP->releaseMemoryOnTheFly(); + Changed |= FPP->doFinalization(M); + } + + return Changed; +} + +/// Add RequiredPass into 
list of lower level passes required by pass P. +/// RequiredPass is run on the fly by Pass Manager when P requests it +/// through getAnalysis interface. +void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) { + assert(P->getPotentialPassManagerType() == PMT_ModulePassManager && + "Unable to handle Pass that requires lower level Analysis pass"); + assert((P->getPotentialPassManagerType() < + RequiredPass->getPotentialPassManagerType()) && + "Unable to handle Pass that requires lower level Analysis pass"); + + FunctionPassManagerImpl *FPP = OnTheFlyManagers[P]; + if (!FPP) { + FPP = new FunctionPassManagerImpl(); + // FPP is the top level manager. + FPP->setTopLevelManager(FPP); + + OnTheFlyManagers[P] = FPP; + } + FPP->add(RequiredPass); + + // Register P as the last user of RequiredPass. + if (RequiredPass) { + SmallVector<Pass *, 1> LU; + LU.push_back(RequiredPass); + FPP->setLastUser(LU, P); + } +} + +/// Return function pass corresponding to PassInfo PI, that is +/// required by module pass MP. Instantiate analysis pass, by using +/// its runOnFunction() for function F. +Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F){ + FunctionPassManagerImpl *FPP = OnTheFlyManagers[MP]; + assert(FPP && "Unable to find on the fly pass"); + + FPP->releaseMemoryOnTheFly(); + FPP->run(F); + return ((PMTopLevelManager*)FPP)->findAnalysisPass(PI); +} + + +//===----------------------------------------------------------------------===// +// PassManagerImpl implementation + +// +/// run - Execute all of the passes scheduled for execution. Keep track of +/// whether any of the passes modifies the module, and if so, return true. +bool PassManagerImpl::run(Module &M) { + bool Changed = false; + TimingInfo::createTheTimeInfo(); + + dumpArguments(); + dumpPasses(); + + SmallVectorImpl<ImmutablePass *>& IPV = getImmutablePasses(); + for (SmallVectorImpl<ImmutablePass *>::const_iterator I = IPV.begin(), + E = IPV.end(); I != E; ++I) { + Changed |= (*I)->doInitialization(M); + } + + initializeAllAnalysisInfo(); + for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) + Changed |= getContainedManager(Index)->runOnModule(M); + + for (SmallVectorImpl<ImmutablePass *>::const_iterator I = IPV.begin(), + E = IPV.end(); I != E; ++I) { + Changed |= (*I)->doFinalization(M); + } + + return Changed; +} + +//===----------------------------------------------------------------------===// +// PassManager implementation + +/// Create new pass manager +PassManager::PassManager() { + PM = new PassManagerImpl(); + // PM is the top level manager + PM->setTopLevelManager(PM); +} + +PassManager::~PassManager() { + delete PM; +} + +/// add - Add a pass to the queue of passes to run. This passes ownership of +/// the Pass to the PassManager. When the PassManager is destroyed, the pass +/// will be destroyed as well, so there is no need to delete the pass. This +/// implies that all passes MUST be allocated with 'new'. +void PassManager::add(Pass *P) { + PM->add(P); +} + +/// run - Execute all of the passes scheduled for execution. Keep track of +/// whether any of the passes modifies the module, and if so, return true. +bool PassManager::run(Module &M) { + return PM->run(M); +} + +//===----------------------------------------------------------------------===// +// TimingInfo Class - This class is used to calculate information about the +// amount of time each pass takes to execute. This only happens with +// -time-passes is enabled on the command line. 
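//===----------------------------------------------------------------------===//
// [Editor's aside -- illustrative sketch, not part of this commit.] The
// PassManager::add()/run() pair above is the usual module-level entry point
// for clients. A minimal, hypothetical driver; the helper name and banner
// text are invented, and the printer pass is the createPrintModulePass added
// later in this commit:

#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

static bool runModulePasses(llvm::Module &M) {
  llvm::PassManager PM;                    // owns every pass added to it
  PM.add(llvm::createPrintModulePass(&llvm::errs(), /*DeleteStream=*/false,
                                     "; module dump:\n"));
  return PM.run(M);                        // true iff some pass changed M
}
//===----------------------------------------------------------------------===//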
+// +bool llvm::TimePassesIsEnabled = false; +static cl::opt<bool,true> +EnableTiming("time-passes", cl::location(TimePassesIsEnabled), + cl::desc("Time each pass, printing elapsed time for each on exit")); + +// createTheTimeInfo - This method either initializes the TheTimeInfo pointer to +// a non null value (if the -time-passes option is enabled) or it leaves it +// null. It may be called multiple times. +void TimingInfo::createTheTimeInfo() { + if (!TimePassesIsEnabled || TheTimeInfo) return; + + // Constructed the first time this is called, iff -time-passes is enabled. + // This guarantees that the object will be constructed before static globals, + // thus it will be destroyed before them. + static ManagedStatic<TimingInfo> TTI; + TheTimeInfo = &*TTI; +} + +/// If TimingInfo is enabled then start pass timer. +Timer *llvm::getPassTimer(Pass *P) { + if (TheTimeInfo) + return TheTimeInfo->getPassTimer(P); + return 0; +} + +//===----------------------------------------------------------------------===// +// PMStack implementation +// + +// Pop Pass Manager from the stack and clear its analysis info. +void PMStack::pop() { + + PMDataManager *Top = this->top(); + Top->initializeAnalysisInfo(); + + S.pop_back(); +} + +// Push PM on the stack and set its top level manager. +void PMStack::push(PMDataManager *PM) { + assert(PM && "Unable to push. Pass Manager expected"); + assert(PM->getDepth()==0 && "Pass Manager depth set too early"); + + if (!this->empty()) { + assert(PM->getPassManagerType() > this->top()->getPassManagerType() + && "pushing bad pass manager to PMStack"); + PMTopLevelManager *TPM = this->top()->getTopLevelManager(); + + assert(TPM && "Unable to find top level manager"); + TPM->addIndirectPassManager(PM); + PM->setTopLevelManager(TPM); + PM->setDepth(this->top()->getDepth()+1); + } else { + assert((PM->getPassManagerType() == PMT_ModulePassManager + || PM->getPassManagerType() == PMT_FunctionPassManager) + && "pushing bad pass manager to PMStack"); + PM->setDepth(1); + } + + S.push_back(PM); +} + +// Dump content of the pass manager stack. +void PMStack::dump() const { + for (std::vector<PMDataManager *>::const_iterator I = S.begin(), + E = S.end(); I != E; ++I) + dbgs() << (*I)->getAsPass()->getPassName() << ' '; + + if (!S.empty()) + dbgs() << '\n'; +} + +/// Find appropriate Module Pass Manager in the PM Stack and +/// add self into that manager. +void ModulePass::assignPassManager(PMStack &PMS, + PassManagerType PreferredType) { + // Find Module Pass Manager + while (!PMS.empty()) { + PassManagerType TopPMType = PMS.top()->getPassManagerType(); + if (TopPMType == PreferredType) + break; // We found desired pass manager + else if (TopPMType > PMT_ModulePassManager) + PMS.pop(); // Pop children pass managers + else + break; + } + assert(!PMS.empty() && "Unable to find appropriate Pass Manager"); + PMS.top()->add(this); +} + +/// Find appropriate Function Pass Manager or Call Graph Pass Manager +/// in the PM Stack and add self into that manager. +void FunctionPass::assignPassManager(PMStack &PMS, + PassManagerType PreferredType) { + + // Find Function Pass Manager + while (!PMS.empty()) { + if (PMS.top()->getPassManagerType() > PMT_FunctionPassManager) + PMS.pop(); + else + break; + } + + // Create new Function Pass Manager if needed. 
+ FPPassManager *FPP; + if (PMS.top()->getPassManagerType() == PMT_FunctionPassManager) { + FPP = (FPPassManager *)PMS.top(); + } else { + assert(!PMS.empty() && "Unable to create Function Pass Manager"); + PMDataManager *PMD = PMS.top(); + + // [1] Create new Function Pass Manager + FPP = new FPPassManager(); + FPP->populateInheritedAnalysis(PMS); + + // [2] Set up new manager's top level manager + PMTopLevelManager *TPM = PMD->getTopLevelManager(); + TPM->addIndirectPassManager(FPP); + + // [3] Assign manager to manage this new manager. This may create + // and push new managers into PMS + FPP->assignPassManager(PMS, PMD->getPassManagerType()); + + // [4] Push new manager into PMS + PMS.push(FPP); + } + + // Assign FPP as the manager of this pass. + FPP->add(this); +} + +/// Find appropriate Basic Pass Manager or Call Graph Pass Manager +/// in the PM Stack and add self into that manager. +void BasicBlockPass::assignPassManager(PMStack &PMS, + PassManagerType PreferredType) { + BBPassManager *BBP; + + // Basic Pass Manager is a leaf pass manager. It does not handle + // any other pass manager. + if (!PMS.empty() && + PMS.top()->getPassManagerType() == PMT_BasicBlockPassManager) { + BBP = (BBPassManager *)PMS.top(); + } else { + // If leaf manager is not Basic Block Pass manager then create new + // basic Block Pass manager. + assert(!PMS.empty() && "Unable to create BasicBlock Pass Manager"); + PMDataManager *PMD = PMS.top(); + + // [1] Create new Basic Block Manager + BBP = new BBPassManager(); + + // [2] Set up new manager's top level manager + // Basic Block Pass Manager does not live by itself + PMTopLevelManager *TPM = PMD->getTopLevelManager(); + TPM->addIndirectPassManager(BBP); + + // [3] Assign manager to manage this new manager. This may create + // and push new managers into PMS + BBP->assignPassManager(PMS, PreferredType); + + // [4] Push new manager into PMS + PMS.push(BBP); + } + + // Assign BBP as the manager of this pass. + BBP->add(this); +} + +PassManagerBase::~PassManagerBase() {} diff --git a/lib/IR/PassRegistry.cpp b/lib/IR/PassRegistry.cpp new file mode 100644 index 0000000000..a0b64ed78f --- /dev/null +++ b/lib/IR/PassRegistry.cpp @@ -0,0 +1,209 @@ +//===- PassRegistry.cpp - Pass Registration Implementation ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the PassRegistry, with which passes are registered on +// initialization, and supports the PassManager in dependency resolution. +// +//===----------------------------------------------------------------------===// + +#include "llvm/PassRegistry.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/IR/Function.h" +#include "llvm/PassSupport.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/Mutex.h" +#include <vector> + +using namespace llvm; + +// FIXME: We use ManagedStatic to erase the pass registrar on shutdown. +// Unfortunately, passes are registered with static ctors, and having +// llvm_shutdown clear this map prevents successful resurrection after +// llvm_shutdown is run. Ideally we should find a solution so that we don't +// leak the map, AND can still resurrect after shutdown. 
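//===----------------------------------------------------------------------===//
// [Editor's aside -- illustrative sketch, not part of this commit.] The
// PassRegistry singleton defined below maps a pass's command-line argument
// (for example the "print-module" string registered elsewhere in this commit)
// to its PassInfo, from which a fresh pass instance can be constructed. The
// helper name is hypothetical, and it assumes the pass has already been
// registered through the usual initialization calls:

#include "llvm/ADT/StringRef.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"

static llvm::Pass *createPassByArgument(llvm::StringRef Arg) {
  const llvm::PassInfo *PI =
      llvm::PassRegistry::getPassRegistry()->getPassInfo(Arg);
  if (!PI || PI->isAnalysisGroup())
    return 0;
  return PI->createPass();  // caller owns the result, e.g. via PassManager::add
}
//===----------------------------------------------------------------------===//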
+static ManagedStatic<PassRegistry> PassRegistryObj; +PassRegistry *PassRegistry::getPassRegistry() { + return &*PassRegistryObj; +} + +static ManagedStatic<sys::SmartMutex<true> > Lock; + +//===----------------------------------------------------------------------===// +// PassRegistryImpl +// + +namespace { +struct PassRegistryImpl { + /// PassInfoMap - Keep track of the PassInfo object for each registered pass. + typedef DenseMap<const void*, const PassInfo*> MapType; + MapType PassInfoMap; + + typedef StringMap<const PassInfo*> StringMapType; + StringMapType PassInfoStringMap; + + /// AnalysisGroupInfo - Keep track of information for each analysis group. + struct AnalysisGroupInfo { + SmallPtrSet<const PassInfo *, 8> Implementations; + }; + DenseMap<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap; + + std::vector<const PassInfo*> ToFree; + std::vector<PassRegistrationListener*> Listeners; +}; +} // end anonymous namespace + +void *PassRegistry::getImpl() const { + if (!pImpl) + pImpl = new PassRegistryImpl(); + return pImpl; +} + +//===----------------------------------------------------------------------===// +// Accessors +// + +PassRegistry::~PassRegistry() { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(pImpl); + + for (std::vector<const PassInfo*>::iterator I = Impl->ToFree.begin(), + E = Impl->ToFree.end(); I != E; ++I) + delete *I; + + delete Impl; + pImpl = 0; +} + +const PassInfo *PassRegistry::getPassInfo(const void *TI) const { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.find(TI); + return I != Impl->PassInfoMap.end() ? I->second : 0; +} + +const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + PassRegistryImpl::StringMapType::const_iterator + I = Impl->PassInfoStringMap.find(Arg); + return I != Impl->PassInfoStringMap.end() ? I->second : 0; +} + +//===----------------------------------------------------------------------===// +// Pass Registration mechanism +// + +void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + bool Inserted = + Impl->PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second; + assert(Inserted && "Pass registered multiple times!"); + (void)Inserted; + Impl->PassInfoStringMap[PI.getPassArgument()] = &PI; + + // Notify any listeners. + for (std::vector<PassRegistrationListener*>::iterator + I = Impl->Listeners.begin(), E = Impl->Listeners.end(); I != E; ++I) + (*I)->passRegistered(&PI); + + if (ShouldFree) Impl->ToFree.push_back(&PI); +} + +void PassRegistry::unregisterPass(const PassInfo &PI) { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + PassRegistryImpl::MapType::iterator I = + Impl->PassInfoMap.find(PI.getTypeInfo()); + assert(I != Impl->PassInfoMap.end() && "Pass registered but not in map!"); + + // Remove pass from the map. 
+ Impl->PassInfoMap.erase(I); + Impl->PassInfoStringMap.erase(PI.getPassArgument()); +} + +void PassRegistry::enumerateWith(PassRegistrationListener *L) { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + for (PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.begin(), + E = Impl->PassInfoMap.end(); I != E; ++I) + L->passEnumerate(I->second); +} + + +/// Analysis Group Mechanisms. +void PassRegistry::registerAnalysisGroup(const void *InterfaceID, + const void *PassID, + PassInfo& Registeree, + bool isDefault, + bool ShouldFree) { + PassInfo *InterfaceInfo = const_cast<PassInfo*>(getPassInfo(InterfaceID)); + if (InterfaceInfo == 0) { + // First reference to Interface, register it now. + registerPass(Registeree); + InterfaceInfo = &Registeree; + } + assert(Registeree.isAnalysisGroup() && + "Trying to join an analysis group that is a normal pass!"); + + if (PassID) { + PassInfo *ImplementationInfo = const_cast<PassInfo*>(getPassInfo(PassID)); + assert(ImplementationInfo && + "Must register pass before adding to AnalysisGroup!"); + + sys::SmartScopedLock<true> Guard(*Lock); + + // Make sure we keep track of the fact that the implementation implements + // the interface. + ImplementationInfo->addInterfaceImplemented(InterfaceInfo); + + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + PassRegistryImpl::AnalysisGroupInfo &AGI = + Impl->AnalysisGroupInfoMap[InterfaceInfo]; + assert(AGI.Implementations.count(ImplementationInfo) == 0 && + "Cannot add a pass to the same analysis group more than once!"); + AGI.Implementations.insert(ImplementationInfo); + if (isDefault) { + assert(InterfaceInfo->getNormalCtor() == 0 && + "Default implementation for analysis group already specified!"); + assert(ImplementationInfo->getNormalCtor() && + "Cannot specify pass as default if it does not have a default ctor"); + InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor()); + } + } + + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + if (ShouldFree) Impl->ToFree.push_back(&Registeree); +} + +void PassRegistry::addRegistrationListener(PassRegistrationListener *L) { + sys::SmartScopedLock<true> Guard(*Lock); + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + Impl->Listeners.push_back(L); +} + +void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) { + sys::SmartScopedLock<true> Guard(*Lock); + + // NOTE: This is necessary, because removeRegistrationListener() can be called + // as part of the llvm_shutdown sequence. Since we have no control over the + // order of that sequence, we need to gracefully handle the case where the + // PassRegistry is destructed before the object that triggers this call. + if (!pImpl) return; + + PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl()); + std::vector<PassRegistrationListener*>::iterator I = + std::find(Impl->Listeners.begin(), Impl->Listeners.end(), L); + assert(I != Impl->Listeners.end() && + "PassRegistrationListener not registered!"); + Impl->Listeners.erase(I); +} diff --git a/lib/IR/PrintModulePass.cpp b/lib/IR/PrintModulePass.cpp new file mode 100644 index 0000000000..5026bc2d98 --- /dev/null +++ b/lib/IR/PrintModulePass.cpp @@ -0,0 +1,136 @@ +//===--- IR/PrintModulePass.cpp - Module/Function Printer -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// PrintModulePass and PrintFunctionPass implementations. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Assembly/PrintModulePass.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Module.h" +#include "llvm/Pass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +namespace { + + class PrintModulePass : public ModulePass { + std::string Banner; + raw_ostream *Out; // raw_ostream to print on + bool DeleteStream; // Delete the ostream in our dtor? + public: + static char ID; + PrintModulePass() : ModulePass(ID), Out(&dbgs()), + DeleteStream(false) {} + PrintModulePass(const std::string &B, raw_ostream *o, bool DS) + : ModulePass(ID), Banner(B), Out(o), DeleteStream(DS) {} + + ~PrintModulePass() { + if (DeleteStream) delete Out; + } + + bool runOnModule(Module &M) { + (*Out) << Banner << M; + return false; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + }; + + class PrintFunctionPass : public FunctionPass { + std::string Banner; // String to print before each function + raw_ostream *Out; // raw_ostream to print on + bool DeleteStream; // Delete the ostream in our dtor? + public: + static char ID; + PrintFunctionPass() : FunctionPass(ID), Banner(""), Out(&dbgs()), + DeleteStream(false) {} + PrintFunctionPass(const std::string &B, raw_ostream *o, bool DS) + : FunctionPass(ID), Banner(B), Out(o), DeleteStream(DS) {} + + ~PrintFunctionPass() { + if (DeleteStream) delete Out; + } + + // runOnFunction - This pass just prints a banner followed by the + // function as it's processed. + // + bool runOnFunction(Function &F) { + (*Out) << Banner << static_cast<Value&>(F); + return false; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + }; + + class PrintBasicBlockPass : public BasicBlockPass { + std::string Banner; + raw_ostream *Out; // raw_ostream to print on + bool DeleteStream; // Delete the ostream in our dtor? + public: + static char ID; + PrintBasicBlockPass() : BasicBlockPass(ID), Out(&dbgs()), + DeleteStream(false) {} + PrintBasicBlockPass(const std::string &B, raw_ostream *o, bool DS) + : BasicBlockPass(ID), Banner(B), Out(o), DeleteStream(DS) {} + + ~PrintBasicBlockPass() { + if (DeleteStream) delete Out; + } + + bool runOnBasicBlock(BasicBlock &BB) { + (*Out) << Banner << BB; + return false; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + }; +} + +char PrintModulePass::ID = 0; +INITIALIZE_PASS(PrintModulePass, "print-module", + "Print module to stderr", false, false) +char PrintFunctionPass::ID = 0; +INITIALIZE_PASS(PrintFunctionPass, "print-function", + "Print function to stderr", false, false) +char PrintBasicBlockPass::ID = 0; +INITIALIZE_PASS(PrintBasicBlockPass, "print-bb", + "Print BB to stderr", false, false) + +/// createPrintModulePass - Create and return a pass that writes the +/// module to the specified raw_ostream. +ModulePass *llvm::createPrintModulePass(llvm::raw_ostream *OS, + bool DeleteStream, + const std::string &Banner) { + return new PrintModulePass(Banner, OS, DeleteStream); +} + +/// createPrintFunctionPass - Create and return a pass that prints +/// functions to the specified raw_ostream as they are processed. 
+FunctionPass *llvm::createPrintFunctionPass(const std::string &Banner, + llvm::raw_ostream *OS, + bool DeleteStream) { + return new PrintFunctionPass(Banner, OS, DeleteStream); +} + +/// createPrintBasicBlockPass - Create and return a pass that writes the +/// BB to the specified raw_ostream. +BasicBlockPass *llvm::createPrintBasicBlockPass(llvm::raw_ostream *OS, + bool DeleteStream, + const std::string &Banner) { + return new PrintBasicBlockPass(Banner, OS, DeleteStream); +} + diff --git a/lib/IR/SymbolTableListTraitsImpl.h b/lib/IR/SymbolTableListTraitsImpl.h new file mode 100644 index 0000000000..5a383eee56 --- /dev/null +++ b/lib/IR/SymbolTableListTraitsImpl.h @@ -0,0 +1,118 @@ +//===-- llvm/SymbolTableListTraitsImpl.h - Implementation ------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the stickier parts of the SymbolTableListTraits class, +// and is explicitly instantiated where needed to avoid defining all this code +// in a widely used header. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYMBOLTABLELISTTRAITS_IMPL_H +#define LLVM_SYMBOLTABLELISTTRAITS_IMPL_H + +#include "llvm/IR/SymbolTableListTraits.h" +#include "llvm/IR/ValueSymbolTable.h" + +namespace llvm { + +/// setSymTabObject - This is called when (f.e.) the parent of a basic block +/// changes. This requires us to remove all the instruction symtab entries from +/// the current function and reinsert them into the new function. +template<typename ValueSubClass, typename ItemParentClass> +template<typename TPtr> +void SymbolTableListTraits<ValueSubClass,ItemParentClass> +::setSymTabObject(TPtr *Dest, TPtr Src) { + // Get the old symtab and value list before doing the assignment. + ValueSymbolTable *OldST = TraitsClass::getSymTab(getListOwner()); + + // Do it. + *Dest = Src; + + // Get the new SymTab object. + ValueSymbolTable *NewST = TraitsClass::getSymTab(getListOwner()); + + // If there is nothing to do, quick exit. + if (OldST == NewST) return; + + // Move all the elements from the old symtab to the new one. + iplist<ValueSubClass> &ItemList = TraitsClass::getList(getListOwner()); + if (ItemList.empty()) return; + + if (OldST) { + // Remove all entries from the previous symtab. + for (typename iplist<ValueSubClass>::iterator I = ItemList.begin(); + I != ItemList.end(); ++I) + if (I->hasName()) + OldST->removeValueName(I->getValueName()); + } + + if (NewST) { + // Add all of the items to the new symtab. 
+ for (typename iplist<ValueSubClass>::iterator I = ItemList.begin(); + I != ItemList.end(); ++I) + if (I->hasName()) + NewST->reinsertValue(I); + } + +} + +template<typename ValueSubClass, typename ItemParentClass> +void SymbolTableListTraits<ValueSubClass,ItemParentClass> +::addNodeToList(ValueSubClass *V) { + assert(V->getParent() == 0 && "Value already in a container!!"); + ItemParentClass *Owner = getListOwner(); + V->setParent(Owner); + if (V->hasName()) + if (ValueSymbolTable *ST = TraitsClass::getSymTab(Owner)) + ST->reinsertValue(V); +} + +template<typename ValueSubClass, typename ItemParentClass> +void SymbolTableListTraits<ValueSubClass,ItemParentClass> +::removeNodeFromList(ValueSubClass *V) { + V->setParent(0); + if (V->hasName()) + if (ValueSymbolTable *ST = TraitsClass::getSymTab(getListOwner())) + ST->removeValueName(V->getValueName()); +} + +template<typename ValueSubClass, typename ItemParentClass> +void SymbolTableListTraits<ValueSubClass,ItemParentClass> +::transferNodesFromList(ilist_traits<ValueSubClass> &L2, + ilist_iterator<ValueSubClass> first, + ilist_iterator<ValueSubClass> last) { + // We only have to do work here if transferring instructions between BBs + ItemParentClass *NewIP = getListOwner(), *OldIP = L2.getListOwner(); + if (NewIP == OldIP) return; // No work to do at all... + + // We only have to update symbol table entries if we are transferring the + // instructions to a different symtab object... + ValueSymbolTable *NewST = TraitsClass::getSymTab(NewIP); + ValueSymbolTable *OldST = TraitsClass::getSymTab(OldIP); + if (NewST != OldST) { + for (; first != last; ++first) { + ValueSubClass &V = *first; + bool HasName = V.hasName(); + if (OldST && HasName) + OldST->removeValueName(V.getValueName()); + V.setParent(NewIP); + if (NewST && HasName) + NewST->reinsertValue(&V); + } + } else { + // Just transferring between blocks in the same function, simply update the + // parent fields in the instructions... + for (; first != last; ++first) + first->setParent(NewIP); + } +} + +} // End llvm namespace + +#endif diff --git a/lib/IR/Type.cpp b/lib/IR/Type.cpp new file mode 100644 index 0000000000..1e6a51ab10 --- /dev/null +++ b/lib/IR/Type.cpp @@ -0,0 +1,767 @@ +//===-- Type.cpp - Implement the Type class -------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Type class for the IR library. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Type.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/IR/Module.h" +#include <algorithm> +#include <cstdarg> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Type Class Implementation +//===----------------------------------------------------------------------===// + +Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) { + switch (IDNumber) { + case VoidTyID : return getVoidTy(C); + case HalfTyID : return getHalfTy(C); + case FloatTyID : return getFloatTy(C); + case DoubleTyID : return getDoubleTy(C); + case X86_FP80TyID : return getX86_FP80Ty(C); + case FP128TyID : return getFP128Ty(C); + case PPC_FP128TyID : return getPPC_FP128Ty(C); + case LabelTyID : return getLabelTy(C); + case MetadataTyID : return getMetadataTy(C); + case X86_MMXTyID : return getX86_MMXTy(C); + default: + return 0; + } +} + +/// getScalarType - If this is a vector type, return the element type, +/// otherwise return this. +Type *Type::getScalarType() { + if (VectorType *VTy = dyn_cast<VectorType>(this)) + return VTy->getElementType(); + return this; +} + +const Type *Type::getScalarType() const { + if (const VectorType *VTy = dyn_cast<VectorType>(this)) + return VTy->getElementType(); + return this; +} + +/// isIntegerTy - Return true if this is an IntegerType of the specified width. +bool Type::isIntegerTy(unsigned Bitwidth) const { + return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth; +} + +// canLosslesslyBitCastTo - Return true if this type can be converted to +// 'Ty' without any reinterpretation of bits. For example, i8* to i32*. +// +bool Type::canLosslesslyBitCastTo(Type *Ty) const { + // Identity cast means no change so return true + if (this == Ty) + return true; + + // They are not convertible unless they are at least first class types + if (!this->isFirstClassType() || !Ty->isFirstClassType()) + return false; + + // Vector -> Vector conversions are always lossless if the two vector types + // have the same size, otherwise not. Also, 64-bit vector types can be + // converted to x86mmx. + if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) { + if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty)) + return thisPTy->getBitWidth() == thatPTy->getBitWidth(); + if (Ty->getTypeID() == Type::X86_MMXTyID && + thisPTy->getBitWidth() == 64) + return true; + } + + if (this->getTypeID() == Type::X86_MMXTyID) + if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty)) + if (thatPTy->getBitWidth() == 64) + return true; + + // At this point we have only various mismatches of the first class types + // remaining and ptr->ptr. Just select the lossless conversions. Everything + // else is not lossless. 
+ if (this->isPointerTy()) + return Ty->isPointerTy(); + return false; // Other types have no identity values +} + +bool Type::isEmptyTy() const { + const ArrayType *ATy = dyn_cast<ArrayType>(this); + if (ATy) { + unsigned NumElements = ATy->getNumElements(); + return NumElements == 0 || ATy->getElementType()->isEmptyTy(); + } + + const StructType *STy = dyn_cast<StructType>(this); + if (STy) { + unsigned NumElements = STy->getNumElements(); + for (unsigned i = 0; i < NumElements; ++i) + if (!STy->getElementType(i)->isEmptyTy()) + return false; + return true; + } + + return false; +} + +unsigned Type::getPrimitiveSizeInBits() const { + switch (getTypeID()) { + case Type::HalfTyID: return 16; + case Type::FloatTyID: return 32; + case Type::DoubleTyID: return 64; + case Type::X86_FP80TyID: return 80; + case Type::FP128TyID: return 128; + case Type::PPC_FP128TyID: return 128; + case Type::X86_MMXTyID: return 64; + case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth(); + case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth(); + default: return 0; + } +} + +/// getScalarSizeInBits - If this is a vector type, return the +/// getPrimitiveSizeInBits value for the element type. Otherwise return the +/// getPrimitiveSizeInBits value for this type. +unsigned Type::getScalarSizeInBits() { + return getScalarType()->getPrimitiveSizeInBits(); +} + +/// getFPMantissaWidth - Return the width of the mantissa of this type. This +/// is only valid on floating point types. If the FP type does not +/// have a stable mantissa (e.g. ppc long double), this method returns -1. +int Type::getFPMantissaWidth() const { + if (const VectorType *VTy = dyn_cast<VectorType>(this)) + return VTy->getElementType()->getFPMantissaWidth(); + assert(isFloatingPointTy() && "Not a floating point type!"); + if (getTypeID() == HalfTyID) return 11; + if (getTypeID() == FloatTyID) return 24; + if (getTypeID() == DoubleTyID) return 53; + if (getTypeID() == X86_FP80TyID) return 64; + if (getTypeID() == FP128TyID) return 113; + assert(getTypeID() == PPC_FP128TyID && "unknown fp type"); + return -1; +} + +/// isSizedDerivedType - Derived types like structures and arrays are sized +/// iff all of the members of the type are sized as well. Since asking for +/// their size is relatively uncommon, move this operation out of line. 
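//===----------------------------------------------------------------------===//
// [Editor's aside -- illustrative sketch, not part of this commit.] The
// primitive-size and scalar queries above behave as follows on a couple of
// simple types; the helper name is invented and the asserts merely restate
// values hard-coded in this file:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

static void typeSizeQueries(llvm::LLVMContext &Ctx) {
  llvm::Type *FloatTy = llvm::Type::getFloatTy(Ctx);
  assert(FloatTy->getPrimitiveSizeInBits() == 32);
  assert(FloatTy->getFPMantissaWidth() == 24);     // IEEE single precision

  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  assert(I32->isIntegerTy(32) && I32->getScalarSizeInBits() == 32);
  (void)FloatTy; (void)I32;                        // silence -Wunused in NDEBUG
}
//===----------------------------------------------------------------------===//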
+bool Type::isSizedDerivedType() const { + if (this->isIntegerTy()) + return true; + + if (const ArrayType *ATy = dyn_cast<ArrayType>(this)) + return ATy->getElementType()->isSized(); + + if (const VectorType *VTy = dyn_cast<VectorType>(this)) + return VTy->getElementType()->isSized(); + + if (!this->isStructTy()) + return false; + + return cast<StructType>(this)->isSized(); +} + +//===----------------------------------------------------------------------===// +// Subclass Helper Methods +//===----------------------------------------------------------------------===// + +unsigned Type::getIntegerBitWidth() const { + return cast<IntegerType>(this)->getBitWidth(); +} + +bool Type::isFunctionVarArg() const { + return cast<FunctionType>(this)->isVarArg(); +} + +Type *Type::getFunctionParamType(unsigned i) const { + return cast<FunctionType>(this)->getParamType(i); +} + +unsigned Type::getFunctionNumParams() const { + return cast<FunctionType>(this)->getNumParams(); +} + +StringRef Type::getStructName() const { + return cast<StructType>(this)->getName(); +} + +unsigned Type::getStructNumElements() const { + return cast<StructType>(this)->getNumElements(); +} + +Type *Type::getStructElementType(unsigned N) const { + return cast<StructType>(this)->getElementType(N); +} + +Type *Type::getSequentialElementType() const { + return cast<SequentialType>(this)->getElementType(); +} + +uint64_t Type::getArrayNumElements() const { + return cast<ArrayType>(this)->getNumElements(); +} + +unsigned Type::getVectorNumElements() const { + return cast<VectorType>(this)->getNumElements(); +} + +unsigned Type::getPointerAddressSpace() const { + return cast<PointerType>(getScalarType())->getAddressSpace(); +} + + +//===----------------------------------------------------------------------===// +// Primitive 'Type' data +//===----------------------------------------------------------------------===// + +Type *Type::getVoidTy(LLVMContext &C) { return &C.pImpl->VoidTy; } +Type *Type::getLabelTy(LLVMContext &C) { return &C.pImpl->LabelTy; } +Type *Type::getHalfTy(LLVMContext &C) { return &C.pImpl->HalfTy; } +Type *Type::getFloatTy(LLVMContext &C) { return &C.pImpl->FloatTy; } +Type *Type::getDoubleTy(LLVMContext &C) { return &C.pImpl->DoubleTy; } +Type *Type::getMetadataTy(LLVMContext &C) { return &C.pImpl->MetadataTy; } +Type *Type::getX86_FP80Ty(LLVMContext &C) { return &C.pImpl->X86_FP80Ty; } +Type *Type::getFP128Ty(LLVMContext &C) { return &C.pImpl->FP128Ty; } +Type *Type::getPPC_FP128Ty(LLVMContext &C) { return &C.pImpl->PPC_FP128Ty; } +Type *Type::getX86_MMXTy(LLVMContext &C) { return &C.pImpl->X86_MMXTy; } + +IntegerType *Type::getInt1Ty(LLVMContext &C) { return &C.pImpl->Int1Ty; } +IntegerType *Type::getInt8Ty(LLVMContext &C) { return &C.pImpl->Int8Ty; } +IntegerType *Type::getInt16Ty(LLVMContext &C) { return &C.pImpl->Int16Ty; } +IntegerType *Type::getInt32Ty(LLVMContext &C) { return &C.pImpl->Int32Ty; } +IntegerType *Type::getInt64Ty(LLVMContext &C) { return &C.pImpl->Int64Ty; } + +IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) { + return IntegerType::get(C, N); +} + +PointerType *Type::getHalfPtrTy(LLVMContext &C, unsigned AS) { + return getHalfTy(C)->getPointerTo(AS); +} + +PointerType *Type::getFloatPtrTy(LLVMContext &C, unsigned AS) { + return getFloatTy(C)->getPointerTo(AS); +} + +PointerType *Type::getDoublePtrTy(LLVMContext &C, unsigned AS) { + return getDoubleTy(C)->getPointerTo(AS); +} + +PointerType *Type::getX86_FP80PtrTy(LLVMContext &C, unsigned AS) { + return 
getX86_FP80Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getFP128PtrTy(LLVMContext &C, unsigned AS) { + return getFP128Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) { + return getPPC_FP128Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getX86_MMXPtrTy(LLVMContext &C, unsigned AS) { + return getX86_MMXTy(C)->getPointerTo(AS); +} + +PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) { + return getIntNTy(C, N)->getPointerTo(AS); +} + +PointerType *Type::getInt1PtrTy(LLVMContext &C, unsigned AS) { + return getInt1Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getInt8PtrTy(LLVMContext &C, unsigned AS) { + return getInt8Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getInt16PtrTy(LLVMContext &C, unsigned AS) { + return getInt16Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getInt32PtrTy(LLVMContext &C, unsigned AS) { + return getInt32Ty(C)->getPointerTo(AS); +} + +PointerType *Type::getInt64PtrTy(LLVMContext &C, unsigned AS) { + return getInt64Ty(C)->getPointerTo(AS); +} + + +//===----------------------------------------------------------------------===// +// IntegerType Implementation +//===----------------------------------------------------------------------===// + +IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) { + assert(NumBits >= MIN_INT_BITS && "bitwidth too small"); + assert(NumBits <= MAX_INT_BITS && "bitwidth too large"); + + // Check for the built-in integer types + switch (NumBits) { + case 1: return cast<IntegerType>(Type::getInt1Ty(C)); + case 8: return cast<IntegerType>(Type::getInt8Ty(C)); + case 16: return cast<IntegerType>(Type::getInt16Ty(C)); + case 32: return cast<IntegerType>(Type::getInt32Ty(C)); + case 64: return cast<IntegerType>(Type::getInt64Ty(C)); + default: + break; + } + + IntegerType *&Entry = C.pImpl->IntegerTypes[NumBits]; + + if (Entry == 0) + Entry = new (C.pImpl->TypeAllocator) IntegerType(C, NumBits); + + return Entry; +} + +bool IntegerType::isPowerOf2ByteWidth() const { + unsigned BitWidth = getBitWidth(); + return (BitWidth > 7) && isPowerOf2_32(BitWidth); +} + +APInt IntegerType::getMask() const { + return APInt::getAllOnesValue(getBitWidth()); +} + +//===----------------------------------------------------------------------===// +// FunctionType Implementation +//===----------------------------------------------------------------------===// + +FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params, + bool IsVarArgs) + : Type(Result->getContext(), FunctionTyID) { + Type **SubTys = reinterpret_cast<Type**>(this+1); + assert(isValidReturnType(Result) && "invalid return type for function"); + setSubclassData(IsVarArgs); + + SubTys[0] = const_cast<Type*>(Result); + + for (unsigned i = 0, e = Params.size(); i != e; ++i) { + assert(isValidArgumentType(Params[i]) && + "Not a valid type for function argument!"); + SubTys[i+1] = Params[i]; + } + + ContainedTys = SubTys; + NumContainedTys = Params.size() + 1; // + 1 for result type +} + +// FunctionType::get - The factory function for the FunctionType class. +FunctionType *FunctionType::get(Type *ReturnType, + ArrayRef<Type*> Params, bool isVarArg) { + LLVMContextImpl *pImpl = ReturnType->getContext().pImpl; + FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg); + LLVMContextImpl::FunctionTypeMap::iterator I = + pImpl->FunctionTypes.find_as(Key); + FunctionType *FT; + + if (I == pImpl->FunctionTypes.end()) { + FT = (FunctionType*) pImpl->TypeAllocator. 
+ Allocate(sizeof(FunctionType) + sizeof(Type*) * (Params.size() + 1), + AlignOf<FunctionType>::Alignment); + new (FT) FunctionType(ReturnType, Params, isVarArg); + pImpl->FunctionTypes[FT] = true; + } else { + FT = I->first; + } + + return FT; +} + +FunctionType *FunctionType::get(Type *Result, bool isVarArg) { + return get(Result, ArrayRef<Type *>(), isVarArg); +} + +/// isValidReturnType - Return true if the specified type is valid as a return +/// type. +bool FunctionType::isValidReturnType(Type *RetTy) { + return !RetTy->isFunctionTy() && !RetTy->isLabelTy() && + !RetTy->isMetadataTy(); +} + +/// isValidArgumentType - Return true if the specified type is valid as an +/// argument type. +bool FunctionType::isValidArgumentType(Type *ArgTy) { + return ArgTy->isFirstClassType(); +} + +//===----------------------------------------------------------------------===// +// StructType Implementation +//===----------------------------------------------------------------------===// + +// Primitive Constructors. + +StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes, + bool isPacked) { + LLVMContextImpl *pImpl = Context.pImpl; + AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked); + LLVMContextImpl::StructTypeMap::iterator I = + pImpl->AnonStructTypes.find_as(Key); + StructType *ST; + + if (I == pImpl->AnonStructTypes.end()) { + // Value not found. Create a new type! + ST = new (Context.pImpl->TypeAllocator) StructType(Context); + ST->setSubclassData(SCDB_IsLiteral); // Literal struct. + ST->setBody(ETypes, isPacked); + Context.pImpl->AnonStructTypes[ST] = true; + } else { + ST = I->first; + } + + return ST; +} + +void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) { + assert(isOpaque() && "Struct body already set!"); + + setSubclassData(getSubclassData() | SCDB_HasBody); + if (isPacked) + setSubclassData(getSubclassData() | SCDB_Packed); + + unsigned NumElements = Elements.size(); + Type **Elts = getContext().pImpl->TypeAllocator.Allocate<Type*>(NumElements); + memcpy(Elts, Elements.data(), sizeof(Elements[0]) * NumElements); + + ContainedTys = Elts; + NumContainedTys = NumElements; +} + +void StructType::setName(StringRef Name) { + if (Name == getName()) return; + + StringMap<StructType *> &SymbolTable = getContext().pImpl->NamedStructTypes; + typedef StringMap<StructType *>::MapEntryTy EntryTy; + + // If this struct already had a name, remove its symbol table entry. Don't + // delete the data yet because it may be part of the new name. + if (SymbolTableEntry) + SymbolTable.remove((EntryTy *)SymbolTableEntry); + + // If this is just removing the name, we're done. + if (Name.empty()) { + if (SymbolTableEntry) { + // Delete the old string data. + ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator()); + SymbolTableEntry = 0; + } + return; + } + + // Look up the entry for the name. + EntryTy *Entry = &getContext().pImpl->NamedStructTypes.GetOrCreateValue(Name); + + // While we have a name collision, try a random rename. + if (Entry->getValue()) { + SmallString<64> TempStr(Name); + TempStr.push_back('.'); + raw_svector_ostream TmpStream(TempStr); + unsigned NameSize = Name.size(); + + do { + TempStr.resize(NameSize + 1); + TmpStream.resync(); + TmpStream << getContext().pImpl->NamedStructTypesUniqueID++; + + Entry = &getContext().pImpl-> + NamedStructTypes.GetOrCreateValue(TmpStream.str()); + } while (Entry->getValue()); + } + + // Okay, we found an entry that isn't used. It's us! + Entry->setValue(this); + + // Delete the old string data. 
+ if (SymbolTableEntry) + ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator()); + SymbolTableEntry = Entry; +} + +//===----------------------------------------------------------------------===// +// StructType Helper functions. + +StructType *StructType::create(LLVMContext &Context, StringRef Name) { + StructType *ST = new (Context.pImpl->TypeAllocator) StructType(Context); + if (!Name.empty()) + ST->setName(Name); + return ST; +} + +StructType *StructType::get(LLVMContext &Context, bool isPacked) { + return get(Context, llvm::ArrayRef<Type*>(), isPacked); +} + +StructType *StructType::get(Type *type, ...) { + assert(type != 0 && "Cannot create a struct type with no elements with this"); + LLVMContext &Ctx = type->getContext(); + va_list ap; + SmallVector<llvm::Type*, 8> StructFields; + va_start(ap, type); + while (type) { + StructFields.push_back(type); + type = va_arg(ap, llvm::Type*); + } + return llvm::StructType::get(Ctx, StructFields); +} + +StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements, + StringRef Name, bool isPacked) { + StructType *ST = create(Context, Name); + ST->setBody(Elements, isPacked); + return ST; +} + +StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements) { + return create(Context, Elements, StringRef()); +} + +StructType *StructType::create(LLVMContext &Context) { + return create(Context, StringRef()); +} + +StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name, + bool isPacked) { + assert(!Elements.empty() && + "This method may not be invoked with an empty list"); + return create(Elements[0]->getContext(), Elements, Name, isPacked); +} + +StructType *StructType::create(ArrayRef<Type*> Elements) { + assert(!Elements.empty() && + "This method may not be invoked with an empty list"); + return create(Elements[0]->getContext(), Elements, StringRef()); +} + +StructType *StructType::create(StringRef Name, Type *type, ...) { + assert(type != 0 && "Cannot create a struct type with no elements with this"); + LLVMContext &Ctx = type->getContext(); + va_list ap; + SmallVector<llvm::Type*, 8> StructFields; + va_start(ap, type); + while (type) { + StructFields.push_back(type); + type = va_arg(ap, llvm::Type*); + } + return llvm::StructType::create(Ctx, StructFields, Name); +} + +bool StructType::isSized() const { + if ((getSubclassData() & SCDB_IsSized) != 0) + return true; + if (isOpaque()) + return false; + + // Okay, our struct is sized if all of the elements are, but if one of the + // elements is opaque, the struct isn't sized *yet*, but may become sized in + // the future, so just bail out without caching. + for (element_iterator I = element_begin(), E = element_end(); I != E; ++I) + if (!(*I)->isSized()) + return false; + + // Here we cheat a bit and cast away const-ness. The goal is to memoize when + // we find a sized type, as types can only move from opaque to sized, not the + // other way. + const_cast<StructType*>(this)->setSubclassData( + getSubclassData() | SCDB_IsSized); + return true; +} + +StringRef StructType::getName() const { + assert(!isLiteral() && "Literal structs never have names"); + if (SymbolTableEntry == 0) return StringRef(); + + return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey(); +} + +void StructType::setBody(Type *type, ...) 
{ + assert(type != 0 && "Cannot create a struct type with no elements with this"); + va_list ap; + SmallVector<llvm::Type*, 8> StructFields; + va_start(ap, type); + while (type) { + StructFields.push_back(type); + type = va_arg(ap, llvm::Type*); + } + setBody(StructFields); +} + +bool StructType::isValidElementType(Type *ElemTy) { + return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() && + !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy(); +} + +/// isLayoutIdentical - Return true if this is layout identical to the +/// specified struct. +bool StructType::isLayoutIdentical(StructType *Other) const { + if (this == Other) return true; + + if (isPacked() != Other->isPacked() || + getNumElements() != Other->getNumElements()) + return false; + + return std::equal(element_begin(), element_end(), Other->element_begin()); +} + +/// getTypeByName - Return the type with the specified name, or null if there +/// is none by that name. +StructType *Module::getTypeByName(StringRef Name) const { + StringMap<StructType*>::iterator I = + getContext().pImpl->NamedStructTypes.find(Name); + if (I != getContext().pImpl->NamedStructTypes.end()) + return I->second; + return 0; +} + + +//===----------------------------------------------------------------------===// +// CompositeType Implementation +//===----------------------------------------------------------------------===// + +Type *CompositeType::getTypeAtIndex(const Value *V) { + if (StructType *STy = dyn_cast<StructType>(this)) { + unsigned Idx = + (unsigned)cast<Constant>(V)->getUniqueInteger().getZExtValue(); + assert(indexValid(Idx) && "Invalid structure index!"); + return STy->getElementType(Idx); + } + + return cast<SequentialType>(this)->getElementType(); +} +Type *CompositeType::getTypeAtIndex(unsigned Idx) { + if (StructType *STy = dyn_cast<StructType>(this)) { + assert(indexValid(Idx) && "Invalid structure index!"); + return STy->getElementType(Idx); + } + + return cast<SequentialType>(this)->getElementType(); +} +bool CompositeType::indexValid(const Value *V) const { + if (const StructType *STy = dyn_cast<StructType>(this)) { + // Structure indexes require (vectors of) 32-bit integer constants. In the + // vector case all of the indices must be equal. + if (!V->getType()->getScalarType()->isIntegerTy(32)) + return false; + const Constant *C = dyn_cast<Constant>(V); + if (C && V->getType()->isVectorTy()) + C = C->getSplatValue(); + const ConstantInt *CU = dyn_cast_or_null<ConstantInt>(C); + return CU && CU->getZExtValue() < STy->getNumElements(); + } + + // Sequential types can be indexed by any integer. + return V->getType()->isIntOrIntVectorTy(); +} + +bool CompositeType::indexValid(unsigned Idx) const { + if (const StructType *STy = dyn_cast<StructType>(this)) + return Idx < STy->getNumElements(); + // Sequential types can be indexed by any integer. 
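The setName collision handling and Module::getTypeByName above combine as in the following sketch, which assumes the module does not already contain a type called "node" (otherwise setName silently appends a numeric suffix and the lookup finds the older type):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Creates the identified struct %node = type { i32, %node* } and then finds
// it again through the context-wide named-struct map.
static StructType *makeNodeType(Module &M) {
  LLVMContext &Ctx = M.getContext();
  StructType *Node = StructType::create(Ctx, "node");
  Type *Fields[] = { Type::getInt32Ty(Ctx), PointerType::getUnqual(Node) };
  Node->setBody(Fields, /*isPacked=*/false);
  return M.getTypeByName("node");   // same pointer as Node if the name was free
}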
+ return true; +} + + +//===----------------------------------------------------------------------===// +// ArrayType Implementation +//===----------------------------------------------------------------------===// + +ArrayType::ArrayType(Type *ElType, uint64_t NumEl) + : SequentialType(ArrayTyID, ElType) { + NumElements = NumEl; +} + +ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) { + Type *ElementType = const_cast<Type*>(elementType); + assert(isValidElementType(ElementType) && "Invalid type for array element!"); + + LLVMContextImpl *pImpl = ElementType->getContext().pImpl; + ArrayType *&Entry = + pImpl->ArrayTypes[std::make_pair(ElementType, NumElements)]; + + if (Entry == 0) + Entry = new (pImpl->TypeAllocator) ArrayType(ElementType, NumElements); + return Entry; +} + +bool ArrayType::isValidElementType(Type *ElemTy) { + return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() && + !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy(); +} + +//===----------------------------------------------------------------------===// +// VectorType Implementation +//===----------------------------------------------------------------------===// + +VectorType::VectorType(Type *ElType, unsigned NumEl) + : SequentialType(VectorTyID, ElType) { + NumElements = NumEl; +} + +VectorType *VectorType::get(Type *elementType, unsigned NumElements) { + Type *ElementType = const_cast<Type*>(elementType); + assert(NumElements > 0 && "#Elements of a VectorType must be greater than 0"); + assert(isValidElementType(ElementType) && + "Elements of a VectorType must be a primitive type"); + + LLVMContextImpl *pImpl = ElementType->getContext().pImpl; + VectorType *&Entry = ElementType->getContext().pImpl + ->VectorTypes[std::make_pair(ElementType, NumElements)]; + + if (Entry == 0) + Entry = new (pImpl->TypeAllocator) VectorType(ElementType, NumElements); + return Entry; +} + +bool VectorType::isValidElementType(Type *ElemTy) { + return ElemTy->isIntegerTy() || ElemTy->isFloatingPointTy() || + ElemTy->isPointerTy(); +} + +//===----------------------------------------------------------------------===// +// PointerType Implementation +//===----------------------------------------------------------------------===// + +PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) { + assert(EltTy && "Can't get a pointer to <null> type!"); + assert(isValidElementType(EltTy) && "Invalid type for pointer element!"); + + LLVMContextImpl *CImpl = EltTy->getContext().pImpl; + + // Since AddressSpace #0 is the common case, we special case it. + PointerType *&Entry = AddressSpace == 0 ? CImpl->PointerTypes[EltTy] + : CImpl->ASPointerTypes[std::make_pair(EltTy, AddressSpace)]; + + if (Entry == 0) + Entry = new (CImpl->TypeAllocator) PointerType(EltTy, AddressSpace); + return Entry; +} + + +PointerType::PointerType(Type *E, unsigned AddrSpace) + : SequentialType(PointerTyID, E) { +#ifndef NDEBUG + const unsigned oldNCT = NumContainedTys; +#endif + setSubclassData(AddrSpace); + // Check for miscompile. PR11652. 
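The three get() factories above are keyed on (element type, count) or (element type, address space), so each call below returns the context-owned singleton. A minimal sketch with hypothetical names:

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// [16 x float], <4 x float>, and float addrspace(3)* respectively.
static void derivedTypeExamples(LLVMContext &Ctx) {
  Type *F32 = Type::getFloatTy(Ctx);
  ArrayType   *A = ArrayType::get(F32, 16);
  VectorType  *V = VectorType::get(F32, 4);
  PointerType *P = PointerType::get(F32, /*AddressSpace=*/3);
  (void)A; (void)V; (void)P;
}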
+ assert(oldNCT == NumContainedTys && "bitfield written out of bounds?"); +} + +PointerType *Type::getPointerTo(unsigned addrs) { + return PointerType::get(this, addrs); +} + +bool PointerType::isValidElementType(Type *ElemTy) { + return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() && + !ElemTy->isMetadataTy(); +} diff --git a/lib/IR/TypeFinder.cpp b/lib/IR/TypeFinder.cpp new file mode 100644 index 0000000000..d5e6203507 --- /dev/null +++ b/lib/IR/TypeFinder.cpp @@ -0,0 +1,148 @@ +//===-- TypeFinder.cpp - Implement the TypeFinder class -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the TypeFinder class for the IR library. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/TypeFinder.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +using namespace llvm; + +void TypeFinder::run(const Module &M, bool onlyNamed) { + OnlyNamed = onlyNamed; + + // Get types from global variables. + for (Module::const_global_iterator I = M.global_begin(), + E = M.global_end(); I != E; ++I) { + incorporateType(I->getType()); + if (I->hasInitializer()) + incorporateValue(I->getInitializer()); + } + + // Get types from aliases. + for (Module::const_alias_iterator I = M.alias_begin(), + E = M.alias_end(); I != E; ++I) { + incorporateType(I->getType()); + if (const Value *Aliasee = I->getAliasee()) + incorporateValue(Aliasee); + } + + // Get types from functions. + SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst; + for (Module::const_iterator FI = M.begin(), E = M.end(); FI != E; ++FI) { + incorporateType(FI->getType()); + + // First incorporate the arguments. + for (Function::const_arg_iterator AI = FI->arg_begin(), + AE = FI->arg_end(); AI != AE; ++AI) + incorporateValue(AI); + + for (Function::const_iterator BB = FI->begin(), E = FI->end(); + BB != E;++BB) + for (BasicBlock::const_iterator II = BB->begin(), + E = BB->end(); II != E; ++II) { + const Instruction &I = *II; + + // Incorporate the type of the instruction. + incorporateType(I.getType()); + + // Incorporate non-instruction operand types. (We are incorporating all + // instructions with this loop.) + for (User::const_op_iterator OI = I.op_begin(), OE = I.op_end(); + OI != OE; ++OI) + if (!isa<Instruction>(OI)) + incorporateValue(*OI); + + // Incorporate types hiding in metadata. + I.getAllMetadataOtherThanDebugLoc(MDForInst); + for (unsigned i = 0, e = MDForInst.size(); i != e; ++i) + incorporateMDNode(MDForInst[i].second); + + MDForInst.clear(); + } + } + + for (Module::const_named_metadata_iterator I = M.named_metadata_begin(), + E = M.named_metadata_end(); I != E; ++I) { + const NamedMDNode *NMD = I; + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) + incorporateMDNode(NMD->getOperand(i)); + } +} + +void TypeFinder::clear() { + VisitedConstants.clear(); + VisitedTypes.clear(); + StructTypes.clear(); +} + +/// incorporateType - This method adds the type to the list of used structures +/// if it's not in there already. +void TypeFinder::incorporateType(Type *Ty) { + // Check to see if we're already visited this type. 
+ if (!VisitedTypes.insert(Ty).second) + return; + + // If this is a structure or opaque type, add a name for the type. + if (StructType *STy = dyn_cast<StructType>(Ty)) + if (!OnlyNamed || STy->hasName()) + StructTypes.push_back(STy); + + // Recursively walk all contained types. + for (Type::subtype_iterator I = Ty->subtype_begin(), + E = Ty->subtype_end(); I != E; ++I) + incorporateType(*I); +} + +/// incorporateValue - This method is used to walk operand lists finding types +/// hiding in constant expressions and other operands that won't be walked in +/// other ways. GlobalValues, basic blocks, instructions, and inst operands are +/// all explicitly enumerated. +void TypeFinder::incorporateValue(const Value *V) { + if (const MDNode *M = dyn_cast<MDNode>(V)) + return incorporateMDNode(M); + + if (!isa<Constant>(V) || isa<GlobalValue>(V)) return; + + // Already visited? + if (!VisitedConstants.insert(V).second) + return; + + // Check this type. + incorporateType(V->getType()); + + // If this is an instruction, we incorporate it separately. + if (isa<Instruction>(V)) + return; + + // Look in operands for types. + const User *U = cast<User>(V); + for (Constant::const_op_iterator I = U->op_begin(), + E = U->op_end(); I != E;++I) + incorporateValue(*I); +} + +/// incorporateMDNode - This method is used to walk the operands of an MDNode to +/// find types hiding within. +void TypeFinder::incorporateMDNode(const MDNode *V) { + // Already visited? + if (!VisitedConstants.insert(V).second) + return; + + // Look in operands for types. + for (unsigned i = 0, e = V->getNumOperands(); i != e; ++i) + if (Value *Op = V->getOperand(i)) + incorporateValue(Op); +} diff --git a/lib/IR/Use.cpp b/lib/IR/Use.cpp new file mode 100644 index 0000000000..1d343e8030 --- /dev/null +++ b/lib/IR/Use.cpp @@ -0,0 +1,145 @@ +//===-- Use.cpp - Implement the Use class ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the algorithm for finding the User of a Use. 
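Before the Use.cpp listing continues, a sketch of how the TypeFinder above is typically driven (hypothetical helper; it relies on TypeFinder's begin()/end() iteration over the collected structs):

#include "llvm/IR/Module.h"
#include "llvm/IR/TypeFinder.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Prints every named struct reachable from M, roughly what the AsmWriter does
// before it emits type definitions.
static void listNamedStructs(const Module &M) {
  TypeFinder Finder;
  Finder.run(M, /*onlyNamed=*/true);
  for (TypeFinder::iterator I = Finder.begin(), E = Finder.end(); I != E; ++I)
    outs() << (*I)->getName() << "\n";
}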
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Value.h" +#include <new> + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Use swap Implementation +//===----------------------------------------------------------------------===// + +void Use::swap(Use &RHS) { + Value *V1(Val); + Value *V2(RHS.Val); + if (V1 != V2) { + if (V1) { + removeFromList(); + } + + if (V2) { + RHS.removeFromList(); + Val = V2; + V2->addUse(*this); + } else { + Val = 0; + } + + if (V1) { + RHS.Val = V1; + V1->addUse(RHS); + } else { + RHS.Val = 0; + } + } +} + +//===----------------------------------------------------------------------===// +// Use getImpliedUser Implementation +//===----------------------------------------------------------------------===// + +const Use *Use::getImpliedUser() const { + const Use *Current = this; + + while (true) { + unsigned Tag = (Current++)->Prev.getInt(); + switch (Tag) { + case zeroDigitTag: + case oneDigitTag: + continue; + + case stopTag: { + ++Current; + ptrdiff_t Offset = 1; + while (true) { + unsigned Tag = Current->Prev.getInt(); + switch (Tag) { + case zeroDigitTag: + case oneDigitTag: + ++Current; + Offset = (Offset << 1) + Tag; + continue; + default: + return Current + Offset; + } + } + } + + case fullStopTag: + return Current; + } + } +} + +//===----------------------------------------------------------------------===// +// Use initTags Implementation +//===----------------------------------------------------------------------===// + +Use *Use::initTags(Use * const Start, Use *Stop) { + ptrdiff_t Done = 0; + while (Done < 20) { + if (Start == Stop--) + return Start; + static const PrevPtrTag tags[20] = { fullStopTag, oneDigitTag, stopTag, + oneDigitTag, oneDigitTag, stopTag, + zeroDigitTag, oneDigitTag, oneDigitTag, + stopTag, zeroDigitTag, oneDigitTag, + zeroDigitTag, oneDigitTag, stopTag, + oneDigitTag, oneDigitTag, oneDigitTag, + oneDigitTag, stopTag + }; + new(Stop) Use(tags[Done++]); + } + + ptrdiff_t Count = Done; + while (Start != Stop) { + --Stop; + if (!Count) { + new(Stop) Use(stopTag); + ++Done; + Count = Done; + } else { + new(Stop) Use(PrevPtrTag(Count & 1)); + Count >>= 1; + ++Done; + } + } + + return Start; +} + +//===----------------------------------------------------------------------===// +// Use zap Implementation +//===----------------------------------------------------------------------===// + +void Use::zap(Use *Start, const Use *Stop, bool del) { + while (Start != Stop) + (--Stop)->~Use(); + if (del) + ::operator delete(Start); +} + +//===----------------------------------------------------------------------===// +// Use getUser Implementation +//===----------------------------------------------------------------------===// + +User *Use::getUser() const { + const Use *End = getImpliedUser(); + const UserRef *ref = reinterpret_cast<const UserRef*>(End); + return ref->getInt() + ? ref->getPointer() + : reinterpret_cast<User*>(const_cast<Use*>(End)); +} + +} // End llvm namespace diff --git a/lib/IR/User.cpp b/lib/IR/User.cpp new file mode 100644 index 0000000000..940682826a --- /dev/null +++ b/lib/IR/User.cpp @@ -0,0 +1,90 @@ +//===-- User.cpp - Implement the User class -------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/User.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Operator.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// User Class +//===----------------------------------------------------------------------===// + +void User::anchor() {} + +// replaceUsesOfWith - Replaces all references to the "From" definition with +// references to the "To" definition. +// +void User::replaceUsesOfWith(Value *From, Value *To) { + if (From == To) return; // Duh what? + + assert((!isa<Constant>(this) || isa<GlobalValue>(this)) && + "Cannot call User::replaceUsesOfWith on a constant!"); + + for (unsigned i = 0, E = getNumOperands(); i != E; ++i) + if (getOperand(i) == From) { // Is This operand is pointing to oldval? + // The side effects of this setOperand call include linking to + // "To", adding "this" to the uses list of To, and + // most importantly, removing "this" from the use list of "From". + setOperand(i, To); // Fix it now... + } +} + +//===----------------------------------------------------------------------===// +// User allocHungoffUses Implementation +//===----------------------------------------------------------------------===// + +Use *User::allocHungoffUses(unsigned N) const { + // Allocate the array of Uses, followed by a pointer (with bottom bit set) to + // the User. + size_t size = N * sizeof(Use) + sizeof(Use::UserRef); + Use *Begin = static_cast<Use*>(::operator new(size)); + Use *End = Begin + N; + (void) new(End) Use::UserRef(const_cast<User*>(this), 1); + return Use::initTags(Begin, End); +} + +//===----------------------------------------------------------------------===// +// User operator new Implementations +//===----------------------------------------------------------------------===// + +void *User::operator new(size_t s, unsigned Us) { + void *Storage = ::operator new(s + sizeof(Use) * Us); + Use *Start = static_cast<Use*>(Storage); + Use *End = Start + Us; + User *Obj = reinterpret_cast<User*>(End); + Obj->OperandList = Start; + Obj->NumOperands = Us; + Use::initTags(Start, End); + return Obj; +} + +//===----------------------------------------------------------------------===// +// User operator delete Implementation +//===----------------------------------------------------------------------===// + +void User::operator delete(void *Usr) { + User *Start = static_cast<User*>(Usr); + Use *Storage = static_cast<Use*>(Usr) - Start->NumOperands; + // If there were hung-off uses, they will have been freed already and + // NumOperands reset to 0, so here we just free the User itself. + ::operator delete(Storage); +} + +//===----------------------------------------------------------------------===// +// Operator Class +//===----------------------------------------------------------------------===// + +Operator::~Operator() { + llvm_unreachable("should never destroy an Operator"); +} + +} // End llvm namespace diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp new file mode 100644 index 0000000000..adc702e05e --- /dev/null +++ b/lib/IR/Value.cpp @@ -0,0 +1,701 @@ +//===-- Value.cpp - Implement the Value class -----------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
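User::replaceUsesOfWith above rewrites only one user's operand slots, unlike Value::replaceAllUsesWith, which retargets every use. A sketch of a block-local rewrite built on it (hypothetical helper; the users are collected first so the use list is not walked while it is being modified):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Redirects uses of OldV to NewV, but only inside BB.
static void replaceUsesInBlock(Value *OldV, Value *NewV, BasicBlock *BB) {
  SmallVector<Instruction*, 8> Users;
  for (Value::use_iterator UI = OldV->use_begin(), E = OldV->use_end();
       UI != E; ++UI)
    if (Instruction *I = dyn_cast<Instruction>(*UI))
      if (I->getParent() == BB)
        Users.push_back(I);
  for (unsigned i = 0, e = Users.size(); i != e; ++i)
    Users[i]->replaceUsesOfWith(OldV, NewV);  // rewrites every matching slot
}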
+// +//===----------------------------------------------------------------------===// +// +// This file implements the Value, ValueHandle, and User classes. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Value.h" +#include "LLVMContextImpl.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/LeakDetector.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/ValueHandle.h" +#include <algorithm> +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Value Class +//===----------------------------------------------------------------------===// + +static inline Type *checkType(Type *Ty) { + assert(Ty && "Value defined with a null type: Error!"); + return const_cast<Type*>(Ty); +} + +Value::Value(Type *ty, unsigned scid) + : SubclassID(scid), HasValueHandle(0), + SubclassOptionalData(0), SubclassData(0), VTy((Type*)checkType(ty)), + UseList(0), Name(0) { + // FIXME: Why isn't this in the subclass gunk?? + // Note, we cannot call isa<CallInst> before the CallInst has been + // constructed. + if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke) + assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) && + "invalid CallInst type!"); + else if (SubclassID != BasicBlockVal && + (SubclassID < ConstantFirstVal || SubclassID > ConstantLastVal)) + assert((VTy->isFirstClassType() || VTy->isVoidTy()) && + "Cannot create non-first-class values except for constants!"); +} + +Value::~Value() { + // Notify all ValueHandles (if present) that this value is going away. + if (HasValueHandle) + ValueHandleBase::ValueIsDeleted(this); + +#ifndef NDEBUG // Only in -g mode... + // Check to make sure that there are no uses of this value that are still + // around when the value is destroyed. If there are, then we have a dangling + // reference and something is wrong. This code is here to print out what is + // still being referenced. The value in question should be printed as + // a <badref> + // + if (!use_empty()) { + dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n"; + for (use_iterator I = use_begin(), E = use_end(); I != E; ++I) + dbgs() << "Use still stuck around after Def is destroyed:" + << **I << "\n"; + } +#endif + assert(use_empty() && "Uses remain when a value is destroyed!"); + + // If this value is named, destroy the name. This should not be in a symtab + // at this point. + if (Name && SubclassID != MDStringVal) + Name->Destroy(); + + // There should be no uses of this object anymore, remove it. + LeakDetector::removeGarbageObject(this); +} + +/// hasNUses - Return true if this Value has exactly N users. +/// +bool Value::hasNUses(unsigned N) const { + const_use_iterator UI = use_begin(), E = use_end(); + + for (; N; --N, ++UI) + if (UI == E) return false; // Too few. + return UI == E; +} + +/// hasNUsesOrMore - Return true if this value has N users or more. This is +/// logically equivalent to getNumUses() >= N. 
+/// +bool Value::hasNUsesOrMore(unsigned N) const { + const_use_iterator UI = use_begin(), E = use_end(); + + for (; N; --N, ++UI) + if (UI == E) return false; // Too few. + + return true; +} + +/// isUsedInBasicBlock - Return true if this value is used in the specified +/// basic block. +bool Value::isUsedInBasicBlock(const BasicBlock *BB) const { + // Start by scanning over the instructions looking for a use before we start + // the expensive use iteration. + unsigned MaxBlockSize = 3; + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { + if (std::find(I->op_begin(), I->op_end(), this) != I->op_end()) + return true; + if (MaxBlockSize-- == 0) // If the block is larger fall back to use_iterator + break; + } + + if (MaxBlockSize != 0) // We scanned the entire block and found no use. + return false; + + for (const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) { + const Instruction *User = dyn_cast<Instruction>(*I); + if (User && User->getParent() == BB) + return true; + } + return false; +} + + +/// getNumUses - This method computes the number of uses of this Value. This +/// is a linear time operation. Use hasOneUse or hasNUses to check for specific +/// values. +unsigned Value::getNumUses() const { + return (unsigned)std::distance(use_begin(), use_end()); +} + +static bool getSymTab(Value *V, ValueSymbolTable *&ST) { + ST = 0; + if (Instruction *I = dyn_cast<Instruction>(V)) { + if (BasicBlock *P = I->getParent()) + if (Function *PP = P->getParent()) + ST = &PP->getValueSymbolTable(); + } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) { + if (Function *P = BB->getParent()) + ST = &P->getValueSymbolTable(); + } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + if (Module *P = GV->getParent()) + ST = &P->getValueSymbolTable(); + } else if (Argument *A = dyn_cast<Argument>(V)) { + if (Function *P = A->getParent()) + ST = &P->getValueSymbolTable(); + } else if (isa<MDString>(V)) + return true; + else { + assert(isa<Constant>(V) && "Unknown value type!"); + return true; // no name is setable for this. + } + return false; +} + +StringRef Value::getName() const { + // Make sure the empty string is still a C string. For historical reasons, + // some clients want to call .data() on the result and expect it to be null + // terminated. + if (!Name) return StringRef("", 0); + return Name->getKey(); +} + +void Value::setName(const Twine &NewName) { + assert(SubclassID != MDStringVal && + "Cannot set the name of MDString with this method!"); + + // Fast path for common IRBuilder case of setName("") when there is no name. + if (NewName.isTriviallyEmpty() && !hasName()) + return; + + SmallString<256> NameData; + StringRef NameRef = NewName.toStringRef(NameData); + + // Name isn't changing? + if (getName() == NameRef) + return; + + assert(!getType()->isVoidTy() && "Cannot assign a name to void values!"); + + // Get the symbol table to update for this object. + ValueSymbolTable *ST; + if (getSymTab(this, ST)) + return; // Cannot set a name on this value (e.g. constant). + + if (Function *F = dyn_cast<Function>(this)) + getContext().pImpl->IntrinsicIDCache.erase(F); + + if (!ST) { // No symbol table to update? Just do the change. + if (NameRef.empty()) { + // Free the name for this value. + Name->Destroy(); + Name = 0; + return; + } + + if (Name) + Name->Destroy(); + + // NOTE: Could optimize for the case the name is shrinking to not deallocate + // then reallocated. + + // Create the new name. 
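The counting helpers above deliberately walk at most N+1 uses, so they are cheaper than getNumUses() when only a threshold matters. A tiny sketch (hypothetical helper):

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Tags values that are used more than once; on a name clash the symbol-table
// logic below falls back to "shared1", "shared2", and so on.
static void markShared(Value *V) {
  if (V->hasNUsesOrMore(2))        // cheaper than V->getNumUses() >= 2
    V->setName("shared");
}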
+ Name = ValueName::Create(NameRef.begin(), NameRef.end()); + Name->setValue(this); + return; + } + + // NOTE: Could optimize for the case the name is shrinking to not deallocate + // then reallocated. + if (hasName()) { + // Remove old name. + ST->removeValueName(Name); + Name->Destroy(); + Name = 0; + + if (NameRef.empty()) + return; + } + + // Name is changing to something new. + Name = ST->createValueName(NameRef, this); +} + + +/// takeName - transfer the name from V to this value, setting V's name to +/// empty. It is an error to call V->takeName(V). +void Value::takeName(Value *V) { + assert(SubclassID != MDStringVal && "Cannot take the name of an MDString!"); + + ValueSymbolTable *ST = 0; + // If this value has a name, drop it. + if (hasName()) { + // Get the symtab this is in. + if (getSymTab(this, ST)) { + // We can't set a name on this value, but we need to clear V's name if + // it has one. + if (V->hasName()) V->setName(""); + return; // Cannot set a name on this value (e.g. constant). + } + + // Remove old name. + if (ST) + ST->removeValueName(Name); + Name->Destroy(); + Name = 0; + } + + // Now we know that this has no name. + + // If V has no name either, we're done. + if (!V->hasName()) return; + + // Get this's symtab if we didn't before. + if (!ST) { + if (getSymTab(this, ST)) { + // Clear V's name. + V->setName(""); + return; // Cannot set a name on this value (e.g. constant). + } + } + + // Get V's ST, this should always succed, because V has a name. + ValueSymbolTable *VST; + bool Failure = getSymTab(V, VST); + assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure; + + // If these values are both in the same symtab, we can do this very fast. + // This works even if both values have no symtab yet. + if (ST == VST) { + // Take the name! + Name = V->Name; + V->Name = 0; + Name->setValue(this); + return; + } + + // Otherwise, things are slightly more complex. Remove V's name from VST and + // then reinsert it into ST. + + if (VST) + VST->removeValueName(V->Name); + Name = V->Name; + V->Name = 0; + Name->setValue(this); + + if (ST) + ST->reinsertValue(this); +} + + +void Value::replaceAllUsesWith(Value *New) { + assert(New && "Value::replaceAllUsesWith(<null>) is invalid!"); + assert(New != this && "this->replaceAllUsesWith(this) is NOT valid!"); + assert(New->getType() == getType() && + "replaceAllUses of value with new value of different type!"); + + // Notify all ValueHandles (if present) that this value is going away. + if (HasValueHandle) + ValueHandleBase::ValueIsRAUWd(this, New); + + while (!use_empty()) { + Use &U = *UseList; + // Must handle Constants specially, we cannot call replaceUsesOfWith on a + // constant because they are uniqued. + if (Constant *C = dyn_cast<Constant>(U.getUser())) { + if (!isa<GlobalValue>(C)) { + C->replaceUsesOfWithOnConstant(this, New, &U); + continue; + } + } + + U.set(New); + } + + if (BasicBlock *BB = dyn_cast<BasicBlock>(this)) + BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New)); +} + +namespace { +// Various metrics for how much to strip off of pointers. +enum PointerStripKind { + PSK_ZeroIndices, + PSK_InBoundsConstantIndices, + PSK_InBounds +}; + +template <PointerStripKind StripKind> +static Value *stripPointerCastsAndOffsets(Value *V) { + if (!V->getType()->isPointerTy()) + return V; + + // Even though we don't look through PHI nodes, we could be called on an + // instruction in an unreachable block, which may be on a cycle. 
+ SmallPtrSet<Value *, 4> Visited; + + Visited.insert(V); + do { + if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { + switch (StripKind) { + case PSK_ZeroIndices: + if (!GEP->hasAllZeroIndices()) + return V; + break; + case PSK_InBoundsConstantIndices: + if (!GEP->hasAllConstantIndices()) + return V; + // fallthrough + case PSK_InBounds: + if (!GEP->isInBounds()) + return V; + break; + } + V = GEP->getPointerOperand(); + } else if (Operator::getOpcode(V) == Instruction::BitCast) { + V = cast<Operator>(V)->getOperand(0); + } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { + if (GA->mayBeOverridden()) + return V; + V = GA->getAliasee(); + } else { + return V; + } + assert(V->getType()->isPointerTy() && "Unexpected operand type!"); + } while (Visited.insert(V)); + + return V; +} +} // namespace + +Value *Value::stripPointerCasts() { + return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this); +} + +Value *Value::stripInBoundsConstantOffsets() { + return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this); +} + +Value *Value::stripInBoundsOffsets() { + return stripPointerCastsAndOffsets<PSK_InBounds>(this); +} + +/// isDereferenceablePointer - Test if this value is always a pointer to +/// allocated and suitably aligned memory for a simple load or store. +static bool isDereferenceablePointer(const Value *V, + SmallPtrSet<const Value *, 32> &Visited) { + // Note that it is not safe to speculate into a malloc'd region because + // malloc may return null. + // It's also not always safe to follow a bitcast, for example: + // bitcast i8* (alloca i8) to i32* + // would result in a 4-byte load from a 1-byte alloca. Some cases could + // be handled using DataLayout to check sizes and alignments though. + + // These are obviously ok. + if (isa<AllocaInst>(V)) return true; + + // Global variables which can't collapse to null are ok. + if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) + return !GV->hasExternalWeakLinkage(); + + // byval arguments are ok. + if (const Argument *A = dyn_cast<Argument>(V)) + return A->hasByValAttr(); + + // For GEPs, determine if the indexing lands within the allocated object. + if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { + // Conservatively require that the base pointer be fully dereferenceable. + if (!Visited.insert(GEP->getOperand(0))) + return false; + if (!isDereferenceablePointer(GEP->getOperand(0), Visited)) + return false; + // Check the indices. + gep_type_iterator GTI = gep_type_begin(GEP); + for (User::const_op_iterator I = GEP->op_begin()+1, + E = GEP->op_end(); I != E; ++I) { + Value *Index = *I; + Type *Ty = *GTI++; + // Struct indices can't be out of bounds. + if (isa<StructType>(Ty)) + continue; + ConstantInt *CI = dyn_cast<ConstantInt>(Index); + if (!CI) + return false; + // Zero is always ok. + if (CI->isZero()) + continue; + // Check to see that it's within the bounds of an array. + ArrayType *ATy = dyn_cast<ArrayType>(Ty); + if (!ATy) + return false; + if (CI->getValue().getActiveBits() > 64) + return false; + if (CI->getZExtValue() >= ATy->getNumElements()) + return false; + } + // Indices check out; this is dereferenceable. + return true; + } + + // If we don't know, assume the worst. + return false; +} + +/// isDereferenceablePointer - Test if this value is always a pointer to +/// allocated and suitably aligned memory for a simple load or store. 
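stripPointerCasts and the dereferenceability check above are commonly chained by clients; a minimal sketch (hypothetical helper):

#include "llvm/IR/Value.h"
using namespace llvm;

// Peels no-op bitcasts, zero-index GEPs, and non-overridable aliases off Ptr,
// then asks whether a simple load from the result can be speculated.
static bool canSpeculateLoadFrom(Value *Ptr) {
  Value *Base = Ptr->stripPointerCasts();
  return Base->isDereferenceablePointer();
}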
+bool Value::isDereferenceablePointer() const { + SmallPtrSet<const Value *, 32> Visited; + return ::isDereferenceablePointer(this, Visited); +} + +/// DoPHITranslation - If this value is a PHI node with CurBB as its parent, +/// return the value in the PHI node corresponding to PredBB. If not, return +/// ourself. This is useful if you want to know the value something has in a +/// predecessor block. +Value *Value::DoPHITranslation(const BasicBlock *CurBB, + const BasicBlock *PredBB) { + PHINode *PN = dyn_cast<PHINode>(this); + if (PN && PN->getParent() == CurBB) + return PN->getIncomingValueForBlock(PredBB); + return this; +} + +LLVMContext &Value::getContext() const { return VTy->getContext(); } + +//===----------------------------------------------------------------------===// +// ValueHandleBase Class +//===----------------------------------------------------------------------===// + +/// AddToExistingUseList - Add this ValueHandle to the use list for VP, where +/// List is known to point into the existing use list. +void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) { + assert(List && "Handle list is null?"); + + // Splice ourselves into the list. + Next = *List; + *List = this; + setPrevPtr(List); + if (Next) { + Next->setPrevPtr(&Next); + assert(VP.getPointer() == Next->VP.getPointer() && "Added to wrong list?"); + } +} + +void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) { + assert(List && "Must insert after existing node"); + + Next = List->Next; + setPrevPtr(&List->Next); + List->Next = this; + if (Next) + Next->setPrevPtr(&Next); +} + +/// AddToUseList - Add this ValueHandle to the use list for VP. +void ValueHandleBase::AddToUseList() { + assert(VP.getPointer() && "Null pointer doesn't have a use list!"); + + LLVMContextImpl *pImpl = VP.getPointer()->getContext().pImpl; + + if (VP.getPointer()->HasValueHandle) { + // If this value already has a ValueHandle, then it must be in the + // ValueHandles map already. + ValueHandleBase *&Entry = pImpl->ValueHandles[VP.getPointer()]; + assert(Entry != 0 && "Value doesn't have any handles?"); + AddToExistingUseList(&Entry); + return; + } + + // Ok, it doesn't have any handles yet, so we must insert it into the + // DenseMap. However, doing this insertion could cause the DenseMap to + // reallocate itself, which would invalidate all of the PrevP pointers that + // point into the old table. Handle this by checking for reallocation and + // updating the stale pointers only if needed. + DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles; + const void *OldBucketPtr = Handles.getPointerIntoBucketsArray(); + + ValueHandleBase *&Entry = Handles[VP.getPointer()]; + assert(Entry == 0 && "Value really did already have handles?"); + AddToExistingUseList(&Entry); + VP.getPointer()->HasValueHandle = true; + + // If reallocation didn't happen or if this was the first insertion, don't + // walk the table. + if (Handles.isPointerIntoBucketsArray(OldBucketPtr) || + Handles.size() == 1) { + return; + } + + // Okay, reallocation did happen. Fix the Prev Pointers. + for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(), + E = Handles.end(); I != E; ++I) { + assert(I->second && I->first == I->second->VP.getPointer() && + "List invariant broken!"); + I->second->setPrevPtr(&I->second); + } +} + +/// RemoveFromUseList - Remove this ValueHandle from its current use list. 
+void ValueHandleBase::RemoveFromUseList() { + assert(VP.getPointer() && VP.getPointer()->HasValueHandle && + "Pointer doesn't have a use list!"); + + // Unlink this from its use list. + ValueHandleBase **PrevPtr = getPrevPtr(); + assert(*PrevPtr == this && "List invariant broken"); + + *PrevPtr = Next; + if (Next) { + assert(Next->getPrevPtr() == &Next && "List invariant broken"); + Next->setPrevPtr(PrevPtr); + return; + } + + // If the Next pointer was null, then it is possible that this was the last + // ValueHandle watching VP. If so, delete its entry from the ValueHandles + // map. + LLVMContextImpl *pImpl = VP.getPointer()->getContext().pImpl; + DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles; + if (Handles.isPointerIntoBucketsArray(PrevPtr)) { + Handles.erase(VP.getPointer()); + VP.getPointer()->HasValueHandle = false; + } +} + + +void ValueHandleBase::ValueIsDeleted(Value *V) { + assert(V->HasValueHandle && "Should only be called if ValueHandles present"); + + // Get the linked list base, which is guaranteed to exist since the + // HasValueHandle flag is set. + LLVMContextImpl *pImpl = V->getContext().pImpl; + ValueHandleBase *Entry = pImpl->ValueHandles[V]; + assert(Entry && "Value bit set but no entries exist"); + + // We use a local ValueHandleBase as an iterator so that ValueHandles can add + // and remove themselves from the list without breaking our iteration. This + // is not really an AssertingVH; we just have to give ValueHandleBase a kind. + // Note that we deliberately do not support the case when dropping a value + // handle results in a new value handle being permanently added to the list + // (as might occur in theory for CallbackVH's): the new value handle will not + // be processed and the checking code will mete out righteous punishment if + // the handle is still present once we have finished processing all the other + // value handles (it is fine to momentarily add then remove a value handle). + for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) { + Iterator.RemoveFromUseList(); + Iterator.AddToExistingUseListAfter(Entry); + assert(Entry->Next == &Iterator && "Loop invariant broken."); + + switch (Entry->getKind()) { + case Assert: + break; + case Tracking: + // Mark that this value has been deleted by setting it to an invalid Value + // pointer. + Entry->operator=(DenseMapInfo<Value *>::getTombstoneKey()); + break; + case Weak: + // Weak just goes to null, which will unlink it from the list. + Entry->operator=(0); + break; + case Callback: + // Forward to the subclass's implementation. + static_cast<CallbackVH*>(Entry)->deleted(); + break; + } + } + + // All callbacks, weak references, and assertingVHs should be dropped by now. + if (V->HasValueHandle) { +#ifndef NDEBUG // Only in +Asserts mode... + dbgs() << "While deleting: " << *V->getType() << " %" << V->getName() + << "\n"; + if (pImpl->ValueHandles[V]->getKind() == Assert) + llvm_unreachable("An asserting value handle still pointed to this" + " value!"); + +#endif + llvm_unreachable("All references to V were not removed?"); + } +} + + +void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) { + assert(Old->HasValueHandle && "Should only be called if ValueHandles present"); + assert(Old != New && "Changing value into itself!"); + + // Get the linked list base, which is guaranteed to exist since the + // HasValueHandle flag is set.
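ValueIsDeleted above is what turns a WeakVH into null when its value dies; a small sketch of the observable behaviour (hypothetical helper, using the WeakVH class from llvm/Support/ValueHandle.h):

#include "llvm/IR/Instruction.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;

// Erasing the instruction fires ValueHandleBase::ValueIsDeleted, which nulls
// out the weak handle instead of leaving a dangling pointer.
static bool eraseIfDead(Instruction *I) {
  WeakVH Handle(I);
  if (I->use_empty())
    I->eraseFromParent();
  return Handle == 0;              // true iff the instruction was deleted
}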
+ LLVMContextImpl *pImpl = Old->getContext().pImpl; + ValueHandleBase *Entry = pImpl->ValueHandles[Old]; + + assert(Entry && "Value bit set but no entries exist"); + + // We use a local ValueHandleBase as an iterator so that + // ValueHandles can add and remove themselves from the list without + // breaking our iteration. This is not really an AssertingVH; we + // just have to give ValueHandleBase some kind. + for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) { + Iterator.RemoveFromUseList(); + Iterator.AddToExistingUseListAfter(Entry); + assert(Entry->Next == &Iterator && "Loop invariant broken."); + + switch (Entry->getKind()) { + case Assert: + // Asserting handle does not follow RAUW implicitly. + break; + case Tracking: + // Tracking goes to new value like a WeakVH. Note that this may make it + // something incompatible with its templated type. We don't want to have a + // virtual (or inline) interface to handle this though, so instead we make + // the TrackingVH accessors guarantee that a client never sees this value. + + // FALLTHROUGH + case Weak: + // Weak goes to the new value, which will unlink it from Old's list. + Entry->operator=(New); + break; + case Callback: + // Forward to the subclass's implementation. + static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New); + break; + } + } + +#ifndef NDEBUG + // If any new tracking or weak value handles were added while processing the + // list, then complain about it now. + if (Old->HasValueHandle) + for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next) + switch (Entry->getKind()) { + case Tracking: + case Weak: + dbgs() << "After RAUW from " << *Old->getType() << " %" + << Old->getName() << " to " << *New->getType() << " %" + << New->getName() << "\n"; + llvm_unreachable("A tracking or weak value handle still pointed to the" + " old value!\n"); + default: + break; + } +#endif +} + +// Default implementation for CallbackVH. +void CallbackVH::allUsesReplacedWith(Value *) {} + +void CallbackVH::deleted() { + setValPtr(NULL); +} diff --git a/lib/IR/ValueSymbolTable.cpp b/lib/IR/ValueSymbolTable.cpp new file mode 100644 index 0000000000..fffacb3777 --- /dev/null +++ b/lib/IR/ValueSymbolTable.cpp @@ -0,0 +1,117 @@ +//===-- ValueSymbolTable.cpp - Implement the ValueSymbolTable class -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ValueSymbolTable class for the IR library. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "valuesymtab" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +// Class destructor +ValueSymbolTable::~ValueSymbolTable() { +#ifndef NDEBUG // Only do this in -g mode... + for (iterator VI = vmap.begin(), VE = vmap.end(); VI != VE; ++VI) + dbgs() << "Value still in symbol table! Type = '" + << *VI->getValue()->getType() << "' Name = '" + << VI->getKeyData() << "'\n"; + assert(vmap.empty() && "Values remain in symbol table!"); +#endif +} + +// Insert a value into the symbol table with the specified name... 
+// +void ValueSymbolTable::reinsertValue(Value* V) { + assert(V->hasName() && "Can't insert nameless Value into symbol table"); + + // Try inserting the name, assuming it won't conflict. + if (vmap.insert(V->Name)) { + //DEBUG(dbgs() << " Inserted value: " << V->Name << ": " << *V << "\n"); + return; + } + + // Otherwise, there is a naming conflict. Rename this value. + SmallString<256> UniqueName(V->getName().begin(), V->getName().end()); + + // The name is too already used, just free it so we can allocate a new name. + V->Name->Destroy(); + + unsigned BaseSize = UniqueName.size(); + while (1) { + // Trim any suffix off and append the next number. + UniqueName.resize(BaseSize); + raw_svector_ostream(UniqueName) << ++LastUnique; + + // Try insert the vmap entry with this suffix. + ValueName &NewName = vmap.GetOrCreateValue(UniqueName); + if (NewName.getValue() == 0) { + // Newly inserted name. Success! + NewName.setValue(V); + V->Name = &NewName; + //DEBUG(dbgs() << " Inserted value: " << UniqueName << ": " << *V << "\n"); + return; + } + } +} + +void ValueSymbolTable::removeValueName(ValueName *V) { + //DEBUG(dbgs() << " Removing Value: " << V->getKeyData() << "\n"); + // Remove the value from the symbol table. + vmap.remove(V); +} + +/// createValueName - This method attempts to create a value name and insert +/// it into the symbol table with the specified name. If it conflicts, it +/// auto-renames the name and returns that instead. +ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) { + // In the common case, the name is not already in the symbol table. + ValueName &Entry = vmap.GetOrCreateValue(Name); + if (Entry.getValue() == 0) { + Entry.setValue(V); + //DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": " + // << *V << "\n"); + return &Entry; + } + + // Otherwise, there is a naming conflict. Rename this value. + SmallString<256> UniqueName(Name.begin(), Name.end()); + + while (1) { + // Trim any suffix off and append the next number. + UniqueName.resize(Name.size()); + raw_svector_ostream(UniqueName) << ++LastUnique; + + // Try insert the vmap entry with this suffix. + ValueName &NewName = vmap.GetOrCreateValue(UniqueName); + if (NewName.getValue() == 0) { + // Newly inserted name. Success! + NewName.setValue(V); + //DEBUG(dbgs() << " Inserted value: " << UniqueName << ": " << *V << "\n"); + return &NewName; + } + } +} + + +// dump - print out the symbol table +// +void ValueSymbolTable::dump() const { + //DEBUG(dbgs() << "ValueSymbolTable:\n"); + for (const_iterator I = begin(), E = end(); I != E; ++I) { + //DEBUG(dbgs() << " '" << I->getKeyData() << "' = "); + I->getValue()->dump(); + //DEBUG(dbgs() << "\n"); + } +} diff --git a/lib/IR/ValueTypes.cpp b/lib/IR/ValueTypes.cpp new file mode 100644 index 0000000000..ba04d60c24 --- /dev/null +++ b/lib/IR/ValueTypes.cpp @@ -0,0 +1,277 @@ +//===----------- ValueTypes.cpp - Implementation of EVT methods -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements methods in the CodeGen/ValueTypes.h header. 
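Before the ValueTypes.cpp listing continues, a sketch of how clients reach the renaming machinery above: names go through the enclosing function's or module's ValueSymbolTable, which is also what lookup by name consults (hypothetical helper):

#include "llvm/IR/Function.h"
#include "llvm/IR/ValueSymbolTable.h"
using namespace llvm;

// Finds a local value by name; if two values asked for "x", the second was
// renamed to "x1" by createValueName/reinsertValue and must be looked up as such.
static Value *lookupLocal(Function &F, StringRef Name) {
  return F.getValueSymbolTable().lookup(Name);
}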
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/ErrorHandling.h" +using namespace llvm; + +EVT EVT::changeExtendedVectorElementTypeToInteger() const { + LLVMContext &Context = LLVMTy->getContext(); + EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits()); + return getVectorVT(Context, IntTy, getVectorNumElements()); +} + +EVT EVT::getExtendedIntegerVT(LLVMContext &Context, unsigned BitWidth) { + EVT VT; + VT.LLVMTy = IntegerType::get(Context, BitWidth); + assert(VT.isExtended() && "Type is not extended!"); + return VT; +} + +EVT EVT::getExtendedVectorVT(LLVMContext &Context, EVT VT, + unsigned NumElements) { + EVT ResultVT; + ResultVT.LLVMTy = VectorType::get(VT.getTypeForEVT(Context), NumElements); + assert(ResultVT.isExtended() && "Type is not extended!"); + return ResultVT; +} + +bool EVT::isExtendedFloatingPoint() const { + assert(isExtended() && "Type is not extended!"); + return LLVMTy->isFPOrFPVectorTy(); +} + +bool EVT::isExtendedInteger() const { + assert(isExtended() && "Type is not extended!"); + return LLVMTy->isIntOrIntVectorTy(); +} + +bool EVT::isExtendedVector() const { + assert(isExtended() && "Type is not extended!"); + return LLVMTy->isVectorTy(); +} + +bool EVT::isExtended16BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 16; +} + +bool EVT::isExtended32BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 32; +} + +bool EVT::isExtended64BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 64; +} + +bool EVT::isExtended128BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 128; +} + +bool EVT::isExtended256BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 256; +} + +bool EVT::isExtended512BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 512; +} + +bool EVT::isExtended1024BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 1024; +} + +EVT EVT::getExtendedVectorElementType() const { + assert(isExtended() && "Type is not extended!"); + return EVT::getEVT(cast<VectorType>(LLVMTy)->getElementType()); +} + +unsigned EVT::getExtendedVectorNumElements() const { + assert(isExtended() && "Type is not extended!"); + return cast<VectorType>(LLVMTy)->getNumElements(); +} + +unsigned EVT::getExtendedSizeInBits() const { + assert(isExtended() && "Type is not extended!"); + if (IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy)) + return ITy->getBitWidth(); + if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy)) + return VTy->getBitWidth(); + llvm_unreachable("Unrecognized extended type!"); +} + +/// getEVTString - This function returns value type as a string, e.g. "i32". 
+std::string EVT::getEVTString() const { + switch (V.SimpleTy) { + default: + if (isVector()) + return "v" + utostr(getVectorNumElements()) + + getVectorElementType().getEVTString(); + if (isInteger()) + return "i" + utostr(getSizeInBits()); + llvm_unreachable("Invalid EVT!"); + case MVT::i1: return "i1"; + case MVT::i8: return "i8"; + case MVT::i16: return "i16"; + case MVT::i32: return "i32"; + case MVT::i64: return "i64"; + case MVT::i128: return "i128"; + case MVT::f16: return "f16"; + case MVT::f32: return "f32"; + case MVT::f64: return "f64"; + case MVT::f80: return "f80"; + case MVT::f128: return "f128"; + case MVT::ppcf128: return "ppcf128"; + case MVT::isVoid: return "isVoid"; + case MVT::Other: return "ch"; + case MVT::Glue: return "glue"; + case MVT::x86mmx: return "x86mmx"; + case MVT::v2i1: return "v2i1"; + case MVT::v4i1: return "v4i1"; + case MVT::v8i1: return "v8i1"; + case MVT::v16i1: return "v16i1"; + case MVT::v32i1: return "v32i1"; + case MVT::v64i1: return "v64i1"; + case MVT::v2i8: return "v2i8"; + case MVT::v4i8: return "v4i8"; + case MVT::v8i8: return "v8i8"; + case MVT::v16i8: return "v16i8"; + case MVT::v32i8: return "v32i8"; + case MVT::v64i8: return "v64i8"; + case MVT::v1i16: return "v1i16"; + case MVT::v2i16: return "v2i16"; + case MVT::v4i16: return "v4i16"; + case MVT::v8i16: return "v8i16"; + case MVT::v16i16: return "v16i16"; + case MVT::v32i16: return "v32i16"; + case MVT::v1i32: return "v1i32"; + case MVT::v2i32: return "v2i32"; + case MVT::v4i32: return "v4i32"; + case MVT::v8i32: return "v8i32"; + case MVT::v16i32: return "v16i32"; + case MVT::v1i64: return "v1i64"; + case MVT::v2i64: return "v2i64"; + case MVT::v4i64: return "v4i64"; + case MVT::v8i64: return "v8i64"; + case MVT::v16i64: return "v16i64"; + case MVT::v2f32: return "v2f32"; + case MVT::v2f16: return "v2f16"; + case MVT::v4f32: return "v4f32"; + case MVT::v8f32: return "v8f32"; + case MVT::v16f32: return "v16f32"; + case MVT::v2f64: return "v2f64"; + case MVT::v4f64: return "v4f64"; + case MVT::v8f64: return "v8f64"; + case MVT::Metadata:return "Metadata"; + case MVT::Untyped: return "Untyped"; + } +} + +/// getTypeForEVT - This method returns an LLVM type corresponding to the +/// specified EVT. For integer types, this returns an unsigned type. Note +/// that this will abort for types that cannot be represented. 
+Type *EVT::getTypeForEVT(LLVMContext &Context) const { + switch (V.SimpleTy) { + default: + assert(isExtended() && "Type is not extended!"); + return LLVMTy; + case MVT::isVoid: return Type::getVoidTy(Context); + case MVT::i1: return Type::getInt1Ty(Context); + case MVT::i8: return Type::getInt8Ty(Context); + case MVT::i16: return Type::getInt16Ty(Context); + case MVT::i32: return Type::getInt32Ty(Context); + case MVT::i64: return Type::getInt64Ty(Context); + case MVT::i128: return IntegerType::get(Context, 128); + case MVT::f16: return Type::getHalfTy(Context); + case MVT::f32: return Type::getFloatTy(Context); + case MVT::f64: return Type::getDoubleTy(Context); + case MVT::f80: return Type::getX86_FP80Ty(Context); + case MVT::f128: return Type::getFP128Ty(Context); + case MVT::ppcf128: return Type::getPPC_FP128Ty(Context); + case MVT::x86mmx: return Type::getX86_MMXTy(Context); + case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2); + case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4); + case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8); + case MVT::v16i1: return VectorType::get(Type::getInt1Ty(Context), 16); + case MVT::v32i1: return VectorType::get(Type::getInt1Ty(Context), 32); + case MVT::v64i1: return VectorType::get(Type::getInt1Ty(Context), 64); + case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2); + case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4); + case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8); + case MVT::v16i8: return VectorType::get(Type::getInt8Ty(Context), 16); + case MVT::v32i8: return VectorType::get(Type::getInt8Ty(Context), 32); + case MVT::v64i8: return VectorType::get(Type::getInt8Ty(Context), 64); + case MVT::v1i16: return VectorType::get(Type::getInt16Ty(Context), 1); + case MVT::v2i16: return VectorType::get(Type::getInt16Ty(Context), 2); + case MVT::v4i16: return VectorType::get(Type::getInt16Ty(Context), 4); + case MVT::v8i16: return VectorType::get(Type::getInt16Ty(Context), 8); + case MVT::v16i16: return VectorType::get(Type::getInt16Ty(Context), 16); + case MVT::v32i16: return VectorType::get(Type::getInt16Ty(Context), 32); + case MVT::v1i32: return VectorType::get(Type::getInt32Ty(Context), 1); + case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2); + case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4); + case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8); + case MVT::v16i32: return VectorType::get(Type::getInt32Ty(Context), 16); + case MVT::v1i64: return VectorType::get(Type::getInt64Ty(Context), 1); + case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2); + case MVT::v4i64: return VectorType::get(Type::getInt64Ty(Context), 4); + case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8); + case MVT::v16i64: return VectorType::get(Type::getInt64Ty(Context), 16); + case MVT::v2f16: return VectorType::get(Type::getHalfTy(Context), 2); + case MVT::v2f32: return VectorType::get(Type::getFloatTy(Context), 2); + case MVT::v4f32: return VectorType::get(Type::getFloatTy(Context), 4); + case MVT::v8f32: return VectorType::get(Type::getFloatTy(Context), 8); + case MVT::v16f32: return VectorType::get(Type::getFloatTy(Context), 16); + case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2); + case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4); + case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8); + case MVT::Metadata: return 
Type::getMetadataTy(Context); + } +} + +/// Return the value type corresponding to the specified type. This returns all +/// pointers as MVT::iPTR. If HandleUnknown is true, unknown types are returned +/// as Other, otherwise they are invalid. +MVT MVT::getVT(Type *Ty, bool HandleUnknown){ + switch (Ty->getTypeID()) { + default: + if (HandleUnknown) return MVT(MVT::Other); + llvm_unreachable("Unknown type!"); + case Type::VoidTyID: + return MVT::isVoid; + case Type::IntegerTyID: + return getIntegerVT(cast<IntegerType>(Ty)->getBitWidth()); + case Type::HalfTyID: return MVT(MVT::f16); + case Type::FloatTyID: return MVT(MVT::f32); + case Type::DoubleTyID: return MVT(MVT::f64); + case Type::X86_FP80TyID: return MVT(MVT::f80); + case Type::X86_MMXTyID: return MVT(MVT::x86mmx); + case Type::FP128TyID: return MVT(MVT::f128); + case Type::PPC_FP128TyID: return MVT(MVT::ppcf128); + case Type::PointerTyID: return MVT(MVT::iPTR); + case Type::VectorTyID: { + VectorType *VTy = cast<VectorType>(Ty); + return getVectorVT( + getVT(VTy->getElementType(), false), VTy->getNumElements()); + } + } +} + +/// getEVT - Return the value type corresponding to the specified type. This +/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types +/// are returned as Other, otherwise they are invalid. +EVT EVT::getEVT(Type *Ty, bool HandleUnknown){ + switch (Ty->getTypeID()) { + default: + return MVT::getVT(Ty, HandleUnknown); + case Type::IntegerTyID: + return getIntegerVT(Ty->getContext(), cast<IntegerType>(Ty)->getBitWidth()); + case Type::VectorTyID: { + VectorType *VTy = cast<VectorType>(Ty); + return getVectorVT(Ty->getContext(), getEVT(VTy->getElementType(), false), + VTy->getNumElements()); + } + } +} diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp new file mode 100644 index 0000000000..8bfbb322cf --- /dev/null +++ b/lib/IR/Verifier.cpp @@ -0,0 +1,2144 @@ +//===-- Verifier.cpp - Implement the Module Verifier -----------------------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the function verifier interface, that can be used for some +// sanity checking of input to the system. +// +// Note that this does not provide full `Java style' security and verifications, +// instead it just tries to ensure that code is well-formed. +// +// * Both of a binary operator's parameters are of the same type +// * Verify that the indices of mem access instructions match other operands +// * Verify that arithmetic and other things are only performed on first-class +// types. Verify that shifts & logicals only happen on integrals f.e. +// * All of the constants in a switch statement are of the correct type +// * The code is in valid SSA form +// * It should be illegal to put a label into any other type (like a structure) +// or to return one. [except constant arrays!] +// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad +// * PHI nodes must have an entry for each predecessor, with no extras. 
+// * PHI nodes must be the first thing in a basic block, all grouped together +// * PHI nodes must have at least one entry +// * All basic blocks should only end with terminator insts, not contain them +// * The entry node to a function must not have predecessors +// * All Instructions must be embedded into a basic block +// * Functions cannot take a void-typed parameter +// * Verify that a function's argument list agrees with it's declared type. +// * It is illegal to specify a name for a void value. +// * It is illegal to have a internal global value with no initializer +// * It is illegal to have a ret instruction that returns a value that does not +// agree with the function return value type. +// * Function call argument types match the function prototype +// * A landing pad is defined by a landingpad instruction, and can be jumped to +// only by the unwind edge of an invoke instruction. +// * A landingpad instruction must be the first non-PHI instruction in the +// block. +// * All landingpad instructions must use the same personality function with +// the same function. +// * All other things that are tested by asserts spread about the code... +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/Verifier.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/InstVisitor.h" +#include "llvm/Pass.h" +#include "llvm/PassManager.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/ConstantRange.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdarg> +using namespace llvm; + +namespace { // Anonymous namespace for class + struct PreVerifier : public FunctionPass { + static char ID; // Pass ID, replacement for typeid + + PreVerifier() : FunctionPass(ID) { + initializePreVerifierPass(*PassRegistry::getPassRegistry()); + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + + // Check that the prerequisites for successful DominatorTree construction + // are satisfied. + bool runOnFunction(Function &F) { + bool Broken = false; + + for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) { + if (I->empty() || !I->back().isTerminator()) { + dbgs() << "Basic Block in function '" << F.getName() + << "' does not have terminator!\n"; + WriteAsOperand(dbgs(), I, true); + dbgs() << "\n"; + Broken = true; + } + } + + if (Broken) + report_fatal_error("Broken module, no Basic Block terminator!"); + + return false; + } + }; +} + +char PreVerifier::ID = 0; +INITIALIZE_PASS(PreVerifier, "preverify", "Preliminary module verification", + false, false) +static char &PreVerifyID = PreVerifier::ID; + +namespace { + struct Verifier : public FunctionPass, public InstVisitor<Verifier> { + static char ID; // Pass ID, replacement for typeid + bool Broken; // Is this module found to be broken? + VerifierFailureAction action; + // What to do if verification fails. 
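The action member above corresponds to the VerifierFailureAction a client passes through the public entry points in llvm/Analysis/Verifier.h (included at the top of this file). A minimal usage sketch, not part of this commit and with our own function name, assuming that header's verifyModule signature of this era:

  #include "llvm/Analysis/Verifier.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"
  #include <string>

  static bool checkIR(llvm::Module &M) {
    std::string Err;
    // ReturnStatusAction reports failure to the caller instead of aborting.
    bool Broken = llvm::verifyModule(M, llvm::ReturnStatusAction, &Err);
    if (Broken)
      llvm::errs() << Err;
    return !Broken;
  }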
+ Module *Mod; // Module we are verifying right now + LLVMContext *Context; // Context within which we are verifying + DominatorTree *DT; // Dominator Tree, caution can be null! + + std::string Messages; + raw_string_ostream MessagesStr; + + /// InstInThisBlock - when verifying a basic block, keep track of all of the + /// instructions we have seen so far. This allows us to do efficient + /// dominance checks for the case when an instruction has an operand that is + /// an instruction in the same block. + SmallPtrSet<Instruction*, 16> InstsInThisBlock; + + /// MDNodes - keep track of the metadata nodes that have been checked + /// already. + SmallPtrSet<MDNode *, 32> MDNodes; + + /// PersonalityFn - The personality function referenced by the + /// LandingPadInsts. All LandingPadInsts within the same function must use + /// the same personality function. + const Value *PersonalityFn; + + Verifier() + : FunctionPass(ID), Broken(false), + action(AbortProcessAction), Mod(0), Context(0), DT(0), + MessagesStr(Messages), PersonalityFn(0) { + initializeVerifierPass(*PassRegistry::getPassRegistry()); + } + explicit Verifier(VerifierFailureAction ctn) + : FunctionPass(ID), Broken(false), action(ctn), Mod(0), + Context(0), DT(0), MessagesStr(Messages), PersonalityFn(0) { + initializeVerifierPass(*PassRegistry::getPassRegistry()); + } + + bool doInitialization(Module &M) { + Mod = &M; + Context = &M.getContext(); + + // We must abort before returning back to the pass manager, or else the + // pass manager may try to run other passes on the broken module. + return abortIfBroken(); + } + + bool runOnFunction(Function &F) { + // Get dominator information if we are being run by PassManager + DT = &getAnalysis<DominatorTree>(); + + Mod = F.getParent(); + if (!Context) Context = &F.getContext(); + + visit(F); + InstsInThisBlock.clear(); + PersonalityFn = 0; + + // We must abort before returning back to the pass manager, or else the + // pass manager may try to run other passes on the broken module. + return abortIfBroken(); + } + + bool doFinalization(Module &M) { + // Scan through, checking all of the external function's linkage now... + for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { + visitGlobalValue(*I); + + // Check to make sure function prototypes are okay. + if (I->isDeclaration()) visitFunction(*I); + } + + for (Module::global_iterator I = M.global_begin(), E = M.global_end(); + I != E; ++I) + visitGlobalVariable(*I); + + for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); + I != E; ++I) + visitGlobalAlias(*I); + + for (Module::named_metadata_iterator I = M.named_metadata_begin(), + E = M.named_metadata_end(); I != E; ++I) + visitNamedMDNode(*I); + + visitModuleFlags(M); + + // If the module is broken, abort at this time. + return abortIfBroken(); + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + AU.addRequiredID(PreVerifyID); + AU.addRequired<DominatorTree>(); + } + + /// abortIfBroken - If the module is broken and we are supposed to abort on + /// this condition, do so. 
+ /// + bool abortIfBroken() { + if (!Broken) return false; + MessagesStr << "Broken module found, "; + switch (action) { + case AbortProcessAction: + MessagesStr << "compilation aborted!\n"; + dbgs() << MessagesStr.str(); + // Client should choose different reaction if abort is not desired + abort(); + case PrintMessageAction: + MessagesStr << "verification continues.\n"; + dbgs() << MessagesStr.str(); + return false; + case ReturnStatusAction: + MessagesStr << "compilation terminated.\n"; + return true; + } + llvm_unreachable("Invalid action"); + } + + + // Verification methods... + void visitGlobalValue(GlobalValue &GV); + void visitGlobalVariable(GlobalVariable &GV); + void visitGlobalAlias(GlobalAlias &GA); + void visitNamedMDNode(NamedMDNode &NMD); + void visitMDNode(MDNode &MD, Function *F); + void visitModuleFlags(Module &M); + void visitModuleFlag(MDNode *Op, DenseMap<MDString*, MDNode*> &SeenIDs, + SmallVectorImpl<MDNode*> &Requirements); + void visitFunction(Function &F); + void visitBasicBlock(BasicBlock &BB); + using InstVisitor<Verifier>::visit; + + void visit(Instruction &I); + + void visitTruncInst(TruncInst &I); + void visitZExtInst(ZExtInst &I); + void visitSExtInst(SExtInst &I); + void visitFPTruncInst(FPTruncInst &I); + void visitFPExtInst(FPExtInst &I); + void visitFPToUIInst(FPToUIInst &I); + void visitFPToSIInst(FPToSIInst &I); + void visitUIToFPInst(UIToFPInst &I); + void visitSIToFPInst(SIToFPInst &I); + void visitIntToPtrInst(IntToPtrInst &I); + void visitPtrToIntInst(PtrToIntInst &I); + void visitBitCastInst(BitCastInst &I); + void visitPHINode(PHINode &PN); + void visitBinaryOperator(BinaryOperator &B); + void visitICmpInst(ICmpInst &IC); + void visitFCmpInst(FCmpInst &FC); + void visitExtractElementInst(ExtractElementInst &EI); + void visitInsertElementInst(InsertElementInst &EI); + void visitShuffleVectorInst(ShuffleVectorInst &EI); + void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); } + void visitCallInst(CallInst &CI); + void visitInvokeInst(InvokeInst &II); + void visitGetElementPtrInst(GetElementPtrInst &GEP); + void visitLoadInst(LoadInst &LI); + void visitStoreInst(StoreInst &SI); + void verifyDominatesUse(Instruction &I, unsigned i); + void visitInstruction(Instruction &I); + void visitTerminatorInst(TerminatorInst &I); + void visitBranchInst(BranchInst &BI); + void visitReturnInst(ReturnInst &RI); + void visitSwitchInst(SwitchInst &SI); + void visitIndirectBrInst(IndirectBrInst &BI); + void visitSelectInst(SelectInst &SI); + void visitUserOp1(Instruction &I); + void visitUserOp2(Instruction &I) { visitUserOp1(I); } + void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI); + void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI); + void visitAtomicRMWInst(AtomicRMWInst &RMWI); + void visitFenceInst(FenceInst &FI); + void visitAllocaInst(AllocaInst &AI); + void visitExtractValueInst(ExtractValueInst &EVI); + void visitInsertValueInst(InsertValueInst &IVI); + void visitLandingPadInst(LandingPadInst &LPI); + + void VerifyCallSite(CallSite CS); + bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, + int VT, unsigned ArgNo, std::string &Suffix); + bool VerifyIntrinsicType(Type *Ty, + ArrayRef<Intrinsic::IITDescriptor> &Infos, + SmallVectorImpl<Type*> &ArgTys); + void VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, + bool isReturnValue, const Value *V); + void VerifyFunctionAttrs(FunctionType *FT, const AttributeSet &Attrs, + const Value *V); + + void WriteValue(const Value *V) { + if (!V) return; + if 
(isa<Instruction>(V)) { + MessagesStr << *V << '\n'; + } else { + WriteAsOperand(MessagesStr, V, true, Mod); + MessagesStr << '\n'; + } + } + + void WriteType(Type *T) { + if (!T) return; + MessagesStr << ' ' << *T; + } + + + // CheckFailed - A check failed, so print out the condition and the message + // that failed. This provides a nice place to put a breakpoint if you want + // to see why something is not correct. + void CheckFailed(const Twine &Message, + const Value *V1 = 0, const Value *V2 = 0, + const Value *V3 = 0, const Value *V4 = 0) { + MessagesStr << Message.str() << "\n"; + WriteValue(V1); + WriteValue(V2); + WriteValue(V3); + WriteValue(V4); + Broken = true; + } + + void CheckFailed(const Twine &Message, const Value *V1, + Type *T2, const Value *V3 = 0) { + MessagesStr << Message.str() << "\n"; + WriteValue(V1); + WriteType(T2); + WriteValue(V3); + Broken = true; + } + + void CheckFailed(const Twine &Message, Type *T1, + Type *T2 = 0, Type *T3 = 0) { + MessagesStr << Message.str() << "\n"; + WriteType(T1); + WriteType(T2); + WriteType(T3); + Broken = true; + } + }; +} // End anonymous namespace + +char Verifier::ID = 0; +INITIALIZE_PASS_BEGIN(Verifier, "verify", "Module Verifier", false, false) +INITIALIZE_PASS_DEPENDENCY(PreVerifier) +INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_PASS_END(Verifier, "verify", "Module Verifier", false, false) + +// Assert - We know that cond should be true, if not print an error message. +#define Assert(C, M) \ + do { if (!(C)) { CheckFailed(M); return; } } while (0) +#define Assert1(C, M, V1) \ + do { if (!(C)) { CheckFailed(M, V1); return; } } while (0) +#define Assert2(C, M, V1, V2) \ + do { if (!(C)) { CheckFailed(M, V1, V2); return; } } while (0) +#define Assert3(C, M, V1, V2, V3) \ + do { if (!(C)) { CheckFailed(M, V1, V2, V3); return; } } while (0) +#define Assert4(C, M, V1, V2, V3, V4) \ + do { if (!(C)) { CheckFailed(M, V1, V2, V3, V4); return; } } while (0) + +void Verifier::visit(Instruction &I) { + for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) + Assert1(I.getOperand(i) != 0, "Operand is null", &I); + InstVisitor<Verifier>::visit(I); +} + + +void Verifier::visitGlobalValue(GlobalValue &GV) { + Assert1(!GV.isDeclaration() || + GV.isMaterializable() || + GV.hasExternalLinkage() || + GV.hasDLLImportLinkage() || + GV.hasExternalWeakLinkage() || + (isa<GlobalAlias>(GV) && + (GV.hasLocalLinkage() || GV.hasWeakLinkage())), + "Global is external, but doesn't have external or dllimport or weak linkage!", + &GV); + + Assert1(!GV.hasDLLImportLinkage() || GV.isDeclaration(), + "Global is marked as dllimport, but not external", &GV); + + Assert1(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV), + "Only global variables can have appending linkage!", &GV); + + if (GV.hasAppendingLinkage()) { + GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV); + Assert1(GVar && GVar->getType()->getElementType()->isArrayTy(), + "Only global arrays can have appending linkage!", GVar); + } + + Assert1(!GV.hasLinkOnceODRAutoHideLinkage() || GV.hasDefaultVisibility(), + "linkonce_odr_auto_hide can only have default visibility!", + &GV); +} + +void Verifier::visitGlobalVariable(GlobalVariable &GV) { + if (GV.hasInitializer()) { + Assert1(GV.getInitializer()->getType() == GV.getType()->getElementType(), + "Global variable initializer type does not match global " + "variable type!", &GV); + + // If the global has common linkage, it must have a zero initializer and + // cannot be constant. 
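  // Illustrative sketch (editor's example, names hypothetical): a 'common'
  // global that satisfies the two checks below could be created as
  //   new GlobalVariable(M, Type::getInt32Ty(C), /*isConstant=*/false,
  //                      GlobalValue::CommonLinkage,
  //                      Constant::getNullValue(Type::getInt32Ty(C)), "g");
  // i.e. not marked constant, and with an all-zero initializer.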
+ if (GV.hasCommonLinkage()) { + Assert1(GV.getInitializer()->isNullValue(), + "'common' global must have a zero initializer!", &GV); + Assert1(!GV.isConstant(), "'common' global may not be marked constant!", + &GV); + } + } else { + Assert1(GV.hasExternalLinkage() || GV.hasDLLImportLinkage() || + GV.hasExternalWeakLinkage(), + "invalid linkage type for global declaration", &GV); + } + + if (GV.hasName() && (GV.getName() == "llvm.global_ctors" || + GV.getName() == "llvm.global_dtors")) { + Assert1(!GV.hasInitializer() || GV.hasAppendingLinkage(), + "invalid linkage for intrinsic global variable", &GV); + // Don't worry about emitting an error for it not being an array, + // visitGlobalValue will complain on appending non-array. + if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getType())) { + StructType *STy = dyn_cast<StructType>(ATy->getElementType()); + PointerType *FuncPtrTy = + FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo(); + Assert1(STy && STy->getNumElements() == 2 && + STy->getTypeAtIndex(0u)->isIntegerTy(32) && + STy->getTypeAtIndex(1) == FuncPtrTy, + "wrong type for intrinsic global variable", &GV); + } + } + + visitGlobalValue(GV); +} + +void Verifier::visitGlobalAlias(GlobalAlias &GA) { + Assert1(!GA.getName().empty(), + "Alias name cannot be empty!", &GA); + Assert1(GA.hasExternalLinkage() || GA.hasLocalLinkage() || + GA.hasWeakLinkage(), + "Alias should have external or external weak linkage!", &GA); + Assert1(GA.getAliasee(), + "Aliasee cannot be NULL!", &GA); + Assert1(GA.getType() == GA.getAliasee()->getType(), + "Alias and aliasee types should match!", &GA); + Assert1(!GA.hasUnnamedAddr(), "Alias cannot have unnamed_addr!", &GA); + + if (!isa<GlobalValue>(GA.getAliasee())) { + const ConstantExpr *CE = dyn_cast<ConstantExpr>(GA.getAliasee()); + Assert1(CE && + (CE->getOpcode() == Instruction::BitCast || + CE->getOpcode() == Instruction::GetElementPtr) && + isa<GlobalValue>(CE->getOperand(0)), + "Aliasee should be either GlobalValue or bitcast of GlobalValue", + &GA); + } + + const GlobalValue* Aliasee = GA.resolveAliasedGlobal(/*stopOnWeak*/ false); + Assert1(Aliasee, + "Aliasing chain should end with function or global variable", &GA); + + visitGlobalValue(GA); +} + +void Verifier::visitNamedMDNode(NamedMDNode &NMD) { + for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i) { + MDNode *MD = NMD.getOperand(i); + if (!MD) + continue; + + Assert1(!MD->isFunctionLocal(), + "Named metadata operand cannot be function local!", MD); + visitMDNode(*MD, 0); + } +} + +void Verifier::visitMDNode(MDNode &MD, Function *F) { + // Only visit each node once. Metadata can be mutually recursive, so this + // avoids infinite recursion here, as well as being an optimization. + if (!MDNodes.insert(&MD)) + return; + + for (unsigned i = 0, e = MD.getNumOperands(); i != e; ++i) { + Value *Op = MD.getOperand(i); + if (!Op) + continue; + if (isa<Constant>(Op) || isa<MDString>(Op)) + continue; + if (MDNode *N = dyn_cast<MDNode>(Op)) { + Assert2(MD.isFunctionLocal() || !N->isFunctionLocal(), + "Global metadata operand cannot be function local!", &MD, N); + visitMDNode(*N, F); + continue; + } + Assert2(MD.isFunctionLocal(), "Invalid operand for global metadata!", &MD, Op); + + // If this was an instruction, bb, or argument, verify that it is in the + // function that we expect. 
+ Function *ActualF = 0; + if (Instruction *I = dyn_cast<Instruction>(Op)) + ActualF = I->getParent()->getParent(); + else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op)) + ActualF = BB->getParent(); + else if (Argument *A = dyn_cast<Argument>(Op)) + ActualF = A->getParent(); + assert(ActualF && "Unimplemented function local metadata case!"); + + Assert2(ActualF == F, "function-local metadata used in wrong function", + &MD, Op); + } +} + +void Verifier::visitModuleFlags(Module &M) { + const NamedMDNode *Flags = M.getModuleFlagsMetadata(); + if (!Flags) return; + + // Scan each flag, and track the flags and requirements. + DenseMap<MDString*, MDNode*> SeenIDs; + SmallVector<MDNode*, 16> Requirements; + for (unsigned I = 0, E = Flags->getNumOperands(); I != E; ++I) { + visitModuleFlag(Flags->getOperand(I), SeenIDs, Requirements); + } + + // Validate that the requirements in the module are valid. + for (unsigned I = 0, E = Requirements.size(); I != E; ++I) { + MDNode *Requirement = Requirements[I]; + MDString *Flag = cast<MDString>(Requirement->getOperand(0)); + Value *ReqValue = Requirement->getOperand(1); + + MDNode *Op = SeenIDs.lookup(Flag); + if (!Op) { + CheckFailed("invalid requirement on flag, flag is not present in module", + Flag); + continue; + } + + if (Op->getOperand(2) != ReqValue) { + CheckFailed(("invalid requirement on flag, " + "flag does not have the required value"), + Flag); + continue; + } + } +} + +void Verifier::visitModuleFlag(MDNode *Op, DenseMap<MDString*, MDNode*>&SeenIDs, + SmallVectorImpl<MDNode*> &Requirements) { + // Each module flag should have three arguments, the merge behavior (a + // constant int), the flag ID (an MDString), and the value. + Assert1(Op->getNumOperands() == 3, + "incorrect number of operands in module flag", Op); + ConstantInt *Behavior = dyn_cast<ConstantInt>(Op->getOperand(0)); + MDString *ID = dyn_cast<MDString>(Op->getOperand(1)); + Assert1(Behavior, + "invalid behavior operand in module flag (expected constant integer)", + Op->getOperand(0)); + unsigned BehaviorValue = Behavior->getZExtValue(); + Assert1(ID, + "invalid ID operand in module flag (expected metadata string)", + Op->getOperand(1)); + + // Sanity check the values for behaviors with additional requirements. + switch (BehaviorValue) { + default: + Assert1(false, + "invalid behavior operand in module flag (unexpected constant)", + Op->getOperand(0)); + break; + + case Module::Error: + case Module::Warning: + case Module::Override: + // These behavior types accept any value. + break; + + case Module::Require: { + // The value should itself be an MDNode with two operands, a flag ID (an + // MDString), and a value. + MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2)); + Assert1(Value && Value->getNumOperands() == 2, + "invalid value for 'require' module flag (expected metadata pair)", + Op->getOperand(2)); + Assert1(isa<MDString>(Value->getOperand(0)), + ("invalid value for 'require' module flag " + "(first value operand should be a string)"), + Value->getOperand(0)); + + // Append it to the list of requirements, to check once all module flags are + // scanned. + Requirements.push_back(Value); + break; + } + + case Module::Append: + case Module::AppendUnique: { + // These behavior types require the operand be an MDNode. + Assert1(isa<MDNode>(Op->getOperand(2)), + "invalid value for 'append'-type module flag " + "(expected a metadata node)", Op->getOperand(2)); + break; + } + } + + // Unless this is a "requires" flag, check the ID is unique. 
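  // For illustration (flag names hypothetical): flags of the shape verified
  // here are normally created through Module::addModuleFlag, e.g.
  //   M.addModuleFlag(Module::Error, "my-flag", 42);
  // which appends !{i32 1, !"my-flag", i32 42} to !llvm.module.flags. Adding
  // the same ID a second time trips the uniqueness check below unless the
  // behavior is 'Require'.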
+ if (BehaviorValue != Module::Require) { + bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second; + Assert1(Inserted, + "module flag identifiers must be unique (or of 'require' type)", + ID); + } +} + +// VerifyParameterAttrs - Check the given attributes for an argument or return +// value of the specified type. The value V is printed in error messages. +void Verifier::VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, + bool isReturnValue, const Value *V) { + if (!Attrs.hasAttributes(Idx)) + return; + + Assert1(!Attrs.hasAttribute(Idx, Attribute::NoReturn) && + !Attrs.hasAttribute(Idx, Attribute::NoUnwind) && + !Attrs.hasAttribute(Idx, Attribute::ReadNone) && + !Attrs.hasAttribute(Idx, Attribute::ReadOnly) && + !Attrs.hasAttribute(Idx, Attribute::NoInline) && + !Attrs.hasAttribute(Idx, Attribute::AlwaysInline) && + !Attrs.hasAttribute(Idx, Attribute::OptimizeForSize) && + !Attrs.hasAttribute(Idx, Attribute::StackProtect) && + !Attrs.hasAttribute(Idx, Attribute::StackProtectReq) && + !Attrs.hasAttribute(Idx, Attribute::NoRedZone) && + !Attrs.hasAttribute(Idx, Attribute::NoImplicitFloat) && + !Attrs.hasAttribute(Idx, Attribute::Naked) && + !Attrs.hasAttribute(Idx, Attribute::InlineHint) && + !Attrs.hasAttribute(Idx, Attribute::StackAlignment) && + !Attrs.hasAttribute(Idx, Attribute::UWTable) && + !Attrs.hasAttribute(Idx, Attribute::NonLazyBind) && + !Attrs.hasAttribute(Idx, Attribute::ReturnsTwice) && + !Attrs.hasAttribute(Idx, Attribute::SanitizeAddress) && + !Attrs.hasAttribute(Idx, Attribute::SanitizeThread) && + !Attrs.hasAttribute(Idx, Attribute::SanitizeMemory) && + !Attrs.hasAttribute(Idx, Attribute::MinSize) && + !Attrs.hasAttribute(Idx, Attribute::NoBuiltin), + "Some attributes in '" + Attrs.getAsString(Idx) + + "' only apply to functions!", V); + + if (isReturnValue) + Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal) && + !Attrs.hasAttribute(Idx, Attribute::Nest) && + !Attrs.hasAttribute(Idx, Attribute::StructRet) && + !Attrs.hasAttribute(Idx, Attribute::NoCapture), + "Attribute 'byval', 'nest', 'sret', and 'nocapture' " + "do not apply to return values!", V); + + // Check for mutually incompatible attributes. + Assert1(!((Attrs.hasAttribute(Idx, Attribute::ByVal) && + Attrs.hasAttribute(Idx, Attribute::Nest)) || + (Attrs.hasAttribute(Idx, Attribute::ByVal) && + Attrs.hasAttribute(Idx, Attribute::StructRet)) || + (Attrs.hasAttribute(Idx, Attribute::Nest) && + Attrs.hasAttribute(Idx, Attribute::StructRet))), "Attributes " + "'byval, nest, and sret' are incompatible!", V); + + Assert1(!((Attrs.hasAttribute(Idx, Attribute::ByVal) && + Attrs.hasAttribute(Idx, Attribute::Nest)) || + (Attrs.hasAttribute(Idx, Attribute::ByVal) && + Attrs.hasAttribute(Idx, Attribute::InReg)) || + (Attrs.hasAttribute(Idx, Attribute::Nest) && + Attrs.hasAttribute(Idx, Attribute::InReg))), "Attributes " + "'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Idx, Attribute::ZExt) && + Attrs.hasAttribute(Idx, Attribute::SExt)), "Attributes " + "'zeroext and signext' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) && + Attrs.hasAttribute(Idx, Attribute::ReadOnly)), "Attributes " + "'readnone and readonly' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Idx, Attribute::NoInline) && + Attrs.hasAttribute(Idx, Attribute::AlwaysInline)), "Attributes " + "'noinline and alwaysinline' are incompatible!", V); + + Assert1(!AttrBuilder(Attrs, Idx). 
+ hasAttributes(AttributeFuncs::typeIncompatible(Ty, Idx), Idx), + "Wrong types for attribute: " + + AttributeFuncs::typeIncompatible(Ty, Idx).getAsString(Idx), V); + + if (PointerType *PTy = dyn_cast<PointerType>(Ty)) + Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal) || + PTy->getElementType()->isSized(), + "Attribute 'byval' does not support unsized types!", V); + else + Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal), + "Attribute 'byval' only applies to parameters with pointer type!", + V); +} + +// VerifyFunctionAttrs - Check parameter attributes against a function type. +// The value V is printed in error messages. +void Verifier::VerifyFunctionAttrs(FunctionType *FT, + const AttributeSet &Attrs, + const Value *V) { + if (Attrs.isEmpty()) + return; + + bool SawNest = false; + + for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { + unsigned Index = Attrs.getSlotIndex(i); + + Type *Ty; + if (Index == 0) + Ty = FT->getReturnType(); + else if (Index-1 < FT->getNumParams()) + Ty = FT->getParamType(Index-1); + else + break; // VarArgs attributes, verified elsewhere. + + VerifyParameterAttrs(Attrs, Index, Ty, Index == 0, V); + + if (Attrs.hasAttribute(i, Attribute::Nest)) { + Assert1(!SawNest, "More than one parameter has attribute nest!", V); + SawNest = true; + } + + if (Attrs.hasAttribute(Index, Attribute::StructRet)) + Assert1(Index == 1, "Attribute sret is not on first parameter!", V); + } + + if (!Attrs.hasAttributes(AttributeSet::FunctionIndex)) + return; + + AttrBuilder NotFn(Attrs, AttributeSet::FunctionIndex); + NotFn.removeFunctionOnlyAttrs(); + Assert1(NotFn.empty(), "Attributes '" + + AttributeSet::get(V->getContext(), + AttributeSet::FunctionIndex, + NotFn).getAsString(AttributeSet::FunctionIndex) + + "' do not apply to the function!", V); + + // Check for mutually incompatible attributes. 
+ Assert1(!((Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ByVal) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::Nest)) || + (Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ByVal) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StructRet)) || + (Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::Nest) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StructRet))), + "Attributes 'byval, nest, and sret' are incompatible!", V); + + Assert1(!((Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ByVal) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::Nest)) || + (Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ByVal) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::InReg)) || + (Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::Nest) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::InReg))), + "Attributes 'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ZExt) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::SExt)), + "Attributes 'zeroext and signext' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReadNone) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReadOnly)), + "Attributes 'readnone and readonly' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoInline) && + Attrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::AlwaysInline)), + "Attributes 'noinline and alwaysinline' are incompatible!", V); +} + +static bool VerifyAttributeCount(const AttributeSet &Attrs, unsigned Params) { + if (Attrs.getNumSlots() == 0) + return true; + + unsigned LastSlot = Attrs.getNumSlots() - 1; + unsigned LastIndex = Attrs.getSlotIndex(LastSlot); + if (LastIndex <= Params + || (LastIndex == AttributeSet::FunctionIndex + && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params))) + return true; + + return false; +} + +// visitFunction - Verify that a function is ok. +// +void Verifier::visitFunction(Function &F) { + // Check function arguments. + FunctionType *FT = F.getFunctionType(); + unsigned NumArgs = F.arg_size(); + + Assert1(Context == &F.getContext(), + "Function context does not match Module context!", &F); + + Assert1(!F.hasCommonLinkage(), "Functions may not have common linkage", &F); + Assert2(FT->getNumParams() == NumArgs, + "# formal arguments must match # of arguments for function type!", + &F, FT); + Assert1(F.getReturnType()->isFirstClassType() || + F.getReturnType()->isVoidTy() || + F.getReturnType()->isStructTy(), + "Functions cannot return aggregate values!", &F); + + Assert1(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(), + "Invalid struct return type!", &F); + + const AttributeSet &Attrs = F.getAttributes(); + + Assert1(VerifyAttributeCount(Attrs, FT->getNumParams()), + "Attribute after last parameter!", &F); + + // Check function attributes. + VerifyFunctionAttrs(FT, Attrs, &F); + + // Check that this function meets the restrictions on this calling convention. 
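  // For illustration (editor's sketch): the restriction checked below means
  // that a function built as
  //   Function *F = Function::Create(FT, GlobalValue::ExternalLinkage, "f", &M);
  //   F->setCallingConv(CallingConv::Fast);
  // is only well-formed if FT is not vararg; fastcc and the other non-C
  // conventions listed may not be combined with '...'.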
+ switch (F.getCallingConv()) { + default: + break; + case CallingConv::C: + break; + case CallingConv::Fast: + case CallingConv::Cold: + case CallingConv::X86_FastCall: + case CallingConv::X86_ThisCall: + case CallingConv::Intel_OCL_BI: + case CallingConv::PTX_Kernel: + case CallingConv::PTX_Device: + Assert1(!F.isVarArg(), + "Varargs functions must have C calling conventions!", &F); + break; + } + + bool isLLVMdotName = F.getName().size() >= 5 && + F.getName().substr(0, 5) == "llvm."; + + // Check that the argument values match the function type for this function... + unsigned i = 0; + for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); + I != E; ++I, ++i) { + Assert2(I->getType() == FT->getParamType(i), + "Argument value does not match function argument type!", + I, FT->getParamType(i)); + Assert1(I->getType()->isFirstClassType(), + "Function arguments must have first-class types!", I); + if (!isLLVMdotName) + Assert2(!I->getType()->isMetadataTy(), + "Function takes metadata but isn't an intrinsic", I, &F); + } + + if (F.isMaterializable()) { + // Function has a body somewhere we can't see. + } else if (F.isDeclaration()) { + Assert1(F.hasExternalLinkage() || F.hasDLLImportLinkage() || + F.hasExternalWeakLinkage(), + "invalid linkage type for function declaration", &F); + } else { + // Verify that this function (which has a body) is not named "llvm.*". It + // is not legal to define intrinsics. + Assert1(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F); + + // Check the entry node + BasicBlock *Entry = &F.getEntryBlock(); + Assert1(pred_begin(Entry) == pred_end(Entry), + "Entry block to function must not have predecessors!", Entry); + + // The address of the entry block cannot be taken, unless it is dead. + if (Entry->hasAddressTaken()) { + Assert1(!BlockAddress::get(Entry)->isConstantUsed(), + "blockaddress may not be used with the entry block!", Entry); + } + } + + // If this function is actually an intrinsic, verify that it is only used in + // direct call/invokes, never having its "address taken". + if (F.getIntrinsicID()) { + const User *U; + if (F.hasAddressTaken(&U)) + Assert1(0, "Invalid user of intrinsic instruction!", U); + } +} + +// verifyBasicBlock - Verify that a basic block is well formed... +// +void Verifier::visitBasicBlock(BasicBlock &BB) { + InstsInThisBlock.clear(); + + // Ensure that basic blocks have terminators! + Assert1(BB.getTerminator(), "Basic Block does not have terminator!", &BB); + + // Check constraints that this basic block imposes on all of the PHI nodes in + // it. + if (isa<PHINode>(BB.front())) { + SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB)); + SmallVector<std::pair<BasicBlock*, Value*>, 8> Values; + std::sort(Preds.begin(), Preds.end()); + PHINode *PN; + for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) { + // Ensure that PHI nodes have at least one entry! + Assert1(PN->getNumIncomingValues() != 0, + "PHI nodes must have at least one entry. If the block is dead, " + "the PHI should be removed!", PN); + Assert1(PN->getNumIncomingValues() == Preds.size(), + "PHINode should have one entry for each predecessor of its " + "parent basic block!", PN); + + // Get and sort all incoming values in the PHI node... 
+ Values.clear(); + Values.reserve(PN->getNumIncomingValues()); + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) + Values.push_back(std::make_pair(PN->getIncomingBlock(i), + PN->getIncomingValue(i))); + std::sort(Values.begin(), Values.end()); + + for (unsigned i = 0, e = Values.size(); i != e; ++i) { + // Check to make sure that if there is more than one entry for a + // particular basic block in this PHI node, that the incoming values are + // all identical. + // + Assert4(i == 0 || Values[i].first != Values[i-1].first || + Values[i].second == Values[i-1].second, + "PHI node has multiple entries for the same basic block with " + "different incoming values!", PN, Values[i].first, + Values[i].second, Values[i-1].second); + + // Check to make sure that the predecessors and PHI node entries are + // matched up. + Assert3(Values[i].first == Preds[i], + "PHI node entries do not match predecessors!", PN, + Values[i].first, Preds[i]); + } + } + } +} + +void Verifier::visitTerminatorInst(TerminatorInst &I) { + // Ensure that terminators only exist at the end of the basic block. + Assert1(&I == I.getParent()->getTerminator(), + "Terminator found in the middle of a basic block!", I.getParent()); + visitInstruction(I); +} + +void Verifier::visitBranchInst(BranchInst &BI) { + if (BI.isConditional()) { + Assert2(BI.getCondition()->getType()->isIntegerTy(1), + "Branch condition is not 'i1' type!", &BI, BI.getCondition()); + } + visitTerminatorInst(BI); +} + +void Verifier::visitReturnInst(ReturnInst &RI) { + Function *F = RI.getParent()->getParent(); + unsigned N = RI.getNumOperands(); + if (F->getReturnType()->isVoidTy()) + Assert2(N == 0, + "Found return instr that returns non-void in Function of void " + "return type!", &RI, F->getReturnType()); + else + Assert2(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(), + "Function return type does not match operand " + "type of return inst!", &RI, F->getReturnType()); + + // Check to make sure that the return value has necessary properties for + // terminators... + visitTerminatorInst(RI); +} + +void Verifier::visitSwitchInst(SwitchInst &SI) { + // Check to make sure that all of the constants in the switch instruction + // have the same type as the switched-on value. 
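  // For illustration: a switch built with IRBuilder satisfies this, e.g.
  // (Cond an i32 value, Default/BB0 existing blocks - all hypothetical):
  //   SwitchInst *SI = Builder.CreateSwitch(Cond, Default, 2);
  //   SI->addCase(Builder.getInt32(0), BB0);   // case constant matches i32
  // addCase takes a ConstantInt of the condition's type, which is what the
  // bit-width comparison below enforces for each case range.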
+ Type *SwitchTy = SI.getCondition()->getType(); + IntegerType *IntTy = cast<IntegerType>(SwitchTy); + IntegersSubsetToBB Mapping; + std::map<IntegersSubset::Range, unsigned> RangeSetMap; + for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) { + IntegersSubset CaseRanges = i.getCaseValueEx(); + for (unsigned ri = 0, rie = CaseRanges.getNumItems(); ri < rie; ++ri) { + IntegersSubset::Range r = CaseRanges.getItem(ri); + Assert1(((const APInt&)r.getLow()).getBitWidth() == IntTy->getBitWidth(), + "Switch constants must all be same type as switch value!", &SI); + Assert1(((const APInt&)r.getHigh()).getBitWidth() == IntTy->getBitWidth(), + "Switch constants must all be same type as switch value!", &SI); + Mapping.add(r); + RangeSetMap[r] = i.getCaseIndex(); + } + } + + IntegersSubsetToBB::RangeIterator errItem; + if (!Mapping.verify(errItem)) { + unsigned CaseIndex = RangeSetMap[errItem->first]; + SwitchInst::CaseIt i(&SI, CaseIndex); + Assert2(false, "Duplicate integer as switch case", &SI, i.getCaseValueEx()); + } + + visitTerminatorInst(SI); +} + +void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { + Assert1(BI.getAddress()->getType()->isPointerTy(), + "Indirectbr operand must have pointer type!", &BI); + for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i) + Assert1(BI.getDestination(i)->getType()->isLabelTy(), + "Indirectbr destinations must all have pointer type!", &BI); + + visitTerminatorInst(BI); +} + +void Verifier::visitSelectInst(SelectInst &SI) { + Assert1(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), + SI.getOperand(2)), + "Invalid operands for select instruction!", &SI); + + Assert1(SI.getTrueValue()->getType() == SI.getType(), + "Select values must have same type as select instruction!", &SI); + visitInstruction(SI); +} + +/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of +/// a pass, if any exist, it's an error. 
+/// +void Verifier::visitUserOp1(Instruction &I) { + Assert1(0, "User-defined operators should not live outside of a pass!", &I); +} + +void Verifier::visitTruncInst(TruncInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DestBitSize = DestTy->getScalarSizeInBits(); + + Assert1(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I); + Assert1(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "trunc source and destination must both be a vector or neither", &I); + Assert1(SrcBitSize > DestBitSize,"DestTy too big for Trunc", &I); + + visitInstruction(I); +} + +void Verifier::visitZExtInst(ZExtInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + // Get the size of the types in bits, we'll need this later + Assert1(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I); + Assert1(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "zext source and destination must both be a vector or neither", &I); + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DestBitSize = DestTy->getScalarSizeInBits(); + + Assert1(SrcBitSize < DestBitSize,"Type too small for ZExt", &I); + + visitInstruction(I); +} + +void Verifier::visitSExtInst(SExtInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DestBitSize = DestTy->getScalarSizeInBits(); + + Assert1(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I); + Assert1(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "sext source and destination must both be a vector or neither", &I); + Assert1(SrcBitSize < DestBitSize,"Type too small for SExt", &I); + + visitInstruction(I); +} + +void Verifier::visitFPTruncInst(FPTruncInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DestBitSize = DestTy->getScalarSizeInBits(); + + Assert1(SrcTy->isFPOrFPVectorTy(),"FPTrunc only operates on FP", &I); + Assert1(DestTy->isFPOrFPVectorTy(),"FPTrunc only produces an FP", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "fptrunc source and destination must both be a vector or neither",&I); + Assert1(SrcBitSize > DestBitSize,"DestTy too big for FPTrunc", &I); + + visitInstruction(I); +} + +void Verifier::visitFPExtInst(FPExtInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); + unsigned DestBitSize = DestTy->getScalarSizeInBits(); + + Assert1(SrcTy->isFPOrFPVectorTy(),"FPExt only operates on FP", &I); + Assert1(DestTy->isFPOrFPVectorTy(),"FPExt only produces an FP", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "fpext source and 
destination must both be a vector or neither", &I); + Assert1(SrcBitSize < DestBitSize,"DestTy too small for FPExt", &I); + + visitInstruction(I); +} + +void Verifier::visitUIToFPInst(UIToFPInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + bool SrcVec = SrcTy->isVectorTy(); + bool DstVec = DestTy->isVectorTy(); + + Assert1(SrcVec == DstVec, + "UIToFP source and dest must both be vector or scalar", &I); + Assert1(SrcTy->isIntOrIntVectorTy(), + "UIToFP source must be integer or integer vector", &I); + Assert1(DestTy->isFPOrFPVectorTy(), + "UIToFP result must be FP or FP vector", &I); + + if (SrcVec && DstVec) + Assert1(cast<VectorType>(SrcTy)->getNumElements() == + cast<VectorType>(DestTy)->getNumElements(), + "UIToFP source and dest vector length mismatch", &I); + + visitInstruction(I); +} + +void Verifier::visitSIToFPInst(SIToFPInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + bool SrcVec = SrcTy->isVectorTy(); + bool DstVec = DestTy->isVectorTy(); + + Assert1(SrcVec == DstVec, + "SIToFP source and dest must both be vector or scalar", &I); + Assert1(SrcTy->isIntOrIntVectorTy(), + "SIToFP source must be integer or integer vector", &I); + Assert1(DestTy->isFPOrFPVectorTy(), + "SIToFP result must be FP or FP vector", &I); + + if (SrcVec && DstVec) + Assert1(cast<VectorType>(SrcTy)->getNumElements() == + cast<VectorType>(DestTy)->getNumElements(), + "SIToFP source and dest vector length mismatch", &I); + + visitInstruction(I); +} + +void Verifier::visitFPToUIInst(FPToUIInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + bool SrcVec = SrcTy->isVectorTy(); + bool DstVec = DestTy->isVectorTy(); + + Assert1(SrcVec == DstVec, + "FPToUI source and dest must both be vector or scalar", &I); + Assert1(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", + &I); + Assert1(DestTy->isIntOrIntVectorTy(), + "FPToUI result must be integer or integer vector", &I); + + if (SrcVec && DstVec) + Assert1(cast<VectorType>(SrcTy)->getNumElements() == + cast<VectorType>(DestTy)->getNumElements(), + "FPToUI source and dest vector length mismatch", &I); + + visitInstruction(I); +} + +void Verifier::visitFPToSIInst(FPToSIInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + bool SrcVec = SrcTy->isVectorTy(); + bool DstVec = DestTy->isVectorTy(); + + Assert1(SrcVec == DstVec, + "FPToSI source and dest must both be vector or scalar", &I); + Assert1(SrcTy->isFPOrFPVectorTy(), + "FPToSI source must be FP or FP vector", &I); + Assert1(DestTy->isIntOrIntVectorTy(), + "FPToSI result must be integer or integer vector", &I); + + if (SrcVec && DstVec) + Assert1(cast<VectorType>(SrcTy)->getNumElements() == + cast<VectorType>(DestTy)->getNumElements(), + "FPToSI source and dest vector length mismatch", &I); + + visitInstruction(I); +} + +void Verifier::visitPtrToIntInst(PtrToIntInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + Assert1(SrcTy->getScalarType()->isPointerTy(), + "PtrToInt source must be pointer", &I); + Assert1(DestTy->getScalarType()->isIntegerTy(), + "PtrToInt result must be integral", &I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "PtrToInt type mismatch", &I); + + if (SrcTy->isVectorTy()) { + 
VectorType *VSrc = dyn_cast<VectorType>(SrcTy); + VectorType *VDest = dyn_cast<VectorType>(DestTy); + Assert1(VSrc->getNumElements() == VDest->getNumElements(), + "PtrToInt Vector width mismatch", &I); + } + + visitInstruction(I); +} + +void Verifier::visitIntToPtrInst(IntToPtrInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + Assert1(SrcTy->getScalarType()->isIntegerTy(), + "IntToPtr source must be an integral", &I); + Assert1(DestTy->getScalarType()->isPointerTy(), + "IntToPtr result must be a pointer",&I); + Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(), + "IntToPtr type mismatch", &I); + if (SrcTy->isVectorTy()) { + VectorType *VSrc = dyn_cast<VectorType>(SrcTy); + VectorType *VDest = dyn_cast<VectorType>(DestTy); + Assert1(VSrc->getNumElements() == VDest->getNumElements(), + "IntToPtr Vector width mismatch", &I); + } + visitInstruction(I); +} + +void Verifier::visitBitCastInst(BitCastInst &I) { + // Get the source and destination types + Type *SrcTy = I.getOperand(0)->getType(); + Type *DestTy = I.getType(); + + // Get the size of the types in bits, we'll need this later + unsigned SrcBitSize = SrcTy->getPrimitiveSizeInBits(); + unsigned DestBitSize = DestTy->getPrimitiveSizeInBits(); + + // BitCast implies a no-op cast of type only. No bits change. + // However, you can't cast pointers to anything but pointers. + Assert1(SrcTy->isPointerTy() == DestTy->isPointerTy(), + "Bitcast requires both operands to be pointer or neither", &I); + Assert1(SrcBitSize == DestBitSize, "Bitcast requires types of same width",&I); + + // Disallow aggregates. + Assert1(!SrcTy->isAggregateType(), + "Bitcast operand must not be aggregate", &I); + Assert1(!DestTy->isAggregateType(), + "Bitcast type must not be aggregate", &I); + + visitInstruction(I); +} + +/// visitPHINode - Ensure that a PHI node is well formed. +/// +void Verifier::visitPHINode(PHINode &PN) { + // Ensure that the PHI nodes are all grouped together at the top of the block. + // This can be tested by checking whether the instruction before this is + // either nonexistent (because this is begin()) or is a PHI node. If not, + // then there is some other instruction before a PHI. + Assert2(&PN == &PN.getParent()->front() || + isa<PHINode>(--BasicBlock::iterator(&PN)), + "PHI nodes not grouped at top of basic block!", + &PN, PN.getParent()); + + // Check that all of the values of the PHI node have the same type as the + // result, and that the incoming blocks are really basic blocks. + for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { + Assert1(PN.getType() == PN.getIncomingValue(i)->getType(), + "PHI node operands are not the same type as the result!", &PN); + } + + // All other PHI node constraints are checked in the visitBasicBlock method. 
+ + visitInstruction(PN); +} + +void Verifier::VerifyCallSite(CallSite CS) { + Instruction *I = CS.getInstruction(); + + Assert1(CS.getCalledValue()->getType()->isPointerTy(), + "Called function must be a pointer!", I); + PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType()); + + Assert1(FPTy->getElementType()->isFunctionTy(), + "Called function is not pointer to function type!", I); + FunctionType *FTy = cast<FunctionType>(FPTy->getElementType()); + + // Verify that the correct number of arguments are being passed + if (FTy->isVarArg()) + Assert1(CS.arg_size() >= FTy->getNumParams(), + "Called function requires more parameters than were provided!",I); + else + Assert1(CS.arg_size() == FTy->getNumParams(), + "Incorrect number of arguments passed to called function!", I); + + // Verify that all arguments to the call match the function type. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) + Assert3(CS.getArgument(i)->getType() == FTy->getParamType(i), + "Call parameter type does not match function signature!", + CS.getArgument(i), FTy->getParamType(i), I); + + const AttributeSet &Attrs = CS.getAttributes(); + + Assert1(VerifyAttributeCount(Attrs, CS.arg_size()), + "Attribute after last parameter!", I); + + // Verify call attributes. + VerifyFunctionAttrs(FTy, Attrs, I); + + if (FTy->isVarArg()) + // Check attributes on the varargs part. + for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) { + VerifyParameterAttrs(Attrs, Idx, CS.getArgument(Idx-1)->getType(), + false, I); + + Assert1(!Attrs.hasAttribute(Idx, Attribute::StructRet), + "Attribute 'sret' cannot be used for vararg call arguments!", I); + } + + // Verify that there's no metadata unless it's a direct call to an intrinsic. + if (CS.getCalledFunction() == 0 || + !CS.getCalledFunction()->getName().startswith("llvm.")) { + for (FunctionType::param_iterator PI = FTy->param_begin(), + PE = FTy->param_end(); PI != PE; ++PI) + Assert1(!(*PI)->isMetadataTy(), + "Function has metadata parameter but isn't an intrinsic", I); + } + + visitInstruction(*I); +} + +void Verifier::visitCallInst(CallInst &CI) { + VerifyCallSite(&CI); + + if (Function *F = CI.getCalledFunction()) + if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) + visitIntrinsicFunctionCall(ID, CI); +} + +void Verifier::visitInvokeInst(InvokeInst &II) { + VerifyCallSite(&II); + + // Verify that there is a landingpad instruction as the first non-PHI + // instruction of the 'unwind' destination. + Assert1(II.getUnwindDest()->isLandingPad(), + "The unwind destination does not have a landingpad instruction!",&II); + + visitTerminatorInst(II); +} + +/// visitBinaryOperator - Check that both arguments to the binary operator are +/// of the same type! +/// +void Verifier::visitBinaryOperator(BinaryOperator &B) { + Assert1(B.getOperand(0)->getType() == B.getOperand(1)->getType(), + "Both operands to a binary operator are not of the same type!", &B); + + switch (B.getOpcode()) { + // Check that integer arithmetic operators are only used with + // integral operands. 
+ case Instruction::Add: + case Instruction::Sub: + case Instruction::Mul: + case Instruction::SDiv: + case Instruction::UDiv: + case Instruction::SRem: + case Instruction::URem: + Assert1(B.getType()->isIntOrIntVectorTy(), + "Integer arithmetic operators only work with integral types!", &B); + Assert1(B.getType() == B.getOperand(0)->getType(), + "Integer arithmetic operators must have same type " + "for operands and result!", &B); + break; + // Check that floating-point arithmetic operators are only used with + // floating-point operands. + case Instruction::FAdd: + case Instruction::FSub: + case Instruction::FMul: + case Instruction::FDiv: + case Instruction::FRem: + Assert1(B.getType()->isFPOrFPVectorTy(), + "Floating-point arithmetic operators only work with " + "floating-point types!", &B); + Assert1(B.getType() == B.getOperand(0)->getType(), + "Floating-point arithmetic operators must have same type " + "for operands and result!", &B); + break; + // Check that logical operators are only used with integral operands. + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: + Assert1(B.getType()->isIntOrIntVectorTy(), + "Logical operators only work with integral types!", &B); + Assert1(B.getType() == B.getOperand(0)->getType(), + "Logical operators must have same type for operands and result!", + &B); + break; + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + Assert1(B.getType()->isIntOrIntVectorTy(), + "Shifts only work with integral types!", &B); + Assert1(B.getType() == B.getOperand(0)->getType(), + "Shift return type must be same as operands!", &B); + break; + default: + llvm_unreachable("Unknown BinaryOperator opcode!"); + } + + visitInstruction(B); +} + +void Verifier::visitICmpInst(ICmpInst &IC) { + // Check that the operands are the same type + Type *Op0Ty = IC.getOperand(0)->getType(); + Type *Op1Ty = IC.getOperand(1)->getType(); + Assert1(Op0Ty == Op1Ty, + "Both operands to ICmp instruction are not of the same type!", &IC); + // Check that the operands are the right type + Assert1(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(), + "Invalid operand types for ICmp instruction", &IC); + // Check that the predicate is valid. + Assert1(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE && + IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE, + "Invalid predicate in ICmp instruction!", &IC); + + visitInstruction(IC); +} + +void Verifier::visitFCmpInst(FCmpInst &FC) { + // Check that the operands are the same type + Type *Op0Ty = FC.getOperand(0)->getType(); + Type *Op1Ty = FC.getOperand(1)->getType(); + Assert1(Op0Ty == Op1Ty, + "Both operands to FCmp instruction are not of the same type!", &FC); + // Check that the operands are the right type + Assert1(Op0Ty->isFPOrFPVectorTy(), + "Invalid operand types for FCmp instruction", &FC); + // Check that the predicate is valid. 
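  // For illustration: predicates produced by the usual builders, e.g.
  //   Builder.CreateFCmpOLT(X, Y)   // CmpInst::FCMP_OLT
  // always lie inside the [FIRST_FCMP_PREDICATE, LAST_FCMP_PREDICATE] range
  // checked below; the check matters mainly for hand-constructed instructions
  // and bitcode read from disk.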
+ Assert1(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE && + FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE, + "Invalid predicate in FCmp instruction!", &FC); + + visitInstruction(FC); +} + +void Verifier::visitExtractElementInst(ExtractElementInst &EI) { + Assert1(ExtractElementInst::isValidOperands(EI.getOperand(0), + EI.getOperand(1)), + "Invalid extractelement operands!", &EI); + visitInstruction(EI); +} + +void Verifier::visitInsertElementInst(InsertElementInst &IE) { + Assert1(InsertElementInst::isValidOperands(IE.getOperand(0), + IE.getOperand(1), + IE.getOperand(2)), + "Invalid insertelement operands!", &IE); + visitInstruction(IE); +} + +void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) { + Assert1(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1), + SV.getOperand(2)), + "Invalid shufflevector operands!", &SV); + visitInstruction(SV); +} + +void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) { + Type *TargetTy = GEP.getPointerOperandType()->getScalarType(); + + Assert1(isa<PointerType>(TargetTy), + "GEP base pointer is not a vector or a vector of pointers", &GEP); + Assert1(cast<PointerType>(TargetTy)->getElementType()->isSized(), + "GEP into unsized type!", &GEP); + Assert1(GEP.getPointerOperandType()->isVectorTy() == + GEP.getType()->isVectorTy(), "Vector GEP must return a vector value", + &GEP); + + SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end()); + Type *ElTy = + GetElementPtrInst::getIndexedType(GEP.getPointerOperandType(), Idxs); + Assert1(ElTy, "Invalid indices for GEP pointer type!", &GEP); + + Assert2(GEP.getType()->getScalarType()->isPointerTy() && + cast<PointerType>(GEP.getType()->getScalarType())->getElementType() + == ElTy, "GEP is not of right type for indices!", &GEP, ElTy); + + if (GEP.getPointerOperandType()->isVectorTy()) { + // Additional checks for vector GEPs. 
+    unsigned GepWidth = GEP.getPointerOperandType()->getVectorNumElements();
+    Assert1(GepWidth == GEP.getType()->getVectorNumElements(),
+            "Vector GEP result width doesn't match operand's", &GEP);
+    for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
+      Type *IndexTy = Idxs[i]->getType();
+      Assert1(IndexTy->isVectorTy(),
+              "Vector GEP must have vector indices!", &GEP);
+      unsigned IndexWidth = IndexTy->getVectorNumElements();
+      Assert1(IndexWidth == GepWidth, "Invalid GEP index vector width", &GEP);
+    }
+  }
+  visitInstruction(GEP);
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+void Verifier::visitLoadInst(LoadInst &LI) {
+  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
+  Assert1(PTy, "Load operand must be a pointer.", &LI);
+  Type *ElTy = PTy->getElementType();
+  Assert2(ElTy == LI.getType(),
+          "Load result type does not match pointer operand type!", &LI, ElTy);
+  if (LI.isAtomic()) {
+    Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+            "Load cannot have Release ordering", &LI);
+    Assert1(LI.getAlignment() != 0,
+            "Atomic load must specify explicit alignment", &LI);
+    if (!ElTy->isPointerTy()) {
+      Assert2(ElTy->isIntegerTy(),
+              "atomic load operand must have integer type!",
+              &LI, ElTy);
+      unsigned Size = ElTy->getPrimitiveSizeInBits();
+      Assert2(Size >= 8 && !(Size & (Size - 1)),
+              "atomic load operand must be power-of-two byte-sized integer",
+              &LI, ElTy);
+    }
+  } else {
+    Assert1(LI.getSynchScope() == CrossThread,
+            "Non-atomic load cannot have SynchronizationScope specified", &LI);
+  }
+
+  if (MDNode *Range = LI.getMetadata(LLVMContext::MD_range)) {
+    unsigned NumOperands = Range->getNumOperands();
+    Assert1(NumOperands % 2 == 0, "Unfinished range!", Range);
+    unsigned NumRanges = NumOperands / 2;
+    Assert1(NumRanges >= 1, "It should have at least one range!", Range);
+
+    ConstantRange LastRange(1); // Dummy initial value
+    for (unsigned i = 0; i < NumRanges; ++i) {
+      ConstantInt *Low = dyn_cast<ConstantInt>(Range->getOperand(2*i));
+      Assert1(Low, "The lower limit must be an integer!", Low);
+      ConstantInt *High = dyn_cast<ConstantInt>(Range->getOperand(2*i + 1));
+      Assert1(High, "The upper limit must be an integer!", High);
+      Assert1(High->getType() == Low->getType() &&
+              High->getType() == ElTy, "Range types must match load type!",
+              &LI);
+
+      APInt HighV = High->getValue();
+      APInt LowV = Low->getValue();
+      ConstantRange CurRange(LowV, HighV);
+      Assert1(!CurRange.isEmptySet() && !CurRange.isFullSet(),
+              "Range must not be empty!", Range);
+      if (i != 0) {
+        Assert1(CurRange.intersectWith(LastRange).isEmptySet(),
+                "Intervals are overlapping", Range);
+        Assert1(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
+                Range);
+        Assert1(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
+                Range);
+      }
+      LastRange = ConstantRange(LowV, HighV);
+    }
+    if (NumRanges > 2) {
+      APInt FirstLow =
+        dyn_cast<ConstantInt>(Range->getOperand(0))->getValue();
+      APInt FirstHigh =
+        dyn_cast<ConstantInt>(Range->getOperand(1))->getValue();
+      ConstantRange FirstRange(FirstLow, FirstHigh);
+      Assert1(FirstRange.intersectWith(LastRange).isEmptySet(),
+              "Intervals are overlapping", Range);
+      Assert1(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
+              Range);
+    }
+
+
+  }
+
+  visitInstruction(LI);
+}
+
+void Verifier::visitStoreInst(StoreInst &SI) {
+  PointerType *PTy =
dyn_cast<PointerType>(SI.getOperand(1)->getType()); + Assert1(PTy, "Store operand must be a pointer.", &SI); + Type *ElTy = PTy->getElementType(); + Assert2(ElTy == SI.getOperand(0)->getType(), + "Stored value type does not match pointer operand type!", + &SI, ElTy); + if (SI.isAtomic()) { + Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease, + "Store cannot have Acquire ordering", &SI); + Assert1(SI.getAlignment() != 0, + "Atomic store must specify explicit alignment", &SI); + if (!ElTy->isPointerTy()) { + Assert2(ElTy->isIntegerTy(), + "atomic store operand must have integer type!", + &SI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomic store operand must be power-of-two byte-sized integer", + &SI, ElTy); + } + } else { + Assert1(SI.getSynchScope() == CrossThread, + "Non-atomic store cannot have SynchronizationScope specified", &SI); + } + visitInstruction(SI); +} + +void Verifier::visitAllocaInst(AllocaInst &AI) { + PointerType *PTy = AI.getType(); + Assert1(PTy->getAddressSpace() == 0, + "Allocation instruction pointer not in the generic address space!", + &AI); + Assert1(PTy->getElementType()->isSized(), "Cannot allocate unsized type", + &AI); + Assert1(AI.getArraySize()->getType()->isIntegerTy(), + "Alloca array size must have integer type", &AI); + visitInstruction(AI); +} + +void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { + Assert1(CXI.getOrdering() != NotAtomic, + "cmpxchg instructions must be atomic.", &CXI); + Assert1(CXI.getOrdering() != Unordered, + "cmpxchg instructions cannot be unordered.", &CXI); + PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); + Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI); + Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "cmpxchg operand must have integer type!", + &CXI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "cmpxchg operand must be power-of-two byte-sized integer", + &CXI, ElTy); + Assert2(ElTy == CXI.getOperand(1)->getType(), + "Expected value type does not match pointer operand type!", + &CXI, ElTy); + Assert2(ElTy == CXI.getOperand(2)->getType(), + "Stored value type does not match pointer operand type!", + &CXI, ElTy); + visitInstruction(CXI); +} + +void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { + Assert1(RMWI.getOrdering() != NotAtomic, + "atomicrmw instructions must be atomic.", &RMWI); + Assert1(RMWI.getOrdering() != Unordered, + "atomicrmw instructions cannot be unordered.", &RMWI); + PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); + Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI); + Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "atomicrmw operand must have integer type!", + &RMWI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomicrmw operand must be power-of-two byte-sized integer", + &RMWI, ElTy); + Assert2(ElTy == RMWI.getOperand(1)->getType(), + "Argument value type does not match pointer operand type!", + &RMWI, ElTy); + Assert1(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() && + RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP, + "Invalid binary operation!", &RMWI); + visitInstruction(RMWI); +} + +void Verifier::visitFenceInst(FenceInst &FI) { + const AtomicOrdering Ordering = FI.getOrdering(); + Assert1(Ordering == Acquire || Ordering == Release || + Ordering == 
AcquireRelease || Ordering == SequentiallyConsistent,
+          "fence instructions may only have "
+          "acquire, release, acq_rel, or seq_cst ordering.", &FI);
+  visitInstruction(FI);
+}
+
+void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
+  Assert1(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
+                                           EVI.getIndices()) ==
+          EVI.getType(),
+          "Invalid ExtractValueInst operands!", &EVI);
+
+  visitInstruction(EVI);
+}
+
+void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
+  Assert1(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
+                                           IVI.getIndices()) ==
+          IVI.getOperand(1)->getType(),
+          "Invalid InsertValueInst operands!", &IVI);
+
+  visitInstruction(IVI);
+}
+
+void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
+  BasicBlock *BB = LPI.getParent();
+
+  // The landingpad instruction is ill-formed if it doesn't have any clauses and
+  // isn't a cleanup.
+  Assert1(LPI.getNumClauses() > 0 || LPI.isCleanup(),
+          "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
+
+  // The landingpad instruction defines its parent as a landing pad block. The
+  // landing pad block may be branched to only by the unwind edge of an invoke.
+  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+    const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator());
+    Assert1(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
+            "Block containing LandingPadInst must be jumped to "
+            "only by the unwind edge of an invoke.", &LPI);
+  }
+
+  // The landingpad instruction must be the first non-PHI instruction in the
+  // block.
+  Assert1(LPI.getParent()->getLandingPadInst() == &LPI,
+          "LandingPadInst not the first non-PHI instruction in the block.",
+          &LPI);
+
+  // The personality functions for all landingpad instructions within the same
+  // function should match.
+  if (PersonalityFn)
+    Assert1(LPI.getPersonalityFn() == PersonalityFn,
+            "Personality function doesn't match others in function", &LPI);
+  PersonalityFn = LPI.getPersonalityFn();
+
+  // All operands must be constants.
+  Assert1(isa<Constant>(PersonalityFn), "Personality function is not constant!",
+          &LPI);
+  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
+    Value *Clause = LPI.getClause(i);
+    Assert1(isa<Constant>(Clause), "Clause is not constant!", &LPI);
+    if (LPI.isCatch(i)) {
+      Assert1(isa<PointerType>(Clause->getType()),
+              "Catch operand does not have pointer type!", &LPI);
+    } else {
+      Assert1(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
+      Assert1(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
+              "Filter operand is not an array of constants!", &LPI);
+    }
+  }
+
+  visitInstruction(LPI);
+}
+
+void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
+  Instruction *Op = cast<Instruction>(I.getOperand(i));
+  // If we have an invalid invoke, don't try to compute the dominance.
+  // We already reject it in the invoke specific checks and the dominance
+  // computation doesn't handle multiple edges.
+  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
+    if (II->getNormalDest() == II->getUnwindDest())
+      return;
+  }
+
+  const Use &U = I.getOperandUse(i);
+  Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U),
+          "Instruction does not dominate all uses!", Op, &I);
+}
+
+/// visitInstruction - Verify that an instruction is well formed.
+/// +void Verifier::visitInstruction(Instruction &I) { + BasicBlock *BB = I.getParent(); + Assert1(BB, "Instruction not embedded in basic block!", &I); + + if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential + for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); + UI != UE; ++UI) + Assert1(*UI != (User*)&I || !DT->isReachableFromEntry(BB), + "Only PHI nodes may reference their own value!", &I); + } + + // Check that void typed values don't have names + Assert1(!I.getType()->isVoidTy() || !I.hasName(), + "Instruction has a name, but provides a void value!", &I); + + // Check that the return value of the instruction is either void or a legal + // value type. + Assert1(I.getType()->isVoidTy() || + I.getType()->isFirstClassType(), + "Instruction returns a non-scalar type!", &I); + + // Check that the instruction doesn't produce metadata. Calls are already + // checked against the callee type. + Assert1(!I.getType()->isMetadataTy() || + isa<CallInst>(I) || isa<InvokeInst>(I), + "Invalid use of metadata!", &I); + + // Check that all uses of the instruction, if they are instructions + // themselves, actually have parent basic blocks. If the use is not an + // instruction, it is an error! + for (User::use_iterator UI = I.use_begin(), UE = I.use_end(); + UI != UE; ++UI) { + if (Instruction *Used = dyn_cast<Instruction>(*UI)) + Assert2(Used->getParent() != 0, "Instruction referencing instruction not" + " embedded in a basic block!", &I, Used); + else { + CheckFailed("Use of instruction is not an instruction!", *UI); + return; + } + } + + for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { + Assert1(I.getOperand(i) != 0, "Instruction has null operand!", &I); + + // Check to make sure that only first-class-values are operands to + // instructions. + if (!I.getOperand(i)->getType()->isFirstClassType()) { + Assert1(0, "Instruction operands must be first-class values!", &I); + } + + if (Function *F = dyn_cast<Function>(I.getOperand(i))) { + // Check to make sure that the "address of" an intrinsic function is never + // taken. + Assert1(!F->isIntrinsic() || i == (isa<CallInst>(I) ? 
e-1 : 0), + "Cannot take the address of an intrinsic!", &I); + Assert1(!F->isIntrinsic() || isa<CallInst>(I) || + F->getIntrinsicID() == Intrinsic::donothing, + "Cannot invoke an intrinsinc other than donothing", &I); + Assert1(F->getParent() == Mod, "Referencing function in another module!", + &I); + } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { + Assert1(OpBB->getParent() == BB->getParent(), + "Referring to a basic block in another function!", &I); + } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) { + Assert1(OpArg->getParent() == BB->getParent(), + "Referring to an argument in another function!", &I); + } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { + Assert1(GV->getParent() == Mod, "Referencing global in another module!", + &I); + } else if (isa<Instruction>(I.getOperand(i))) { + verifyDominatesUse(I, i); + } else if (isa<InlineAsm>(I.getOperand(i))) { + Assert1((i + 1 == e && isa<CallInst>(I)) || + (i + 3 == e && isa<InvokeInst>(I)), + "Cannot take the address of an inline asm!", &I); + } + } + + if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) { + Assert1(I.getType()->isFPOrFPVectorTy(), + "fpmath requires a floating point result!", &I); + Assert1(MD->getNumOperands() == 1, "fpmath takes one operand!", &I); + Value *Op0 = MD->getOperand(0); + if (ConstantFP *CFP0 = dyn_cast_or_null<ConstantFP>(Op0)) { + APFloat Accuracy = CFP0->getValueAPF(); + Assert1(Accuracy.isNormal() && !Accuracy.isNegative(), + "fpmath accuracy not a positive number!", &I); + } else { + Assert1(false, "invalid fpmath accuracy!", &I); + } + } + + MDNode *MD = I.getMetadata(LLVMContext::MD_range); + Assert1(!MD || isa<LoadInst>(I), "Ranges are only for loads!", &I); + + InstsInThisBlock.insert(&I); +} + +/// VerifyIntrinsicType - Verify that the specified type (which comes from an +/// intrinsic argument or return value) matches the type constraints specified +/// by the .td file (e.g. an "any integer" argument really is an integer). +/// +/// This return true on error but does not print a message. +bool Verifier::VerifyIntrinsicType(Type *Ty, + ArrayRef<Intrinsic::IITDescriptor> &Infos, + SmallVectorImpl<Type*> &ArgTys) { + using namespace Intrinsic; + + // If we ran out of descriptors, there are too many arguments. 
+ if (Infos.empty()) return true; + IITDescriptor D = Infos.front(); + Infos = Infos.slice(1); + + switch (D.Kind) { + case IITDescriptor::Void: return !Ty->isVoidTy(); + case IITDescriptor::MMX: return !Ty->isX86_MMXTy(); + case IITDescriptor::Metadata: return !Ty->isMetadataTy(); + case IITDescriptor::Half: return !Ty->isHalfTy(); + case IITDescriptor::Float: return !Ty->isFloatTy(); + case IITDescriptor::Double: return !Ty->isDoubleTy(); + case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width); + case IITDescriptor::Vector: { + VectorType *VT = dyn_cast<VectorType>(Ty); + return VT == 0 || VT->getNumElements() != D.Vector_Width || + VerifyIntrinsicType(VT->getElementType(), Infos, ArgTys); + } + case IITDescriptor::Pointer: { + PointerType *PT = dyn_cast<PointerType>(Ty); + return PT == 0 || PT->getAddressSpace() != D.Pointer_AddressSpace || + VerifyIntrinsicType(PT->getElementType(), Infos, ArgTys); + } + + case IITDescriptor::Struct: { + StructType *ST = dyn_cast<StructType>(Ty); + if (ST == 0 || ST->getNumElements() != D.Struct_NumElements) + return true; + + for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i) + if (VerifyIntrinsicType(ST->getElementType(i), Infos, ArgTys)) + return true; + return false; + } + + case IITDescriptor::Argument: + // Two cases here - If this is the second occurrence of an argument, verify + // that the later instance matches the previous instance. + if (D.getArgumentNumber() < ArgTys.size()) + return Ty != ArgTys[D.getArgumentNumber()]; + + // Otherwise, if this is the first instance of an argument, record it and + // verify the "Any" kind. + assert(D.getArgumentNumber() == ArgTys.size() && "Table consistency error"); + ArgTys.push_back(Ty); + + switch (D.getArgumentKind()) { + case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy(); + case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy(); + case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty); + case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty); + } + llvm_unreachable("all argument kinds not covered"); + + case IITDescriptor::ExtendVecArgument: + // This may only be used when referring to a previous vector argument. + return D.getArgumentNumber() >= ArgTys.size() || + !isa<VectorType>(ArgTys[D.getArgumentNumber()]) || + VectorType::getExtendedElementVectorType( + cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty; + + case IITDescriptor::TruncVecArgument: + // This may only be used when referring to a previous vector argument. + return D.getArgumentNumber() >= ArgTys.size() || + !isa<VectorType>(ArgTys[D.getArgumentNumber()]) || + VectorType::getTruncatedElementVectorType( + cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty; + } + llvm_unreachable("unhandled"); +} + +/// visitIntrinsicFunction - Allow intrinsics to be verified in different ways. +/// +void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) { + Function *IF = CI.getCalledFunction(); + Assert1(IF->isDeclaration(), "Intrinsic functions should never be defined!", + IF); + + // Verify that the intrinsic prototype lines up with what the .td files + // describe. 
+ FunctionType *IFTy = IF->getFunctionType(); + Assert1(!IFTy->isVarArg(), "Intrinsic prototypes are not varargs", IF); + + SmallVector<Intrinsic::IITDescriptor, 8> Table; + getIntrinsicInfoTableEntries(ID, Table); + ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; + + SmallVector<Type *, 4> ArgTys; + Assert1(!VerifyIntrinsicType(IFTy->getReturnType(), TableRef, ArgTys), + "Intrinsic has incorrect return type!", IF); + for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i) + Assert1(!VerifyIntrinsicType(IFTy->getParamType(i), TableRef, ArgTys), + "Intrinsic has incorrect argument type!", IF); + Assert1(TableRef.empty(), "Intrinsic has too few arguments!", IF); + + // Now that we have the intrinsic ID and the actual argument types (and we + // know they are legal for the intrinsic!) get the intrinsic name through the + // usual means. This allows us to verify the mangling of argument types into + // the name. + Assert1(Intrinsic::getName(ID, ArgTys) == IF->getName(), + "Intrinsic name not mangled correctly for type arguments!", IF); + + // If the intrinsic takes MDNode arguments, verify that they are either global + // or are local to *this* function. + for (unsigned i = 0, e = CI.getNumArgOperands(); i != e; ++i) + if (MDNode *MD = dyn_cast<MDNode>(CI.getArgOperand(i))) + visitMDNode(*MD, CI.getParent()->getParent()); + + switch (ID) { + default: + break; + case Intrinsic::ctlz: // llvm.ctlz + case Intrinsic::cttz: // llvm.cttz + Assert1(isa<ConstantInt>(CI.getArgOperand(1)), + "is_zero_undef argument of bit counting intrinsics must be a " + "constant int", &CI); + break; + case Intrinsic::dbg_declare: { // llvm.dbg.declare + Assert1(CI.getArgOperand(0) && isa<MDNode>(CI.getArgOperand(0)), + "invalid llvm.dbg.declare intrinsic call 1", &CI); + MDNode *MD = cast<MDNode>(CI.getArgOperand(0)); + Assert1(MD->getNumOperands() == 1, + "invalid llvm.dbg.declare intrinsic call 2", &CI); + } break; + case Intrinsic::memcpy: + case Intrinsic::memmove: + case Intrinsic::memset: + Assert1(isa<ConstantInt>(CI.getArgOperand(3)), + "alignment argument of memory intrinsics must be a constant int", + &CI); + Assert1(isa<ConstantInt>(CI.getArgOperand(4)), + "isvolatile argument of memory intrinsics must be a constant int", + &CI); + break; + case Intrinsic::gcroot: + case Intrinsic::gcwrite: + case Intrinsic::gcread: + if (ID == Intrinsic::gcroot) { + AllocaInst *AI = + dyn_cast<AllocaInst>(CI.getArgOperand(0)->stripPointerCasts()); + Assert1(AI, "llvm.gcroot parameter #1 must be an alloca.", &CI); + Assert1(isa<Constant>(CI.getArgOperand(1)), + "llvm.gcroot parameter #2 must be a constant.", &CI); + if (!AI->getType()->getElementType()->isPointerTy()) { + Assert1(!isa<ConstantPointerNull>(CI.getArgOperand(1)), + "llvm.gcroot parameter #1 must either be a pointer alloca, " + "or argument #2 must be a non-null constant.", &CI); + } + } + + Assert1(CI.getParent()->getParent()->hasGC(), + "Enclosing function does not use GC.", &CI); + break; + case Intrinsic::init_trampoline: + Assert1(isa<Function>(CI.getArgOperand(1)->stripPointerCasts()), + "llvm.init_trampoline parameter #2 must resolve to a function.", + &CI); + break; + case Intrinsic::prefetch: + Assert1(isa<ConstantInt>(CI.getArgOperand(1)) && + isa<ConstantInt>(CI.getArgOperand(2)) && + cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue() < 2 && + cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue() < 4, + "invalid arguments to llvm.prefetch", + &CI); + break; + case Intrinsic::stackprotector: + 
Assert1(isa<AllocaInst>(CI.getArgOperand(1)->stripPointerCasts()),
+            "llvm.stackprotector parameter #2 must resolve to an alloca.",
+            &CI);
+    break;
+  case Intrinsic::lifetime_start:
+  case Intrinsic::lifetime_end:
+  case Intrinsic::invariant_start:
+    Assert1(isa<ConstantInt>(CI.getArgOperand(0)),
+            "size argument of memory use markers must be a constant integer",
+            &CI);
+    break;
+  case Intrinsic::invariant_end:
+    Assert1(isa<ConstantInt>(CI.getArgOperand(1)),
+            "llvm.invariant.end parameter #2 must be a constant integer", &CI);
+    break;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Implement the public interfaces to this file...
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createVerifierPass(VerifierFailureAction action) {
+  return new Verifier(action);
+}
+
+
+/// verifyFunction - Check a function for errors, printing messages on stderr.
+/// Return true if the function is corrupt.
+///
+bool llvm::verifyFunction(const Function &f, VerifierFailureAction action) {
+  Function &F = const_cast<Function&>(f);
+  assert(!F.isDeclaration() && "Cannot verify external functions");
+
+  FunctionPassManager FPM(F.getParent());
+  Verifier *V = new Verifier(action);
+  FPM.add(V);
+  FPM.run(F);
+  return V->Broken;
+}
+
+/// verifyModule - Check a module for errors, printing messages on stderr.
+/// Return true if the module is corrupt.
+///
+bool llvm::verifyModule(const Module &M, VerifierFailureAction action,
+                        std::string *ErrorInfo) {
+  PassManager PM;
+  Verifier *V = new Verifier(action);
+  PM.add(V);
+  PM.run(const_cast<Module&>(M));
+
+  if (ErrorInfo && V->Broken)
+    *ErrorInfo = V->MessagesStr.str();
+  return V->Broken;
+}
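For readers who want to drive these checks from their own tooling, here is a minimal usage sketch of the public entry points declared above. It is not part of the commit itself; it assumes the header location of this era (llvm/Analysis/Verifier.h) and the ReturnStatusAction member of VerifierFailureAction, both of which may differ in other LLVM revisions.

#include "llvm/Analysis/Verifier.h"    // verifyModule/verifyFunction (assumed path for this era)
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: returns true when M is well formed. Note that
// verifyModule() itself returns true when the module is *broken*, so the
// result is inverted here; ReturnStatusAction keeps the verifier from
// aborting the process and instead collects the diagnostics in Err.
static bool isModuleWellFormed(const llvm::Module &M) {
  std::string Err;
  if (llvm::verifyModule(M, llvm::ReturnStatusAction, &Err)) {
    llvm::errs() << "verifier: " << Err;
    return false;
  }
  return true;
}

A pass pipeline that wants the same checks interleaved with other passes would instead add the pass created by createVerifierPass to its PassManager, which is essentially what verifyFunction and verifyModule do internally.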