-rw-r--r--  include/llvm/ExecutionEngine/NaClJITMemoryManager.h  240
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.h                           2
-rw-r--r--  lib/ExecutionEngine/JIT/JITEmitter.cpp                390
-rw-r--r--  lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp      431
-rw-r--r--  lib/Target/X86/X86NaClJITInfo.cpp                     393
-rw-r--r--  lib/Target/X86/X86NaClJITInfo.h                        75
-rw-r--r--  lib/Target/X86/X86TargetMachine.h                       4
7 files changed, 9 insertions(+), 1526 deletions(-)
diff --git a/include/llvm/ExecutionEngine/NaClJITMemoryManager.h b/include/llvm/ExecutionEngine/NaClJITMemoryManager.h
deleted file mode 100644
index 535d64b133..0000000000
--- a/include/llvm/ExecutionEngine/NaClJITMemoryManager.h
+++ /dev/null
@@ -1,240 +0,0 @@
-//=-- NaClJITMemoryManager.h - Interface JIT uses to Allocate Mem -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_EXECUTION_ENGINE_NACL_JIT_MEMMANAGER_H
-#define LLVM_EXECUTION_ENGINE_NACL_JIT_MEMMANAGER_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Support/Allocator.h"
-
-namespace llvm {
-
-class Function;
-class GlobalValue;
-
-struct SimpleSlab {
- uint8_t *address;
- size_t size;
- uint8_t *next_free;
-};
-
-struct FreeListNode {
- uint8_t *address;
- uintptr_t size;
- FreeListNode *Prev;
- FreeListNode *Next;
- FreeListNode *RemoveFromFreeList() {
- assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
- Next->Prev = Prev;
- return Prev->Next = Next;
- }
- void AddToFreeList(FreeListNode *FreeList) {
- Next = FreeList;
- Prev = FreeList->Prev;
- Prev->Next = this;
- Next->Prev = this;
- }
-};
-
-class NaClJITMemoryManager : public JITMemoryManager {
- // NaCl disallows writing into any code region, and disallows executing any
- // data region. Thus we can never get any RWX memory, and the strategy used
- // by the other allocators, of colocating allocation metadata with the
- // allocated code, won't work.
- // Currently with NaCl we have one single pool of usable space between the
- // text and rodata segments, defined by the linker. To support stub
- // allocation in the middle of a function, we allocate stubs in slabs
- // interspersed with the functions.
-
- static const size_t kStubSlabSize = 16 * 1024;
- static const size_t kDataSlabSize = 16 * 1024;
- static const size_t kCodeSlabSize = 64 * 1024;
-
- typedef DenseMap<uint8_t *, size_t> AllocationTable;
-
- uint8_t *AllocatableRegionStart;
- uint8_t *AllocatableRegionLimit;
- uint8_t *NextCode;
- SimpleSlab CurrentStubSlab;
-
- // Allocation metadata must be kept separate from code, so the free list is
- // allocated with new rather than being a header in the code blocks
- FreeListNode *CodeFreeListHead;
- FreeListNode *CurrentCodeBlock;
- // Mapping from pointer to allocated function, to size of allocation
- AllocationTable AllocatedFunctions;
-
- // Since exception tables are allocated like functions (i.e. we don't know
- // ahead of time how large they are), we use the same allocation method for
- // simplicity even though it's not strictly necessary to separate the
- // allocation metadata from the allocated data.
- FreeListNode *DataFreeListHead;
- FreeListNode *CurrentDataBlock;
- AllocationTable AllocatedTables;
- BumpPtrAllocator DataAllocator;
-
- uint8_t *GOTBase; // Target Specific reserved memory
-
- FreeListNode *allocateCodeSlab(size_t MinSize);
- FreeListNode *allocateDataSlab(size_t MinSize);
- SimpleSlab allocateStubSlab(size_t MinSize);
-
- // Functions for allocations using one of the free lists
- void InitFreeList(FreeListNode **Head);
- void DestroyFreeList(FreeListNode *Head);
- FreeListNode *FreeListAllocate(uintptr_t &ActualSize, FreeListNode *Head,
- FreeListNode * (NaClJITMemoryManager::*allocate)(size_t));
- void FreeListFinishAllocation(FreeListNode *Block, FreeListNode *Head,
- uint8_t *AllocationStart, uint8_t *AllocationEnd, AllocationTable &table);
- void FreeListDeallocate(FreeListNode *Head, AllocationTable &Table,
- void *Body);
- public:
- // TODO(dschuff): how to find the real value? is it a flag?
- static const int kBundleSize = 32;
- static const intptr_t kJumpMask = -32;
- NaClJITMemoryManager();
- virtual ~NaClJITMemoryManager();
- static inline bool classof(const JITMemoryManager*) { return true; }
-
- /// setMemoryWritable - No-op on NaCl - code is never writable
- virtual void setMemoryWritable() {}
-
- /// setMemoryExecutable - No-op on NaCl - data is never executable
- virtual void setMemoryExecutable() {}
-
- /// setPoisonMemory - No-op on NaCl - nothing unvalidated is ever executable
- virtual void setPoisonMemory(bool poison) {}
-
- /// getPointerToNamedFunction - This method returns the address of the
- /// specified function. As such it is only useful for resolving library
- /// symbols, not code generated symbols.
- ///
- /// If AbortOnFailure is false and no function with the given name is
- /// found, this function silently returns a null pointer. Otherwise,
- /// it prints a message to stderr and aborts.
- ///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true) ;
-
- //===--------------------------------------------------------------------===//
- // Global Offset Table Management
- //===--------------------------------------------------------------------===//
-
- /// AllocateGOT - If the current table requires a Global Offset Table, this
- /// method is invoked to allocate it. This method is required to set HasGOT
- /// to true.
- virtual void AllocateGOT();
-
- /// getGOTBase - If this is managing a Global Offset Table, this method should
- /// return a pointer to its base.
- virtual uint8_t *getGOTBase() const {
- return GOTBase;
- }
-
- //===--------------------------------------------------------------------===//
- // Main Allocation Functions
- //===--------------------------------------------------------------------===//
-
- /// startFunctionBody - When we start JITing a function, the JIT calls this
- /// method to allocate a block of free RWX memory, which returns a pointer to
- /// it. If the JIT wants to request a block of memory of at least a certain
- /// size, it passes that value as ActualSize, and this method returns a block
- /// with at least that much space. If the JIT doesn't know ahead of time how
- /// much space it will need to emit the function, it passes 0 for the
- /// ActualSize. In either case, this method is required to pass back the size
- /// of the allocated block through ActualSize. The JIT will be careful to
- /// not write more than the returned ActualSize bytes of memory.
- virtual uint8_t *startFunctionBody(const Function *F,
- uintptr_t &ActualSize);
-
- /// allocateStub - This method is called by the JIT to allocate space for a
- /// function stub (used to handle limited branch displacements) while it is
- /// JIT compiling a function. For example, if foo calls bar, and if bar
- /// either needs to be lazily compiled or is a native function that exists too
- /// far away from the call site to work, this method will be used to make a
- /// thunk for it. The stub should be "close" to the current function body,
- /// but should not be included in the 'actualsize' returned by
- /// startFunctionBody.
- virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
- unsigned Alignment);
-
- /// endFunctionBody - This method is called when the JIT is done codegen'ing
- /// the specified function. At this point we know the size of the JIT
- /// compiled function. This passes in FunctionStart (which was returned by
- /// the startFunctionBody method) and FunctionEnd which is a pointer to the
- /// actual end of the function. This method should mark the space allocated
- /// and remember where it is in case the client wants to deallocate it.
- virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
- uint8_t *FunctionEnd);
-
- /// allocateCodeSection - Allocate a memory block of (at least) the given
- /// size suitable for executable code. The SectionID is a unique identifier
- /// assigned by the JIT and passed through to the memory manager for
- /// the instance class to use if it needs to communicate to the JIT about
- /// a given section after the fact.
- virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID);
-
- /// allocateDataSection - Allocate a memory block of (at least) the given
- /// size suitable for data. The SectionID is a unique identifier
- /// assigned by the JIT and passed through to the memory manager for
- /// the instance class to use if it needs to communicate to the JIT about
- /// a given section after the fact.
- virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, bool IsReadOnly);
-
- /// Ignored.
- virtual bool applyPermissions(std::string *ErrMsg = 0) { return false; }
-
- /// allocateSpace - Allocate a memory block of the given size. This method
- /// cannot be called between calls to startFunctionBody and endFunctionBody.
- virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment);
-
- /// allocateGlobal - Allocate memory for a global.
- virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment);
-
- /// deallocateFunctionBody - Free the specified function body. The argument
- /// must be the return value from a call to startFunctionBody() that hasn't
- /// been deallocated yet. This is never called when the JIT is currently
- /// emitting a function.
- virtual void deallocateFunctionBody(void *Body);
-
- /// startExceptionTable - When we finished JITing the function, if exception
- /// handling is set, we emit the exception table.
- virtual uint8_t* startExceptionTable(const Function* F,
- uintptr_t &ActualSize);
-
- /// endExceptionTable - This method is called when the JIT is done emitting
- /// the exception table.
- virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
- uint8_t *TableEnd, uint8_t* FrameRegister);
-
- /// deallocateExceptionTable - Free the specified exception table's memory.
- /// The argument must be the return value from a call to startExceptionTable()
- /// that hasn't been deallocated yet. This is never called when the JIT is
- /// currently emitting an exception table.
- virtual void deallocateExceptionTable(void *ET);
-
- virtual size_t GetDefaultCodeSlabSize() {
- return kCodeSlabSize;
- }
- virtual size_t GetDefaultDataSlabSize() {
- return kDataSlabSize;
- }
- virtual size_t GetDefaultStubSlabSize() {
- return kStubSlabSize;
- }
-
-};
-
-}
-
-#endif // LLVM_EXECUTION_ENGINE_NACL_JIT_MEMMANAGER_H
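For reference, a minimal standalone sketch (not from the tree; the slab size, main(), and printf output are illustrative only) of the sentinel-based circular free list the header above describes: the head node carries no storage, so allocation code can always splice blocks in and out without special cases.

#include <cassert>
#include <cstdint>
#include <cstdio>

struct FreeListNode {
  uint8_t *address;
  uintptr_t size;
  FreeListNode *Prev;
  FreeListNode *Next;
  FreeListNode *RemoveFromFreeList() {
    assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
    Next->Prev = Prev;
    return Prev->Next = Next;
  }
  void AddToFreeList(FreeListNode *FreeList) {
    Next = FreeList;
    Prev = FreeList->Prev;
    Prev->Next = this;
    Next->Prev = this;
  }
};

int main() {
  // Sentinel head, as InitFreeList sets it up: size 0, linked to itself.
  FreeListNode Head;
  Head.Next = Head.Prev = &Head;
  Head.size = 0;

  // Pretend this node tracks a freed 64 KiB code slab.
  FreeListNode Block;
  Block.address = 0;
  Block.size = 64 * 1024;
  Block.AddToFreeList(&Head);

  // First-fit scan over the list, roughly what FreeListAllocate would do.
  for (FreeListNode *N = Head.Next; N != &Head; N = N->Next) {
    if (N->size >= 4096) {
      N->RemoveFromFreeList();
      std::printf("found a block of %lu bytes\n", (unsigned long)N->size);
      break;
    }
  }
  return 0;
}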
diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h
index 338db8f454..2ae155bebf 100644
--- a/lib/ExecutionEngine/JIT/JIT.h
+++ b/lib/ExecutionEngine/JIT/JIT.h
@@ -210,8 +210,6 @@ public:
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
- // Native client needs its own memory manager, so custom ones are unsupported
- static JITCodeEmitter *createNaClEmitter(JIT &J, TargetMachine &tm);
void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
void updateFunctionStub(Function *F);
void jitTheFunction(Function *F, const MutexGuard &locked);
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 1c5abf751d..ecafda7286 100644
--- a/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -30,7 +30,6 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/ExecutionEngine/NaClJITMemoryManager.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
@@ -53,15 +52,12 @@
#ifndef NDEBUG
#include <iomanip>
#endif
-#ifdef __native_client__
-#include <nacl/nacl_dyncode.h>
-#endif
using namespace llvm;
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");
STATISTIC(NumRetries, "Number of retries with more memory");
-STATISTIC(NumNopBytes, "Number of bytes of NOPs emitted");
+
// A declaration may stop being a declaration once it's fully read from bitcode.
// This function returns true if F is fully read and is still a declaration.
@@ -285,6 +281,8 @@ namespace {
/// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
/// used to output functions to memory for execution.
class JITEmitter : public JITCodeEmitter {
+ JITMemoryManager *MemMgr;
+
// When outputting a function stub in the context of some other function, we
// save BufferBegin/BufferEnd/CurBufferPtr here.
uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
@@ -294,13 +292,11 @@ namespace {
// ask the memory manager for at least this much space. When we
// successfully emit the function, we reset this back to zero.
uintptr_t SizeEstimate;
-protected: //TODO:(dschuff): fix/move this once we do validation and are sure
- // which functions/data we need in NaClJITEmitter. also add LOCALMOD
- JITMemoryManager *MemMgr;
+
/// Relocations - These are the relocations that the function needs, as
/// emitted.
std::vector<MachineRelocation> Relocations;
-private:
+
/// MBBLocations - This vector is a mapping from MBB ID's to their address.
/// It is filled in by the StartMachineBasicBlock callback and queried by
/// the getMachineBasicBlockAddress callback.
@@ -384,7 +380,7 @@ private:
DE.reset(new JITDwarfEmitter(jit));
}
}
- virtual ~JITEmitter() { // @LOCALMOD
+ ~JITEmitter() {
delete MemMgr;
}
@@ -397,10 +393,10 @@ private:
void initJumpTableInfo(MachineJumpTableInfo *MJTI);
void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
- virtual void startGVStub(const GlobalValue* GV,
+ void startGVStub(const GlobalValue* GV,
unsigned StubSize, unsigned Alignment = 1);
- virtual void startGVStub(void *Buffer, unsigned StubSize);
- virtual void finishGVStub();
+ void startGVStub(void *Buffer, unsigned StubSize);
+ void finishGVStub();
virtual void *allocIndirectGV(const GlobalValue *GV,
const uint8_t *Buffer, size_t Size,
unsigned Alignment);
@@ -472,360 +468,6 @@ private:
bool MayNeedFarStub);
void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
};
-
- // @LOCALMOD-START
- class NaClJITEmitter : public JITEmitter {
- /* There are two NaCl-specific requirements that must be dealt with: the
- * first is that the data and code spaces are strictly separated, and code
- * must be copied (by the service runtime/validator) to its destination
- * after emission and relocation have finished.
- * The second is bundle alignment: neither instructions nor multi-
- * instruction pseudoinstruction groups may cross bundle boundaries.
- *
- * Requirement 1 is dealt with jointly by NaClJITMemoryManager
- * and NaClJITEmitter. NaClJITMemoryManager separates metadata from
- * code and returns pointers in the proper space
- * for code (startFunctionBody, allocateStub) and data (allocateSpace,
- * startExceptionTable, etc). NaClJITEmitter emits code into a separate
- * memory buffer (EmissionBuffer). After startFunction allocates the
- * function's memory, NaClJITEmitter's startFunction points BufferBegin,
- * CurBufferPtr and BufferEnd at the EmissionBuffer (this avoids having to
- * override all of the actual emission methods from JITCodeEmitter)
- * JITEmitter already uses this trick for emitting a stub in the middle
- * of emitting a function so it doesn't seem so terrible to do our own
- * similar swapping of the pointers.
- *
- * Requirement 2 is bundle alignment.
- * X86CodeEmitter makes several calls into JITCodeEmitter per instruction,
- * to add the various bytes, constants, etc. To implement bundle alignment,
- * we add methods to start and end a bundle-locked group
- * (the group can include just one instruction or several).
- * The X86CodeEmitter will pass-through any such markers created by the
- * rewriting passes (which surround multiple-instruction groups),
- * and will also generate them surrounding each individual instruction
- * (there should never be more than two-deep nesting).
- * When beginBundleLock is called, the CurBufferPtr is marked. When
- * endBundleLock is called, it checks that the group does not cross a
- * bundle boundary; if it does, it inserts nop padding as necessary.
- * If padding is added, the relocations must also be fixed up; this also
- * happens in endBundleLock.
- *
- */
- public:
- NaClJITEmitter(JIT &jit, TargetMachine &TM) :
- JITEmitter(jit, new NaClJITMemoryManager(), TM),
- BundleLockSavedCurBufferPtr(NULL),
- BundleNestCount(0),
- AlignNextGroup(kNone),
- GroupRelocationCount(0),
- JITInfo(&jit.getJITInfo()),
- kBundleSize(jit.getJITInfo().getBundleSize()),
- kJumpMask(jit.getJITInfo().getJumpMask()) {
- uintptr_t CodeSlabSize = MemMgr->GetDefaultCodeSlabSize();
- EmissionBuffer = MemMgr->allocateSpace(CodeSlabSize, kBundleSize);
- EmissionBufferSize = CodeSlabSize;
- DEBUG(dbgs() << "EmissionBuffer " << EmissionBuffer << " size "
- << EmissionBufferSize << "\n");
- StubEmissionBuffer = MemMgr->allocateSpace(kBundleSize, kBundleSize);
- StubEmissionBufferSize = kBundleSize;
- DEBUG(dbgs() << "StubEmissionBuffer " << StubEmissionBuffer << " size "
- << StubEmissionBufferSize << "\n");
- JITInfo = &jit.getJITInfo();
- }
-
- virtual ~NaClJITEmitter() {
- }
-
- static inline bool classof(const JITEmitter*) { return true; }
-
- virtual void startFunction(MachineFunction &F) {
- JITEmitter::startFunction(F);
- // Make sure the emission buffer is at least as big as the allocated
- // function
- if (BufferEnd - BufferBegin > (intptr_t)EmissionBufferSize) {
- EmissionBufferSize = std::max((uintptr_t)(BufferEnd - BufferBegin),
- 2 * EmissionBufferSize);
- // BumpPtrAllocator doesn't do anything when you call Deallocate; the
- // memory will be freed on destruction.
- EmissionBuffer = MemMgr->allocateSpace(EmissionBufferSize,
- kBundleSize);
- DEBUG(dbgs() << "new EmissionBuffer " << EmissionBuffer << " size "
- << EmissionBufferSize << "\n");
- }
- // We ensure that the emission buffer is bundle-aligned, and constant
- // pool emission should not go into code space
- assert((CurBufferPtr == BufferBegin ||
- (int)F.getFunction()->getAlignment() > kBundleSize) &&
- "Pre-function data should not be emitted into code space");
- if (CurBufferPtr > BufferBegin) {
- // If CurBufferPtr has been bumped forward for alignment, we need to
- // pad the space with nops
- memcpy(EmissionBuffer,
- JITInfo->getNopSequence(CurBufferPtr - BufferBegin),
- CurBufferPtr - BufferBegin);
- NumNopBytes += CurBufferPtr - BufferBegin;
- }
- FunctionDestination = BufferBegin;
- setBufferPtrs(EmissionBuffer);
- }
-
- virtual bool finishFunction(MachineFunction &F) {
- uint8_t *end = CurBufferPtr;
- emitAlignment(kBundleSize);
- memcpy(end, JITInfo->getNopSequence(CurBufferPtr - end),
- CurBufferPtr - end);
- NumNopBytes += CurBufferPtr - end;
- JITInfo->setRelocationBuffer(BufferBegin);
- assert(BufferBegin == EmissionBuffer);
- int FunctionSize = CurBufferPtr - BufferBegin;
- setBufferPtrs(FunctionDestination);
- bool result = JITEmitter::finishFunction(F);
- // If we ran out of memory, don't bother validating, we'll just retry
- if (result) return result;
-
- DEBUG({
- dbgs() << "Validating " << FunctionDestination << "-" <<
- FunctionDestination + FunctionSize << "\n";
- if (sys::hasDisassembler()) {
- dbgs() << "Disassembled code:\n";
- dbgs() << sys::disassembleBuffer(EmissionBuffer,
- FunctionSize,
- (uintptr_t)FunctionDestination);
- } else {
- dbgs() << "Binary code:\n";
- uint8_t* q = BufferBegin;
- for (int i = 0; q < CurBufferPtr; q += 4, ++i) {
- if (i == 4)
- i = 0;
- if (i == 0)
- dbgs() << "JIT: " << (long)(q - BufferBegin) << ": ";
- bool Done = false;
- for (int j = 3; j >= 0; --j) {
- if (q + j >= CurBufferPtr)
- Done = true;
- else
- dbgs() << (unsigned short)q[j];
- }
- if (Done)
- break;
- dbgs() << ' ';
- if (i == 3)
- dbgs() << '\n';
- }
- dbgs()<< '\n';
- }
- });
-#ifdef __native_client__
- if(nacl_dyncode_create(FunctionDestination, EmissionBuffer,
- FunctionSize) != 0) {
- report_fatal_error("NaCl validation failed");
- }
-#endif
- return result;
- }
-
- virtual void startGVStub(const GlobalValue* GV,
- unsigned StubSize, unsigned Alignment = 1) {
- JITEmitter::startGVStub(GV, StubSize, Alignment);
- ReusedStub = false;
- assert(StubSize <= StubEmissionBufferSize);
- StubDestination = BufferBegin;
- setBufferPtrs(StubEmissionBuffer);
- }
- virtual void startGVStub(void *Buffer, unsigned StubSize) {
- JITEmitter::startGVStub(Buffer, StubSize);
- ReusedStub = true;
- assert(StubSize <= StubEmissionBufferSize);
- StubDestination = BufferBegin;
- setBufferPtrs(StubEmissionBuffer);
- }
- virtual void finishGVStub() {
- assert(CurBufferPtr - BufferBegin == kBundleSize);
-
- DEBUG(dbgs() << "Validating "<< BufferBegin<<"-"<<StubDestination<<"\n");
- int ValidationResult = 0;
-#ifdef __native_client__
- if (!ReusedStub) {
- ValidationResult = nacl_dyncode_create(StubDestination, BufferBegin,
- CurBufferPtr - BufferBegin);
- } else {
- // This is not a thread-safe modification because it updates the whole
- // stub rather than just a jump target. However it is only used by
- // eager compilation to replace a stub which is not in use yet
- // (it jumps to 0).
- ValidationResult = nacl_dyncode_modify(StubDestination, BufferBegin,
- CurBufferPtr - BufferBegin);
- }
-#endif
- if (ValidationResult) {
- dbgs() << "NaCl stub validation failed:\n";
- if (sys::hasDisassembler()) {
- dbgs() << "Disassembled code:\n";
- dbgs() << sys::disassembleBuffer(BufferBegin,
- CurBufferPtr-BufferBegin,
- (uintptr_t)StubDestination);
- }
- report_fatal_error("Stub validation failed");
- }
- setBufferPtrs(StubDestination);
- JITEmitter::finishGVStub();
- }
-
- /// allocateSpace - Allocates *data* space, rather than space in the
- // current code block.
- virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
- return MemMgr->allocateSpace(Size, Alignment);
- }
-
- virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
- uint8_t *end = CurBufferPtr;
- emitAlignment(MBB->getAlignment());
- memcpy(end, JITInfo->getNopSequence(CurBufferPtr - end),
- CurBufferPtr - end);
- NumNopBytes += CurBufferPtr - end;
- JITEmitter::StartMachineBasicBlock(MBB);
- }
-
- /// beginBundleLock - Save the current location of CurBufferPtr so we can
- // tell if the block crosses a bundle boundary
- virtual void beginBundleLock() {
- assert(BundleNestCount <= 2 && "Bundle-locked groups can't be nested");
- if (++BundleNestCount == 2) return;
- DEBUG(dbgs() << "begin lock, buffer begin:end:cur "<<BufferBegin<<" "<<
- BufferEnd<< " "<<CurBufferPtr << "\n");
- BundleLockSavedCurBufferPtr = CurBufferPtr;
- GroupRelocationCount = 0;
- }
-
- /// endBundleLock - Check if the group crosses a bundle boundary. If so
- // (or if the group must be aligned to the end of a bundle), move the
- // group and add appropriate padding
- virtual void endBundleLock() {
- assert(BundleNestCount > 0 && "mismatched bundle-lock start/end");
- if (--BundleNestCount > 0) return;
- DEBUG(dbgs() <<"end lock, buffer begin:end:cur:savd "<<BufferBegin<<" "<<
- BufferEnd<< " "<<CurBufferPtr <<" "<<
- BundleLockSavedCurBufferPtr<<"\n");
-
- int GroupLen = CurBufferPtr - BundleLockSavedCurBufferPtr;
- if (BufferEnd - CurBufferPtr <
- GroupLen + kBundleSize) {
- // Added padding can be no more than kBundleSize. Retry if there's any
- // possibility of overflow
- CurBufferPtr = BufferEnd;
- AlignNextGroup = kNone;
- return;
- }
- // Space left in the current bundle
- int SpaceLeft = (((intptr_t)BundleLockSavedCurBufferPtr + kBundleSize)
- & kJumpMask) - (intptr_t)BundleLockSavedCurBufferPtr;
- int TotalPadding = 0;
- if (SpaceLeft < GroupLen || AlignNextGroup == kBegin) {
- DEBUG(dbgs() << "space " << SpaceLeft <<" len "<<GroupLen<<"\n");
- memmove(BundleLockSavedCurBufferPtr + SpaceLeft,
- BundleLockSavedCurBufferPtr, GroupLen);
- memcpy(BundleLockSavedCurBufferPtr, JITInfo->getNopSequence(SpaceLeft),
- SpaceLeft);
- NumNopBytes += SpaceLeft;
- assert(CurBufferPtr == BundleLockSavedCurBufferPtr + GroupLen);
- CurBufferPtr += SpaceLeft;
- BundleLockSavedCurBufferPtr += SpaceLeft;
- TotalPadding = SpaceLeft;
- SpaceLeft = kBundleSize;
- }
-
- if (AlignNextGroup == kEnd) {
- DEBUG(dbgs() << "alignend, space len "<<SpaceLeft<<" "<<GroupLen<<"\n");
- int MoveDistance = SpaceLeft - GroupLen;
- memmove(BundleLockSavedCurBufferPtr + MoveDistance,
- BundleLockSavedCurBufferPtr, GroupLen);
- memcpy(BundleLockSavedCurBufferPtr,
- JITInfo->getNopSequence(MoveDistance), MoveDistance);
- NumNopBytes += MoveDistance;
- CurBufferPtr += MoveDistance;
- TotalPadding += MoveDistance;
- }
-
- AlignNextGroup = kNone;
-
- assert(CurBufferPtr <= BufferEnd && "Bundled group caused buf overflow");
- if (TotalPadding && GroupRelocationCount) {
- assert(Relocations.size() >= GroupRelocationCount &&
- "Too many relocations recorded for this group");
- for(std::vector<MachineRelocation>::reverse_iterator I =
- Relocations.rbegin(); GroupRelocationCount > 0;
- ++I, GroupRelocationCount--) {
- int NewOffset = I->getMachineCodeOffset()
- + TotalPadding;
- I->setMachineCodeOffset(NewOffset);
- }
- }
- }
-
- virtual void alignToBundleBeginning() {
- // mark that the next locked group must be aligned to bundle start
- // (e.g. an indirect branch target)
- assert(AlignNextGroup == kNone && "Conflicting group alignments");
- AlignNextGroup = kBegin;
- }
-
- virtual void alignToBundleEnd() {
- // mark that the next locked group must be aligned to bundle end (e.g. a
- // call)
- assert(AlignNextGroup == kNone && "Conflicting group alignments");
- AlignNextGroup = kEnd;
- }
-
- virtual uintptr_t getCurrentPCValue() const {
- // return destination PC value rather than generating location
- if (BufferBegin == EmissionBuffer) {
- return (uintptr_t)(FunctionDestination + (CurBufferPtr - BufferBegin));
- } else if (BufferBegin == StubEmissionBuffer) {
- return (uintptr_t)(StubDestination + (CurBufferPtr - BufferBegin));
- } else {
- return (uintptr_t)CurBufferPtr;
- }
- }
-
- // addRelocation gets called in the middle of emitting an instruction, and
- // creates the relocation based on the instruction's current position in
- // the emission buffer; however, it could get moved if it crosses the bundle
- // boundary, so we intercept relocation creation and adjust newly-created
- // relocations as necessary.
- virtual void addRelocation(const MachineRelocation &MR) {
- GroupRelocationCount++;
- JITEmitter::addRelocation(MR);
- }
-
- private:
- typedef enum _GroupAlign { kNone, kBegin, kEnd } GroupAlign;
- // FunctionDestination points to the final destination for the function
- // (i.e. where it will be copied after validation)
- uint8_t *FunctionDestination;
- uint8_t *BundleLockSavedCurBufferPtr;
- int BundleNestCount; // should not exceed 2
- GroupAlign AlignNextGroup;
- unsigned GroupRelocationCount;
- uint8_t *EmissionBuffer;
- uintptr_t EmissionBufferSize;
-
- bool ReusedStub;
- uint8_t *StubDestination;
- uint8_t *StubEmissionBuffer;
- uintptr_t StubEmissionBufferSize;
-
- TargetJITInfo *JITInfo;
- const int kBundleSize;
- const int32_t kJumpMask;
-
- // Set the buffer pointers (begin, cur, end) so they point into the buffer
- // at dest, preserving their relative positions
- void setBufferPtrs(uint8_t* dest) {
- BufferEnd = dest + (BufferEnd - BufferBegin);
- CurBufferPtr = dest + (CurBufferPtr - BufferBegin);
- BufferBegin = dest;
- }
-};
}
void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
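A minimal sketch of the bundle-padding arithmetic that endBundleLock above relies on, assuming the 32-byte bundles and kJumpMask == -32 used elsewhere in this patch; the start address and group length are made-up values for illustration.

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kBundleSize = 32;
  const intptr_t kJumpMask = -kBundleSize;  // two's complement: clears the low 5 bits

  uintptr_t GroupStart = 0x20018;  // 24 bytes into a 32-byte bundle
  int GroupLen = 14;               // a 14-byte bundle-locked group

  // Bytes left before the next bundle boundary, computed as in endBundleLock.
  int SpaceLeft = (((intptr_t)GroupStart + kBundleSize) & kJumpMask) -
                  (intptr_t)GroupStart;

  if (SpaceLeft < GroupLen) {
    // The group would straddle a boundary: emit SpaceLeft bytes of nops so the
    // group begins at the next bundle (the removed code does this with memmove
    // plus getNopSequence, then shifts any recorded relocations by the padding).
    std::printf("pad %d nop bytes; group moves to %#lx\n", SpaceLeft,
                (unsigned long)(GroupStart + SpaceLeft));
  } else {
    std::printf("group fits in the current bundle; no padding needed\n");
  }
  return 0;
}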
@@ -1292,12 +934,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
// Mark code region readable and executable if it's not so already.
MemMgr->setMemoryExecutable();
- // @LOCALMOD-START
-#ifndef __native_client__
- // In NaCl, we haven't yet validated and copied the function code to its
- // destination, so there is nothing to disassemble. Furthermore, we can't
- // touch the destination because it may not even be mapped yet.
- // @LOCALMOD-END
DEBUG({
if (sys::hasDisassembler()) {
dbgs() << "JIT: Disassembled code:\n";
@@ -1327,7 +963,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
dbgs()<< '\n';
}
});
-#endif // @LOCALMOD
if (JITExceptionHandling) {
uintptr_t ActualSize = 0;
@@ -1612,14 +1247,7 @@ void JITEmitter::EmittedFunctionConfig::onRAUW(
JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM,
TargetMachine &tm) {
-// @LOCALMOD-START
-#ifndef __native_client__
return new JITEmitter(jit, JMM, tm);
-#else
- assert(!JMM && "NaCl does not support custom memory managers");
- return new NaClJITEmitter(jit, tm);
-#endif
-// @LOCALMOD-END
}
// getPointerToFunctionOrStub - If the specified function has been
diff --git a/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp b/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp
deleted file mode 100644
index d1b5ee704a..0000000000
--- a/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp
+++ /dev/null
@@ -1,431 +0,0 @@
-//===-- NaClJITMemoryManager.cpp - Memory Allocator for JIT'd code --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the NaClJITMemoryManager class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "jit"
-#include "llvm/ExecutionEngine/NaClJITMemoryManager.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Config/config.h"
-#include <vector>
-
-#if defined(__linux__) || defined(__native_client__)
-#if defined(HAVE_SYS_STAT_H)
-#include <sys/stat.h>
-#endif
-#include <fcntl.h>
-#include <unistd.h>
-#endif
-
-using namespace llvm;
-
-#ifdef __native_client__
-// etext is guarded by ifdef so the code still compiles on non-ELF platforms
-extern char etext;
-#endif
-
-// The way NaCl linking is currently set up, there is a gap between the text
-// segment and the rodata segment where we can fill dyncode. The text ends
-// at etext, but there's no symbol for the start of rodata; currently the
-// linker script puts it at 0x11000000.
-// If we run out of space there, we can also allocate below the text segment
-// and keep going downward until we run into code loaded by the dynamic
-// linker. (TODO(dschuff): make that work)
-// For now, just start at etext and go until we hit rodata.
-
-// It's an open issue that lazy JITing is not thread-safe (PR5184). However,
-// NaCl's dyncode_create solves exactly this problem, so in the future
-// this allocator could (should?) be made thread-safe.
-
-const size_t NaClJITMemoryManager::kStubSlabSize;
-const size_t NaClJITMemoryManager::kDataSlabSize;
-const size_t NaClJITMemoryManager::kCodeSlabSize;
-
-// TODO(dschuff): fix allocation start (etext + 64M is hopefully after where
-// glibc is loaded) and limit (maybe need a linker-provided symbol for the
-// start of the IRT or end of the segment gap)
-// (also fix allocateCodeSlab and maybe allocateStubSlab at that time)
-// what we really need is a usable nacl_dyncode_alloc(), but this could still
-// be improved upon using dl_iterate_phdr
-const static intptr_t kNaClSegmentGapEnd = 0x11000000;
-
-NaClJITMemoryManager::NaClJITMemoryManager() :
- AllocatableRegionLimit((uint8_t *)kNaClSegmentGapEnd),
- NextCode(AllocatableRegionStart), GOTBase(NULL) {
-#ifdef __native_client__
- AllocatableRegionStart = (uint8_t *)&etext + 1024*1024*64;
-#else
- assert(false && "NaClJITMemoryManager will not work outside NaCl sandbox");
-#endif
- AllocatableRegionStart =
- (uint8_t *)RoundUpToAlignment((uint64_t)AllocatableRegionStart,
- kBundleSize);
- NextCode = AllocatableRegionStart;
-
- // Allocate 1 stub slab to get us started
- CurrentStubSlab = allocateStubSlab(0);
- InitFreeList(&CodeFreeListHead);
- InitFreeList(&DataFreeListHead);
-
- DEBUG(dbgs() << "NaClJITMemoryManager: AllocatableRegionStart " <<
- AllocatableRegionStart << " Limit " << AllocatableRegionLimit << "\n");
-}
-
-NaClJITMemoryManager::~NaClJITMemoryManager() {
- delete [] GOTBase;
- DestroyFreeList(CodeFreeListHead);
- DestroyFreeList(DataFreeListHead);
-}
-
-FreeListNode *NaClJITMemoryManager::allocateCodeSlab(size_t MinSize) {
- FreeListNode *node = new FreeListNode();
- if (AllocatableRegionLimit - NextCode < (int)kCodeSlabSize) {
- // TODO(dschuff): might be possible to try the space below text segment?
- report_fatal_error("Ran out of code space");
- }
- node->address = NextCode;
- node->size = std::max(kCodeSlabSize, MinSize);
- NextCode += node->size;
- DEBUG(dbgs() << "allocated code slab " << NextCode - node->size << "-" <<
- NextCode << "\n");
- return node;
-}
-
-SimpleSlab NaClJITMemoryManager::allocateStubSlab(size_t MinSize) {
- SimpleSlab s;
- DEBUG(dbgs() << "allocateStubSlab: ");
- // It's a little weird to just allocate and throw away the FreeListNode, but
- // since code region allocation is still a bit ugly and magical, I decided
- // it's better to reuse allocateCodeSlab than duplicate the logic.
- FreeListNode *n = allocateCodeSlab(MinSize);
- s.address = n->address;
- s.size = n->size;
- s.next_free = n->address;
- delete n;
- return s;
-}
-
-FreeListNode *NaClJITMemoryManager::allocateDataSlab(size_t MinSize) {
- FreeListNode *node = new FreeListNode;
- size_t size = std::max(kDataSlabSize, MinSize);
- node->address = (uint8_t*)DataAllocator.Allocate(size, kBundleSize);
- node->size = size;
- return node;
-}
-
-void NaClJITMemoryManager::InitFreeList(FreeListNode **Head) {
- // Make sure there is always at least one entry in the free list
- *Head = new FreeListNode;
- (*Head)->Next = (*Head)->Prev = *Head;
- (*Head)->size = 0;
-}
-
-void NaClJITMemoryManager::DestroyFreeList(FreeListNode *Head) {
- FreeListNode *n = Head->Next;
- while(n != Head) {
- FreeListNode *next = n->Next;
- de