aboutsummaryrefslogtreecommitdiff
path: root/lib/ExecutionEngine
diff options
context:
space:
mode:
Diffstat (limited to 'lib/ExecutionEngine')
-rw-r--r--lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp4
-rw-r--r--lib/ExecutionEngine/JIT/JIT.h2
-rw-r--r--lib/ExecutionEngine/JIT/JITEmitter.cpp390
-rw-r--r--lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp430
4 files changed, 816 insertions, 10 deletions
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 7a206ebf73..d99b666345 100644
--- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -322,7 +322,9 @@ GenericValue lle_X_abort(FunctionType *FT,
const std::vector<GenericValue> &Args) {
//FIXME: should we report or raise here?
//report_fatal_error("Interpreted program raised SIGABRT");
- raise (SIGABRT);
+ //TODO(dschuff) fixme or figure out how to get raise()
+ abort(); // @LOCALMOD
+ //raise (SIGABRT);
return GenericValue();
}
diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h
index 2ae155bebf..338db8f454 100644
--- a/lib/ExecutionEngine/JIT/JIT.h
+++ b/lib/ExecutionEngine/JIT/JIT.h
@@ -210,6 +210,8 @@ public:
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
+ // Native Client needs its own memory manager, so custom ones are unsupported
+ static JITCodeEmitter *createNaClEmitter(JIT &J, TargetMachine &tm);
void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
void updateFunctionStub(Function *F);
void jitTheFunction(Function *F, const MutexGuard &locked);
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 3bf6db8ee8..b19fc6fa97 100644
--- a/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -30,6 +30,7 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/NaClJITMemoryManager.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
@@ -52,12 +53,15 @@
#ifndef NDEBUG
#include <iomanip>
#endif
+#ifdef __native_client__
+#include <nacl/nacl_dyncode.h>
+#endif
using namespace llvm;
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");
STATISTIC(NumRetries, "Number of retries with more memory");
-
+STATISTIC(NumNopBytes, "Number of bytes of NOPs emitted");
// A declaration may stop being a declaration once it's fully read from bitcode.
// This function returns true if F is fully read and is still a declaration.
@@ -281,8 +285,6 @@ namespace {
/// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
/// used to output functions to memory for execution.
class JITEmitter : public JITCodeEmitter {
- JITMemoryManager *MemMgr;
-
// When outputting a function stub in the context of some other function, we
// save BufferBegin/BufferEnd/CurBufferPtr here.
uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
@@ -292,11 +294,13 @@ namespace {
// ask the memory manager for at least this much space. When we
// successfully emit the function, we reset this back to zero.
uintptr_t SizeEstimate;
-
+protected: //TODO:(dschuff): fix/move this once we do validation and are sure
+ // which functions/data we need in NaClJITEmitter. also add LOCALMOD
+ JITMemoryManager *MemMgr;
/// Relocations - These are the relocations that the function needs, as
/// emitted.
std::vector<MachineRelocation> Relocations;
-
+private:
/// MBBLocations - This vector is a mapping from MBB ID's to their address.
/// It is filled in by the StartMachineBasicBlock callback and queried by
/// the getMachineBasicBlockAddress callback.
@@ -380,7 +384,7 @@ namespace {
DE.reset(new JITDwarfEmitter(jit));
}
}
- ~JITEmitter() {
+ virtual ~JITEmitter() { // @LOCALMOD
delete MemMgr;
}
@@ -398,10 +402,10 @@ namespace {
void initJumpTableInfo(MachineJumpTableInfo *MJTI);
void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
- void startGVStub(const GlobalValue* GV,
+ virtual void startGVStub(const GlobalValue* GV,
unsigned StubSize, unsigned Alignment = 1);
- void startGVStub(void *Buffer, unsigned StubSize);
- void finishGVStub();
+ virtual void startGVStub(void *Buffer, unsigned StubSize);
+ virtual void finishGVStub();
virtual void *allocIndirectGV(const GlobalValue *GV,
const uint8_t *Buffer, size_t Size,
unsigned Alignment);
@@ -473,6 +477,360 @@ namespace {
bool MayNeedFarStub);
void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
};
+
+ // @LOCALMOD-START
+ class NaClJITEmitter : public JITEmitter {
+ /* There are two NaCl-specific requirements that must be dealt with: the
+ * first is that the data and code spaces are strictly separated, and code
+ * must be copied (by the service runtime/validator) to its destination
+ * after emission and relocation have finished.
+ * The second is bundle alignment: neither instructions nor multi-
+ * instruction pseudoinstruction groups may cross bundle boundaries.
+ *
+ * Requirement 1 is dealt with jointly by NaClJITMemoryManager and
+ * NaClJITEmitter. NaClJITMemoryManager separates metadata from
+ * code and returns pointers in the proper space
+ * for code (startFunctionBody, allocateStub) and data (allocateSpace,
+ * startExceptionTable, etc). NaClJITEmitter emits code into a separate
+ * memory buffer (EmissionBuffer). After startFunction allocates the
+ * function's memory, NaClJITEmitter's startFunction points BufferBegin,
+ * CurBufferPtr and BufferEnd at the EmissionBuffer (this avoids having to
+ * override all of the actual emission methods from JITCodeEmitter)
+ * JITEmitter already uses this trick for emitting a stub in the middle
+ * of emitting a function so it doesn't seem so terrible to do our own
+ * similar swapping of the pointers.
+ *
+ * Requirement 2 is bundle alignment.
+ * X86CodeEmitter makes several calls into JITCodeEmitter per instruction,
+ * to add the various bytes, constants, etc. To implement bundle alignment,
+ * we add methods to start and end a bundle-locked group
+ * (the group can include just one instruction or several).
+ * The X86CodeEmitter will pass-through any such markers created by the
+ * rewriting passes (which surround multiple-instruction groups),
+ * and will also generate them surrounding each individual instruction
+ * (there should never be more than two-deep nesting).
+ * When beginBundleLock is called, the CurBufferPtr is marked. When
+ * endBundleLock is called, it checks that the group does not cross a
+ * bundle boundary; if it does, it inserts nop padding as necessary.
+ * If padding is added, the relocations must also be fixed up; this also
+ * happens in endBundleLock.
+ *
+ */
+ public:
+ NaClJITEmitter(JIT &jit, TargetMachine &TM) :
+ JITEmitter(jit, new NaClJITMemoryManager(), TM),
+ BundleLockSavedCurBufferPtr(NULL),
+ BundleNestCount(0),
+ AlignNextGroup(kNone),
+ GroupRelocationCount(0),
+ JITInfo(&jit.getJITInfo()),
+ kBundleSize(jit.getJITInfo().getBundleSize()),
+ kJumpMask(jit.getJITInfo().getJumpMask()) {
+ uintptr_t CodeSlabSize = MemMgr->GetDefaultCodeSlabSize();
+ EmissionBuffer = MemMgr->allocateSpace(CodeSlabSize, kBundleSize);
+ EmissionBufferSize = CodeSlabSize;
+ DEBUG(dbgs() << "EmissionBuffer " << EmissionBuffer << " size "
+ << EmissionBufferSize << "\n");
+ StubEmissionBuffer = MemMgr->allocateSpace(kBundleSize, kBundleSize);
+ StubEmissionBufferSize = kBundleSize;
+ DEBUG(dbgs() << "StubEmissionBuffer " << StubEmissionBuffer << " size "
+ << StubEmissionBufferSize << "\n");
+ JITInfo = &jit.getJITInfo();
+ }
+
+ virtual ~NaClJITEmitter() {
+ }
+
+ static inline bool classof(const JITEmitter*) { return true; }
+
+ virtual void startFunction(MachineFunction &F) {
+ JITEmitter::startFunction(F);
+ // Make sure the emission buffer is at least as big as the allocated
+ // function
+ if (BufferEnd - BufferBegin > (intptr_t)EmissionBufferSize) {
+ EmissionBufferSize = std::max((uintptr_t)(BufferEnd - BufferBegin),
+ 2 * EmissionBufferSize);
+ // BumpPtrAllocator doesn't do anything when you call Deallocate. it
+ // will be freed on destruction
+ EmissionBuffer = MemMgr->allocateSpace(EmissionBufferSize,
+ kBundleSize);
+ DEBUG(dbgs() << "new EmissionBuffer " << EmissionBuffer << " size "
+ << EmissionBufferSize << "\n");
+ }
+ // We ensure that the emission buffer is bundle-aligned, and constant
+ // pool emission should not go into code space
+ assert((CurBufferPtr == BufferBegin ||
+ (int)F.getFunction()->getAlignment() > kBundleSize) &&
+ "Pre-function data should not be emitted into code space");
+ if (CurBufferPtr > BufferBegin) {
+ // If CurBufferPtr has been bumped forward for alignment, we need to
+ // pad the space with nops
+ memcpy(EmissionBuffer,
+ JITInfo->getNopSequence(CurBufferPtr - BufferBegin),
+ CurBufferPtr - BufferBegin);
+ NumNopBytes += CurBufferPtr - BufferBegin;
+ }
+ FunctionDestination = BufferBegin;
+ setBufferPtrs(EmissionBuffer);
+ }
+
+ virtual bool finishFunction(MachineFunction &F) {
+ uint8_t *end = CurBufferPtr;
+ emitAlignment(kBundleSize);
+ memcpy(end, JITInfo->getNopSequence(CurBufferPtr - end),
+ CurBufferPtr - end);
+ NumNopBytes += CurBufferPtr - end;
+ JITInfo->setRelocationBuffer(BufferBegin);
+ assert(BufferBegin == EmissionBuffer);
+ int FunctionSize = CurBufferPtr - BufferBegin;
+ setBufferPtrs(FunctionDestination);
+ bool result = JITEmitter::finishFunction(F);
+ // If we ran out of memory, don't bother validating, we'll just retry
+ if (result) return result;
+
+ DEBUG({
+ dbgs() << "Validating " << FunctionDestination << "-" <<
+ FunctionDestination + FunctionSize << "\n";
+ if (sys::hasDisassembler()) {
+ dbgs() << "Disassembled code:\n";
+ dbgs() << sys::disassembleBuffer(EmissionBuffer,
+ FunctionSize,
+ (uintptr_t)FunctionDestination);
+ } else {
+ dbgs() << "Binary code:\n";
+ uint8_t* q = BufferBegin;
+ for (int i = 0; q < CurBufferPtr; q += 4, ++i) {
+ if (i == 4)
+ i = 0;
+ if (i == 0)
+ dbgs() << "JIT: " << (long)(q - BufferBegin) << ": ";
+ bool Done = false;
+ for (int j = 3; j >= 0; --j) {
+ if (q + j >= CurBufferPtr)
+ Done = true;
+ else
+ dbgs() << (unsigned short)q[j];
+ }
+ if (Done)
+ break;
+ dbgs() << ' ';
+ if (i == 3)
+ dbgs() << '\n';
+ }
+ dbgs()<< '\n';
+ }
+ });
+#ifdef __native_client__
+ if(nacl_dyncode_create(FunctionDestination, EmissionBuffer,
+ FunctionSize) != 0) {
+ report_fatal_error("NaCl validation failed");
+ }
+#endif
+ return result;
+ }
+
+ virtual void startGVStub(const GlobalValue* GV,
+ unsigned StubSize, unsigned Alignment = 1) {
+ JITEmitter::startGVStub(GV, StubSize, Alignment);
+ ReusedStub = false;
+ assert(StubSize <= StubEmissionBufferSize);
+ StubDestination = BufferBegin;
+ setBufferPtrs(StubEmissionBuffer);
+ }
+ virtual void startGVStub(void *Buffer, unsigned StubSize) {
+ JITEmitter::startGVStub(Buffer, StubSize);
+ ReusedStub = true;
+ assert(StubSize <= StubEmissionBufferSize);
+ StubDestination = BufferBegin;
+ setBufferPtrs(StubEmissionBuffer);
+ }
+ virtual void finishGVStub() {
+ assert(CurBufferPtr - BufferBegin == kBundleSize);
+
+ DEBUG(dbgs() << "Validating "<< BufferBegin<<"-"<<StubDestination<<"\n");
+ int ValidationResult = 0; // stays 0 (success) when built outside the sandbox
+#ifdef __native_client__
+ if (!ReusedStub) {
+ ValidationResult = nacl_dyncode_create(StubDestination, BufferBegin,
+ CurBufferPtr - BufferBegin);
+ } else {
+ // This is not a thread-safe modification because it updates the whole
+ // stub rather than just a jump target. However it is only used by
+ // eager compilation to replace a stub which is not in use yet
+ // (it jumps to 0).
+ ValidationResult = nacl_dyncode_modify(StubDestination, BufferBegin,
+ CurBufferPtr - BufferBegin);
+ }
+#endif
+ if (ValidationResult) {
+ dbgs() << "NaCl stub validation failed:\n";
+ if (sys::hasDisassembler()) {
+ dbgs() << "Disassembled code:\n";
+ dbgs() << sys::disassembleBuffer(BufferBegin,
+ CurBufferPtr-BufferBegin,
+ (uintptr_t)StubDestination);
+ }
+ report_fatal_error("Stub validation failed");
+ }
+ setBufferPtrs(StubDestination);
+ JITEmitter::finishGVStub();
+ }
+
+ /// allocateSpace - Allocates *data* space, rather than space in the
+ // current code block.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
+ return MemMgr->allocateSpace(Size, Alignment);
+ }
+
+ virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
+ uint8_t *end = CurBufferPtr;
+ emitAlignment(MBB->getAlignment());
+ memcpy(end, JITInfo->getNopSequence(CurBufferPtr - end),
+ CurBufferPtr - end);
+ NumNopBytes += CurBufferPtr - end;
+ JITEmitter::StartMachineBasicBlock(MBB);
+ }
+
+ /// beginBundleLock - Save the current location of CurBufferPtr so we can
+ // tell if the block crosses a bundle boundary
+ virtual void beginBundleLock() {
+ assert(BundleNestCount <= 2 && "Bundle-locked groups can't be nested");
+ if (++BundleNestCount == 2) return;
+ DEBUG(dbgs() << "begin lock, buffer begin:end:cur "<<BufferBegin<<" "<<
+ BufferEnd<< " "<<CurBufferPtr << "\n");
+ BundleLockSavedCurBufferPtr = CurBufferPtr;
+ GroupRelocationCount = 0;
+ }
+
+ /// endBundleLock - Check if the group crosses a bundle boundary. If so
+ // (or if the group must be aligned to the end of a bundle), move the
+ // group and add appropriate padding
+ virtual void endBundleLock() {
+ assert(BundleNestCount > 0 && "mismatched bundle-lock start/end");
+ if (--BundleNestCount > 0) return;
+ DEBUG(dbgs() <<"end lock, buffer begin:end:cur:savd "<<BufferBegin<<" "<<
+ BufferEnd<< " "<<CurBufferPtr <<" "<<
+ BundleLockSavedCurBufferPtr<<"\n");
+
+ int GroupLen = CurBufferPtr - BundleLockSavedCurBufferPtr;
+ if (BufferEnd - CurBufferPtr <
+ GroupLen + kBundleSize) {
+ // Added padding can be no more than kBundleSize. Retry if there's any
+ // possibility of overflow
+ CurBufferPtr = BufferEnd;
+ AlignNextGroup = kNone;
+ return;
+ }
+ // Space left in the current bundle
+ int SpaceLeft = (((intptr_t)BundleLockSavedCurBufferPtr + kBundleSize)
+ & kJumpMask) - (intptr_t)BundleLockSavedCurBufferPtr;
+ int TotalPadding = 0;
+ if (SpaceLeft < GroupLen || AlignNextGroup == kBegin) {
+ DEBUG(dbgs() << "space " << SpaceLeft <<" len "<<GroupLen<<"\n");
+ memmove(BundleLockSavedCurBufferPtr + SpaceLeft,
+ BundleLockSavedCurBufferPtr, GroupLen);
+ memcpy(BundleLockSavedCurBufferPtr, JITInfo->getNopSequence(SpaceLeft),
+ SpaceLeft);
+ NumNopBytes += SpaceLeft;
+ assert(CurBufferPtr == BundleLockSavedCurBufferPtr + GroupLen);
+ CurBufferPtr += SpaceLeft;
+ BundleLockSavedCurBufferPtr += SpaceLeft;
+ TotalPadding = SpaceLeft;
+ SpaceLeft = kBundleSize;
+ }
+
+ if (AlignNextGroup == kEnd) {
+ DEBUG(dbgs() << "alignend, space len "<<SpaceLeft<<" "<<GroupLen<<"\n");
+ int MoveDistance = SpaceLeft - GroupLen;
+ memmove(BundleLockSavedCurBufferPtr + MoveDistance,
+ BundleLockSavedCurBufferPtr, GroupLen);
+ memcpy(BundleLockSavedCurBufferPtr,
+ JITInfo->getNopSequence(MoveDistance), MoveDistance);
+ NumNopBytes += MoveDistance;
+ CurBufferPtr += MoveDistance;
+ TotalPadding += MoveDistance;
+ }
+
+ AlignNextGroup = kNone;
+
+ assert(CurBufferPtr <= BufferEnd && "Bundled group caused buf overflow");
+ if (TotalPadding && GroupRelocationCount) {
+ assert(Relocations.size() >= GroupRelocationCount &&
+ "Too many relocations recorded for this group");
+ for(std::vector<MachineRelocation>::reverse_iterator I =
+ Relocations.rbegin(); GroupRelocationCount > 0;
+ ++I, GroupRelocationCount--) {
+ int NewOffset = I->getMachineCodeOffset()
+ + TotalPadding;
+ I->setMachineCodeOffset(NewOffset);
+ }
+ }
+ }
+
+ virtual void alignToBundleBeginning() {
+ // mark that the next locked group must be aligned to bundle start
+ // (e.g. an indirect branch target)
+ assert(AlignNextGroup == kNone && "Conflicting group alignments");
+ AlignNextGroup = kBegin;
+ }
+
+ virtual void alignToBundleEnd() {
+ // mark that the next locked group must be aligned to bundle end (e.g. a
+ // call)
+ assert(AlignNextGroup == kNone && "Conflicting group alignments");
+ AlignNextGroup = kEnd;
+ }
+
+ virtual uintptr_t getCurrentPCValue() const {
+ // return destination PC value rather than generating location
+ if (BufferBegin == EmissionBuffer) {
+ return (uintptr_t)(FunctionDestination + (CurBufferPtr - BufferBegin));
+ } else if (BufferBegin == StubEmissionBuffer) {
+ return (uintptr_t)(StubDestination + (CurBufferPtr - BufferBegin));
+ } else {
+ return (uintptr_t)CurBufferPtr;
+ }
+ }
+
+ // addRelocation gets called in the middle of emitting an instruction, and
+ // creates the relocation based on the instruction's current position in
+ // the emission buffer; however it could get moved if it crosses the bundle
+ // boundary. so we intercept relocation creation and adjust newly-created
+ // relocations if necessary
+ virtual void addRelocation(const MachineRelocation &MR) {
+ GroupRelocationCount++;
+ JITEmitter::addRelocation(MR);
+ }
+
+ private:
+ typedef enum _GroupAlign { kNone, kBegin, kEnd } GroupAlign;
+ // FunctionDestination points to the final destination for the function
+ // (i.e. where it will be copied after validation)
+ uint8_t *FunctionDestination;
+ uint8_t *BundleLockSavedCurBufferPtr;
+ int BundleNestCount; // should not exceed 2
+ GroupAlign AlignNextGroup;
+ unsigned GroupRelocationCount;
+ uint8_t *EmissionBuffer;
+ uintptr_t EmissionBufferSize;
+
+ bool ReusedStub;
+ uint8_t *StubDestination;
+ uint8_t *StubEmissionBuffer;
+ uintptr_t StubEmissionBufferSize;
+
+ TargetJITInfo *JITInfo;
+ const int kBundleSize;
+ const int32_t kJumpMask;
+
+ // Set the buffer pointers (begin, cur, end) so they point into the buffer
+ // at dest, preserving their relative positions
+ void setBufferPtrs(uint8_t* dest) {
+ BufferEnd = dest + (BufferEnd - BufferBegin);
+ CurBufferPtr = dest + (CurBufferPtr - BufferBegin);
+ BufferBegin = dest;
+ }
+};
}
void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
@@ -939,6 +1297,12 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
// Mark code region readable and executable if it's not so already.
MemMgr->setMemoryExecutable();
+ // @LOCALMOD-START
+#ifndef __native_client__
+ // In NaCl, we haven't yet validated and copied the function code to the
+ // destination yet, so there is nothing to disassemble. Furthermore we can't
+ // touch the destination because it may not even be mapped yet
+ // @LOCALMOD-END
DEBUG({
if (sys::hasDisassembler()) {
dbgs() << "JIT: Disassembled code:\n";
@@ -968,6 +1332,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
dbgs()<< '\n';
}
});
+#endif // @LOCALMOD
if (JITExceptionHandling) {
uintptr_t ActualSize = 0;
@@ -1252,7 +1617,14 @@ void JITEmitter::EmittedFunctionConfig::onRAUW(
JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM,
TargetMachine &tm) {
+// @LOCALMOD-START
+#ifndef __native_client__
return new JITEmitter(jit, JMM, tm);
+#else
+ assert(!JMM && "NaCl does not support custom memory managers");
+ return new NaClJITEmitter(jit, tm);
+#endif
+// @LOCALMOD-END
}
// getPointerToFunctionOrStub - If the specified function has been
diff --git a/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp b/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp
new file mode 100644
index 0000000000..d44fee2292
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/NaClJITMemoryManager.cpp
@@ -0,0 +1,430 @@
+//===-- NaClJITMemoryManager.cpp - Memory Allocator for JIT'd code --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NaClJITMemoryManager class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "llvm/ExecutionEngine/NaClJITMemoryManager.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Config/config.h"
+#include <vector>
+
+#if defined(__linux__) || defined(__native_client__)
+#if defined(HAVE_SYS_STAT_H)
+#include <sys/stat.h>
+#endif
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
+using namespace llvm;
+
+#ifdef __native_client__
+// etext is guarded by ifdef so the code still compiles on non-ELF platforms
+extern char etext;
+#endif
+
+// The way NaCl linking is currently setup, there is a gap between the text
+// segment and the rodata segment where we can fill dyncode. The text ends
+// at etext, but there's no symbol for the start of rodata. Currently the
+// linker script puts it at 0x11000000
+// If we run out of space there, we can also allocate below the text segment
+// and keep going downward until we run into code loaded by the dynamic
+// linker. (TODO(dschuff): make that work)
+// For now, just start at etext and go until we hit rodata
+
+// It's an open issue that lazy jitting is not thread safe (PR5184). However
+// NaCl's dyncode_create solves exactly this problem, so in the future
+// this allocator could (should?) be made thread safe
+
+const size_t NaClJITMemoryManager::kStubSlabSize;
+const size_t NaClJITMemoryManager::kDataSlabSize;
+const size_t NaClJITMemoryManager::kCodeSlabSize;
+
+// TODO(dschuff) fix allocation start (etext + 64M is hopefully after where
+// glibc is loaded) and limit (maybe need a linker-provided symbol for the start
+// of the IRT or end of the segment gap)
+// (also fix allocateCodeSlab and maybe allocateStubSlab at that time)
+// what we really need is a usable nacl_dyncode_alloc(), but this could still
+// be improved upon using dl_iterate_phdr
+const static intptr_t kNaClSegmentGapEnd = 0x11000000;
+
+NaClJITMemoryManager::NaClJITMemoryManager() :
+ AllocatableRegionLimit((uint8_t *)kNaClSegmentGapEnd),
+ NextCode(AllocatableRegionStart), GOTBase(NULL) {
+#ifdef __native_client__
+ AllocatableRegionStart = (uint8_t *)&etext + 1024*1024*64;
+#else
+ assert(false && "NaClJITMemoryManager will not work outside NaCl sandbox");
+#endif
+ AllocatableRegionStart =
+ (uint8_t *)RoundUpToAlignment((uint64_t)AllocatableRegionStart,
+ kBundleSize);
+ NextCode = AllocatableRegionStart;
+
+ // Allocate 1 stub slab to get us started
+ CurrentStubSlab = allocateStubSlab(0);
+ InitFreeList(&CodeFreeListHead);
+ InitFreeList(&DataFreeListHead);
+
+ DEBUG(dbgs() << "NaClJITMemoryManager: AllocatableRegionStart " <<
+ AllocatableRegionStart << " Limit " << AllocatableRegionLimit << "\n");
+}
+
+NaClJITMemoryManager::~NaClJITMemoryManager() {
+ delete [] GOTBase;
+ DestroyFreeList(CodeFreeListHead);
+ DestroyFreeList(DataFreeListHead);
+}
+
+FreeListNode *NaClJITMemoryManager::allocateCodeSlab(size_t MinSize) {
+ FreeListNode *node = new FreeListNode();
+ if (AllocatableRegionLimit - NextCode < (int)kCodeSlabSize) {
+ // TODO(dschuff): might be possible to try the space below text segment?
+ report_fatal_error("Ran out of code space");
+ }
+ node->address = NextCode;
+ node->size = std::max(kCodeSlabSize, MinSize);
+ NextCode += node->size;
+ DEBUG(dbgs() << "allocated code slab " << NextCode - node->size << "-" <<
+ NextCode << "\n");
+ return node;
+}
+
+SimpleSlab NaClJITMemoryManager::allocateStubSlab(size_t MinSize) {
+ SimpleSlab s;
+ DEBUG(dbgs() << "allocateStubSlab: ");
+ // It's a little weird to just allocate and throw away the FreeListNode, but
+ // since code region allocation is still a bit ugly and magical, I decided
+ // it's better to reuse allocateCodeSlab than duplicate the logic.
+ FreeListNode *n = allocateCodeSlab(MinSize);
+ s.address = n->address;
+ s.size = n->size;
+ s.next_free = n->address;
+ delete n;
+ return s;
+}
+
+FreeListNode *NaClJITMemoryManager::allocateDataSlab(size_t MinSize) {
+ FreeListNode *node = new FreeListNode;
+ size_t size = std::max(kDataSlabSize, MinSize);
+ node->address = (uint8_t*)DataAllocator.Allocate(size, kBundleSize);
+ node->size = size;
+ return node;
+}
+
+void NaClJITMemoryManager::InitFreeList(FreeListNode **Head) {
+ // Make sure there is always at least one entry in the free list
+ *Head = new FreeListNode;
+ (*Head)->Next = (*Head)->Prev = *Head;
+ (*Head)->size = 0;
+}
+
+void NaClJITMemoryManager::DestroyFreeList(FreeListNode *Head) {
+ FreeListNode *n = Head->Next;
+ while(n != Head) {
+ FreeListNode *next = n->Next;
+ delete n;
+ n = next;
+ }
+ delete Head;
+}
+
+FreeListNode *NaClJITMemoryManager::FreeListAllocate(uintptr_t &ActualSize,
+ FreeListNode *Head,
+ FreeListNode * (NaClJITMemoryManager::*allocate)(size_t)) {
+ FreeListNode *candidateBlock = Head;
+ FreeListNode *iter = Head->Next;
+
+ uintptr_t largest = candidateBlock->size;
+ // Search for the largest free block
+ while (iter != Head) {
+ if (iter->size > largest) {
+ largest = iter->size;
+ candidateBlock = iter;
+ }
+ iter = iter->Next;
+ }
+
+ if (largest < ActualSize || largest == 0) {
+ candidateBlock = (this->*allocate)(ActualSize);
+ } else {
+ candidateBlock->RemoveFromFreeList();
+ }
+ return candidateBlock;
+}
+
+void NaClJITMemoryManager::FreeListFinishAllocation(FreeListNode *Block,
+ FreeListNode *Head, uint8_t *AllocationStart, uint8_t *AllocationEnd,
+ AllocationTable &Table) {
+ assert(AllocationEnd > AllocationStart);
+ assert(Block->address == AllocationStart);
+ uint8_t *End = (uint8_t *)RoundUpToAlignment((uint64_t)AllocationEnd,
+ kBundleSize);
+ assert(End <= Block->address + Block->size);
+ int AllocationSize = End - Block->address;
+ Table[AllocationStart] = AllocationSize;
+
+ Block->size -= AllocationSize;
+ if (Block->size >= kBundleSize * 2) {//TODO(dschuff): better heuristic?
+ Block->address = End;
+ Block->AddToFreeList(Head);
+ } else {
+ delete Block;
+ }
+ DEBUG(dbgs()<<"FinishAllocation size "<< AllocationSize <<" end "<<End<<"\n");
+}
+
+void NaClJITMemoryManager::FreeListDeallocate(FreeListNode *Head,
+ AllocationTable &Table,
+ void *Body) {
+ uint8_t *Allocation = (uint8_t *)Body;
+ DEBUG(dbgs() << "deallocating "<<Body<<" ");
+ assert(Table.count(Allocation) && "FreeList Deallocation not found in table");
+ FreeListNode *Block = new FreeListNode;
+ Block->address = Allocation;
+ Block->size = Table[Allocation];
+ Block->AddToFreeList(Head);
+ DEBUG(dbgs() << "deallocated "<< Allocation<< " size " << Block->size <<"\n");
+}
+
+uint8_t *NaClJITMemoryManager::startFunctionBody(const Function *F,
+ uintptr_t &ActualSize) {
+ CurrentCodeBlock = FreeListAllocate(ActualSize, CodeFreeListHead,
+ &NaClJITMemoryManager::allocateCodeSlab);
+ DEBUG(dbgs() << "startFunctionBody CurrentBlock " << CurrentCodeBlock <<
+ " addr " << CurrentCodeBlock->address << "\n");
+ ActualSize = CurrentCodeBlock->size;
+ return CurrentCodeBlock->address;
+}
+
+void NaClJITMemoryManager::endFunctionBody(const Function *F,
+ uint8_t *FunctionStart,
+ uint8_t *FunctionEnd) {
+ DEBUG(dbgs() << "endFunctionBody ");
+ FreeListFinishAllocation(CurrentCodeBlock, CodeFreeListHead,
+ FunctionStart, FunctionEnd, AllocatedFunctions);
+
+}
+
+uint8_t *NaClJITMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ llvm_unreachable("Implement me! (or don't.)");
+}
+
+uint8_t *NaClJITMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ return (uint8_t *)DataAllocator.Allocate(Size, Alignment);
+}
+
+void NaClJITMemoryManager::deallocateFunctionBody(void *Body) {
+ DEBUG(dbgs() << "deallocateFunctionBody, ");
+ if (Body) FreeListDeallocate(CodeFreeListHead, AllocatedFunctions, Body);
+}
+
+uint8_t *NaClJITMemoryManager::allocateStub(const GlobalValue* F,
+ unsigned StubSize,
+ unsigned Alignment) {
+ uint8_t *StartAddress = (uint8_t *)(uintptr_t)
+ RoundUpToAlignment((uintptr_t)CurrentStubSlab.next_free, Alignment);
+ if (StartAddress + StubSize >
+ CurrentStubSlab.address + CurrentStubSlab.size) {
+ CurrentStubSlab = allocateStubSlab(kStubSlabSize);
+ StartAddress = (uint8_t *)(uintptr_t)
+ RoundUpToAlignment((uintptr_t)CurrentStubSlab.next_free, Alignment);
+ }
+ CurrentStubSlab.next_free = StartAddress + StubSize;
+ DEBUG(dbgs() <<"allocated stub "<<StartAddress<< " size "<<StubSize<<"\n");
+ return StartAddress;
+}
+
+uint8_t *NaClJITMemoryManager::allocateSpace(intptr_t Size,
+ unsigned Alignment) {
+ uint8_t *r = (uint8_t*)DataAllocator.Allocate(Size, Alignment);
+ DEBUG(dbgs() << "allocateSpace " << Size <<"/"<<Alignment<<" ret "<<r<<"\n");
+ return r;
+}
+
+uint8_t *NaClJITMemoryManager::allocateGlobal(uintptr_t Size,
+ unsigned Alignment) {
+ uint8_t *r = (uint8_t*)DataAllocator.Allocate(Size, Alignment);
+ DEBUG(dbgs() << "allocateGlobal " << Size <<"/"<<Alignment<<" ret "<<r<<"\n");
+ return r;
+}
+
+uint8_t* NaClJITMemoryManager::startExceptionTable(const Function* F,
+ uintptr_t &ActualSize) {
+ CurrentDataBlock = FreeListAllocate(ActualSize, DataFreeListHead,
+ &NaClJITMemoryManager::allocateDataSlab);
+ DEBUG(dbgs() << "startExceptionTable CurrentBlock " << CurrentDataBlock <<
+ " addr " << CurrentDataBlock->address << "\n");
+ ActualSize = CurrentDataBlock->size;
+ return CurrentDataBlock->address;
+}
+
+void NaClJITMemoryManager::endExceptionTable(const Function *F,
+ uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister) {
+ DEBUG(dbgs() << "endExceptionTable ");
+ FreeListFinishAllocation(CurrentDataBlock, DataFreeListHead,
+ TableStart, TableEnd, AllocatedTables);
+}
+
+void NaClJITMemoryManager::deallocateExceptionTable(void *ET) {
+ DEBUG(dbgs() << "deallocateExceptionTable, ");
+ if (ET) FreeListDeallocate(DataFreeListHead, AllocatedTables, ET);
+}
+
+// Copy of DefaultJITMemoryManager's implementation
+void NaClJITMemoryManager::AllocateGOT() {
+ assert(GOTBase == 0 && "Cannot allocate the got multiple times");
+ GOTBase = new uint8_t[sizeof(void*) * 8192];
+ HasGOT = true;
+}
+
+//===----------------------------------------------------------------------===//
+// getPointerToNamedFunction() implementation.
+// This code is pasted directly from r153607 of JITMemoryManager.cpp and has
+// never been tested. It most likely doesn't work inside the sandbox.
+//===----------------------------------------------------------------------===//
+
+// AtExitHandlers - List of functions to call when the program exits,
+// registered with the atexit() library function.
+static std::vector<void (*)()> AtExitHandlers;
+
+/// runAtExitHandlers - Run any functions registered by the program's
+/// calls to atexit(3), which we intercept and store in
+/// AtExitHandlers.
+///
+static void runAtExitHandlers() {
+ while (!AtExitHandlers.empty()) {
+ void (*Fn)() = AtExitHandlers.back();
+ AtExitHandlers.pop_back();
+ Fn();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Function stubs that are invoked instead of certain library calls
+//
+// Force the following functions to be linked in to anything that uses the
+// JIT. This is a hack designed to work around the all-too-clever Glibc
+// strategy of making these functions work differently when inlined vs. when
+// not inlined, and hiding their real definitions in a separate archive file
+// that the dynamic linker can't see. For more info, search for
+// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+#if defined(__linux__)
+/* stat functions are redirecting to __xstat with a version number. On x86-64
+ * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat'
+ * available as an exported symbol, so we have to add it explicitly.
+ */
+namespace {
+class StatSymbols {
+public:
+ StatSymbols() {
+ sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
+ sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
+ sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
+ sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
+ sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
+ sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
+ sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
+ sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
+ sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
+ }
+};
+}
+static StatSymbols initStatSymbols;
+#endif // __linux__
+
+// jit_exit - Used to intercept the "exit" library call.
+static void jit_exit(int Status) {
+ runAtExitHandlers(); // Run atexit handlers...
+ exit(Status);
+}
+
+// jit_atexit - Used to intercept the "atexit" library call.
+static int jit_atexit(void (*Fn)()) {
+ AtExitHandlers.push_back(Fn); // Take note of atexit handler...
+ return 0; // Always successful
+}
+
+static int jit_noop() {
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+//
+/// getPointerToNamedFunction - This me