Diffstat (limited to 'tools')
-rw-r--r--  tools/autodebugger_c.py                      10
-rw-r--r--  tools/cache.py                              194
l---------  tools/eliminator/node_modules/.bin/cake       1
l---------  tools/eliminator/node_modules/.bin/coffee     1
-rw-r--r--  tools/file_packager.py                        2
-rw-r--r--  tools/js_optimizer.py                        25
-rw-r--r--  tools/jsrun.py                               27
-rw-r--r--  tools/shared.py                             400
-rw-r--r--  tools/tempfiles.py                           40
9 files changed, 419 insertions(+), 281 deletions(-)
diff --git a/tools/autodebugger_c.py b/tools/autodebugger_c.py
index 5d41faf0..54a4d691 100644
--- a/tools/autodebugger_c.py
+++ b/tools/autodebugger_c.py
@@ -21,9 +21,13 @@ for filename in filenames:
if m and (' if ' not in lines[i-1] or '{' in lines[i-1]) and \
(' if ' not in lines[i+1] or '{' in lines[i+1]) and \
(' else' not in lines[i-1] or '{' in lines[i-1]) and \
- (' else' not in lines[i+1] or '{' in lines[i+1]):
- var = m.groups(1)[0].rstrip().split(' ')[-1]
- lines[i] += ''' printf("%s:%d:%s=%%d\\n", %s);''' % (filename, i+1, var, var)
+ (' else' not in lines[i+1] or '{' in lines[i+1]) and \
+ (' for' not in lines[i-1]) and \
+ ('struct' not in lines[i]):
+ raw = m.groups(1)[0].rstrip()
+ var = raw.split(' ')[-1]
+ if ' ' in raw and '[' in var: continue
+ lines[i] += ''' printf("%s:%d:%s=%%d\\n", (int)%s);''' % (filename, i+1, var, var)
f = open(filename, 'w')
f.write('\n'.join(lines))
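
For reference, a sketch of what the updated injection produces (illustrative,
not part of the patch; the filename, line number and variable are made up):

  # Python 2, matching the tool itself
  line = ''' printf("%s:%d:%s=%%d\\n", (int)%s);''' % ('foo.c', 42, 'x', 'x')
  print line  # ->  printf("foo.c:42:x=%d\n", (int)x);
  # the new (int) cast keeps the %d format well-defined when the assigned
  # variable is not an int (e.g. a char or long)
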
diff --git a/tools/cache.py b/tools/cache.py
new file mode 100644
index 00000000..e7908fba
--- /dev/null
+++ b/tools/cache.py
@@ -0,0 +1,194 @@
+import os.path, sys, shutil, hashlib, cPickle, zlib, time
+
+import tempfiles
+
+# Permanent cache for dlmalloc and stdlibc++
+class Cache:
+ def __init__(self, dirname=None, debug=False):
+ if dirname is None:
+ dirname = os.environ.get('EM_CACHE')
+ if not dirname:
+ dirname = os.path.expanduser(os.path.join('~', '.emscripten_cache'))
+ self.dirname = dirname
+ self.debug = debug
+
+ def ensure(self):
+ if not os.path.exists(self.dirname):
+ os.makedirs(self.dirname)
+
+ def erase(self):
+ tempfiles.try_delete(self.dirname)
+ try:
+ open(self.dirname + '__last_clear', 'w').write('last clear: ' + time.asctime() + '\n')
+ except Exception, e:
+ print >> sys.stderr, 'failed to save last clear time: ', e
+
+ def get_path(self, shortname):
+ return os.path.join(self.dirname, shortname)
+
+ # Request a cached file. If it isn't in the cache, it will be created with
+ # the given creator function
+ def get(self, shortname, creator, extension='.bc'):
+ if not shortname.endswith(extension): shortname += extension
+ cachename = os.path.join(self.dirname, shortname)
+ if os.path.exists(cachename):
+ return cachename
+ self.ensure()
+ shutil.copyfile(creator(), cachename)
+ return cachename
+
+# JS-specific cache. We cache the results of compilation and optimization,
+# so that in incremental builds we can just load from cache.
+# We cache reasonably large chunks.
+class JCache:
+ def __init__(self, cache):
+ self.cache = cache
+ self.dirname = os.path.join(cache.dirname, 'jcache')
+ self.debug = cache.debug
+
+ def ensure(self):
+ self.cache.ensure()
+ if not os.path.exists(self.dirname):
+ os.makedirs(self.dirname)
+
+ def get_shortkey(self, keys):
+ if type(keys) not in [list, tuple]:
+ keys = [keys]
+ ret = ''
+ for key in keys:
+ assert type(key) == str
+ ret += hashlib.md5(key).hexdigest()
+ return ret
+
+ def get_cachename(self, shortkey):
+ return os.path.join(self.dirname, shortkey)
+
+ # Returns a cached value, if it exists. Make sure the full key matches
+ def get(self, shortkey, keys):
+ if self.debug: print >> sys.stderr, 'jcache get?', shortkey
+ cachename = self.get_cachename(shortkey)
+ if not os.path.exists(cachename):
+ if self.debug: print >> sys.stderr, 'jcache none at all'
+ return
+ try:
+ data = cPickle.loads(zlib.decompress(open(cachename).read()))
+ except Exception, e:
+      if self.debug: print >> sys.stderr, 'jcache decompress/unpickle error:', e
+      return
+ if len(data) != 2:
+ if self.debug: print >> sys.stderr, 'jcache error in get'
+ return
+ oldkeys = data[0]
+ if len(oldkeys) != len(keys):
+ if self.debug: print >> sys.stderr, 'jcache collision (a)'
+ return
+ for i in range(len(oldkeys)):
+ if oldkeys[i] != keys[i]:
+ if self.debug: print >> sys.stderr, 'jcache collision (b)'
+ return
+ if self.debug: print >> sys.stderr, 'jcache win'
+ return data[1]
+
+ # Sets the cached value for a key (from get_key)
+ def set(self, shortkey, keys, value):
+ cachename = self.get_cachename(shortkey)
+ try:
+ f = open(cachename, 'w')
+ f.write(zlib.compress(cPickle.dumps([keys, value])))
+ f.close()
+ except Exception, e:
+      if self.debug: print >> sys.stderr, 'jcache compress/pickle error:', e
+ return
+ # for i in range(len(keys)):
+ # open(cachename + '.key' + str(i), 'w').write(keys[i])
+ # open(cachename + '.value', 'w').write(value)
+
+# Given a set of functions of form (ident, text), and a preferred chunk size,
+# generates a set of chunks for parallel processing and caching.
+# It is very important to generate similar chunks in incremental builds, in
+# order to maximize the chance of cache hits. To achieve that, we save the
+# chunking used in the previous compilation of this phase, and we try to
+# generate the same chunks, barring big differences in function sizes that
+# violate our chunk size guideline. If caching is not used, chunking_file
+# should be None
+def chunkify(funcs, chunk_size, chunking_file, DEBUG=False):
+ previous_mapping = None
+ if chunking_file:
+ if os.path.exists(chunking_file):
+ try:
+ previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
+ #if DEBUG: print >> sys.stderr, 'jscache previous mapping', previous_mapping
+ except:
+ pass
+ chunks = []
+ if previous_mapping:
+ # initialize with previous chunking
+ news = []
+ for func in funcs:
+ ident, data = func
+ assert ident, 'need names for jcache chunking'
+ if not ident in previous_mapping:
+ news.append(func)
+ else:
+ n = previous_mapping[ident]
+ while n >= len(chunks): chunks.append([])
+ chunks[n].append(func)
+ if DEBUG: print >> sys.stderr, 'jscache not in previous chunking', len(news)
+ # add news and adjust for new sizes
+ spilled = news
+ for i in range(len(chunks)):
+ chunk = chunks[i]
+ size = sum([len(func[1]) for func in chunk])
+ #if DEBUG: print >> sys.stderr, 'need spilling?', i, size, len(chunk), 'vs', chunk_size, 1.5*chunk_size
+ while size > 1.5*chunk_size and len(chunk) > 1:
+ spill = chunk.pop()
+ spilled.append(spill)
+ size -= len(spill[1])
+ #if DEBUG: print >> sys.stderr, 'jscache new + spilled', len(spilled)
+ for chunk in chunks:
+ size = sum([len(func[1]) for func in chunk])
+ while size < 0.66*chunk_size and len(spilled) > 0:
+ spill = spilled.pop()
+ chunk.append(spill)
+ size += len(spill[1])
+ chunks = filter(lambda chunk: len(chunk) > 0, chunks) # might have empty ones, eliminate them
+ funcs = spilled # we will allocate these into chunks as if they were normal inputs
+ #if DEBUG: print >> sys.stderr, 'leftover spills', len(spilled)
+ # initialize reasonably, the rest of the funcs we need to split out
+ curr = []
+ total_size = 0
+ for i in range(len(funcs)):
+ func = funcs[i]
+ curr_size = len(func[1])
+ if total_size + curr_size < chunk_size:
+ curr.append(func)
+ total_size += curr_size
+ else:
+ chunks.append(curr)
+ curr = [func]
+ total_size = curr_size
+ if curr:
+ chunks.append(curr)
+ curr = None
+ if chunking_file:
+ # sort within each chunk, to keep the order identical
+ for chunk in chunks:
+ chunk.sort(key=lambda func: func[0])
+ # save new mapping info
+ new_mapping = {}
+ for i in range(len(chunks)):
+ chunk = chunks[i]
+ for ident, data in chunk:
+ assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
+ new_mapping[ident] = i
+ cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
+ #if DEBUG:
+ # for i in range(len(chunks)):
+ # chunk = chunks[i]
+ # print >> sys.stderr, 'final chunk', i, len(chunk)
+ # print >> sys.stderr, 'new mapping:', new_mapping
+ # if previous_mapping:
+ # for ident in set(previous_mapping.keys() + new_mapping.keys()):
+ # if previous_mapping.get(ident) != new_mapping.get(ident):
+ # print >> sys.stderr, 'mapping inconsistency', ident, previous_mapping.get(ident), new_mapping.get(ident)
+ return [''.join([func[1] for func in chunk]) for chunk in chunks] # remove function names
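
Usage sketch for the new instance-based API above (illustrative, not part of
the patch; assumes it runs from the tools directory, and the creator function
and key strings are made up):

  import cache

  c = cache.Cache()                        # honors EM_CACHE, else ~/.emscripten_cache

  def build_dlmalloc():
    # stand-in creator: compile dlmalloc and return the bitcode path
    open('/tmp/dlmalloc.bc', 'w').write('BC')
    return '/tmp/dlmalloc.bc'

  bc = c.get('dlmalloc', build_dlmalloc)   # creator runs only on a cache miss

  jc = cache.JCache(c)
  jc.ensure()
  keys = ['original source', 'pass list']  # full keys; the shortkey is their md5s
  shortkey = jc.get_shortkey(keys)
  value = jc.get(shortkey, keys)           # None on a miss or key mismatch
  if value is None:
    value = 'optimized output'             # stand-in for the expensive step
    jc.set(shortkey, keys, value)

  # chunkify packs (ident, text) pairs into roughly chunk_size-sized chunks,
  # reusing the mapping saved at the given path so incremental builds produce
  # similar chunks and hit the cache
  funcs = [('f1', 'function f1(){}'), ('f2', 'function f2(){}')]
  chunks = cache.chunkify(funcs, 1024*1024, jc.get_cachename('jsopt'))
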
diff --git a/tools/eliminator/node_modules/.bin/cake b/tools/eliminator/node_modules/.bin/cake
deleted file mode 120000
index d95f32af..00000000
--- a/tools/eliminator/node_modules/.bin/cake
+++ /dev/null
@@ -1 +0,0 @@
-../coffee-script/bin/cake \ No newline at end of file
diff --git a/tools/eliminator/node_modules/.bin/coffee b/tools/eliminator/node_modules/.bin/coffee
deleted file mode 120000
index b57f275d..00000000
--- a/tools/eliminator/node_modules/.bin/coffee
+++ /dev/null
@@ -1 +0,0 @@
-../coffee-script/bin/coffee \ No newline at end of file
diff --git a/tools/file_packager.py b/tools/file_packager.py
index bfa8e2f0..73ff4919 100644
--- a/tools/file_packager.py
+++ b/tools/file_packager.py
@@ -35,8 +35,8 @@ TODO: You can also provide .crn files yourself, pre-crunched. With this o
import os, sys, shutil, random
-from shared import Compression, execute, suffix, unsuffixed
import shared
+from shared import Compression, execute, suffix, unsuffixed
from subprocess import Popen, PIPE, STDOUT
data_target = sys.argv[1]
diff --git a/tools/js_optimizer.py b/tools/js_optimizer.py
index cbf64486..2fd2211b 100644
--- a/tools/js_optimizer.py
+++ b/tools/js_optimizer.py
@@ -2,7 +2,8 @@
import os, sys, subprocess, multiprocessing, re
import shared
-temp_files = shared.TempFiles()
+configuration = shared.configuration
+temp_files = configuration.get_temp_files()
__rootpath__ = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def path_from_root(*pathelems):
@@ -10,7 +11,9 @@ def path_from_root(*pathelems):
JS_OPTIMIZER = path_from_root('tools', 'js-optimizer.js')
-BEST_JS_PROCESS_SIZE = 1024*1024
+NUM_CHUNKS_PER_CORE = 1.5
+MIN_CHUNK_SIZE = int(os.environ.get('EMCC_JSOPT_MIN_CHUNK_SIZE') or 1024*1024) # configuring this is just for debugging purposes
+MAX_CHUNK_SIZE = 20*1024*1024
WINDOWS = sys.platform.startswith('win')
@@ -28,7 +31,7 @@ def run_on_chunk(command):
return filename
def run_on_js(filename, passes, js_engine, jcache):
-
+ if isinstance(jcache, bool) and jcache: jcache = shared.JCache
if jcache: shared.JCache.ensure()
if type(passes) == str:
@@ -74,6 +77,8 @@ def run_on_js(filename, passes, js_engine, jcache):
assert gen_end > gen_start
pre = js[:gen_start]
post = js[gen_end:]
+ if 'last' in passes:
+ post = post.replace(suffix, '') # no need to write out the metadata - nothing after us needs it
js = js[gen_start:gen_end]
else:
pre = ''
@@ -98,7 +103,11 @@ def run_on_js(filename, passes, js_engine, jcache):
total_size = len(js)
js = None
- chunks = shared.JCache.chunkify(funcs, BEST_JS_PROCESS_SIZE, 'jsopt' if jcache else None)
+ cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
+ intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
+ chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
+
+ chunks = shared.chunkify(funcs, chunk_size, jcache.get_cachename('jsopt') if jcache else None)
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
@@ -131,18 +140,18 @@ def run_on_js(filename, passes, js_engine, jcache):
if len(filenames) > 0:
# XXX Use '--nocrankshaft' to disable crankshaft to work around v8 bug 1895, needed for older v8/node (node 0.6.8+ should be ok)
- commands = map(lambda filename: [js_engine, JS_OPTIMIZER, filename, 'noPrintMetadata'] + passes, filenames)
+ commands = map(lambda filename: js_engine + [JS_OPTIMIZER, filename, 'noPrintMetadata'] + passes, filenames)
#print [' '.join(command) for command in commands]
- cores = min(multiprocessing.cpu_count(), filenames)
+    cores = min(cores, len(filenames))
if len(chunks) > 1 and cores >= 2:
# We can parallelize
- if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores (total: %.2f MB)' % (len(chunks), cores, total_size/(1024*1024.))
+ if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks of size %d, using %d cores (total: %.2f MB)' % (len(chunks), chunk_size, cores, total_size/(1024*1024.))
pool = multiprocessing.Pool(processes=cores)
filenames = pool.map(run_on_chunk, commands, chunksize=1)
else:
# We can't parallelize, but still break into chunks to avoid uglify/node memory issues
- if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks' % (len(chunks))
+ if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks of size %d' % (len(chunks), chunk_size)
filenames = [run_on_chunk(command) for command in commands]
else:
filenames = []
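
The fixed 1 MB BEST_JS_PROCESS_SIZE is gone; chunk size now scales with the
machine. A minimal sketch of the same arithmetic (constants copied from the
patch; pick_chunk_size is a hypothetical name):

  import multiprocessing, os

  NUM_CHUNKS_PER_CORE = 1.5
  MIN_CHUNK_SIZE = 1024*1024               # 1 MB floor
  MAX_CHUNK_SIZE = 20*1024*1024            # 20 MB ceiling

  def pick_chunk_size(total_size):
    cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
    intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
    return min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))

  # 4 cores -> 6 intended chunks: 3 MB of js still gives 1 MB chunks (the
  # floor wins), while 600 MB would be capped at 20 MB per chunk
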
diff --git a/tools/jsrun.py b/tools/jsrun.py
new file mode 100644
index 00000000..27c55350
--- /dev/null
+++ b/tools/jsrun.py
@@ -0,0 +1,27 @@
+import time
+from subprocess import Popen, PIPE, STDOUT
+
+def timeout_run(proc, timeout, note='unnamed process', full_output=False):
+ start = time.time()
+ if timeout is not None:
+ while time.time() - start < timeout and proc.poll() is None:
+ time.sleep(0.1)
+ if proc.poll() is None:
+      proc.kill() # XXX bug: killing emscripten.py does not kill its child process!
+ raise Exception("Timed out: " + note)
+ out = proc.communicate()
+ return '\n'.join(out) if full_output else out[0]
+
+def run_js(filename, engine=None, args=[], check_timeout=False, stdout=PIPE, stderr=None, cwd=None, full_output=False):
+ if type(engine) is not list:
+ engine = [engine]
+ command = engine + [filename] + (['--'] if 'd8' in engine[0] else []) + args
+ return timeout_run(
+ Popen(
+ command,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd),
+ 15*60 if check_timeout else None,
+ 'Execution',
+ full_output=full_output)
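
Example call into the new module (illustrative; the engine path is
hypothetical, and check_timeout=True enforces the 15 minute cap):

  from jsrun import run_js

  out = run_js('a.out.js', engine=['/usr/local/bin/node'],
               args=['--arg'], check_timeout=True)
  print out

Note that shared.run_js (below) still fills in JS_ENGINES[0] when no engine
is given, so most callers do not pass one explicitly.
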
diff --git a/tools/shared.py b/tools/shared.py
index ce9001fb..09f6aef4 100644
--- a/tools/shared.py
+++ b/tools/shared.py
@@ -1,6 +1,11 @@
-import shutil, time, os, sys, json, tempfile, copy, shlex, atexit, subprocess, hashlib, cPickle
+import shutil, time, os, sys, json, tempfile, copy, shlex, atexit, subprocess, hashlib, cPickle, re
from subprocess import Popen, PIPE, STDOUT
from tempfile import mkstemp
+import jsrun, cache, tempfiles
+
+def listify(x):
+ if type(x) is not list: return [x]
+ return x
# On Windows python suffers from a particularly nasty bug if python is spawning new processes while python itself is spawned from some other non-console process.
# Use a custom replacement for Popen on Windows to avoid the "WindowsError: [Error 6] The handle is invalid" errors when emcc is driven through cmake or mingw32-make.
@@ -28,7 +33,10 @@ class WindowsPopen:
self.stderr_ = PIPE
# Call the process with fixed streams.
- self.process = subprocess.Popen(args, bufsize, executable, self.stdin_, self.stdout_, self.stderr_, preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags)
+ try:
+ self.process = subprocess.Popen(args, bufsize, executable, self.stdin_, self.stdout_, self.stderr_, preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags)
+ except Exception, e:
+ print >> sys.stderr, 'subprocess.Popen(args=%s) failed! Exception %s' % (' '.join(args), str(e))
def communicate(self, input=None):
output = self.process.communicate(input)
@@ -155,7 +163,8 @@ EXPECTED_NODE_VERSION = (0,6,8)
def check_node_version():
try:
- actual = Popen([NODE_JS, '--version'], stdout=PIPE).communicate()[0].strip()
+ node = listify(NODE_JS)
+ actual = Popen(node + ['--version'], stdout=PIPE).communicate()[0].strip()
version = tuple(map(int, actual.replace('v', '').split('.')))
if version >= EXPECTED_NODE_VERSION:
return True
@@ -172,7 +181,7 @@ def check_node_version():
# we re-check sanity when the settings are changed)
# We also re-check sanity and clear the cache when the version changes
-EMSCRIPTEN_VERSION = '1.2.4'
+EMSCRIPTEN_VERSION = '1.2.6'
def check_sanity(force=False):
try:
@@ -281,30 +290,50 @@ AUTODEBUGGER = path_from_root('tools', 'autodebugger.py')
BINDINGS_GENERATOR = path_from_root('tools', 'bindings_generator.py')
EXEC_LLVM = path_from_root('tools', 'exec_llvm.py')
FILE_PACKAGER = path_from_root('tools', 'file_packager.py')
-RELOOPER = path_from_root('src', 'relooper.js')
# Temp dir. Create a random one, unless EMCC_DEBUG is set, in which case use TEMP_DIR/emscripten_temp
-try:
- TEMP_DIR
-except:
- print >> sys.stderr, 'TEMP_DIR not defined in ~/.emscripten, using /tmp'
- TEMP_DIR = '/tmp'
+class Configuration:
+ def __init__(self, environ):
+ self.DEBUG = environ.get('EMCC_DEBUG')
+ if self.DEBUG == "0":
+ self.DEBUG = None
+ self.DEBUG_CACHE = self.DEBUG and "cache" in self.DEBUG
+ self.EMSCRIPTEN_TEMP_DIR = None
-CANONICAL_TEMP_DIR = os.path.join(TEMP_DIR, 'emscripten_temp')
-EMSCRIPTEN_TEMP_DIR = None
+ try:
+ self.TEMP_DIR = TEMP_DIR
+ except NameError:
+ print >> sys.stderr, 'TEMP_DIR not defined in ~/.emscripten, using /tmp'
+ self.TEMP_DIR = '/tmp'
-DEBUG = os.environ.get('EMCC_DEBUG')
-if DEBUG:
- try:
- EMSCRIPTEN_TEMP_DIR = CANONICAL_TEMP_DIR
- if not os.path.exists(EMSCRIPTEN_TEMP_DIR):
- os.makedirs(EMSCRIPTEN_TEMP_DIR)
- except Exception, e:
- print >> sys.stderr, e, 'Could not create canonical temp dir. Check definition of TEMP_DIR in ~/.emscripten'
+ self.CANONICAL_TEMP_DIR = os.path.join(self.TEMP_DIR, 'emscripten_temp')
+
+ if self.DEBUG:
+ try:
+ self.EMSCRIPTEN_TEMP_DIR = self.CANONICAL_TEMP_DIR
+ if not os.path.exists(self.EMSCRIPTEN_TEMP_DIR):
+ os.makedirs(self.EMSCRIPTEN_TEMP_DIR)
+ except Exception, e:
+ print >> sys.stderr, e, 'Could not create canonical temp dir. Check definition of TEMP_DIR in ~/.emscripten'
+
+ def get_temp_files(self):
+ return tempfiles.TempFiles(
+ tmp=self.TEMP_DIR if not self.DEBUG else self.EMSCRIPTEN_TEMP_DIR,
+ save_debug_files=os.environ.get('EMCC_DEBUG_SAVE'))
+
+ def debug_log(self, msg):
+ if self.DEBUG:
+ print >> sys.stderr, msg
+
+configuration = Configuration(environ=os.environ)
+DEBUG = configuration.DEBUG
+EMSCRIPTEN_TEMP_DIR = configuration.EMSCRIPTEN_TEMP_DIR
+DEBUG_CACHE = configuration.DEBUG_CACHE
+CANONICAL_TEMP_DIR = configuration.CANONICAL_TEMP_DIR
if not EMSCRIPTEN_TEMP_DIR:
- EMSCRIPTEN_TEMP_DIR = tempfile.mkdtemp(prefix='emscripten_temp_', dir=TEMP_DIR)
+ EMSCRIPTEN_TEMP_DIR = tempfile.mkdtemp(prefix='emscripten_temp_', dir=configuration.TEMP_DIR)
def clean_temp():
try_delete(EMSCRIPTEN_TEMP_DIR)
atexit.register(clean_temp)
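
A sketch of how the new Configuration object is consumed (illustrative;
assumes ~/.emscripten is set up, and that tools/tempfiles.py keeps the
get/clean interface of the shared.TempFiles class deleted further down):

  import os
  import shared

  cfg = shared.Configuration(environ=os.environ)
  tf = cfg.get_temp_files()   # rooted in TEMP_DIR, or the canonical
                              # emscripten_temp dir under EMCC_DEBUG
  f = tf.get('.js')           # named temp file, noted for cleanup
  f.write('// scratch\n')
  f.close()
  tf.clean()                  # skipped when EMCC_DEBUG_SAVE is set
  cfg.debug_log('printed only when EMCC_DEBUG is set and not "0"')
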
@@ -376,6 +405,9 @@ if USE_EMSDK:
else:
EMSDK_OPTS = []
+#print >> sys.stderr, 'SDK opts', ' '.join(EMSDK_OPTS)
+#print >> sys.stderr, 'Compiler opts', ' '.join(COMPILER_OPTS)
+
# Engine tweaks
try:
@@ -399,42 +431,7 @@ if not WINDOWS:
pass
# Temp file utilities
-
-def try_delete(filename):
- try:
- os.unlink(filename)
- except:
- try:
- shutil.rmtree(filename)
- except:
- pass
-
-class TempFiles:
- def __init__(self):
- self.to_clean = []
-
- def note(self, filename):
- self.to_clean.append(filename)
-
- def get(self, suffix):
- """Returns a named temp file with the given prefix."""
- named_file = tempfile.NamedTemporaryFile(dir=TEMP_DIR if not DEBUG else EMSCRIPTEN_TEMP_DIR, suffix=suffix, delete=False)
- self.note(named_file.name)
- return named_file
-
- def clean(self):
- if os.environ.get('EMCC_DEBUG_SAVE'):
- print >> sys.stderr, 'not cleaning up temp files since in debug-save mode, see them in %s' % EMSCRIPTEN_TEMP_DIR
- return
- for filename in self.to_clean:
- try_delete(filename)
- self.to_clean = []
-
- def run_and_clean(self, func):
- try:
- return func()
- finally:
- self.clean()
+from tempfiles import try_delete
# Utilities
@@ -448,22 +445,10 @@ def check_engine(engine):
print 'Checking JS engine %s failed. Check %s. Details: %s' % (str(engine), EM_CONFIG, str(e))
return False
-def timeout_run(proc, timeout, note='unnamed process', full_output=False):
- start = time.time()
- if timeout is not None:
- while time.time() - start < timeout and proc.poll() is None:
- time.sleep(0.1)
- if proc.poll() is None:
- proc.kill() # XXX bug: killing emscripten.py does not kill it's child process!
- raise Exception("Timed out: " + note)
- out = proc.communicate()
- return '\n'.join(out) if full_output else out[0]
-
-def run_js(filename, engine=None, args=[], check_timeout=False, stdout=PIPE, stderr=None, cwd=None, full_output=False):
- if engine is None: engine = JS_ENGINES[0]
- if type(engine) is not list: engine = [engine]
- command = engine + [filename] + (['--'] if 'd8' in engine[0] else []) + args
- return timeout_run(Popen(command, stdout=stdout, stderr=stderr, cwd=cwd), 15*60 if check_timeout else None, 'Execution', full_output=full_output)
+def run_js(filename, engine=None, *args, **kw):
+ if engine is None:
+ engine = JS_ENGINES[0]
+ return jsrun.run_js(filename, engine, *args, **kw)
def to_cc(cxx):
# By default, LLVM_GCC and CLANG are really the C++ versions. This gets an explicit C version
@@ -554,7 +539,7 @@ class Settings:
ret = []
for key, value in Settings.__dict__.iteritems():
if key == key.upper(): # this is a hack. all of our settings are ALL_CAPS, python internals are not
- jsoned = json.dumps(value)
+ jsoned = json.dumps(value, sort_keys=True)
ret += ['-s', key + '=' + jsoned]
return ret
@@ -563,11 +548,11 @@ class Settings:
if opt_level >= 1:
Settings.ASSERTIONS = 0
Settings.DISABLE_EXCEPTION_CATCHING = 1
+ Settings.EMIT_GENERATED_FUNCTIONS = 1
if opt_level >= 2:
Settings.RELOOP = 1
if opt_level >= 3:
- # Aside from these, -O3 also runs closure compiler
- Settings.INLINING_LIMIT = 0
+ # Aside from these, -O3 also runs closure compiler and llvm lto
Settings.DOUBLE_MODE = 0
Settings.PRECISE_I64_MATH = 0
if noisy: print >> sys.stderr, 'Warning: Applying some potentially unsafe optimizations! (Use -O2 if this fails.)'
@@ -638,7 +623,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
.replace('$EMSCRIPTEN_ROOT', path_from_root('').replace('\\', '/')) \
.replace('$CFLAGS', env['CFLAGS']) \
.replace('$CXXFLAGS', env['CFLAGS'])
- toolchainFile = mkstemp(suffix='.cmaketoolchain.txt', dir=TEMP_DIR)[1]
+ toolchainFile = mkstemp(suffix='.cmaketoolchain.txt', dir=configuration.TEMP_DIR)[1]
open(toolchainFile, 'w').write(CMakeToolchain)
args.append('-DCMAKE_TOOLCHAIN_FILE=%s' % os.path.abspath(toolchainFile))
return args
@@ -811,7 +796,37 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
# Finish link
actual_files = unique_ordered(actual_files) # tolerate people trying to link a.so a.so etc.
if DEBUG: print >>sys.stderr, 'emcc: llvm-linking:', actual_files
- output = Popen([LLVM_LINK] + actual_files + ['-o', target], stdout=PIPE).communicate()[0]
+
+ # check for too-long command line
+ link_cmd = [LLVM_LINK] + actual_files + ['-o', target]
+    # 8k is a bit of an arbitrary limit, but a reasonable one
+    # for the maximum command line size before we fall back to a response file
+ response_file = None
+ if WINDOWS and len(' '.join(link_cmd)) > 8192:
+ if DEBUG: print >>sys.stderr, 'using response file for llvm-link'
+ [response_fd, response_file] = mkstemp(suffix='.response', dir=TEMP_DIR)
+
+ link_cmd = [LLVM_LINK, "@" + response_file]
+
+ response_fh = os.fdopen(response_fd, 'w')
+ for arg in actual_files:
+ # we can't put things with spaces in the response file
+ if " " in arg:
+ link_cmd.append(arg)
+ else:
+ response_fh.write(arg + "\n")
+ response_fh.close()
+ link_cmd.append("-o")
+ link_cmd.append(target)
+
+ if len(' '.join(link_cmd)) > 8192:
+ print >>sys.stderr, 'emcc: warning: link command line is very long, even with response file -- use paths with no spaces'
+
+ output = Popen(link_cmd, stdout=PIPE).communicate()[0]
+
+ if response_file:
+ os.unlink(response_file)
+
assert os.path.exists(target) and (output is None or 'Could not open input file' not in output), 'Linking error: ' + output
for temp_dir in temp_dirs:
try_delete(temp_dir)
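
A standalone sketch of the response-file fallback above (hypothetical helper;
the patch only takes this path on Windows, where the command line limit is
roughly 8 KB, and llvm-link reads extra arguments from a file named with '@'):

  import os
  from tempfile import mkstemp

  def with_response_file(cmd, args, tmp_dir):
    # returns (full command, response file path or None)
    if len(' '.join(cmd + args)) <= 8192:
      return cmd + args, None
    fd, path = mkstemp(suffix='.response', dir=tmp_dir)
    fh = os.fdopen(fd, 'w')
    inline = []
    for arg in args:
      if ' ' in arg:
        inline.append(arg)      # args with spaces can't go in the file
      else:
        fh.write(arg + '\n')
    fh.close()
    return cmd + ['@' + path] + inline, path

  cmd, rsp = with_response_file(['llvm-link'], ['a.bc', 'b.bc'], '/tmp')
  cmd += ['-o', 'out.bc']
  # ... Popen(cmd) ... then:
  if rsp: os.unlink(rsp)
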
@@ -907,14 +922,14 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
output_filename = filename + '.o'
try_delete(output_filename)
Popen([PYTHON, EMCC, filename] + args + ['-o', output_filename], stdout=stdout, stderr=stderr, env=env).communicate()
- assert os.path.exists(output_filename), 'emcc could not create output file'
+ assert os.path.exists(output_filename), 'emcc could not create output file: ' + output_filename
@staticmethod
def emar(action, output_filename, filenames, stdout=None, stderr=None, env=None):
try_delete(output_filename)
Popen([PYTHON, EMAR, action, output_filename] + filenames, stdout=stdout, stderr=stderr, env=env).communicate()
if 'c' in action:
- assert os.path.exists(output_filename), 'emar could not create output file'
+ assert os.path.exists(output_filename), 'emar could not create output file: ' + output_filename
@staticmethod
def emscripten(filename, append_ext=True, extra_args=[]):
@@ -922,8 +937,9 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
os.environ['EMSCRIPTEN_SUPPRESS_USAGE_WARNING'] = '1'
# Run Emscripten
+ Settings.RELOOPER = Cache.get_path('relooper.js')
settings = Settings.serialize()
- compiler_output = timeout_run(Popen([PYTHON, EMSCRIPTEN, filename + ('.o.ll' if append_ext else ''), '-o', filename + '.o.js'] + settings + extra_args, stdout=PIPE), None, 'Compiling')
+ compiler_output = jsrun.timeout_run(Popen([PYTHON, EMSCRIPTEN, filename + ('.o.ll' if append_ext else ''), '-o', filename + '.o.js'] + settings + extra_args, stdout=PIPE), None, 'Compiling')
#print compiler_output
# Detect compilation crashes and errors
@@ -1054,7 +1070,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
@staticmethod
def js_optimizer(filename, passes, jcache):
- return js_optimizer.run(filename, passes, NODE_JS, jcache)
+ return js_optimizer.run(filename, passes, listify(NODE_JS), jcache)
@staticmethod
def closure_compiler(filename):
@@ -1115,25 +1131,26 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
# Make sure the relooper exists. If it does not, check out the relooper code and bootstrap it
@staticmethod
- def ensure_relooper():
- if os.path.exists(RELOOPER): return
+ def ensure_relooper(relooper):
+ if os.path.exists(relooper): return
+ Cache.ensure()
curr = os.getcwd()
try:
ok = False
print >> sys.stderr, '======================================='
print >> sys.stderr, 'bootstrapping relooper...'
- Cache.ensure()
os.chdir(path_from_root('src'))
def make(opt_level):
- raw = RELOOPER + '.raw.js'
+ raw = relooper + '.raw.js'
Building.emcc(os.path.join('relooper', 'Relooper.cpp'), ['-I' + os.path.join('relooper'), '--post-js',
os.path.join('relooper', 'emscripten', 'glue.js'),
'-s', 'TOTAL_MEMORY=52428800',
'-s', 'EXPORTED_FUNCTIONS=["_rl_set_output_buffer","_rl_make_output_buffer","_rl_new_block","_rl_delete_block","_rl_block_add_branch_to","_rl_new_relooper","_rl_delete_relooper","_rl_relooper_add_block","_rl_relooper_calculate","_rl_relooper_render", "_rl_set_asm_js_mode"]',
'-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=["memcpy", "memset", "malloc", "free", "puts"]',
+ '-s', 'RELOOPER="' + relooper + '"',
'-O' + str(opt_level), '--closure', '0'], raw)
- f = open(RELOOPER, 'w')
+ f = open(relooper, 'w')
f.write("// Relooper, (C) 2012 Alon Zakai, MIT license, https://github.com/kripken/Relooper\n")
f.write("var Relooper = (function() {\n");
f.write(open(raw).read())
@@ -1153,190 +1170,39 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
finally:
os.chdir(curr)
if not ok:
- print >> sys.stderr, 'bootstrapping relooper failed. You may need to manually create src/relooper.js by compiling it, see src/relooper/emscripten'
+ print >> sys.stderr, 'bootstrapping relooper failed. You may need to manually create relooper.js by compiling it, see src/relooper/emscripten'
1/0
-# Permanent cache for dlmalloc and stdlibc++
-class Cache:
- dirname = os.environ.get('EM_CACHE')
- if not dirname:
- dirname = os.path.expanduser(os.path.join('~', '.emscripten_cache'))
-
- @staticmethod
- def ensure():
- if not os.path.exists(Cache.dirname):
- os.makedirs(Cache.dirname)
-
- @staticmethod
- def erase():
- try:
- shutil.rmtree(Cache.dirname)
- except:
- pass
- try_delete(RELOOPER)
-
- # Request a cached file. If it isn't in the cache, it will be created with
- # the given creator function
- @staticmethod
- def get(shortname, creator):
- if not shortname.endswith('.bc'): shortname += '.bc'
- cachename = os.path.join(Cache.dirname, shortname)
- if os.path.exists(cachename):
- return cachename
- Cache.ensure()
- shutil.copyfile(creator(), cachename)
- return cachename
-
-# JS-specific cache. We cache the results of compilation and optimization,
-# so that in incremental builds we can just load from cache.
-# We cache reasonably-large-sized chunks
-class JCache:
- dirname = os.path.join(Cache.dirname, 'jcache')
-
- @staticmethod
- def ensure():
- Cache.ensure()
- if not os.path.exists(JCache.dirname):
- os.makedirs(JCache.dirname)
-
- @staticmethod
- def get_shortkey(keys):
- if type(keys) not in [list, tuple]:
- keys = [keys]
- ret = ''
- for key in keys:
- assert type(key) == str
- ret += hashlib.md5(key).hexdigest()
- return ret
-
@staticmethod
- def get_cachename(shortkey):
- return os.path.join(JCache.dirname, shortkey)
+ def preprocess(infile, outfile):
+ '''
+ Preprocess source C/C++ in some special ways that emscripten needs. Returns
+ a filename (potentially the same one if nothing was changed).
- # Returns a cached value, if it exists. Make sure the full key matches
- @staticmethod
- def get(shortkey, keys):
- #if DEBUG: print >> sys.stderr, 'jcache get?', shortkey
- cachename = JCache.get_cachename(shortkey)
- if not os.path.exists(cachename):
- #if DEBUG: print >> sys.stderr, 'jcache none at all'
- return
- data = cPickle.Unpickler(open(cachename, 'rb')).load()
- if len(data) != 2:
- #if DEBUG: print >> sys.stderr, 'jcache error in get'
- return
- oldkeys = data[0]
- if len(oldkeys) != len(keys):
- #if DEBUG: print >> sys.stderr, 'jcache collision (a)'
- return
- for i in range(len(oldkeys)):
- if oldkeys[i] != keys[i]:
- #if DEBUG: print >> sys.stderr, 'jcache collision (b)'
- return
- #if DEBUG: print >> sys.stderr, 'jcache win'
- return data[1]
-
- # Sets the cached value for a key (from get_key)
- @staticmethod
- def set(shortkey, keys, value):
- cachename = JCache.get_cachename(shortkey)
- cPickle.Pickler(open(cachename, 'wb')).dump([keys, value])
- #if DEBUG:
- # for i in range(len(keys)):
- # open(cachename + '.key' + str(i), 'w').write(keys[i])
- # open(cachename + '.value', 'w').write(value)
-
- # Given a set of functions of form (ident, text), and a preferred chunk size,
- # generates a set of chunks for parallel processing and caching.
- # It is very important to generate similar chunks in incremental builds, in
- # order to maximize the chance of cache hits. To achieve that, we save the
- # chunking used in the previous compilation of this phase, and we try to
- # generate the same chunks, barring big differences in function sizes that
- # violate our chunk size guideline. If caching is not used, chunking_file
- # should be None
- @staticmethod
- def chunkify(funcs, chunk_size, chunking_file):
- previous_mapping = None
- if chunking_file:
- chunking_file = JCache.get_cachename(chunking_file)
- if os.path.exists(chunking_file):
- try:
- previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
- #if DEBUG: print >> sys.stderr, 'jscache previous mapping', previous_mapping
- except:
- pass
- chunks = []
- if previous_mapping:
- # initialize with previous chunking
- news = []
- for func in funcs:
- ident, data = func
- assert ident, 'need names for jcache chunking'
- if not ident in previous_mapping:
- news.append(func)
- else:
- n = previous_mapping[ident]
- while n >= len(chunks): chunks.append([])
- chunks[n].append(func)
- if DEBUG: print >> sys.stderr, 'jscache not in previous chunking', len(news)
- # add news and adjust for new sizes
- spilled = news
- for i in range(len(chunks)):
- chunk = chunks[i]
- size = sum([len(func[1]) for func in chunk])
- #if DEBUG: print >> sys.stderr, 'need spilling?', i, size, len(chunk), 'vs', chunk_size, 1.5*chunk_size
- while size > 1.5*chunk_size and len(chunk) > 1:
- spill = chunk.pop()
- spilled.append(spill)
- size -= len(spill[1])
- #if DEBUG: print >> sys.stderr, 'jscache new + spilled', len(spilled)
- for chunk in chunks:
- size = sum([len(func[1]) for func in chunk])
- while size < 0.66*chunk_size and len(spilled) > 0:
- spill = spilled.pop()
- chunk.append(spill)
- size += len(spill[1])
- chunks = filter(lambda chunk: len(chunk) > 0, chunks) # might have empty ones, eliminate them
- funcs = spilled # we will allocate these into chunks as if they were normal inputs
- #if DEBUG: print >> sys.stderr, 'leftover spills', len(spilled)
- # initialize reasonably, the rest of the funcs we need to split out
- curr = []
- total_size = 0
- for i in range(len(funcs)):
- func = funcs[i]
- curr_size = len(func[1])
- if total_size + curr_size < chunk_size:
- curr.append(func)
- total_size += curr_size
- else:
- chunks.append(curr)
- curr = [func]
- total_size = curr_size
- if curr:
- chunks.append(curr)
- curr = None
- if chunking_file:
- # sort within each chunk, to keep the order identical
- for chunk in chunks:
- chunk.sort(key=lambda func: func[0])
- # save new mapping info
- new_mapping = {}
- for i in range(len(chunks)):
- chunk = chunks[i]
- for ident, data in chunk:
- assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
- new_mapping[ident] = i
- cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
- #if DEBUG:
- # for i in range(len(chu