path: root/tools/shared.py
Diffstat (limited to 'tools/shared.py')
-rw-r--r--  tools/shared.py  142
1 file changed, 102 insertions, 40 deletions
diff --git a/tools/shared.py b/tools/shared.py
index 401a580b..aca0677d 100644
--- a/tools/shared.py
+++ b/tools/shared.py
@@ -2,6 +2,10 @@ import shutil, time, os, sys, json, tempfile, copy, shlex, atexit, subprocess, h
from subprocess import Popen, PIPE, STDOUT
from tempfile import mkstemp
+def listify(x):
+ if type(x) is not list: return [x]
+ return x
+
# On Windows python suffers from a particularly nasty bug if python is spawning new processes while python itself is spawned from some other non-console process.
# Use a custom replacement for Popen on Windows to avoid the "WindowsError: [Error 6] The handle is invalid" errors when emcc is driven through cmake or mingw32-make.
# See http://bugs.python.org/issue3905
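The listify helper added at the top of this hunk normalizes configuration values such as NODE_JS, which may be given either as a single command string or as a list of command parts. A minimal standalone illustration (the sample values are made up):

  def listify(x):
    if type(x) is not list: return [x]
    return x

  # a hypothetical engine configured as a plain string
  print(listify('node'))                 # ['node']
  # a configuration already given as a list passes through unchanged
  print(listify(['node', '--harmony']))  # ['node', '--harmony']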
@@ -28,7 +32,10 @@ class WindowsPopen:
self.stderr_ = PIPE
# Call the process with fixed streams.
- self.process = subprocess.Popen(args, bufsize, executable, self.stdin_, self.stdout_, self.stderr_, preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags)
+ try:
+ self.process = subprocess.Popen(args, bufsize, executable, self.stdin_, self.stdout_, self.stderr_, preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags)
+ except Exception, e:
+ print >> sys.stderr, 'subprocess.Popen(args=%s) failed! Exception %s' % (' '.join(args), str(e))
def communicate(self, input=None):
output = self.process.communicate(input)
@@ -155,7 +162,8 @@ EXPECTED_NODE_VERSION = (0,6,8)
def check_node_version():
try:
- actual = Popen([NODE_JS, '--version'], stdout=PIPE).communicate()[0].strip()
+ node = listify(NODE_JS)
+ actual = Popen(node + ['--version'], stdout=PIPE).communicate()[0].strip()
version = tuple(map(int, actual.replace('v', '').split('.')))
if version >= EXPECTED_NODE_VERSION:
return True
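The version check above reduces the engine's --version output to an integer tuple so it can be compared component by component against EXPECTED_NODE_VERSION. A self-contained sketch of that parsing (the sample version strings are made up):

  EXPECTED_NODE_VERSION = (0, 6, 8)

  def parse_node_version(text):
    # 'v0.8.14\n' -> (0, 8, 14)
    return tuple(map(int, text.strip().replace('v', '').split('.')))

  assert parse_node_version('v0.8.14\n') >= EXPECTED_NODE_VERSION
  assert parse_node_version('v0.6.2\n') < EXPECTED_NODE_VERSION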
@@ -295,6 +303,10 @@ CANONICAL_TEMP_DIR = os.path.join(TEMP_DIR, 'emscripten_temp')
EMSCRIPTEN_TEMP_DIR = None
DEBUG = os.environ.get('EMCC_DEBUG')
+if DEBUG == "0":
+ DEBUG = None
+DEBUG_CACHE = DEBUG and "cache" in DEBUG
+
if DEBUG:
try:
EMSCRIPTEN_TEMP_DIR = CANONICAL_TEMP_DIR
@@ -376,6 +388,9 @@ if USE_EMSDK:
else:
EMSDK_OPTS = []
+#print >> sys.stderr, 'SDK opts', ' '.join(EMSDK_OPTS)
+#print >> sys.stderr, 'Compiler opts', ' '.join(COMPILER_OPTS)
+
# Engine tweaks
try:
@@ -461,7 +476,8 @@ def timeout_run(proc, timeout, note='unnamed process', full_output=False):
def run_js(filename, engine=None, args=[], check_timeout=False, stdout=PIPE, stderr=None, cwd=None, full_output=False):
if engine is None: engine = JS_ENGINES[0]
- if type(engine) is not list: engine = [engine]
+ engine = listify(engine)
+ #if not WINDOWS: 'd8' in engine[0] or 'node' in engine[0]: engine += ['--stack_size=8192'] # needed for some big projects
command = engine + [filename] + (['--'] if 'd8' in engine[0] else []) + args
return timeout_run(Popen(command, stdout=stdout, stderr=stderr, cwd=cwd), 15*60 if check_timeout else None, 'Execution', full_output=full_output)
@@ -554,7 +570,7 @@ class Settings:
ret = []
for key, value in Settings.__dict__.iteritems():
if key == key.upper(): # this is a hack. all of our settings are ALL_CAPS, python internals are not
- jsoned = json.dumps(value)
+ jsoned = json.dumps(value, sort_keys=True)
ret += ['-s', key + '=' + jsoned]
return ret
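Passing sort_keys=True makes json.dumps emit object keys in sorted order, so the generated -s KEY=VALUE flags no longer depend on dict iteration order. A quick illustration with made-up settings:

  import json

  settings = {'ZEBRA': 1, 'ALPHA': {'b': 2, 'a': 1}}
  # without sort_keys the key order follows dict iteration order;
  # with sort_keys the output is stable across runs and interpreters
  print(json.dumps(settings, sort_keys=True))  # {"ALPHA": {"a": 1, "b": 2}, "ZEBRA": 1}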
@@ -563,10 +579,11 @@ class Settings:
if opt_level >= 1:
Settings.ASSERTIONS = 0
Settings.DISABLE_EXCEPTION_CATCHING = 1
+ Settings.EMIT_GENERATED_FUNCTIONS = 1
if opt_level >= 2:
Settings.RELOOP = 1
if opt_level >= 3:
- Settings.INLINING_LIMIT = 0
+ # Aside from these, -O3 also runs closure compiler and llvm lto
Settings.DOUBLE_MODE = 0
Settings.PRECISE_I64_MATH = 0
if noisy: print >> sys.stderr, 'Warning: Applying some potentially unsafe optimizations! (Use -O2 if this fails.)'
@@ -662,6 +679,9 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
def make(args, stdout=None, stderr=None, env=None):
if env is None:
env = Building.get_building_env()
+ if not args:
+ print >> sys.stderr, 'Error: Executable to run not specified.'
+ sys.exit(1)
#args += ['VERBOSE=1']
try:
Popen(args, stdout=stdout, stderr=stderr, env=env).communicate()
@@ -745,12 +765,16 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
resolved_symbols = set()
temp_dirs = []
files = map(os.path.abspath, files)
+ has_ar = False
+ for f in files:
+ has_ar = has_ar or Building.is_ar(f)
for f in files:
if not Building.is_ar(f):
if Building.is_bitcode(f):
- new_symbols = Building.llvm_nm(f)
- resolved_symbols = resolved_symbols.union(new_symbols.defs)
- unresolved_symbols = unresolved_symbols.union(new_symbols.undefs.difference(resolved_symbols)).difference(new_symbols.defs)
+ if has_ar:
+ new_symbols = Building.llvm_nm(f)
+ resolved_symbols = resolved_symbols.union(new_symbols.defs)
+ unresolved_symbols = unresolved_symbols.union(new_symbols.undefs.difference(resolved_symbols)).difference(new_symbols.defs)
actual_files.append(f)
else:
# Extract object files from ar archives, and link according to gnu ld semantics
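The has_ar pre-pass above means per-object symbol tracking is only paid for when an archive is actually part of the link; the tracking exists so that, following gnu ld semantics, an archive member is pulled in only if it defines a symbol that is still unresolved. A rough standalone sketch of that selection rule (the data shapes and helper name are assumptions, not taken from the file):

  def select_archive_members(members, unresolved):
    # members: list of (name, defs, undefs) tuples, one per object in the archive
    # unresolved: set of symbols still needed by the objects linked so far
    needed = []
    for name, defs, undefs in members:
      if unresolved & defs:
        needed.append(name)
        # the member resolves some symbols but may introduce new undefined ones
        unresolved = (unresolved - defs) | (undefs - defs)
    return needed, unresolved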
@@ -803,7 +827,37 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
# Finish link
actual_files = unique_ordered(actual_files) # tolerate people trying to link a.so a.so etc.
if DEBUG: print >>sys.stderr, 'emcc: llvm-linking:', actual_files
- output = Popen([LLVM_LINK] + actual_files + ['-o', target], stdout=PIPE).communicate()[0]
+
+ # check for too-long command line
+ link_cmd = [LLVM_LINK] + actual_files + ['-o', target]
+ # 8k is a bit of an arbitrary limit, but a reasonable one
+ # for max command line size before we use a response file
+ response_file = None
+ if WINDOWS and len(' '.join(link_cmd)) > 8192:
+ if DEBUG: print >>sys.stderr, 'using response file for llvm-link'
+ [response_fd, response_file] = mkstemp(suffix='.response', dir=TEMP_DIR)
+
+ link_cmd = [LLVM_LINK, "@" + response_file]
+
+ response_fh = os.fdopen(response_fd, 'w')
+ for arg in actual_files:
+ # we can't put things with spaces in the response file
+ if " " in arg:
+ link_cmd.append(arg)
+ else:
+ response_fh.write(arg + "\n")
+ response_fh.close()
+ link_cmd.append("-o")
+ link_cmd.append(target)
+
+ if len(' '.join(link_cmd)) > 8192:
+ print >>sys.stderr, 'emcc: warning: link command line is very long, even with response file -- use paths with no spaces'
+
+ output = Popen(link_cmd, stdout=PIPE).communicate()[0]
+
+ if response_file:
+ os.unlink(response_file)
+
assert os.path.exists(target) and (output is None or 'Could not open input file' not in output), 'Linking error: ' + output
for temp_dir in temp_dirs:
try_delete(temp_dir)
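The response-file path in the hunk above works around the Windows command-line length limit by writing most arguments to a temporary file and passing it to llvm-link as @file. A simplified standalone sketch of the same pattern (the 8192-byte threshold and the space handling follow the diff; the function and its signature are assumed for illustration):

  import os
  from tempfile import mkstemp

  def build_link_cmd(linker, inputs, target, temp_dir, limit=8192):
    # plain command line first
    cmd = [linker] + inputs + ['-o', target]
    if len(' '.join(cmd)) <= limit:
      return cmd, None
    # too long: spill arguments without spaces into a response file
    fd, response_file = mkstemp(suffix='.response', dir=temp_dir)
    cmd = [linker, '@' + response_file]
    fh = os.fdopen(fd, 'w')
    for arg in inputs:
      if ' ' in arg:
        cmd.append(arg)  # arguments containing spaces stay on the command line
      else:
        fh.write(arg + '\n')
    fh.close()
    cmd += ['-o', target]
    return cmd, response_file  # caller unlinks response_file after the link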
@@ -825,6 +879,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
def llvm_opt(filename, opts):
if type(opts) is int:
opts = Building.pick_llvm_opts(opts)
+ #opts += ['-debug-pass=Arguments']
if DEBUG: print >> sys.stderr, 'emcc: LLVM opts:', opts
output = Popen([LLVM_OPT, filename] + opts + ['-o=' + filename + '.opt.bc'], stdout=PIPE).communicate()[0]
assert os.path.exists(filename + '.opt.bc'), 'Failed to run llvm optimizations: ' + output
@@ -861,8 +916,14 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
assert os.path.exists(output_filename), 'Could not create bc file: ' + output
return output_filename
+ nm_cache = {} # cache results of nm - it can be slow to run
+
@staticmethod
def llvm_nm(filename, stdout=PIPE, stderr=None):
+ if filename in Building.nm_cache:
+ #if DEBUG: print >> sys.stderr, 'loading nm results for %s from cache' % filename
+ return Building.nm_cache[filename]
+
# LLVM binary ==> list of symbols
output = Popen([LLVM_NM, filename], stdout=stdout, stderr=stderr).communicate()[0]
class ret:
@@ -883,6 +944,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
ret.defs = set(ret.defs)
ret.undefs = set(ret.undefs)
ret.commons = set(ret.commons)
+ Building.nm_cache[filename] = ret
return ret
@staticmethod
@@ -1038,7 +1100,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
@staticmethod
def js_optimizer(filename, passes, jcache):
- return js_optimizer.run(filename, passes, NODE_JS, jcache)
+ return js_optimizer.run(filename, passes, listify(NODE_JS), jcache)
@staticmethod
def closure_compiler(filename):
@@ -1083,24 +1145,6 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
@staticmethod
def is_bitcode(filename):
- # checks if a file contains LLVM bitcode
- # if the file doesn't exist or doesn't have valid symbols, it isn't bitcode
- try:
- defs = Building.llvm_nm(filename, stderr=PIPE)
- # If no symbols found, it might just be an empty bitcode file, try to dis it
- if len(defs.defs) + len(defs.undefs) + len(defs.commons) == 0:
- # llvm-nm 3.0 has a bug when reading symbols from ar files
- # so try to see if we're dealing with an ar file, in which
- # case we should try to dis it.
- if not Building.is_ar(filename):
- test_ll = os.path.join(EMSCRIPTEN_TEMP_DIR, 'test.ll')
- Building.llvm_dis(filename, test_ll)
- assert os.path.exists(test_ll)
- try_delete(test_ll)
- except Exception, e:
- if DEBUG: print >> sys.stderr, 'shared.Building.is_bitcode failed to test whether file \'%s\' is a llvm bitcode file! Failed on exception: %s' % (filename, e)
- return False
-
# look for magic signature
b = open(filename, 'r').read(4)
if b[0] == 'B' and b[1] == 'C':
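The simplified is_bitcode above relies on the file's magic bytes instead of invoking llvm-nm: raw LLVM bitcode begins with the bytes 'B', 'C', 0xC0, 0xDE. A minimal standalone version of that check (binary mode used here for safety; the helper name is made up):

  def looks_like_bitcode(path):
    # raw LLVM bitcode starts with 'B', 'C', 0xC0, 0xDE
    with open(path, 'rb') as f:
      magic = f.read(4)
    return len(magic) >= 2 and magic[:2] == b'BC'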
@@ -1176,6 +1220,10 @@ class Cache:
except:
pass
try_delete(RELOOPER)
+ try:
+ open(Cache.dirname + '__last_clear', 'w').write('last clear: ' + time.asctime() + '\n')
+ except:
+ print >> sys.stderr, 'failed to save last clear time'
# Request a cached file. If it isn't in the cache, it will be created with
# the given creator function
@@ -1218,29 +1266,30 @@ class JCache:
# Returns a cached value, if it exists. Make sure the full key matches
@staticmethod
def get(shortkey, keys):
- #if DEBUG: print >> sys.stderr, 'jcache get?', shortkey
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache get?', shortkey
cachename = JCache.get_cachename(shortkey)
if not os.path.exists(cachename):
- #if DEBUG: print >> sys.stderr, 'jcache none at all'
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache none at all'
return
data = cPickle.Unpickler(open(cachename, 'rb')).load()
if len(data) != 2:
- #if DEBUG: print >> sys.stderr, 'jcache error in get'
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache error in get'
return
oldkeys = data[0]
if len(oldkeys) != len(keys):
- #if DEBUG: print >> sys.stderr, 'jcache collision (a)'
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache collision (a)'
return
for i in range(len(oldkeys)):
if oldkeys[i] != keys[i]:
- #if DEBUG: print >> sys.stderr, 'jcache collision (b)'
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache collision (b)'
return
- #if DEBUG: print >> sys.stderr, 'jcache win'
+ if DEBUG_CACHE: print >> sys.stderr, 'jcache win'
return data[1]
# Sets the cached value for a key (from get_key)
@staticmethod
def set(shortkey, keys, value):
+ if DEBUG_CACHE: print >> sys.stderr, 'save to cache', shortkey
cachename = JCache.get_cachename(shortkey)
cPickle.Pickler(open(cachename, 'wb')).dump([keys, value])
#if DEBUG:
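JCache stores each entry as a pickled [keys, value] pair under a name derived from a short key, and get re-checks the stored full key list so that short-key collisions fall through to a cache miss rather than returning stale data. A condensed sketch of that pattern (plain pickle and the helper names here are assumptions):

  import os, pickle

  def cache_get(cachename, keys):
    # return the cached value only if the stored full keys match exactly
    if not os.path.exists(cachename):
      return None
    data = pickle.load(open(cachename, 'rb'))
    if len(data) != 2 or data[0] != keys:
      return None  # short-key collision or corrupt entry
    return data[1]

  def cache_set(cachename, keys, value):
    pickle.dump([keys, value], open(cachename, 'wb'))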
@@ -1264,28 +1313,36 @@ class JCache:
if os.path.exists(chunking_file):
try:
previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
- except:
- pass
+ if DEBUG: print >> sys.stderr, 'jscache previous mapping of size %d loaded from %s' % (len(previous_mapping), chunking_file)
+ except Exception, e:
+ print >> sys.stderr, 'Failed to load and unpickle previous chunking file at %s: ' % chunking_file, e
+ else:
+ print >> sys.stderr, 'Previous chunking file not found at %s' % chunking_file
chunks = []
if previous_mapping:
# initialize with previous chunking
news = []
for func in funcs:
ident, data = func
+ assert ident, 'need names for jcache chunking'
if not ident in previous_mapping:
news.append(func)
else:
n = previous_mapping[ident]
while n >= len(chunks): chunks.append([])
chunks[n].append(func)
+ if DEBUG: print >> sys.stderr, 'jscache not in previous chunking', len(news)
# add news and adjust for new sizes
spilled = news
- for chunk in chunks:
+ for i in range(len(chunks)):
+ chunk = chunks[i]
size = sum([len(func[1]) for func in chunk])
- while size > 1.5*chunk_size and len(chunk) > 0:
+ #if DEBUG: print >> sys.stderr, 'need spilling?', i, size, len(chunk), 'vs', chunk_size, 1.5*chunk_size
+ while size > 1.5*chunk_size and len(chunk) > 1:
spill = chunk.pop()
spilled.append(spill)
size -= len(spill[1])
+ #if DEBUG: print >> sys.stderr, 'jscache new + spilled', len(spilled)
for chunk in chunks:
size = sum([len(func[1]) for func in chunk])
while size < 0.66*chunk_size and len(spilled) > 0:
@@ -1294,6 +1351,7 @@ class JCache:
size += len(spill[1])
chunks = filter(lambda chunk: len(chunk) > 0, chunks) # might have empty ones, eliminate them
funcs = spilled # we will allocate these into chunks as if they were normal inputs
+ #if DEBUG: print >> sys.stderr, 'leftover spills', len(spilled)
# initialize reasonably, the rest of the funcs we need to split out
curr = []
total_size = 0
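The chunking changes above rebalance the previous mapping in two passes: oversized chunks spill functions (but are never emptied completely, hence len(chunk) > 1), and undersized chunks are then refilled from the spilled pool. A condensed sketch of that spill/fill step (the 1.5x and 0.66x thresholds come from the diff; everything else is assumed):

  def rebalance(chunks, spilled, chunk_size):
    # spill from chunks that grew too large, keeping at least one function per chunk
    for chunk in chunks:
      size = sum(len(data) for _ident, data in chunk)
      while size > 1.5 * chunk_size and len(chunk) > 1:
        ident, data = chunk.pop()
        spilled.append((ident, data))
        size -= len(data)
    # refill chunks that are now too small from the spilled pool
    for chunk in chunks:
      size = sum(len(data) for _ident, data in chunk)
      while size < 0.66 * chunk_size and len(spilled) > 0:
        ident, data = spilled.pop()
        chunk.append((ident, data))
        size += len(data)
    # drop empty chunks; leftover spilled functions are allocated like new input
    chunks = [c for c in chunks if len(c) > 0]
    return chunks, spilled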
@@ -1319,15 +1377,19 @@ class JCache:
for i in range(len(chunks)):
chunk = chunks[i]
for ident, data in chunk:
+ assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
new_mapping[ident] = i
cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
+ if DEBUG: print >> sys.stderr, 'jscache mapping of size %d saved to %s' % (len(new_mapping), chunking_file)
#if DEBUG:
+ # for i in range(len(chunks)):
+ # chunk = chunks[i]
+ # print >> sys.stderr, 'final chunk', i, len(chunk)
+ # print >> sys.stderr, 'new mapping:', new_mapping
# if previous_mapping:
# for ident in set(previous_mapping.keys() + new_mapping.keys()):
# if previous_mapping.get(ident) != new_mapping.get(ident):
# print >> sys.stderr, 'mapping inconsistency', ident, previous_mapping.get(ident), new_mapping.get(ident)
- # for key, value in new_mapping.iteritems():
- # print >> sys.stderr, 'mapping:', key, value
return [''.join([func[1] for func in chunk]) for chunk in chunks] # remove function names
class JS: