Diffstat (limited to 'tools')
-rw-r--r--  tools/js_optimizer.py  18
-rw-r--r--  tools/shared.py        74
2 files changed, 60 insertions, 32 deletions
diff --git a/tools/js_optimizer.py b/tools/js_optimizer.py
index cbf64486..52cae6e5 100644
--- a/tools/js_optimizer.py
+++ b/tools/js_optimizer.py
@@ -10,7 +10,9 @@ def path_from_root(*pathelems):
JS_OPTIMIZER = path_from_root('tools', 'js-optimizer.js')
-BEST_JS_PROCESS_SIZE = 1024*1024
+NUM_CHUNKS_PER_CORE = 1.5
+MIN_CHUNK_SIZE = 1024*1024
+MAX_CHUNK_SIZE = 20*1024*1024
WINDOWS = sys.platform.startswith('win')
@@ -74,6 +76,8 @@ def run_on_js(filename, passes, js_engine, jcache):
assert gen_end > gen_start
pre = js[:gen_start]
post = js[gen_end:]
+ if 'last' in passes:
+ post = post.replace(suffix, '') # no need to write out the metadata - nothing after us needs it
js = js[gen_start:gen_end]
else:
pre = ''
@@ -98,7 +102,11 @@ def run_on_js(filename, passes, js_engine, jcache):
total_size = len(js)
js = None
- chunks = shared.JCache.chunkify(funcs, BEST_JS_PROCESS_SIZE, 'jsopt' if jcache else None)
+ cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
+ intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
+ chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
+
+ chunks = shared.JCache.chunkify(funcs, chunk_size, 'jsopt' if jcache else None)
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
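[The sizing heuristic above aims for roughly NUM_CHUNKS_PER_CORE chunks per core, clamped so a chunk never falls below the old fixed 1 MB size or grows past 20 MB. A minimal standalone sketch of the same computation (the helper name is hypothetical; the constants and logic mirror the hunk):

    import os, multiprocessing

    NUM_CHUNKS_PER_CORE = 1.5
    MIN_CHUNK_SIZE = 1024*1024      # the old fixed BEST_JS_PROCESS_SIZE becomes the floor
    MAX_CHUNK_SIZE = 20*1024*1024   # cap to keep per-chunk uglify/node memory use in check

    def calc_chunk_size(total_size):
      # hypothetical helper mirroring the patch's heuristic
      cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
      intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
      return min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size // intended_num_chunks))

On 4 cores, a 100 MB payload gives intended_num_chunks = 6 and ~16.7 MB chunks; a 2 MB payload clamps up to the 1 MB floor.]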
@@ -134,15 +142,15 @@ def run_on_js(filename, passes, js_engine, jcache):
commands = map(lambda filename: [js_engine, JS_OPTIMIZER, filename, 'noPrintMetadata'] + passes, filenames)
#print [' '.join(command) for command in commands]
- cores = min(multiprocessing.cpu_count(), filenames)
+ cores = min(cores, len(filenames))
if len(chunks) > 1 and cores >= 2:
# We can parallelize
- if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores (total: %.2f MB)' % (len(chunks), cores, total_size/(1024*1024.))
+ if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks of size %d, using %d cores (total: %.2f MB)' % (len(chunks), chunk_size, cores, total_size/(1024*1024.))
pool = multiprocessing.Pool(processes=cores)
filenames = pool.map(run_on_chunk, commands, chunksize=1)
else:
# We can't parallelize, but still break into chunks to avoid uglify/node memory issues
- if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks' % (len(chunks))
+ if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks of size %d' % (len(chunks), chunk_size)
filenames = [run_on_chunk(command) for command in commands]
else:
filenames = []
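[The parallel path is an ordinary multiprocessing.Pool.map over per-chunk command lines; chunksize=1 fits because the work items are few and large. Stripped to its shape (names and the simplified run_on_chunk are hypothetical; the chunk filename sits at index 2 of each command, as built above):

    import multiprocessing, subprocess

    def run_on_chunk(command):
      # worker: run the optimizer on one chunk file; the real version
      # captures output to a new file and returns that path
      subprocess.check_call(command)
      return command[2]

    def optimize_chunks(commands, cores):
      # chunksize=1: hand each worker one large chunk at a time
      if len(commands) > 1 and cores >= 2:
        pool = multiprocessing.Pool(processes=cores)
        return pool.map(run_on_chunk, commands, chunksize=1)
      return [run_on_chunk(c) for c in commands]
]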
diff --git a/tools/shared.py b/tools/shared.py
index ebf803da..1d189cc6 100644
--- a/tools/shared.py
+++ b/tools/shared.py
@@ -294,7 +294,7 @@ except:
CANONICAL_TEMP_DIR = os.path.join(TEMP_DIR, 'emscripten_temp')
EMSCRIPTEN_TEMP_DIR = None
-DEBUG = os.environ.get('EMCC_DEBUG')
+DEBUG = int(os.environ.get('EMCC_DEBUG') or 0)
if DEBUG:
try:
EMSCRIPTEN_TEMP_DIR = CANONICAL_TEMP_DIR
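[Parsing EMCC_DEBUG as an int fixes a truthiness wart: any non-empty string, including '0', was previously truthy, and a numeric DEBUG also makes level checks possible. A standalone illustration, assuming the variable is set to '0':

    import os
    os.environ['EMCC_DEBUG'] = '0'
    print bool(os.environ.get('EMCC_DEBUG'))      # True: a non-empty string is truthy
    print int(os.environ.get('EMCC_DEBUG') or 0)  # 0: falsy, and usable as a level
]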
@@ -554,7 +554,7 @@ class Settings:
ret = []
for key, value in Settings.__dict__.iteritems():
if key == key.upper(): # this is a hack. all of our settings are ALL_CAPS, python internals are not
- jsoned = json.dumps(value)
+ jsoned = json.dumps(value, sort_keys=True)
ret += ['-s', key + '=' + jsoned]
return ret
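[sort_keys=True makes the emitted -s KEY=VALUE strings deterministic for dict- and object-valued settings; without it, equal dicts can serialize with different key orders, which breaks any string comparison or cache key built from the command line (a plausible motivation here, given the jcache work in this same patch). For example:

    import json
    d = {'b': 1, 'a': 2}
    print json.dumps(d)                  # key order follows dict internals, not guaranteed stable
    print json.dumps(d, sort_keys=True)  # always {"a": 2, "b": 1}
]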
@@ -563,6 +563,7 @@ class Settings:
if opt_level >= 1:
Settings.ASSERTIONS = 0
Settings.DISABLE_EXCEPTION_CATCHING = 1
+ Settings.EMIT_GENERATED_FUNCTIONS = 1
if opt_level >= 2:
Settings.RELOOP = 1
if opt_level >= 3:
@@ -749,12 +750,16 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
resolved_symbols = set()
temp_dirs = []
files = map(os.path.abspath, files)
+ has_ar = False
+ for f in files:
+ has_ar = has_ar or Building.is_ar(f)
for f in files:
if not Building.is_ar(f):
if Building.is_bitcode(f):
- new_symbols = Building.llvm_nm(f)
- resolved_symbols = resolved_symbols.union(new_symbols.defs)
- unresolved_symbols = unresolved_symbols.union(new_symbols.undefs.difference(resolved_symbols)).difference(new_symbols.defs)
+ if has_ar:
+ new_symbols = Building.llvm_nm(f)
+ resolved_symbols = resolved_symbols.union(new_symbols.defs)
+ unresolved_symbols = unresolved_symbols.union(new_symbols.undefs.difference(resolved_symbols)).difference(new_symbols.defs)
actual_files.append(f)
else:
# Extract object files from ar archives, and link according to gnu ld semantics
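[The has_ar pre-scan gates the slow llvm-nm walk: loose bitcode files are linked wholesale regardless of their symbols, so tracking defs/undefs only pays off when an archive is present and members must be chosen selectively. The loose-file half of that logic, sketched in isolation (hypothetical names; archive extraction omitted):

    def scan_link_symbols(files, is_ar, llvm_nm):
      # skip per-file nm runs entirely when no archive is present
      if not any(is_ar(f) for f in files):
        return set(), set()
      resolved, unresolved = set(), set()
      for f in files:
        if not is_ar(f):
          syms = llvm_nm(f)
          resolved |= syms.defs
          unresolved = (unresolved | (syms.undefs - resolved)) - syms.defs
      return resolved, unresolved
]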
@@ -807,7 +812,26 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
# Finish link
actual_files = unique_ordered(actual_files) # tolerate people trying to link a.so a.so etc.
if DEBUG: print >>sys.stderr, 'emcc: llvm-linking:', actual_files
- output = Popen([LLVM_LINK] + actual_files + ['-o', target], stdout=PIPE).communicate()[0]
+
+ # check for too-long command line
+ link_cmd = [LLVM_LINK] + actual_files + ['-o', target]
+ # 8k is a bit of an arbitrary limit, but a reasonable one
+ # for max command line size before we use a response file
+ response_file = None
+ if len(' '.join(link_cmd)) > 8192:
+ if DEBUG: print >>sys.stderr, 'using response file for llvm-link'
+ [response_fd, response_file] = mkstemp(suffix='.response', dir=TEMP_DIR)
+ response_fh = os.fdopen(response_fd, 'w')
+ for arg in actual_files:
+ response_fh.write(arg + "\n")
+ response_fh.close()
+ link_cmd = [LLVM_LINK, "@" + response_file, '-o', target]
+
+ output = Popen(link_cmd, stdout=PIPE).communicate()[0]
+
+ if response_file:
+ os.unlink(response_file)
+
assert os.path.exists(target) and (output is None or 'Could not open input file' not in output), 'Linking error: ' + output
for temp_dir in temp_dirs:
try_delete(temp_dir)
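[Response files are the standard escape hatch for command-line length limits; 8192 roughly lines up with cmd.exe's 8191-character ceiling on Windows, and LLVM tools expand @file arguments natively. The same pattern as a reusable sketch (the wrapper itself is hypothetical):

    import os, subprocess
    from tempfile import mkstemp

    def call_with_response_file(tool, inputs, tail, limit=8192, temp_dir=None):
      # hypothetical wrapper: switch to an @-file when the joined
      # command line would exceed the limit
      cmd = [tool] + inputs + tail
      if len(' '.join(cmd)) <= limit:
        return subprocess.call(cmd)
      fd, path = mkstemp(suffix='.response', dir=temp_dir)
      try:
        f = os.fdopen(fd, 'w')
        for arg in inputs:
          f.write(arg + '\n')
        f.close()
        return subprocess.call([tool, '@' + path] + tail)
      finally:
        os.unlink(path)

Arguments containing spaces would need quoting inside the response file; the hunk above, like this sketch, writes them bare.]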
@@ -866,8 +890,14 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
assert os.path.exists(output_filename), 'Could not create bc file: ' + output
return output_filename
+ nm_cache = {} # cache results of nm - it can be slow to run
+
@staticmethod
def llvm_nm(filename, stdout=PIPE, stderr=None):
+ if filename in Building.nm_cache:
+ #if DEBUG: print >> sys.stderr, 'loading nm results for %s from cache' % filename
+ return Building.nm_cache[filename]
+
# LLVM binary ==> list of symbols
output = Popen([LLVM_NM, filename], stdout=stdout, stderr=stderr).communicate()[0]
class ret:
@@ -888,6 +918,7 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
ret.defs = set(ret.defs)
ret.undefs = set(ret.undefs)
ret.commons = set(ret.commons)
+ Building.nm_cache[filename] = ret
return ret
@staticmethod
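[nm_cache is process-lifetime memoization keyed on filename, with no invalidation, so it assumes inputs do not change on disk during a run. The same idea as a generic decorator (hypothetical, equivalent in effect to the dict lookups above):

    def memoize_by_first_arg(fn):
      # first call computes and stores; later calls return the stored
      # result even if the underlying file has since changed
      cache = {}
      def wrapped(key, *args, **kwargs):
        if key not in cache:
          cache[key] = fn(key, *args, **kwargs)
        return cache[key]
      return wrapped
]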
@@ -1088,24 +1119,6 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
@staticmethod
def is_bitcode(filename):
- # checks if a file contains LLVM bitcode
- # if the file doesn't exist or doesn't have valid symbols, it isn't bitcode
- try:
- defs = Building.llvm_nm(filename, stderr=PIPE)
- # If no symbols found, it might just be an empty bitcode file, try to dis it
- if len(defs.defs) + len(defs.undefs) + len(defs.commons) == 0:
- # llvm-nm 3.0 has a bug when reading symbols from ar files
- # so try to see if we're dealing with an ar file, in which
- # case we should try to dis it.
- if not Building.is_ar(filename):
- test_ll = os.path.join(EMSCRIPTEN_TEMP_DIR, 'test.ll')
- Building.llvm_dis(filename, test_ll)
- assert os.path.exists(test_ll)
- try_delete(test_ll)
- except Exception, e:
- if DEBUG: print >> sys.stderr, 'shared.Building.is_bitcode failed to test whether file \'%s\' is a llvm bitcode file! Failed on exception: %s' % (filename, e)
- return False
-
# look for magic signature
b = open(filename, 'r').read(4)
if b[0] == 'B' and b[1] == 'C':
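[With the nm/llvm-dis probing gone, is_bitcode reduces to a magic-byte check: raw LLVM bitcode begins with 'BC' 0xC0 0xDE. A sketch of that raw-signature half (hypothetical; the hunk is truncated here, and the full method presumably also accepts the 0x0B17C0DE bitcode-wrapper header):

    def looks_like_raw_bitcode(filename):
      # check only the 4-byte raw bitcode signature
      try:
        magic = open(filename, 'rb').read(4)
      except IOError:
        return False
      return magic == 'BC\xc0\xde'
]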
@@ -1181,6 +1194,10 @@ class Cache:
except:
pass
try_delete(RELOOPER)
+ try:
+ open(Cache.dirname + '__last_clear', 'w').write('last clear: ' + time.asctime() + '\n')
+ except:
+ print >> sys.stderr, 'failed to save last clear time'
# Request a cached file. If it isn't in the cache, it will be created with
# the given creator function
@@ -1269,9 +1286,11 @@ class JCache:
if os.path.exists(chunking_file):
try:
previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
- #if DEBUG: print >> sys.stderr, 'jscache previous mapping', previous_mapping
- except:
- pass
+ if DEBUG: print >> sys.stderr, 'jscache previous mapping of size %d loaded from %s' % (len(previous_mapping), chunking_file)
+ except Exception, e:
+ print >> sys.stderr, 'Failed to load and unpickle previous chunking file at %s: ' % chunking_file, e
+ else:
+ print >> sys.stderr, 'Previous chunking file not found at %s' % chunking_file
chunks = []
if previous_mapping:
# initialize with previous chunking
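[Replacing the bare except/pass with explicit messages distinguishes a cold cache from a corrupt one. The load-or-warn shape in isolation (hypothetical helper, Python 2 style to match the file):

    import cPickle, os, sys

    def load_previous_mapping(path):
      # return None on any failure, but say why, so a missing cache and
      # a corrupt cache look different in the logs
      if not os.path.exists(path):
        print >> sys.stderr, 'no previous chunking file at %s' % path
        return None
      try:
        return cPickle.Unpickler(open(path, 'rb')).load()
      except Exception, e:
        print >> sys.stderr, 'failed to unpickle %s: %s' % (path, e)
        return None
]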
@@ -1334,6 +1353,7 @@ class JCache:
assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
new_mapping[ident] = i
cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
+ if DEBUG: print >> sys.stderr, 'jscache mapping of size %d saved to %s' % (len(new_mapping), chunking_file)
#if DEBUG:
# for i in range(len(chunks)):
# chunk = chunks[i]