Diffstat (limited to 'tools')
-rw-r--r--  tools/shared.py | 110
1 file changed, 59 insertions(+), 51 deletions(-)
diff --git a/tools/shared.py b/tools/shared.py
index a9c97b69..3659f3ab 100644
--- a/tools/shared.py
+++ b/tools/shared.py
@@ -1,4 +1,4 @@
-import shutil, time, os, sys, json, tempfile, copy, shlex, atexit, subprocess
+import shutil, time, os, sys, json, tempfile, copy, shlex, atexit, subprocess, multiprocessing
from subprocess import Popen, PIPE, STDOUT
from tempfile import mkstemp
@@ -428,6 +428,16 @@ def read_pgo_data(filename):
'overflows_lines': overflows_lines
}
+def run_js_optimizer(command):
+ filename = command[2] # XXX hackish
+ output = Popen(command, stdout=PIPE).communicate()[0]
+ assert len(output) > 0 and not output.startswith('Assertion failed'), 'Error in js optimizer: ' + output
+ filename += '.jo.js'
+ f = open(filename, 'w')
+ f.write(output)
+ f.close()
+ return filename
+
# Settings. A global singleton. Not pretty, but nicer than passing |, settings| everywhere
class Settings:
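
For readers skimming the diff, the new per-chunk helper is small enough to restate on its own. Below is a minimal Python 3 sketch of it, with placeholder values for NODE_JS and JS_OPTIMIZER (in shared.py they are resolved from the emscripten config) and a hypothetical pass name in the usage comment; it is an illustration of the helper's contract, not the shipped code.

# Sketch only: NODE_JS, JS_OPTIMIZER and the pass name are assumptions.
import subprocess

NODE_JS = 'node'                        # assumption: node on PATH
JS_OPTIMIZER = 'tools/js-optimizer.js'  # assumption: optimizer script location

def run_js_optimizer(command):
  # The chunk filename rides along at index 2 of the command list
  # ([NODE_JS, JS_OPTIMIZER, <chunk>, <passes...>]), as in the diff above.
  filename = command[2]
  output = subprocess.Popen(command, stdout=subprocess.PIPE,
                            universal_newlines=True).communicate()[0]
  assert len(output) > 0 and not output.startswith('Assertion failed'), \
      'Error in js optimizer: ' + output
  filename += '.jo.js'
  with open(filename, 'w') as f:
    f.write(output)   # optimized chunk lands next to the input, with .jo.js suffix
  return filename

# Usage (hypothetical pass name):
#   run_js_optimizer([NODE_JS, JS_OPTIMIZER, 'module.p0.js', 'simplifyExpressionsPre'])
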
@@ -924,64 +934,62 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)''' % { 'winfix': '' if not WINDOWS e
Building.LLVM_OPT_OPTS = opts
return opts
- MAX_JS_PROCESS_SIZE = 12*1024*1024
- BEST_JS_PROCESS_SIZE = 8*1024*1024
+ BEST_JS_PROCESS_SIZE = 1024*1024
@staticmethod
- def splitter(filename, addendum, func, args=[]):
- # Split up huge files into pieces small enough for node/uglify/etc. to handle
- js = open(filename).read()
- if len(js) > Building.MAX_JS_PROCESS_SIZE:
- if os.environ.get('EMCC_DEBUG'): print >> sys.stderr, 'splitting up js file'
-
- suffix_marker = '// EMSCRIPTEN_GENERATED_FUNCTIONS'
- suffix_start = js.find(suffix_marker)
- suffix = ''
- if suffix_start >= 0:
- suffix = js[suffix_start:js.find('\n', suffix_start)] + '\n'
+ def js_optimizer(filename, passes):
+ if type(passes) == str:
+ passes = [passes]
- filename += addendum
- f = open(filename, 'w')
+ js = open(filename).read()
- i = 0
- f_start = 0
- while True:
- f_end = f_start
- while f_end-f_start < Building.BEST_JS_PROCESS_SIZE and f_end != -1:
- f_end = js.find('\n}\n', f_end+1)
- chunk = js[f_start:(-1 if f_end == -1 else f_end+3)] + suffix
- temp_in_file = filename + '.p%d.js' % i
- if os.environ.get('EMCC_DEBUG'): print >> sys.stderr, ' split %d, size %d' % (i, len(chunk))
- i += 1
- open(temp_in_file, 'w').write(chunk)
- temp_out_file = func(*([temp_in_file] + args + [False])) # maybe_big is now false
- f.write(
- ''.join(filter(lambda line: not line.startswith(suffix_marker), open(temp_out_file).readlines()))
- )
- if f_end == -1: break
- f_start = f_end+3
- if f_start >= len(js): break
-
- f.write(suffix)
+ # Find suffix
+ suffix_marker = '// EMSCRIPTEN_GENERATED_FUNCTIONS'
+ suffix_start = js.find(suffix_marker)
+ suffix = ''
+ if suffix_start >= 0:
+ suffix = js[suffix_start:js.find('\n', suffix_start)] + '\n'
+
+ # Pick where to split into chunks, so that (1) they do not oom in node/uglify, and (2) we can run them in parallel
+ chunks = []
+ i = 0
+ f_start = 0
+ while True:
+ f_end = f_start
+ while f_end-f_start < Building.BEST_JS_PROCESS_SIZE and f_end != -1:
+ f_end = js.find('\n}\n', f_end+1)
+ chunk = js[f_start:(-1 if f_end == -1 else f_end+3)] + suffix
+ temp_file = filename + '.p%d.js' % i
+ i += 1
+ f_start = f_end+3
+ done = f_end == -1 or f_start >= len(js)
+ if done and len(chunks) == 0: break # do not write anything out, just use the input file
+ f = open(temp_file, 'w')
+ f.write(chunk)
f.close()
- return filename
+ chunks.append(temp_file)
+ if done: break
- @staticmethod
- def js_optimizer(filename, passes, maybe_big=True):
- if maybe_big:
- ret = Building.splitter(filename, addendum='.jo.js', func=Building.js_optimizer, args=[passes])
- if ret: return ret
+ if len(chunks) == 0:
+ chunks.append(filename)
- if type(passes) == str:
- passes = [passes]
# XXX Use '--nocrankshaft' to disable crankshaft to work around v8 bug 1895, needed for older v8/node (node 0.6.8+ should be ok)
- output = Popen([NODE_JS, JS_OPTIMIZER, filename] + passes, stdout=PIPE).communicate()[0]
- assert len(output) > 0 and not output.startswith('Assertion failed'), 'Error in js optimizer: ' + output
- filename += '.jo.js'
- f = open(filename, 'w')
- f.write(output)
- f.close()
- return filename
+ commands = map(lambda chunk: [NODE_JS, JS_OPTIMIZER, chunk] + passes, chunks)
+
+ if len(chunks) > 1:
+ cores = min(multiprocessing.cpu_count(), len(chunks))
+ if os.environ.get('EMCC_DEBUG'): print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores' % (len(chunks), cores)
+ pool = multiprocessing.Pool(processes=cores)
+ filenames = pool.map(run_js_optimizer, commands, chunksize=1)
+ filename += '.jo.js'
+ f = open(filename, 'w')
+ for out_file in filenames:
+ f.write(open(out_file).read())
+ f.close()
+ return filename
+ else:
+ # one simple chunk, just do it
+ return run_js_optimizer(commands[0])
@staticmethod
def closure_compiler(filename):
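
To make the control flow of the rewritten js_optimizer easier to follow, here is a rough, self-contained sketch of the same split/optimize/rejoin pattern: cut the file at '\n}\n' function boundaries once a chunk passes the size target, hand the chunk files to a multiprocessing.Pool capped at cpu_count(), and concatenate the per-chunk outputs in order. split_js, optimize_chunk and optimize_parallel are hypothetical names used only for this illustration; the real code calls run_js_optimizer, which shells out to node.

import multiprocessing

BEST_JS_PROCESS_SIZE = 1024 * 1024   # mirrors the constant in the diff
SUFFIX_MARKER = '// EMSCRIPTEN_GENERATED_FUNCTIONS'

def split_js(js, suffix):
  # Grow each chunk until it passes the size target, always cutting at a
  # top-level '\n}\n' so no function is split in half; each chunk gets the
  # generated-functions suffix so the optimizer sees well-formed input.
  chunks, f_start = [], 0
  while True:
    f_end = f_start
    while f_end - f_start < BEST_JS_PROCESS_SIZE and f_end != -1:
      f_end = js.find('\n}\n', f_end + 1)
    chunks.append(js[f_start:(len(js) if f_end == -1 else f_end + 3)] + suffix)
    if f_end == -1: break
    f_start = f_end + 3
    if f_start >= len(js): break
  return chunks

def optimize_chunk(chunk_path):
  # Stand-in for run_js_optimizer([NODE_JS, JS_OPTIMIZER, chunk_path] + passes).
  out_path = chunk_path + '.jo.js'
  with open(chunk_path) as src, open(out_path, 'w') as out:
    out.write(src.read())   # the real helper writes node's stdout here
  return out_path

def optimize_parallel(filename):
  js = open(filename).read()
  suffix_start = js.find(SUFFIX_MARKER)
  suffix = js[suffix_start:js.find('\n', suffix_start)] + '\n' if suffix_start >= 0 else ''
  # Write each chunk to its own temp file, as the diff does with filename.p%d.js.
  chunk_paths = []
  for i, chunk in enumerate(split_js(js, suffix)):
    path = '%s.p%d.js' % (filename, i)
    with open(path, 'w') as f:
      f.write(chunk)
    chunk_paths.append(path)
  cores = min(multiprocessing.cpu_count(), len(chunk_paths))
  pool = multiprocessing.Pool(processes=cores)
  # pool.map returns results in input order, so concatenating the outputs
  # keeps the functions in their original order.
  out_paths = pool.map(optimize_chunk, chunk_paths, chunksize=1)
  out_name = filename + '.jo.js'
  with open(out_name, 'w') as f:
    for p in out_paths:
      f.write(open(p).read())
  return out_name

# Usage (a __main__ guard is needed on platforms that spawn worker processes):
#   if __name__ == '__main__':
#     optimize_parallel('module.js')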