aboutsummaryrefslogtreecommitdiff
path: root/tools/js_optimizer.py
diff options
context:
space:
mode:
Diffstat (limited to 'tools/js_optimizer.py')
-rw-r--r--tools/js_optimizer.py176
1 files changed, 123 insertions, 53 deletions
diff --git a/tools/js_optimizer.py b/tools/js_optimizer.py
index 8681280a..5bed4cb7 100644
--- a/tools/js_optimizer.py
+++ b/tools/js_optimizer.py
@@ -1,5 +1,8 @@
-import os, sys, subprocess, multiprocessing
+import os, sys, subprocess, multiprocessing, re
+import shared
+
+temp_files = shared.TempFiles()
__rootpath__ = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def path_from_root(*pathelems):
@@ -17,13 +20,15 @@ def run_on_chunk(command):
filename = command[2] # XXX hackish
output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]
assert len(output) > 0 and not output.startswith('Assertion failed'), 'Error in js optimizer: ' + output
- filename += '.jo.js'
+ filename = temp_files.get(os.path.basename(filename) + '.jo.js').name
f = open(filename, 'w')
f.write(output)
f.close()
return filename
-def run(filename, passes, js_engine):
+def run(filename, passes, js_engine, jcache):
+ if jcache: shared.JCache.ensure()
+
if type(passes) == str:
passes = [passes]
@@ -37,64 +42,129 @@ def run(filename, passes, js_engine):
suffix = ''
if suffix_start >= 0:
suffix = js[suffix_start:js.find('\n', suffix_start)] + '\n'
+ # if there is metadata, we will run only on the generated functions. If there isn't, we will run on everything.
+ generated = set(eval(suffix[len(suffix_marker)+1:]))
+
+ if not suffix and jcache:
+ # JCache cannot be used without metadata, since it might reorder stuff, and that's dangerous since only generated code can be reordered
+ # This means jcache does not work after closure compiler runs, for example. But you won't get much benefit from jcache with closure
+ # anyhow (since closure is likely the longest part of the build).
+ if DEBUG: print >>sys.stderr, 'js optimizer: no metadata, so disabling jcache'
+ jcache = False
+
+ # If we process only generated code, find that and save the rest on the side
+ func_sig = re.compile('function (_[\w$]+)\(')
+ if suffix:
+ pos = 0
+ gen_start = 0
+ gen_end = 0
+ while 1:
+ m = func_sig.search(js, pos)
+ if not m: break
+ pos = m.end()
+ ident = m.group(1)
+ if ident in generated:
+ if not gen_start:
+ gen_start = m.start()
+ assert gen_start
+ gen_end = js.find('\n}\n', m.end()) + 3
+ assert gen_end > gen_start
+ pre = js[:gen_start]
+ post = js[gen_end:]
+ js = js[gen_start:gen_end]
+ else:
+ pre = ''
+ post = ''
# Pick where to split into chunks, so that (1) they do not oom in node/uglify, and (2) we can run them in parallel
- chunks = []
- i = 0
- f_start = 0
- while True:
- f_end = f_start
- while f_end-f_start < BEST_JS_PROCESS_SIZE and f_end != -1:
- f_end = js.find('\n}\n', f_end+1)
- chunk = js[f_start:(-1 if f_end == -1 else f_end+3)] + suffix
- temp_file = filename + '.p%d.js' % i
- #if DEBUG: print >> sys.stderr, ' chunk %d: %d bytes' % (i, (f_end if f_end >= 0 else len(js)) - f_start)
- i += 1
- f_start = f_end+3
- done = f_end == -1 or f_start >= len(js)
- if done and len(chunks) == 0: break # do not write anything out, just use the input file
- f = open(temp_file, 'w')
- f.write(chunk)
- f.close()
- chunks.append(temp_file)
- if done: break
-
- if len(chunks) == 0:
- chunks.append(filename)
-
- # XXX Use '--nocrankshaft' to disable crankshaft to work around v8 bug 1895, needed for older v8/node (node 0.6.8+ should be ok)
- commands = map(lambda chunk: [js_engine, JS_OPTIMIZER, chunk] + passes, chunks)
-
- if len(chunks) > 1:
- # We are splitting into chunks. Hopefully we can do that in parallel
- commands = map(lambda command: command + ['noPrintMetadata'], commands)
- filename += '.jo.js'
-
- fail = None
- cores = min(multiprocessing.cpu_count(), chunks)
- if cores < 2:
- fail = 'python reports you have %d cores' % cores
- #elif WINDOWS:
- # fail = 'windows (see issue 663)' # This seems fixed with adding emcc.py that imports this file
-
- if not fail:
+ # If we have metadata, we split only the generated code, and save the pre and post on the side (and do not optimize them)
+ parts = map(lambda part: part, js.split('\n}\n'))
+ funcs = []
+ for i in range(len(parts)):
+ func = parts[i]
+ if i < len(parts)-1: func += '\n}\n' # last part needs no }
+ m = func_sig.search(func)
+ if m:
+ ident = m.group(1)
+ else:
+ if suffix: continue # ignore whitespace
+ ident = 'anon_%d' % i
+ funcs.append((ident, func))
+ parts = None
+ total_size = len(js)
+ js = None
+
+ chunks = shared.JCache.chunkify(funcs, BEST_JS_PROCESS_SIZE, 'jsopt' if jcache else None)
+
+ if jcache:
+ # load chunks from cache where we can # TODO: ignore small chunks
+ cached_outputs = []
+ def load_from_cache(chunk):
+ keys = [chunk]
+ shortkey = shared.JCache.get_shortkey(keys) # TODO: share shortkeys with later code
+ out = shared.JCache.get(shortkey, keys)
+ if out:
+ cached_outputs.append(out)
+ return False
+ return True
+ chunks = filter(load_from_cache, chunks)
+ if len(cached_outputs) > 0:
+ if DEBUG: print >> sys.stderr, ' loading %d jsfuncchunks from jcache' % len(cached_outputs)
+ else:
+ cached_outputs = []
+
+ if len(chunks) > 0:
+ def write_chunk(chunk, i):
+ temp_file = temp_files.get('.jsfunc_%d.ll' % i).name
+ f = open(temp_file, 'w')
+ f.write(chunk)
+ f.write(suffix)
+ f.close()
+ return temp_file
+ filenames = [write_chunk(chunks[i], i) for i in range(len(chunks))]
+ else:
+ filenames = []
+
+ if len(filenames) > 0:
+ # XXX Use '--nocrankshaft' to disable crankshaft to work around v8 bug 1895, needed for older v8/node (node 0.6.8+ should be ok)
+ commands = map(lambda filename: [js_engine, JS_OPTIMIZER, filename, 'noPrintMetadata'] + passes, filenames)
+
+ cores = min(multiprocessing.cpu_count(), filenames)
+ if len(chunks) > 1 and cores >= 2:
# We can parallelize
- if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores' % (len(chunks), cores)
+ if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores (total: %.2f MB)' % (len(chunks), cores, total_size/(1024*1024.))
pool = multiprocessing.Pool(processes=cores)
filenames = pool.map(run_on_chunk, commands, chunksize=1)
else:
# We can't parallelize, but still break into chunks to avoid uglify/node memory issues
- if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks (not in parallel because %s)' % (len(chunks), fail)
+ if len(chunks) > 1 and DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks' % (len(chunks))
filenames = [run_on_chunk(command) for command in commands]
+ else:
+ filenames = []
- f = open(filename, 'w')
- for out_file in filenames:
- f.write(open(out_file).read())
- f.write(suffix)
+ filename += '.jo.js'
+ f = open(filename, 'w')
+ f.write(pre);
+ for out_file in filenames:
+ f.write(open(out_file).read())
f.write('\n')
- f.close()
- return filename
- else:
- # one simple chunk, just do it
- return run_on_chunk(commands[0])
+ if jcache:
+ for cached in cached_outputs:
+ f.write(cached); # TODO: preserve order
+ f.write('\n')
+ f.write(post);
+ # No need to write suffix: if there was one, it is inside post which exists when suffix is there
+ f.write('\n')
+ f.close()
+
+ if jcache:
+ # save chunks to cache
+ for i in range(len(chunks)):
+ chunk = chunks[i]
+ keys = [chunk]
+ shortkey = shared.JCache.get_shortkey(keys)
+ shared.JCache.set(shortkey, keys, open(filenames[i]).read())
+ if DEBUG and len(chunks) > 0: print >> sys.stderr, ' saving %d jsfuncchunks to jcache' % len(chunks)
+
+ return filename