aboutsummaryrefslogtreecommitdiff
path: root/tools/js_optimizer.py
blob: 8681280aee5ab31c7c9b2b6372671352903d1ad1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import os, sys, subprocess, multiprocessing

__rootpath__ = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def path_from_root(*pathelems):
  return os.path.join(__rootpath__, *pathelems)

# The JavaScript optimizer script; executed by a JS engine in run().
JS_OPTIMIZER = path_from_root('tools', 'js-optimizer.js')

# Target chunk size in bytes when splitting the input for processing;
# run() cuts the file at function boundaries once a chunk reaches this size.
BEST_JS_PROCESS_SIZE = 1024*1024

# True when running on Windows (any platform string starting with 'win').
WINDOWS = sys.platform.startswith('win')

# Debug-logging flag, taken from the EMCC_DEBUG environment variable
# (None when unset); gates the stderr progress messages in run().
DEBUG = os.environ.get('EMCC_DEBUG')

def run_on_chunk(command):
  """Run one js-optimizer invocation and capture its output.

  command is [js_engine, JS_OPTIMIZER, chunk_filename, pass...] as built by
  run(); the chunk filename is recovered from position 2 (XXX hackish, must
  stay in sync with how run() builds the command).

  Writes the optimizer's stdout to chunk_filename + '.jo.js' and returns
  that output filename. Raises AssertionError if the optimizer fails.
  """
  filename = command[2] # XXX hackish
  proc = subprocess.Popen(command, stdout=subprocess.PIPE)
  output = proc.communicate()[0]
  if not isinstance(output, str):
    # On Python 3, Popen pipes yield bytes; decode so the str checks and the
    # text-mode write below work on both Python 2 and 3.
    output = output.decode('utf-8')
  # Check the exit code too: a failing engine can still emit some output.
  assert proc.returncode == 0, 'js optimizer exited with code %d' % proc.returncode
  assert len(output) > 0 and not output.startswith('Assertion failed'), 'Error in js optimizer: ' + output
  filename += '.jo.js'
  with open(filename, 'w') as f:
    f.write(output)
  return filename

def run(filename, passes, js_engine):
  """Run the js optimizer on filename, applying the given passes.

  The input is split into chunks of roughly BEST_JS_PROCESS_SIZE bytes, cut
  at function boundaries (a closing brace alone on a line), so that
  (1) node/uglify does not run out of memory on huge files and (2) the
  chunks can be optimized in parallel.

  filename:  path of the .js file to optimize
  passes:    a single pass name or a list of pass names for js-optimizer.js
  js_engine: path of the JS engine executable used to run the optimizer

  Returns the path of the file holding the optimized output (filename +
  '.jo.js' when multiple chunks were merged, or the single chunk's output).
  """
  if type(passes) == str:
    passes = [passes]

  js = open(filename).read()
  if os.linesep != '\n':
    js = js.replace(os.linesep, '\n') # we assume \n in the splitting code

  # Find the EMSCRIPTEN_GENERATED_FUNCTIONS suffix line; it is appended to
  # every chunk so the optimizer knows which functions it may operate on.
  suffix_marker = '// EMSCRIPTEN_GENERATED_FUNCTIONS'
  suffix_start = js.find(suffix_marker)
  suffix = ''
  if suffix_start >= 0:
    suffix = js[suffix_start:js.find('\n', suffix_start)] + '\n'

  # Pick where to split into chunks, so that (1) they do not oom in node/uglify, and (2) we can run them in parallel
  chunks = []
  i = 0
  f_start = 0
  while True:
    f_end = f_start
    while f_end-f_start < BEST_JS_PROCESS_SIZE and f_end != -1:
      f_end = js.find('\n}\n', f_end+1)
    # Bugfix: when no later function boundary exists, take everything to the
    # end of js. The old slice end of -1 silently dropped the file's last
    # character.
    chunk = js[f_start:(len(js) if f_end == -1 else f_end+3)] + suffix
    temp_file = filename + '.p%d.js' % i
    i += 1
    f_start = f_end+3
    done = f_end == -1 or f_start >= len(js)
    if done and len(chunks) == 0: break # do not write anything out, just use the input file
    with open(temp_file, 'w') as f:
      f.write(chunk)
    chunks.append(temp_file)
    if done: break

  if len(chunks) == 0:
    chunks.append(filename) # everything fits in one chunk; process the input file directly

  # XXX Use '--nocrankshaft' to disable crankshaft to work around v8 bug 1895, needed for older v8/node (node 0.6.8+ should be ok)
  commands = map(lambda chunk: [js_engine, JS_OPTIMIZER, chunk] + passes, chunks)

  if len(chunks) > 1:
    # We are splitting into chunks. Hopefully we can do that in parallel
    commands = map(lambda command: command + ['noPrintMetadata'], commands)
    filename += '.jo.js'

    fail = None
    # Bugfix: cap the worker count at the number of chunks. The old code
    # passed the chunks *list* itself to min(); under Python 2's cross-type
    # ordering that always returned the int, i.e. cpu_count() (and it is a
    # TypeError on Python 3).
    cores = min(multiprocessing.cpu_count(), len(chunks))
    if cores < 2:
      fail = 'python reports you have %d cores' % cores
    #elif WINDOWS:
    #  fail = 'windows (see issue 663)' # This seems fixed with adding emcc.py that imports this file

    if not fail:
      # We can parallelize
      if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores' % (len(chunks), cores)
      pool = multiprocessing.Pool(processes=cores)
      filenames = pool.map(run_on_chunk, commands, chunksize=1)
    else:
      # We can't parallelize, but still break into chunks to avoid uglify/node memory issues
      if DEBUG: print >> sys.stderr, 'splitting up js optimization into %d chunks (not in parallel because %s)' % (len(chunks), fail)
      filenames = [run_on_chunk(command) for command in commands]

    # Concatenate the optimized chunks, restoring the suffix marker once at the end.
    with open(filename, 'w') as f:
      for out_file in filenames:
        f.write(open(out_file).read())
      f.write(suffix)
      f.write('\n')
    return filename
  else:
    # one simple chunk, just do it
    return run_on_chunk(commands[0])