author    | Chad Austin <chad@imvu.com> | 2013-03-04 19:50:42 -0800
committer | Chad Austin <chad@imvu.com> | 2013-03-04 19:50:42 -0800
commit    | c1c88e7185be8db7e38a94ceecc2a23f8cb72d7d (patch)
tree      | 06bcecf22d8e5b2d5e89376bcd8926bb461e4996
parent    | cc0d959ebeb0624d7e8f3af496c170340afb8e55 (diff)
Rebasing broke a lot of things (I had to fix about 12 conflicts), so now the sanity tests pass again.
-rwxr-xr-x | emscripten.py  | 13
-rw-r--r-- | tools/cache.py | 46

2 files changed, 16 insertions, 43 deletions
```diff
diff --git a/emscripten.py b/emscripten.py
index 19da6268..0b9244c2 100755
--- a/emscripten.py
+++ b/emscripten.py
@@ -46,7 +46,7 @@ def process_funcs((i, funcs, meta, settings_file, compiler, forwarded_file, libr
   return out
 
 def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
-             jcache=None, temp_files=None, DEBUG=False):
+             jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
   """Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible
 
   Args:
@@ -56,8 +56,6 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
     outfile: The file where the output is written.
   """
 
-  DEBUG = configuration.DEBUG
-  DEBUG_CACHE = configuration.DEBUG_CACHE
   compiler = path_from_root('src', 'compiler.js')
 
   # Parallelization: We run 3 phases:
@@ -490,7 +488,7 @@ Runtime.stackRestore = function(top) { asm.stackRestore(top) };
 
   outfile.close()
 
-def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG):
+def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBUG_CACHE):
   # Prepare settings for serialization to JSON.
   settings = {}
   for setting in args.settings:
@@ -573,7 +571,7 @@ def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG):
   shared.Building.ensure_relooper(relooper)
 
   emscript(args.infile, settings, args.outfile, libraries, compiler_engine=compiler_engine,
-           jcache=jcache, temp_files=temp_files, DEBUG=DEBUG)
+           jcache=jcache, temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
 
 def _main(environ):
   parser = optparse.OptionParser(
@@ -669,8 +667,10 @@ WARNING: You should normally never use this! Use emcc instead.
 
   if keywords.verbose is None:
     DEBUG = get_configuration().DEBUG
+    DEBUG_CACHE = get_configuration().DEBUG_CACHE
   else:
     DEBUG = keywords.verbose
+    DEBUG_CACHE = keywords.verbose
 
   cache = cache_module.Cache()
   temp_files.run_and_clean(lambda: main(
@@ -680,7 +680,8 @@ WARNING: You should normally never use this! Use emcc instead.
     jcache=cache_module.JCache(cache) if keywords.jcache else None,
     relooper=relooper,
     temp_files=temp_files,
-    DEBUG=DEBUG
+    DEBUG=DEBUG,
+    DEBUG_CACHE=DEBUG_CACHE,
   ))
 
 if __name__ == '__main__':
```
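The emscripten.py hunks thread the two debug flags through as explicit parameters instead of having emscript() read them from the global configuration object. Below is a minimal sketch of that pattern; the DEBUG/DEBUG_CACHE names and the keywords.verbose fallback come from the diff, while everything else (function bodies, file names) is illustrative rather than emscripten's actual code:

```python
def emscript(infile, outfile, DEBUG=None, DEBUG_CACHE=None):
    # The callee no longer reaches into a global configuration; it trusts its arguments.
    if DEBUG:
        print('compiling %s -> %s' % (infile, outfile))
    if DEBUG_CACHE:
        print('cache debug output enabled')

def main(infile, outfile, DEBUG, DEBUG_CACHE):
    # Every intermediate caller forwards both flags explicitly.
    emscript(infile, outfile, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)

def _main(verbose=None):
    # Mirrors the keywords.verbose logic in the diff: an explicit verbosity
    # setting forces both flags; otherwise fall back to stored configuration
    # (a plain False here stands in for get_configuration().DEBUG / .DEBUG_CACHE).
    if verbose is None:
        DEBUG = False
        DEBUG_CACHE = False
    else:
        DEBUG = verbose
        DEBUG_CACHE = verbose
    main('input.bc', 'output.js', DEBUG, DEBUG_CACHE)

_main(verbose=True)  # prints both debug lines
```

Resolving the flags once at the entry point keeps the deeper functions testable in isolation, which is presumably why the commit also plumbs DEBUG_CACHE through main() rather than reintroducing a module-level read.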
```diff
diff --git a/tools/cache.py b/tools/cache.py
index dec32473..d9fabf92 100644
--- a/tools/cache.py
+++ b/tools/cache.py
@@ -96,18 +96,16 @@ class JCache:
 # generate the same chunks, barring big differences in function sizes that
 # violate our chunk size guideline. If caching is not used, chunking_file
 # should be None
-@classmethod
-def chunkify(funcs, chunk_size, chunking_file):
+def chunkify(funcs, chunk_size, chunking_file, DEBUG=False):
   previous_mapping = None
   if chunking_file:
+    chunking_file = chunking_file
     if os.path.exists(chunking_file):
       try:
         previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
-        if DEBUG: print >> sys.stderr, 'jscache previous mapping of size %d loaded from %s' % (len(previous_mapping), chunking_file)
-      except Exception, e:
-        print >> sys.stderr, 'Failed to load and unpickle previous chunking file at %s: ' % chunking_file, e
-    else:
-      print >> sys.stderr, 'Previous chunking file not found at %s' % chunking_file
+        #if DEBUG: print >> sys.stderr, 'jscache previous mapping', previous_mapping
+      except:
+        pass
   chunks = []
   if previous_mapping:
     # initialize with previous chunking
@@ -152,35 +150,10 @@ def chunkify(funcs, chunk_size, chunking_file):
       curr.append(func)
       total_size += curr_size
     else:
-      n = previous_mapping[ident]
-      while n >= len(chunks): chunks.append([])
-      chunks[n].append(func)
-  # add news and adjust for new sizes
-  spilled = news
-  for chunk in chunks:
-    size = sum([len(func[1]) for func in chunk])
-    while size > 1.5*chunk_size and len(chunk) > 0:
-      spill = chunk.pop()
-      spilled.append(spill)
-      size -= len(spill[1])
-  for chunk in chunks:
-    size = sum([len(func[1]) for func in chunk])
-    while size < 0.66*chunk_size and len(spilled) > 0:
-      spill = spilled.pop()
-      chunk.append(spill)
-      size += len(spill[1])
-  chunks = filter(lambda chunk: len(chunk) > 0, chunks) # might have empty ones, eliminate them
-  funcs = spilled # we will allocate these into chunks as if they were normal inputs
-# initialize reasonably, the rest of the funcs we need to split out
-curr = []
-total_size = 0
-for i in range(len(funcs)):
-  func = funcs[i]
-  curr_size = len(func[1])
-  if total_size + curr_size < chunk_size:
-    curr.append(func)
-    total_size += curr_size
-  else:
+      chunks.append(curr)
+      curr = [func]
+      total_size = curr_size
+  if curr:
     chunks.append(curr)
     curr = None
   if chunking_file:
@@ -195,7 +168,6 @@ for i in range(len(funcs)):
         assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
         new_mapping[ident] = i
     cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
-    if DEBUG: print >> sys.stderr, 'jscache mapping of size %d saved to %s' % (len(new_mapping), chunking_file)
   #if DEBUG:
   #  for i in range(len(chunks)):
   #    chunk = chunks[i]
```
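The tools/cache.py hunks remove the half-merged spill logic left over from the rebase and restore a single greedy packing loop. Below is a self-contained sketch of that loop and of the pickled ident-to-chunk mapping it saves, with simplified names; the real chunkify() also reloads the previous mapping up front to keep chunk assignments stable across builds, which is omitted here:

```python
import pickle

def chunkify(funcs, chunk_size, chunking_file=None):
    # Greedily pack (ident, source) pairs into chunks of roughly chunk_size
    # characters; len(func[1]) is the size of one function's source.
    chunks, curr, total_size = [], [], 0
    for func in funcs:
        curr_size = len(func[1])
        if total_size + curr_size < chunk_size:
            curr.append(func)            # still fits in the current chunk
            total_size += curr_size
        else:
            chunks.append(curr)          # flush the full chunk ...
            curr = [func]                # ... and start a new one with this func
            total_size = curr_size
    if curr:
        chunks.append(curr)              # keep the trailing partial chunk

    if chunking_file:
        # Persist ident -> chunk index so the next build can reproduce
        # the same chunking and get cache hits.
        new_mapping = {}
        for i, chunk in enumerate(chunks):
            for ident, _ in chunk:
                assert ident not in new_mapping, 'duplicate names break the mapping'
                new_mapping[ident] = i
        with open(chunking_file, 'wb') as f:
            pickle.dump(new_mapping, f)
    return chunks

# Ten functions of 10..19 characters packed into ~50-character chunks:
funcs = [('f%d' % i, 'x' * (10 + i)) for i in range(10)]
print([len(c) for c in chunkify(funcs, chunk_size=50)])  # -> [4, 3, 2, 1]
```

Note that the diff also silences load failures with a bare `except: pass`: a stale or corrupt chunking file only costs cache hits, so it is treated as equivalent to having no previous mapping at all.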