Diffstat (limited to 'tests/runner.py')
-rw-r--r--  tests/runner.py | 43 ++++++++++++++++++++++++++++++++++---------
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/tests/runner.py b/tests/runner.py
index 35be05a6..27a939bd 100644
--- a/tests/runner.py
+++ b/tests/runner.py
@@ -27,6 +27,7 @@ EMSCRIPTEN = path_from_root('emscripten.py')
 DEMANGLER = path_from_root('third_party', 'demangler.py')
 NAMESPACER = path_from_root('tools', 'namespacer.py')
 EMMAKEN = path_from_root('tools', 'emmaken.py')
+AUTODEBUGGER = path_from_root('tools', 'autodebugger.py')
 
 # Global cache for tests (we have multiple TestCase instances; this object lets them share data)
@@ -183,6 +184,9 @@ class RunnerCore(unittest.TestCase):
     assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
     return ret
 
+  def run_llvm_interpreter(self, args):
+    return Popen([LLVM_INTERPRETER] + args, stdout=PIPE, stderr=STDOUT).communicate()[0]
+
   def assertContained(self, value, string):
     if value not in string:
       raise Exception("Expected to find '%s' in '%s'" % (value, string))
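The new helper centralizes every lli invocation in the suite. A minimal standalone sketch of the same pattern, assuming only that an lli binary is on PATH (the suite resolves it via LLVM_INTERPRETER; run_lli here is a hypothetical stand-in):

  from subprocess import Popen, PIPE, STDOUT

  def run_lli(args, lli='lli'):  # 'lli' on PATH is an assumption
    # stderr is folded into stdout so callers can scan a single string
    return Popen([lli] + args, stdout=PIPE, stderr=STDOUT).communicate()[0]

  # e.g. out = run_lli(['src.cpp.o']), then scan `out` for errors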
@@ -200,7 +204,7 @@ if 'benchmark' not in sys.argv:
   class T(RunnerCore): # Short name, to make it more fun to use manually on the commandline
     ## Does a complete test - builds, runs, checks output, etc.
-    def do_test(self, src, expected_output, args=[], output_nicerizer=None, output_processor=None, no_build=False, main_file=None, additional_files=[], js_engines=None, post_build=None, basename='src.cpp', libraries=[], includes=[], force_c=False):
+    def do_test(self, src, expected_output=None, args=[], output_nicerizer=None, output_processor=None, no_build=False, main_file=None, additional_files=[], js_engines=None, post_build=None, basename='src.cpp', libraries=[], includes=[], force_c=False):
       #print 'Running test:', inspect.stack()[1][3].replace('test_', ''), '[%s,%s,%s]' % (COMPILER.split(os.sep)[-1], 'llvm-optimizations' if LLVM_OPTS else '', 'reloop&optimize' if RELOOP else '')
       if force_c or (main_file is not None and main_file[-2:] == '.c'):
         basename = 'src.c'
@@ -215,6 +219,11 @@ if 'benchmark' not in sys.argv:
       if post_build is not None:
         post_build(filename + '.o.js')
 
+      # If no expected output was provided, generate it now by running the code in lli
+      if expected_output is None:
+        expected_output = self.run_llvm_interpreter([filename + '.o'])
+        print '[autogenerated expected output: %20s]' % (expected_output[0:17].replace('\n', '') + '...')
+
       # Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
       if js_engines is None:
         js_engines = [V8_ENGINE, SPIDERMONKEY_ENGINE]
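Together with the new default of expected_output=None, this hunk lets a test omit its expectation entirely: lli acts as the reference implementation, and whatever it prints becomes the string the JS engines must match. A hedged example of the call style this enables (a hypothetical test method, not part of this commit; it assumes the T class context above):

    def test_answer(self):  # hypothetical test
      src = '''
        #include <stdio.h>
        int main() { printf("%d\\n", 42); return 0; }
      '''
      # No expected_output argument: do_test runs lli on the generated
      # bitcode and requires the JS engines to match whatever lli printed.
      self.do_test(src)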
@@ -227,7 +236,7 @@ if 'benchmark' not in sys.argv:
       #shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
 
-    def prep_ll_test(self, filename, ll_file):
+    def prep_ll_test(self, filename, ll_file, force_recompile=False):
       if ll_file.endswith(('.bc', '.o')):
         shutil.copy(ll_file, filename + '.o')
         self.do_llvm_dis(filename)
@@ -235,24 +244,25 @@ if 'benchmark' not in sys.argv:
         os.remove(filename + '.o.ll')
         ll_file = filename + '.o.ll.in'
 
-      if LLVM_OPTS:
+      if LLVM_OPTS or force_recompile:
         shutil.copy(ll_file, filename + '.o.ll.pre')
-        Popen([LLVM_AS, filename + '.o.ll.pre'] + ['-o=' + filename + '.o'], stdout=PIPE, stderr=STDOUT).communicate()[0]
+        output = Popen([LLVM_AS, filename + '.o.ll.pre'] + ['-o=' + filename + '.o'], stdout=PIPE, stderr=STDOUT).communicate()[0]
+        assert 'error:' not in output, 'Error in llvm-as: ' + output
         self.do_llvm_opts(filename)
         Popen([LLVM_DIS, filename + '.o'] + LLVM_DIS_OPTS + ['-o=' + filename + '.o.ll'], stdout=PIPE, stderr=STDOUT).communicate()[0]
       else:
         shutil.copy(ll_file, filename + '.o.ll')
 
     # No building - just process an existing .ll file (or .bc, which we turn into .ll)
-    def do_ll_test(self, ll_file, output, args=[], js_engines=None, output_nicerizer=None, post_build=None):
+    def do_ll_test(self, ll_file, expected_output=None, args=[], js_engines=None, output_nicerizer=None, post_build=None, force_recompile=False):
       if COMPILER != LLVM_GCC: return # We use existing .ll, so which compiler is unimportant
       filename = os.path.join(self.get_dir(), 'src.cpp')
-      self.prep_ll_test(filename, ll_file)
+      self.prep_ll_test(filename, ll_file, force_recompile)
       self.do_emscripten(filename)
       self.do_test(None,
-                   output,
+                   expected_output,
                    args,
                    no_build=True,
                    js_engines=js_engines,
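Previously the llvm-as output was discarded, so a malformed .ll file only surfaced later as a confusing JS failure. The guard works because llvm-as reports diagnostics containing 'error:', and the STDOUT redirect folds stderr into the captured string. A standalone sketch of the same guard (filenames hypothetical, Python 2 string handling as in the suite):

  from subprocess import Popen, PIPE, STDOUT

  out = Popen(['llvm-as', 'src.o.ll.pre', '-o=src.o'],
              stdout=PIPE, stderr=STDOUT).communicate()[0]
  assert 'error:' not in out, 'Error in llvm-as: ' + out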
@@ -1663,8 +1673,7 @@ if 'benchmark' not in sys.argv:
       # Generate the native code output using lli
       lli_file = os.path.join(self.get_dir(), 'lli.raw')
-      stdout = Popen([LLVM_INTERPRETER, os.path.join(self.get_dir(), 'src.c.o'), '-i', original_j2k, '-o', lli_file],
-                     stdout=PIPE, stderr=STDOUT).communicate()[0]
+      stdout = self.run_llvm_interpreter([os.path.join(self.get_dir(), 'src.c.o'), '-i', original_j2k, '-o', lli_file])
       assert 'Successfully generated' in stdout, 'Error in lli run: ' + stdout
       lli_data = open(lli_file, 'rb').read()
@@ -1743,6 +1752,22 @@ if 'benchmark' not in sys.argv:
         output = 'hello, world!'
         self.do_ll_test(path_from_root('tests', 'cases', name), output)
 
+    def test_autodebug(self):
+      if COMPILER != LLVM_GCC: return # TODO: Check both
+      if LLVM_OPTS: return # They mess us up
+
+      # Run a test that should work, generating some code
+      self.test_structs()
+
+      # Autodebug the code
+      filename = os.path.join(self.get_dir(), 'src.cpp.o.ll')
+      output = Popen(['python', AUTODEBUGGER, filename, filename + '.ll'], stdout=PIPE, stderr=STDOUT).communicate()[0]
+      assert 'Success.' in output
+
+      # Compare to each other, and to expected output
+      self.do_ll_test(path_from_root('tests', filename + '.ll'), force_recompile=True)
+      self.do_ll_test(path_from_root('tests', filename + '.ll'), '34 : 10\n42 : 7008\n51 : 7018') # No need to force recompile twice - already autodebugged
+
     ### Integration tests
 
     def test_scriptaclass(self):
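The expected string above ('34 : 10\n42 : 7008\n51 : 7018') suggests the instrumented program prints one 'line : value' pair per instrumented location: the first do_ll_test, with no expectation, checks the JS engines against lli's output for the instrumented IR, and the second pins the exact values. A hedged sketch of driving tools/autodebugger.py by hand, mirroring the test above (paths hypothetical):

  import os
  from subprocess import Popen, PIPE, STDOUT

  autodebugger = os.path.join('tools', 'autodebugger.py')  # AUTODEBUGGER above
  ll = os.path.join('/tmp', 'src.cpp.o.ll')                # hypothetical input
  out = Popen(['python', autodebugger, ll, ll + '.ll'],
              stdout=PIPE, stderr=STDOUT).communicate()[0]
  assert 'Success.' in out, out
  # ll + '.ll' now carries the instrumentation; lli and the JS build of it
  # should print identical 'line : value' sequences.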