author     Alon Zakai <alonzakai@gmail.com>   2012-12-01 15:54:16 -0800
committer  Alon Zakai <alonzakai@gmail.com>   2012-12-07 14:23:22 -0800
commit     c507a8cf49469b37814864ddc446feb88383dd5d (patch)
tree       9891a3961c0cd11172f5557da415268a4e9709e0
parent     74c843c55f0238bd75a919fdd5b2dc2f57f48fe2 (diff)
64-bit dynamic shifts in asm
-rwxr-xr-x  emscripten.py       4
-rw-r--r--  src/analyzer.js     5
-rw-r--r--  src/parseTools.js   7
-rw-r--r--  src/runtime.js     34
4 files changed, 35 insertions, 15 deletions
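
The patch routes dynamic (non-constant) 64-bit shifts through Runtime.bitshift64 in asm.js mode as well, importing it into the asm module as Runtime_bitshift64. Instead of returning a JavaScript array, the helper now writes the two 32-bit halves of the result into the heap at tempDoublePtr, and the generated code reads them back via makeGetTempDouble, which suits asm.js, where results of foreign calls can only be coerced to scalars. A minimal standalone sketch of that hand-off pattern, using placeholder names (heap32, tempPtr, shl64) rather than the actual Runtime code:

// Illustrative sketch only (placeholder names heap32/tempPtr/shl64, not the actual Runtime code):
// the helper stores both 32-bit result words into the heap, and the caller reads them back.
var heap32 = new Int32Array(16);
var tempPtr = 8; // stand-in for tempDoublePtr

function shl64(low, high, bits) { // dynamic shl, assuming 0 < bits < 32
  heap32[tempPtr >> 2] = low << bits;                                  // low word of the result
  heap32[(tempPtr + 4) >> 2] = (high << bits) | (low >>> (32 - bits)); // high word, with the carried-over bits
}

shl64(0x80000001 | 0, 0, 1);
var resLow  = heap32[tempPtr >> 2];       // 2
var resHigh = heap32[(tempPtr + 4) >> 2]; // 1
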
diff --git a/emscripten.py b/emscripten.py
index 7c064850..1b05a7a8 100755
--- a/emscripten.py
+++ b/emscripten.py
@@ -291,9 +291,9 @@ def emscript(infile, settings, outfile, libraries=[]):
function_tables_defs = '\n'.join([table for table in last_forwarded_json['Functions']['tables'].itervalues()])
if settings.get('ASM_JS'):
- asm_setup = ''
+ asm_setup = 'var Runtime_bitshift64 = Runtime.bitshift64;'
fundamentals = ['buffer', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array']
- basics = ['abort', 'assert', 'STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
+ basics = ['abort', 'assert', 'STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT', 'Runtime_bitshift64']
if not settings['NAMED_GLOBALS']: basics += ['GLOBAL_BASE']
if forwarded_json['Types']['preciseI64MathUsed']:
basics += ['i64Math_' + op for op in ['add', 'subtract', 'multiply', 'divide', 'modulo']]
diff --git a/src/analyzer.js b/src/analyzer.js
index 014579f4..6a1eb2f6 100644
--- a/src/analyzer.js
+++ b/src/analyzer.js
@@ -554,9 +554,10 @@ function analyzer(data, sidePass) {
// We can't statically legalize this, do the operation at runtime TODO: optimize
assert(sourceBits == 64, 'TODO: handle nonconstant shifts on != 64 bits');
value.intertype = 'value';
- value.ident = 'Runtime.bitshift64(' + sourceElements[0].ident + ', ' +
+ value.ident = 'Runtime' + (ASM_JS ? '_' : '.') + 'bitshift64(' + sourceElements[0].ident + ', ' +
sourceElements[1].ident + ',"' + value.op + '",' + value.params[1].ident + '$0);' +
- 'var ' + value.assignTo + '$0 = ' + value.assignTo + '[0], ' + value.assignTo + '$1 = ' + value.assignTo + '[1];';
+ 'var ' + value.assignTo + '$0 = ' + makeGetTempDouble(0) + ', ' + value.assignTo + '$1 = ' + makeGetTempDouble(1) + ';';
+ value.assignTo = null;
i++;
continue;
}
diff --git a/src/parseTools.js b/src/parseTools.js
index 258ef40a..86c0e027 100644
--- a/src/parseTools.js
+++ b/src/parseTools.js
@@ -965,6 +965,10 @@ function asmCoercion(value, type) {
}
}
+function makeGetTempDouble(i) {
+ return makeGetValue('tempDoublePtr', Runtime.getNativeTypeSize('i32')*i, 'i32');
+}
+
// See makeSetValue
function makeGetValue(ptr, pos, type, noNeedFirst, unsigned, ignore, align, noSafe) {
noticePtr(ptr);
@@ -1755,7 +1759,8 @@ function processMathop(item) {
case 'ashr':
case 'lshr': {
if (!isNumber(idents[1])) {
- return 'Runtime.bitshift64(' + idents[0] + '[0], ' + idents[0] + '[1],"' + op + '",' + stripCorrections(idents[1]) + '[0]|0)';
+ return '(Runtime' + (ASM_JS ? '_' : '.') + 'bitshift64(' + idents[0] + '[0], ' + idents[0] + '[1],"' + op + '",' + stripCorrections(idents[1]) + '[0]|0),' +
+ '[' + makeGetTempDouble(0) + ',' + makeGetTempDouble(1) + '])';
}
bits = parseInt(idents[1]);
var ander = Math.pow(2, bits)-1;
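
For the non-constant case, processMathop now emits a comma expression: the call to Runtime_bitshift64 runs for its side effect on the heap, and the expression then evaluates to a [low, high] pair rebuilt from the two words at tempDoublePtr. Roughly the shape of the generated code, with hypothetical operand names (a$0, a$1, b$0); the exact heap indexing produced by makeGetTempDouble may differ:

// Sketch of the emitted expression (operand names are hypothetical):
(Runtime_bitshift64(a$0, a$1, "lshr", b$0 | 0),
 [HEAP32[tempDoublePtr >> 2], HEAP32[(tempDoublePtr + 4) >> 2]])
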
diff --git a/src/runtime.js b/src/runtime.js
index 52e0f4bc..3bb7ad30 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -124,36 +124,50 @@ var Runtime = {
// Mirrors processMathop's treatment of constants (which we optimize directly)
bitshift64: function(low, high, op, bits) {
+ var ret;
var ander = Math.pow(2, bits)-1;
if (bits < 32) {
switch (op) {
case 'shl':
- return [low << bits, (high << bits) | ((low&(ander << (32 - bits))) >>> (32 - bits))];
+ ret = [low << bits, (high << bits) | ((low&(ander << (32 - bits))) >>> (32 - bits))];
+ break;
case 'ashr':
- return [(((low >>> bits ) | ((high&ander) << (32 - bits))) >> 0) >>> 0, (high >> bits) >>> 0];
+ ret = [(((low >>> bits ) | ((high&ander) << (32 - bits))) >> 0) >>> 0, (high >> bits) >>> 0];
+ break;
case 'lshr':
- return [((low >>> bits) | ((high&ander) << (32 - bits))) >>> 0, high >>> bits];
+ ret = [((low >>> bits) | ((high&ander) << (32 - bits))) >>> 0, high >>> bits];
+ break;
}
} else if (bits == 32) {
switch (op) {
case 'shl':
- return [0, low];
+ ret = [0, low];
+ break;
case 'ashr':
- return [high, (high|0) < 0 ? ander : 0];
+ ret = [high, (high|0) < 0 ? ander : 0];
+ break;
case 'lshr':
- return [high, 0];
+ ret = [high, 0];
+ break;
}
} else { // bits > 32
switch (op) {
case 'shl':
- return [0, low << (bits - 32)];
+ ret = [0, low << (bits - 32)];
+ break;
case 'ashr':
- return [(high >> (bits - 32)) >>> 0, (high|0) < 0 ? ander : 0];
+ ret = [(high >> (bits - 32)) >>> 0, (high|0) < 0 ? ander : 0];
+ break;
case 'lshr':
- return [high >>> (bits - 32) , 0];
+ ret = [high >>> (bits - 32) , 0];
+ break;
}
}
- abort('unknown bitshift64 op: ' + [value, op, bits]);
+#if ASSERTIONS
+ assert(ret);
+#endif
+ HEAP32[tempDoublePtr>>2] = ret[0]; // cannot use utility functions since we are in runtime itself
+ HEAP32[tempDoublePtr+4>>2] = ret[1];
},
// Imprecise bitops utilities
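
In bitshift64 itself, the 64-bit value is handled as a (low, high) pair of 32-bit words: for shift amounts under 32, the bits that cross the word boundary are masked out of one word and ORed into the other, while shifts of 32 or more simply move (part of) one word into the other slot. A self-contained sketch of the logical-shift-right case for 0 < bits < 32, mirroring the expressions above:

// Standalone illustration, not the Runtime code itself; assumes 0 < bits < 32.
function lshr64(low, high, bits) {
  var ander = Math.pow(2, bits) - 1;                                     // mask for the low `bits` bits of `high`
  var newLow = ((low >>> bits) | ((high & ander) << (32 - bits))) >>> 0; // shifted low word plus bits carried down from high
  var newHigh = high >>> bits;
  return [newLow, newHigh];
}

// Example: 0x0000000300000004 >>> 2 is 0x00000000C0000001,
// and lshr64(4, 3, 2) returns [0xC0000001, 0].
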