Diffstat (limited to 'src')
-rw-r--r--  src/analyzer.js        | 19
-rw-r--r--  src/corruptionCheck.js |  6
-rw-r--r--  src/intertyper.js      | 12
-rw-r--r--  src/jsifier.js         | 40
-rw-r--r--  src/library.js         | 70
-rw-r--r--  src/modules.js         |  2
-rw-r--r--  src/parseTools.js      | 22
-rw-r--r--  src/preamble.js        |  6
-rw-r--r--  src/runtime.js         | 32
-rw-r--r--  src/settings.js        |  3
10 files changed, 170 insertions, 42 deletions
diff --git a/src/analyzer.js b/src/analyzer.js
index 7fbdf24d..03d44cb7 100644
--- a/src/analyzer.js
+++ b/src/analyzer.js
@@ -469,6 +469,23 @@ function analyzer(data, sidePass) {
             i++;
             continue; // special case, handled in makeComparison
           }
+          case 'va_arg': {
+            assert(value.type == 'i64');
+            assert(value.value.type == 'i32*', value.value.type);
+            i += removeAndAdd(label.lines, i, range(2).map(function(x) {
+              return {
+                intertype: 'va_arg',
+                assignTo: value.assignTo + '$' + x,
+                type: 'i32',
+                value: {
+                  intertype: 'value',
+                  ident: value.value.ident, // We read twice from the same i32* var, incrementing // + '$' + x,
+                  type: 'i32*'
+                }
+              };
+            }));
+            continue;
+          }
           case 'extractvalue': { // XXX we assume 32-bit alignment in extractvalue/insertvalue,
                                  // but in theory they can run on packed structs too (see use getStructuralTypePartBits)
             // potentially legalize the actual extracted value too if it is >32 bits, not just the extraction in general
@@ -1492,7 +1509,7 @@ function analyzer(data, sidePass) {
                                calcAllocatedSize(item.allocatedType)*item.allocatedNum: 0;
         if (USE_TYPED_ARRAYS === 2) {
           // We need to keep the stack aligned
-          item.allocatedSize = Runtime.forceAlign(item.allocatedSize, QUANTUM_SIZE);
+          item.allocatedSize = Runtime.forceAlign(item.allocatedSize, Runtime.STACK_ALIGN);
         }
       }
       var index = 0;
diff --git a/src/corruptionCheck.js b/src/corruptionCheck.js
index 315f5cf0..8b37120a 100644
--- a/src/corruptionCheck.js
+++ b/src/corruptionCheck.js
@@ -42,7 +42,7 @@ var CorruptionChecker = {
     CorruptionChecker.checkAll();
     var size = CorruptionChecker.ptrs[ptr];
     //Module.printErr('free ' + ptr + ' of size ' + size);
-    assert(size);
+    assert(size, ptr);
     var allocation = ptr - size*CorruptionChecker.BUFFER_FACTOR;
     //Module.printErr('free ' + ptr + ' of size ' + size + ' and allocation ' + allocation);
     delete CorruptionChecker.ptrs[ptr];
@@ -67,12 +67,12 @@ var CorruptionChecker = {
   },
   fillBuffer: function(buffer, size) {
     for (var x = buffer; x < buffer + size; x++) {
-      {{{ makeSetValue('x', 0, 'CorruptionChecker.canary(x)', 'i8') }}};
+      {{{ makeSetValue('x', 0, 'CorruptionChecker.canary(x)', 'i8', null, null, null, 1) }}};
     }
   },
   checkBuffer: function(buffer, size) {
     for (var x = buffer; x < buffer + size; x++) {
-      if (({{{ makeGetValue('x', 0, 'i8') }}}&255) != CorruptionChecker.canary(x)) {
+      if (({{{ makeGetValue('x', 0, 'i8', null, null, null, null, 1) }}}&255) != CorruptionChecker.canary(x)) {
         assert(0, 'Heap corruption detected!' + [x, buffer, size, {{{ makeGetValue('x', 0, 'i8') }}}&255, CorruptionChecker.canary(x)]);
       }
     }
diff --git a/src/intertyper.js b/src/intertyper.js
index 57e3011d..445c37f4 100644
--- a/src/intertyper.js
+++ b/src/intertyper.js
@@ -336,6 +336,8 @@ function intertyper(data, sidePass, baseLineNums) {
         return 'InsertValue';
       if (tokensLength >= 3 && token0Text == 'phi')
         return 'Phi';
+      if (tokensLength >= 3 && token0Text == 'va_arg')
+        return 'va_arg';
       if (tokensLength >= 3 && token0Text == 'landingpad')
         return 'Landingpad';
       if (token0Text == 'fence')
@@ -817,6 +819,16 @@ function intertyper(data, sidePass, baseLineNums) {
       this.forwardItem(item, 'Reintegrator');
     }
   });
+  // 'va_arg'
+  substrate.addActor('va_arg', {
+    processItem: function(item) {
+      item.intertype = 'va_arg';
+      var segments = splitTokenList(item.tokens.slice(1));
+      item.type = segments[1][0].text;
+      item.value = parseLLVMSegment(segments[0]);
+      this.forwardItem(item, 'Reintegrator');
+    }
+  });
   // mathops
   substrate.addActor('Mathops', {
     processItem: function(item) {
diff --git a/src/jsifier.js b/src/jsifier.js
index 5fcf6b18..9207f65d 100644
--- a/src/jsifier.js
+++ b/src/jsifier.js
@@ -1295,6 +1295,14 @@ function JSify(data, functionsOnly, givenFunctions) {
       return RuntimeGenerator.stackAlloc(getFastValue(calcAllocatedSize(item.allocatedType), '*', item.allocatedNum));
     }
   });
+  makeFuncLineActor('va_arg', function(item) {
+    assert(TARGET_LE32);
+    var ident = item.value.ident;
+    var move = Runtime.STACK_ALIGN;
+    return '(tempInt=' + makeGetValue(ident, 4, '*') + ',' +
+                         makeSetValue(ident, 4, 'tempInt + ' + move, '*') + ',' +
+                         makeGetValue(makeGetValue(ident, 0, '*'), 'tempInt', item.type) + ')';
+  });
 
   makeFuncLineActor('mathop', processMathop);
 
@@ -1318,16 +1326,21 @@ function JSify(data, functionsOnly, givenFunctions) {
       ident = Variables.resolveAliasToIdent(ident);
       var shortident = ident.slice(1);
       var simpleIdent = shortident;
-      var callIdent = LibraryManager.getRootIdent(simpleIdent);
-      if (callIdent) {
-        simpleIdent = callIdent; // ident may not be in library, if all there is is ident__inline, but in this case it is
-        if (callIdent.indexOf('.') < 0) {
-          callIdent = '_' + callIdent; // Not Math.*, so add the normal prefix
-        }
+      if (isLocalVar(ident)) {
+        var callIdent = ident;
       } else {
-        callIdent = ident;
+        // Not a local var, check if in library
+        var callIdent = LibraryManager.getRootIdent(simpleIdent);
+        if (callIdent) {
+          simpleIdent = callIdent; // ident may not be in library, if all there is is ident__inline, but in this case it is
+          if (callIdent.indexOf('.') < 0) {
+            callIdent = '_' + callIdent; // Not Math.*, so add the normal prefix
+          }
+        } else {
+          callIdent = ident;
+        }
+        if (callIdent == '0') return 'abort(-2)';
       }
-      if (callIdent == '0') return 'abort(-2)';
 
       var args = [];
       var argsTypes = [];
@@ -1358,6 +1371,7 @@ function JSify(data, functionsOnly, givenFunctions) {
         } else {
           size = Runtime.getNativeFieldSize(param.type);
         }
+        size = Runtime.alignMemory(size, Runtime.STACK_ALIGN);
         varargs.push(val);
         varargs = varargs.concat(zeros(size-1));
         // TODO: replace concats like this with push
@@ -1391,15 +1405,14 @@ function JSify(data, functionsOnly, givenFunctions) {
         var type = varargsTypes[i];
         if (type == 0) return null;
         var ret;
+        assert(offset % Runtime.STACK_ALIGN == 0); // varargs must be aligned
         if (!varargsByVals[i]) {
           ret = makeSetValue(getFastValue('tempInt', '+', offset), 0, arg, type, null, null, QUANTUM_SIZE, null, ',');
-          offset += Runtime.getNativeFieldSize(type);
+          offset += Runtime.alignMemory(Runtime.getNativeFieldSize(type), Runtime.STACK_ALIGN);
         } else {
-          assert(offset % 4 == 0); // varargs must be aligned
           var size = calcAllocatedSize(removeAllPointing(type));
-          assert(size % 4 == 0); // varargs must stay aligned
           ret = makeCopyValues(getFastValue('tempInt', '+', offset), arg, size, null, null, varargsByVals[i], ',');
-          offset += size;
+          offset += Runtime.forceAlign(size, Runtime.STACK_ALIGN);
         }
         return ret;
       }).filter(function(arg) {
@@ -1585,10 +1598,11 @@ function JSify(data, functionsOnly, givenFunctions) {
     sortGlobals(globalsData.globalVariables).forEach(function(g) {
       var ident = g.ident;
       if (!isIndexableGlobal(ident)) return;
+      assert(Variables.nextIndexedOffset % Runtime.STACK_ALIGN == 0);
       Variables.indexedGlobals[ident] = Variables.nextIndexedOffset;
       Variables.nextIndexedOffset += Runtime.alignMemory(calcAllocatedSize(Variables.globals[ident].type));
       if (ident.substr(0, 5) == '__ZTV') { // leave room for null-terminating the vtable
-        Variables.nextIndexedOffset += Runtime.getNativeTypeSize('i32');
+        Variables.nextIndexedOffset += Runtime.alignMemory(QUANTUM_SIZE);
       }
     });
   }
diff --git a/src/library.js b/src/library.js
index 9b63084f..ade63d45 100644
--- a/src/library.js
+++ b/src/library.js
@@ -2499,7 +2499,7 @@ LibraryManager.library = {
     for (var formatIndex = 0; formatIndex < format.length;) {
       if (format[formatIndex] === '%' && format[formatIndex+1] == 'n') {
         var argPtr = {{{ makeGetValue('varargs', 'argIndex', 'void*') }}};
-        argIndex += Runtime.getNativeFieldSize('void*');
+        argIndex += Runtime.getAlignSize('void*', null, true);
         {{{ makeSetValue('argPtr', 0, 'soFar', 'i32') }}};
         formatIndex += 2;
         continue;
@@ -2508,7 +2508,7 @@ LibraryManager.library = {
       // TODO: Support strings like "%5c" etc.
       if (format[formatIndex] === '%' && format[formatIndex+1] == 'c') {
         var argPtr = {{{ makeGetValue('varargs', 'argIndex', 'void*') }}};
-        argIndex += Runtime.getNativeFieldSize('void*');
+        argIndex += Runtime.getAlignSize('void*', null, true);
         fields++;
         next = get();
         {{{ makeSetValue('argPtr', 0, 'next', 'i8') }}}
@@ -2600,7 +2600,7 @@ LibraryManager.library = {
 
           var text = buffer.join('');
           var argPtr = {{{ makeGetValue('varargs', 'argIndex', 'void*') }}};
-          argIndex += Runtime.getNativeFieldSize('void*');
+          argIndex += Runtime.getAlignSize('void*', null, true);
           switch (type) {
             case 'd': case 'u': case 'i':
               if (half) {
@@ -2669,8 +2669,16 @@ LibraryManager.library = {
         ret = {{{ makeGetValue('varargs', 'argIndex', 'double', undefined, undefined, true) }}};
 #if USE_TYPED_ARRAYS == 2
       } else if (type == 'i64') {
+
+#if TARGET_LE32
+        ret = [{{{ makeGetValue('varargs', 'argIndex', 'i32', undefined, undefined, true) }}},
+               {{{ makeGetValue('varargs', 'argIndex+8', 'i32', undefined, undefined, true) }}}];
+        argIndex += {{{ STACK_ALIGN }}}; // each 32-bit chunk is in a 64-bit block
+#else
         ret = [{{{ makeGetValue('varargs', 'argIndex', 'i32', undefined, undefined, true) }}},
                {{{ makeGetValue('varargs', 'argIndex+4', 'i32', undefined, undefined, true) }}}];
+#endif
+
 #else
       } else if (type == 'i64') {
         ret = {{{ makeGetValue('varargs', 'argIndex', 'i64', undefined, undefined, true) }}};
@@ -2679,7 +2687,7 @@ LibraryManager.library = {
         type = 'i32'; // varargs are always i32, i64, or double
         ret = {{{ makeGetValue('varargs', 'argIndex', 'i32', undefined, undefined, true) }}};
       }
-      argIndex += Runtime.getNativeFieldSize(type);
+      argIndex += Math.max(Runtime.getNativeFieldSize(type), Runtime.getAlignSize(type, null, true));
       return ret;
     }
 
@@ -3612,6 +3620,9 @@ LibraryManager.library = {
   asprintf: function(s, format, varargs) {
     return _sprintf(-s, format, varargs);
   },
+
+#if TARGET_X86
+  // va_arg is just like our varargs
   vfprintf: 'fprintf',
   vsnprintf: 'snprintf',
   vprintf: 'printf',
@@ -3620,6 +3631,44 @@ LibraryManager.library = {
   vscanf: 'scanf',
   vfscanf: 'fscanf',
   vsscanf: 'sscanf',
+#endif
+
+#if TARGET_LE32
+  // convert va_arg into varargs
+  vfprintf__deps: ['fprintf'],
+  vfprintf: function(s, f, va_arg) {
+    return _fprintf(s, f, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vsnprintf__deps: ['snprintf'],
+  vsnprintf: function(s, n, format, va_arg) {
+    return _snprintf(s, n, format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vprintf__deps: ['printf'],
+  vprintf: function(format, va_arg) {
+    return _printf(format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vsprintf__deps: ['sprintf'],
+  vsprintf: function(s, format, va_arg) {
+    return _sprintf(s, format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vasprintf__deps: ['asprintf'],
+  vasprintf: function(s, format, va_arg) {
+    return _asprintf(s, format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vscanf__deps: ['scanf'],
+  vscanf: function(format, va_arg) {
+    return _scanf(format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vfscanf__deps: ['fscanf'],
+  vfscanf: function(s, format, va_arg) {
+    return _fscanf(s, format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+  vsscanf__deps: ['sscanf'],
+  vsscanf: function(s, format, va_arg) {
+    return _sscanf(s, format, {{{ makeGetValue('va_arg', 0, '*') }}});
+  },
+#endif
+
   fopen64: 'fopen',
   __01fopen64_: 'fopen',
   __01freopen64_: 'freopen',
@@ -4825,7 +4874,13 @@ LibraryManager.library = {
 
   llvm_va_start__inline: function(ptr) {
     // varargs - we received a pointer to the varargs as a final 'extra' parameter called 'varrp'
+#if TARGET_X86
     return makeSetValue(ptr, 0, 'varrp', 'void*');
+#endif
+#if TARGET_LE32
+    // 2-word structure: start, current offset
+    return makeSetValue(ptr, 0, 'varrp', 'void*') + ';' + makeSetValue(ptr, 4, 0, 'void*');
+#endif
   },
 
   llvm_va_end: function() {},
@@ -7487,12 +7542,13 @@ LibraryManager.library = {
     }
     var i = 0;
     do {
-      var curr = {{{ makeGetValue('varargs', 'i*4', 'i8') }}};
+      var curr = {{{ makeGetValue('varargs', '0', 'i8') }}};
+      varargs += {{{ STACK_ALIGN }}};
       {{{ makeSetValue('_emscripten_jcache_printf_.buffer', 'i', 'curr', 'i8') }}};
       i++;
-      assert(i*4 < MAX);
+      assert(i*{{{ STACK_ALIGN }}} < MAX);
     } while (curr != 0);
-    Module.print(intArrayToString(__formatString(_emscripten_jcache_printf_.buffer, varargs + i*4)).replace('\\n', ''));
+    Module.print(intArrayToString(__formatString(_emscripten_jcache_printf_.buffer, varargs)).replace('\\n', ''));
     Runtime.stackAlloc(-4*i); // free up the stack space we know is ok to free
   },
 
diff --git a/src/modules.js b/src/modules.js
index ce162ac1..9cbe88aa 100644
--- a/src/modules.js
+++ b/src/modules.js
@@ -2,6 +2,8 @@
 
 // Various namespace-like modules
 
+var STACK_ALIGN = TARGET_X86 ? 4 : 8;
+
 var LLVM = {
   LINKAGES: set('private', 'linker_private', 'linker_private_weak', 'linker_private_weak_def_auto', 'internal',
                 'available_externally', 'linkonce', 'common', 'weak', 'appending', 'extern_weak', 'linkonce_odr',
diff --git a/src/parseTools.js b/src/parseTools.js
index 77ab979d..39de4b7c 100644
--- a/src/parseTools.js
+++ b/src/parseTools.js
@@ -109,6 +109,10 @@ function isJSVar(ident) {
 }
 
+function isLocalVar(ident) {
+  return ident[0] == '$';
+}
+
 function isStructPointerType(type) {
   // This test is necessary for clang - in llvm-gcc, we
   // could check for %struct. The downside is that %1 can
@@ -1020,7 +1024,9 @@ function getHeapOffset(offset, type, forceAsm) {
   }
 
   if (Runtime.getNativeFieldSize(type) > 4) {
-    type = 'i32'; // XXX we emulate 64-bit values as 32
+    if (type == 'i64' || TARGET_X86) {
+      type = 'i32'; // XXX we emulate 64-bit values as 32 in x86, and also in le32 but only i64, not double
+    }
   }
 
   var sz = Runtime.getNativeTypeSize(type);
@@ -1121,7 +1127,9 @@ function makeGetValue(ptr, pos, type, noNeedFirst, unsigned, ignore, align, noSa
     return '{ ' + ret.join(', ') + ' }';
   }
 
-  if (DOUBLE_MODE == 1 && USE_TYPED_ARRAYS == 2 && type == 'double') {
+  // In double mode 1, in x86 we always assume unaligned because we can't trust that; otherwise in le32
+  // we need this code path if we are not fully aligned.
+  if (DOUBLE_MODE == 1 && USE_TYPED_ARRAYS == 2 && type == 'double' && (TARGET_X86 || align < 8)) {
     return '(' + makeSetTempDouble(0, 'i32', makeGetValue(ptr, pos, 'i32', noNeedFirst, unsigned, ignore, align)) + ',' +
             makeSetTempDouble(1, 'i32', makeGetValue(ptr, getFastValue(pos, '+', Runtime.getNativeTypeSize('i32')), 'i32', noNeedFirst, unsigned, ignore, align)) + ',' +
             makeGetTempDouble(0, 'double') + ')';
@@ -1130,6 +1138,7 @@ function makeGetValue(ptr, pos, type, noNeedFirst, unsigned, ignore, align, noSa
   if (USE_TYPED_ARRAYS == 2 && align) {
     // Alignment is important here. May need to split this up
     var bytes = Runtime.getNativeTypeSize(type);
+    if (DOUBLE_MODE == 0 && type == 'double') bytes = 4; // we will really only read 4 bytes here
     if (bytes > align) {
       var ret = '(';
       if (isIntImplemented(type)) {
@@ -1137,7 +1146,7 @@ function makeGetValue(ptr, pos, type, noNeedFirst, unsigned, ignore, align, noSa
           // Special case that we can optimize
           ret += makeGetValue(ptr, pos, 'i16', noNeedFirst, 2, ignore) + '|' +
                  '(' + makeGetValue(ptr, getFastValue(pos, '+', 2), 'i16', noNeedFirst, 2, ignore) + '<<16)';
-        } else { // XXX we cannot truly handle > 4...
+        } else { // XXX we cannot truly handle > 4... (in x86)
          ret = '';
          for (var i = 0; i < bytes; i++) {
            ret += '(' + makeGetValue(ptr, getFastValue(pos, '+', i), 'i8', noNeedFirst, 1, ignore) + (i > 0 ? '<<' + (8*i) : '') + ')';
@@ -1226,6 +1235,7 @@ function makeSetValue(ptr, pos, value, type, noNeedFirst, ignore, align, noSafe,
     return ret.join('; ');
   }
 
+  // TODO: optimize like get for le32
   if (DOUBLE_MODE == 1 && USE_TYPED_ARRAYS == 2 && type == 'double') {
     return '(' + makeSetTempDouble(0, 'double', value) + ',' +
             makeSetValue(ptr, pos, makeGetTempDouble(0, 'i32'), 'i32', noNeedFirst, ignore, align, noSafe, ',') + ',' +
@@ -1637,7 +1647,11 @@ function makeGetSlabs(ptr, type, allowMultiple, unsigned) {
       case 'i1': case 'i8': return [unsigned ? 'HEAPU8' : 'HEAP8']; break;
       case 'i16': return [unsigned ? 'HEAPU16' : 'HEAP16']; break;
       case 'i32': case 'i64': return [unsigned ? 'HEAPU32' : 'HEAP32']; break;
-      case 'float': case 'double': return ['HEAPF32']; break;
+      case 'double': {
+        if (TARGET_LE32) return ['HEAPF64']; // in le32, we do have the ability to assume 64-bit alignment
+        // otherwise, fall through to float
+      }
+      case 'float': return ['HEAPF32'];
       default: {
         throw 'what, exactly, can we do for unknown types in TA2?! ' + new Error().stack;
       }
diff --git a/src/preamble.js b/src/preamble.js
index 92305ca0..cac0be4c 100644
--- a/src/preamble.js
+++ b/src/preamble.js
@@ -35,8 +35,8 @@ function SAFE_HEAP_CLEAR(dest) {
 var SAFE_HEAP_ERRORS = 0;
 var ACCEPTABLE_SAFE_HEAP_ERRORS = 0;
 
-function SAFE_HEAP_ACCESS(dest, type, store, ignore) {
-  //if (dest === A_NUMBER) Module.print ([dest, type, store] + ' ' + new Error().stack); // Something like this may be useful, in debugging
+function SAFE_HEAP_ACCESS(dest, type, store, ignore, storeValue) {
+  //if (dest === A_NUMBER) Module.print ([dest, type, store, ignore, storeValue] + ' ' + new Error().stack); // Something like this may be useful, in debugging
 
   assert(dest >= STACK_ROOT, 'segmentation fault: null pointer, or below normal memory');
@@ -97,7 +97,7 @@ function SAFE_HEAP_STORE(dest, value, type, ignore) {
   }
   //if (!ignore && (value === Infinity || value === -Infinity || isNaN(value))) throw [value, typeof value, new Error().stack];
 
-  SAFE_HEAP_ACCESS(dest, type, true, ignore);
+  SAFE_HEAP_ACCESS(dest, type, true, ignore, value);
   if (dest in HEAP_WATCHED) {
     Module.print((new Error()).stack);
     throw "Bad store!" + dest;
diff --git a/src/runtime.js b/src/runtime.js
index 5269301c..9daab820 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -14,8 +14,8 @@ var RuntimeGenerator = {
       ret += sep + '_memset(' + type + 'TOP, 0, ' + size + ')';
     }
     ret += sep + type + 'TOP = (' + type + 'TOP + ' + size + ')|0';
-    if ({{{ QUANTUM_SIZE }}} > 1 && !ignoreAlign) {
-      ret += sep + RuntimeGenerator.alignMemory(type + 'TOP', {{{ QUANTUM_SIZE }}});
+    if ({{{ STACK_ALIGN }}} > 1 && !ignoreAlign) {
+      ret += sep + RuntimeGenerator.alignMemory(type + 'TOP', {{{ STACK_ALIGN }}});
     }
     return ret;
   },
@@ -23,9 +23,7 @@ var RuntimeGenerator = {
 
   // An allocation that lives as long as the current function call
   stackAlloc: function(size, sep) {
     sep = sep || ';';
-    if (USE_TYPED_ARRAYS === 2) 'STACKTOP = (STACKTOP + STACKTOP|0 % ' + ({{{ QUANTUM_SIZE }}} - (isNumber(size) ? Math.min(size, {{{ QUANTUM_SIZE }}}) : {{{ QUANTUM_SIZE }}})) + ')' + sep;
-    // The stack is always QUANTUM SIZE aligned, so we may not need to force alignment here
-    var ret = RuntimeGenerator.alloc(size, 'STACK', false, sep, USE_TYPED_ARRAYS != 2 || (isNumber(size) && parseInt(size) % {{{ QUANTUM_SIZE }}} == 0));
+    var ret = RuntimeGenerator.alloc(size, 'STACK', false, sep, USE_TYPED_ARRAYS != 2 || (isNumber(size) && parseInt(size) % {{{ STACK_ALIGN }}} == 0));
     if (ASSERTIONS) {
       ret += sep + 'assert(' + asmCoercion('(STACKTOP|0) < (STACK_MAX|0)', 'i32') + ')';
     }
@@ -37,8 +35,8 @@ var RuntimeGenerator = {
     var ret = 'var __stackBase__ = ' + (ASM_JS ? '0; __stackBase__ = ' : '') + 'STACKTOP';
     if (initial > 0) ret += '; STACKTOP = (STACKTOP + ' + initial + ')|0';
     if (USE_TYPED_ARRAYS == 2) {
-      assert(initial % QUANTUM_SIZE == 0);
-      if (ASSERTIONS && QUANTUM_SIZE == 4) {
+      assert(initial % Runtime.STACK_ALIGN == 0);
+      if (ASSERTIONS && Runtime.STACK_ALIGN == 4) {
         ret += '; assert(' + asmCoercion('!(STACKTOP&3)', 'i32') + ')';
       }
     }
@@ -70,7 +68,7 @@ var RuntimeGenerator = {
 
   alignMemory: function(target, quantum) {
     if (typeof quantum !== 'number') {
-      quantum = '(quantum ? quantum : {{{ QUANTUM_SIZE }}})';
+      quantum = '(quantum ? quantum : {{{ STACK_ALIGN }}})';
     }
     return target + ' = ' + Runtime.forceAlign(target, quantum);
   },
@@ -175,6 +173,18 @@ var Runtime = {
   set: set,
 
+  STACK_ALIGN: {{{ STACK_ALIGN }}},
+
+  // type can be a native type or a struct (or null, for structs we only look at size here)
+  getAlignSize: function(type, size, vararg) {
+    // we align i64s and doubles on 64-bit boundaries, unlike x86
+#if TARGET_LE32
+    if (type == 'i64' || type == 'double' || vararg) return 8;
+    if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+#endif
+    return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+  },
+
   // Calculate aligned size, just like C structs should be. TODO: Consider
   // requesting that compilation be done with #pragma pack(push) /n #pragma pack(1),
   // which would remove much of the complexity here.
@@ -187,10 +197,10 @@ var Runtime = {
       var size, alignSize;
       if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
         size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
-        alignSize = size;
+        alignSize = Runtime.getAlignSize(field, size);
       } else if (Runtime.isStructType(field)) {
         size = Types.types[field].flatSize;
-        alignSize = Types.types[field].alignSize;
+        alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
       } else if (field[0] == 'b') {
         // bN, large number field, like a [N x i8]
         size = field.substr(1)|0;
@@ -198,7 +208,7 @@ var Runtime = {
       } else {
         throw 'Unclear type in struct: ' + field + ', in ' + type.name_ + ' :: ' + dump(Types.types[type.name_]);
       }
-      alignSize = type.packed ? 1 : Math.min(alignSize, Runtime.QUANTUM_SIZE);
+      if (type.packed) alignSize = 1;
       type.alignSize = Math.max(type.alignSize, alignSize);
       var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
       type.flatSize = curr + size;
diff --git a/src/settings.js b/src/settings.js
index 48b0347e..2e4c2550 100644
--- a/src/settings.js
+++ b/src/settings.js
@@ -17,6 +17,9 @@ var QUANTUM_SIZE = 4; // This is the size of an individual field in a structure.
                        //
                        // Changing this from the default of 4 is deprecated.
 
+var TARGET_X86 = 1;  // For i386-pc-linux-gnu
+var TARGET_LE32 = 0; // For le32-unknown-nacl
+
 var CORRECT_SIGNS = 1; // Whether we make sure to convert unsigned values to signed values.
                        // Decreases performance with additional runtime checks. Might not be
                        // needed in some kinds of code.
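For reference, a minimal sketch (not part of the commit) of the le32 va_list convention this patch introduces: llvm_va_start writes a 2-word structure (varargs base pointer at offset 0, running byte offset at offset 4), and each va_arg emitted by the new makeFuncLineActor('va_arg', ...) reads the current offset, advances it by one STACK_ALIGN slot, and loads the value from base + offset. The helper names and the toy HEAP32 array below are illustrative only, assuming STACK_ALIGN is 8 as modules.js sets it for non-x86 targets.

  var STACK_ALIGN = 8;              // le32 value from modules.js (TARGET_X86 keeps 4)
  var HEAP32 = new Int32Array(64);  // toy heap standing in for Emscripten's HEAP views

  // Rough equivalent of llvm_va_start on le32: fill the 2-word va_list { start, currentOffset }
  function vaStart(valistPtr, varargsPtr) {
    HEAP32[valistPtr >> 2] = varargsPtr;   // word 0: base of the varargs area
    HEAP32[(valistPtr + 4) >> 2] = 0;      // word 1: running byte offset
  }

  // Roughly what the generated va_arg expands to for an i32 argument
  function vaArgI32(valistPtr) {
    var offset = HEAP32[(valistPtr + 4) >> 2];           // tempInt = current offset
    HEAP32[(valistPtr + 4) >> 2] = offset + STACK_ALIGN; // advance by one 8-byte slot
    var base = HEAP32[valistPtr >> 2];
    return HEAP32[(base + offset) >> 2];                 // load from base + old offset
  }

  // Usage: two i32 varargs, each padded to an 8-byte slot as the jsifier now does
  var varargsArea = 32, valist = 16;
  HEAP32[varargsArea >> 2] = 7;          // first vararg at +0
  HEAP32[(varargsArea + 8) >> 2] = 42;   // second vararg at +8, not +4
  vaStart(valist, varargsArea);
  console.log(vaArgI32(valist), vaArgI32(valist)); // prints: 7 42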