-rwxr-xr-x  emscripten.py                        5
-rw-r--r--  src/library.js                       8
-rw-r--r--  src/library_gl.js                  142
-rw-r--r--  src/relooper/Relooper.cpp           60
-rw-r--r--  src/relooper/Relooper.h              7
-rw-r--r--  src/relooper/fuzzer.py              14
-rw-r--r--  src/relooper/test.cpp               28
-rw-r--r--  src/relooper/test.txt               45
-rw-r--r--  tests/cases/breakinthemiddle2.ll     8
-rw-r--r--  tests/cases/breakinthemiddle3.ll    29
-rw-r--r--  tests/cases/invokeundef.ll           5
-rw-r--r--  tests/printf/output.txt              1
-rw-r--r--  tests/printf/output_i64_1.txt        1
-rw-r--r--  tests/printf/test.c                  1
14 files changed, 232 insertions(+), 122 deletions(-)
diff --git a/emscripten.py b/emscripten.py
index 8ddb7375..befad8d5 100755
--- a/emscripten.py
+++ b/emscripten.py
@@ -872,6 +872,7 @@ def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
for key in metadata['implementedFunctions'] + forwarded_json['Functions']['implementedFunctions'].keys(): # XXX perf
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
+ implemented_functions = set(metadata['implementedFunctions'])
# Add named globals
named_globals = '\n'.join(['var %s = %s;' % (k, v) for k, v in metadata['namedGlobals'].iteritems()])
@@ -925,7 +926,7 @@ def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
Counter.j += 1
newline = Counter.j % 30 == 29
if item == '0': return bad if not newline else (bad + '\n')
- if item not in metadata['implementedFunctions']:
+ if item not in implemented_functions:
# this is imported into asm, we must wrap it
call_ident = item
if call_ident in metadata['redirects']: call_ident = metadata['redirects'][call_ident]
@@ -1025,7 +1026,7 @@ def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
pass
# If no named globals, only need externals
global_vars = metadata['externs'] #+ forwarded_json['Variables']['globals']
- global_funcs = list(set(['_' + key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]).difference(set(global_vars)).difference(set(metadata['implementedFunctions'])))
+ global_funcs = list(set(['_' + key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]).difference(set(global_vars)).difference(implemented_functions))
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
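Note: the two emscripten.py hunks above exist purely so that the repeated membership tests against the implemented-function list become set lookups (see the "# XXX perf" comment on the loop). As a rough illustration of why that matters, here is a minimal C++ sketch of the same idea, with hypothetical names, contrasting a linear scan with a hashed lookup:

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // O(n) per query: what `item in some_list` does in Python.
    bool isImplementedSlow(const std::vector<std::string>& implemented, const std::string& item) {
      return std::find(implemented.begin(), implemented.end(), item) != implemented.end();
    }

    // O(1) on average per query: what `item in some_set` does in Python, which is
    // why the patch builds implemented_functions = set(...) once up front.
    bool isImplementedFast(const std::unordered_set<std::string>& implemented, const std::string& item) {
      return implemented.count(item) != 0;
    }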
diff --git a/src/library.js b/src/library.js
index 9c515552..af37130c 100644
--- a/src/library.js
+++ b/src/library.js
@@ -1950,9 +1950,9 @@ LibraryManager.library = {
}
// Handle precision.
- var precisionSet = false;
+ var precisionSet = false, precision = -1;
if (next == {{{ charCode('.') }}}) {
- var precision = 0;
+ precision = 0;
precisionSet = true;
textIndex++;
next = {{{ makeGetValue(0, 'textIndex+1', 'i8') }}};
@@ -1969,9 +1969,8 @@ LibraryManager.library = {
}
}
next = {{{ makeGetValue(0, 'textIndex+1', 'i8') }}};
- } else {
- var precision = 6; // Standard default.
}
+ if (precision === -1) precision = 6; // Standard default.
// Handle integer sizes. WARNING: These assume a 32-bit architecture!
var argSize;
@@ -4455,6 +4454,7 @@ LibraryManager.library = {
terminate: '__cxa_call_unexpected',
+ __gxx_personality_v0__deps: ['llvm_eh_exception', '_ZSt18uncaught_exceptionv', '__cxa_find_matching_catch'],
__gxx_personality_v0: function() {
},
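Note: the precision change above makes conversions like %E fall back to the standard default of six digits whenever no precision was parsed, which is also what the new printf test at the end of this diff exercises ("%*.*E" with a negative precision argument prints 1.234568E+04). For reference, a small standalone C++ check of the libc behaviour being emulated; per the C standard, a negative precision supplied through '*' is treated as if the precision were omitted:

    #include <cstdio>

    int main() {
      // Default precision for %E is 6 digits after the decimal point.
      std::printf("%E\n", 12345.6789123);            // 1.234568E+04
      // A negative precision passed through '*' counts as "no precision given",
      // so this must print the same value; the library.js fix makes the JS printf agree.
      std::printf("%*.*E\n", 10, -1, 12345.6789123);
      return 0;
    }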
diff --git a/src/library_gl.js b/src/library_gl.js
index 0c601673..f6978c04 100644
--- a/src/library_gl.js
+++ b/src/library_gl.js
@@ -4077,96 +4077,110 @@ var LibraryGL = {
// does not work for glBegin/End, where we generate renderer components dynamically and then
// disable them ourselves, but it does help with glDrawElements/Arrays.
if (!GLImmediate.modifiedClientAttributes) {
+#if GL_ASSERTIONS
+ if ((GLImmediate.stride & 3) != 0) {
+ Runtime.warnOnce('Warning: Rendering from client side vertex arrays where stride (' + GLImmediate.stride + ') is not a multiple of four! This is not currently supported!');
+ }
+#endif
GLImmediate.vertexCounter = (GLImmediate.stride * count) / 4; // XXX assuming float
return;
}
GLImmediate.modifiedClientAttributes = false;
- var stride = 0, start;
+ // The role of prepareClientAttributes is to examine the set of client-side vertex attribute buffers
+ // that user code has submitted, and to prepare them to be uploaded to a VBO in GPU memory
+ // (since WebGL does not support client-side rendering, i.e. rendering from vertex data in CPU memory)
+ // User code can generally submit vertex data in three different configurations:
+ // 1. Fully planar: each attribute lives in its own separate tightly-packed array in CPU memory.
+ // 2. Fully interleaved: all attributes share a single array where the data is interleaved, e.g. (pos,uv,normal), (pos,uv,normal), ...
+ // 3. Complex hybrid: multiple separate arrays that are sparsely strided and/or partially interleave vertex attributes.
+
+ // For simplicity, we treat case (2) as the fast case. For (1) and (3), we copy the vertex data here
+ // into a re-laid-out buffer that has the structure of case (2). This lets the emulation code get away
+ // with a single VBO for rendering, instead of having to maintain several. Therefore cases (1) and (3)
+ // will be very slow, and case (2) is fast.
+
+ // Detect which case we are in with a quick heuristic: examine the strides of the buffers. If all the
+ // buffers have an identical stride, we assume case (2); otherwise we have something more complex.
+ var clientStartPointer = 0x7FFFFFFF;
+ var bytes = 0; // Total number of bytes taken up by a single vertex.
+ var minStride = 0x7FFFFFFF;
+ var maxStride = 0;
var attributes = GLImmediate.liveClientAttributes;
attributes.length = 0;
- for (var i = 0; i < GLImmediate.NUM_ATTRIBUTES; i++) {
- if (GLImmediate.enabledClientAttributes[i]) attributes.push(GLImmediate.clientAttributes[i]);
- }
- attributes.sort(function(x, y) { return !x ? (!y ? 0 : 1) : (!y ? -1 : (x.pointer - y.pointer)) });
- start = GL.currArrayBuffer ? 0 : attributes[0].pointer;
- var multiStrides = false;
- for (var i = 0; i < attributes.length; i++) {
- var attribute = attributes[i];
- if (!attribute) break;
- if (stride != 0 && stride != attribute.stride) multiStrides = true;
- if (attribute.stride) stride = attribute.stride;
+ for (var i = 0; i < 3+GLImmediate.MAX_TEXTURES; i++) {
+ if (GLImmediate.enabledClientAttributes[i]) {
+ var attr = GLImmediate.clientAttributes[i];
+ attributes.push(attr);
+ clientStartPointer = Math.min(clientStartPointer, attr.pointer);
+ attr.sizeBytes = attr.size * GL.byteSizeByType[attr.type - GL.byteSizeByTypeRoot];
+ bytes += attr.sizeBytes;
+ minStride = Math.min(minStride, attr.stride);
+ maxStride = Math.max(maxStride, attr.stride);
+ }
}
- if (multiStrides) stride = 0; // we will need to restride
- var bytes = 0; // total size in bytes
- if (!stride && !beginEnd) {
- // beginEnd can not have stride in the attributes, that is fine. otherwise,
- // no stride means that all attributes are in fact packed. to keep the rest of
- // our emulation code simple, we perform unpacking/restriding here. this adds overhead, so
- // it is a good idea to not hit this!
-#if ASSERTIONS
- Runtime.warnOnce('Unpacking/restriding attributes, this is slow and dangerous');
+ if ((minStride != maxStride || maxStride < bytes) && !beginEnd) {
+ // We are in cases (1) or (3): slow path, shuffle the data around into a single interleaved vertex buffer.
+ // The immediate-mode glBegin()/glEnd() vertex submission is generated in the appropriate layout automatically,
+ // so we never need to come down this path when that API is used.
+#if GL_ASSERTIONS
+ Runtime.warnOnce('Rendering from planar client-side vertex arrays. This is a very slow emulation path! Use interleaved vertex arrays for best performance.');
#endif
if (!GLImmediate.restrideBuffer) GLImmediate.restrideBuffer = _malloc(GL.MAX_TEMP_BUFFER_SIZE);
- start = GLImmediate.restrideBuffer;
-#if ASSERTIONS
- assert(start % 4 == 0);
-#endif
+ var start = GLImmediate.restrideBuffer;
+ bytes = 0;
// calculate restrided offsets and total size
for (var i = 0; i < attributes.length; i++) {
- var attribute = attributes[i];
- if (!attribute) break;
- var size = attribute.size * GL.byteSizeByType[attribute.type - GL.byteSizeByTypeRoot];
+ var attr = attributes[i];
+ var size = attr.sizeBytes;
if (size % 4 != 0) size += 4 - (size % 4); // align everything
- attribute.offset = bytes;
+ attr.offset = bytes;
bytes += size;
}
-#if ASSERTIONS
- assert(count*bytes <= GL.MAX_TEMP_BUFFER_SIZE);
-#endif
- // copy out the data (we need to know the stride for that, and define attribute.pointer
+ // copy out the data (we need to know the stride for that, and define attr.pointer)
for (var i = 0; i < attributes.length; i++) {
- var attribute = attributes[i];
- if (!attribute) break;
- var size4 = Math.floor((attribute.size * GL.byteSizeByType[attribute.type - GL.byteSizeByTypeRoot])/4);
- for (var j = 0; j < count; j++) {
- for (var k = 0; k < size4; k++) { // copy in chunks of 4 bytes, our alignment makes this possible
- HEAP32[((start + attribute.offset + bytes*j)>>2) + k] = HEAP32[(attribute.pointer>>2) + j*size4 + k];
+ var attr = attributes[i];
+ var srcStride = Math.max(attr.sizeBytes, attr.stride);
+ if ((srcStride & 3) == 0 && (attr.sizeBytes & 3) == 0) {
+ var size4 = attr.sizeBytes>>2;
+ var srcStride4 = Math.max(attr.sizeBytes, attr.stride)>>2;
+ for (var j = 0; j < count; j++) {
+ for (var k = 0; k < size4; k++) { // copy in chunks of 4 bytes, our alignment makes this possible
+ HEAP32[((start + attr.offset + bytes*j)>>2) + k] = HEAP32[(attr.pointer>>2) + j*srcStride4 + k];
+ }
+ }
+ } else {
+ for (var j = 0; j < count; j++) {
+ for (var k = 0; k < attr.sizeBytes; k++) { // source data was not aligned to multiples of 4, must copy byte by byte.
+ HEAP8[start + attr.offset + bytes*j + k] = HEAP8[attr.pointer + j*srcStride + k];
+ }
}
}
- attribute.pointer = start + attribute.offset;
+ attr.pointer = start + attr.offset;
}
+ GLImmediate.stride = bytes;
+ GLImmediate.vertexPointer = start;
} else {
- // normal situation, everything is strided and in the same buffer
- for (var i = 0; i < attributes.length; i++) {
- var attribute = attributes[i];
- if (!attribute) break;
- attribute.offset = attribute.pointer - start;
- if (attribute.offset > bytes) { // ensure we start where we should
-#if ASSERTIONS
- assert((attribute.offset - bytes)%4 == 0); // XXX assuming 4-alignment
-#endif
- bytes += attribute.offset - bytes;
- }
- bytes += attribute.size * GL.byteSizeByType[attribute.type - GL.byteSizeByTypeRoot];
- if (bytes % 4 != 0) bytes += 4 - (bytes % 4); // XXX assuming 4-alignment
+ // case (2): fast path, all data is interleaved in a single vertex array, so we can get away with a single VBO upload.
+ if (GL.currArrayBuffer) {
+ GLImmediate.vertexPointer = 0;
+ } else {
+ GLImmediate.vertexPointer = clientStartPointer;
}
-#if ASSERTIONS
- assert(beginEnd || bytes <= stride); // if not begin-end, explicit stride should make sense with total byte size
-#endif
- if (bytes < stride) { // ensure the size is that of the stride
- bytes = stride;
+ for (var i = 0; i < attributes.length; i++) {
+ var attr = attributes[i];
+ attr.offset = attr.pointer - GLImmediate.vertexPointer; // Compute what will be the offset of this attribute in the VBO after we upload.
}
+ GLImmediate.stride = Math.max(maxStride, bytes);
}
- GLImmediate.stride = bytes;
-
if (!beginEnd) {
- bytes *= count;
- if (!GL.currArrayBuffer) {
- GLImmediate.vertexPointer = start;
+#if GL_ASSERTIONS
+ if ((GLImmediate.stride & 3) != 0) {
+ Runtime.warnOnce('Warning: Rendering from client side vertex arrays where stride (' + GLImmediate.stride + ') is not a multiple of four! This is not currently supported!');
}
- GLImmediate.vertexCounter = bytes / 4; // XXX assuming float
+#endif
+ GLImmediate.vertexCounter = (GLImmediate.stride * count) / 4; // XXX assuming float
}
},
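To make the stride heuristic in prepareClientAttributes easier to follow outside the emulation code, here is a small self-contained C++ sketch of the same classification, using hypothetical types and names; it only mirrors the decision logic (identical strides that cover a whole vertex mean the fast interleaved path, anything else falls into the restriding slow path):

    #include <algorithm>
    #include <climits>
    #include <vector>

    // Hypothetical mirror of one client-side attribute, as the GL emulation sees it.
    struct ClientAttribute {
      int sizeBytes; // bytes this attribute occupies within one vertex
      int stride;    // distance in bytes between two consecutive vertices
    };

    // True when the attributes are already fully interleaved in one buffer (case 2 above):
    // every attribute reports the same stride, and that stride covers the whole vertex.
    bool isInterleaved(const std::vector<ClientAttribute>& attrs) {
      int minStride = INT_MAX, maxStride = 0, bytes = 0;
      for (const ClientAttribute& a : attrs) {
        bytes += a.sizeBytes;
        minStride = std::min(minStride, a.stride);
        maxStride = std::max(maxStride, a.stride);
      }
      return minStride == maxStride && maxStride >= bytes;
    }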
diff --git a/src/relooper/Relooper.cpp b/src/relooper/Relooper.cpp
index 389d7447..d5772c62 100644
--- a/src/relooper/Relooper.cpp
+++ b/src/relooper/Relooper.cpp
@@ -40,27 +40,56 @@ static void PutIndented(const char *String);
static char *OutputBufferRoot = NULL;
static char *OutputBuffer = NULL;
static int OutputBufferSize = 0;
+static int OutputBufferOwned = false;
+
+static int LeftInOutputBuffer() {
+ return OutputBufferSize - (OutputBuffer - OutputBufferRoot);
+}
+
+static bool EnsureOutputBuffer(int Needed) { // ensures the output buffer is sufficient. returns true if no problem happened
+ Needed++; // ensure the trailing \0 is not forgotten
+ int Left = LeftInOutputBuffer();
+ if (!OutputBufferOwned) {
+ assert(Needed < Left);
+ } else {
+ // we own the buffer, and can resize if necessary
+ if (Needed >= Left) {
+ int Offset = OutputBuffer - OutputBufferRoot;
+ int TotalNeeded = OutputBufferSize + Needed - Left + 10240;
+ int NewSize = OutputBufferSize;
+ while (NewSize < TotalNeeded) NewSize = NewSize + (NewSize/2);
+ //printf("resize %d => %d\n", OutputBufferSize, NewSize);
+ OutputBufferRoot = (char*)realloc(OutputBufferRoot, NewSize);
+ OutputBuffer = OutputBufferRoot + Offset;
+ OutputBufferSize = NewSize;
+ return false;
+ }
+ }
+ return true;
+}
void PrintIndented(const char *Format, ...) {
assert(OutputBuffer);
- assert(OutputBuffer + Indenter::CurrIndent*INDENTATION - OutputBufferRoot < OutputBufferSize);
+ EnsureOutputBuffer(Indenter::CurrIndent*INDENTATION);
for (int i = 0; i < Indenter::CurrIndent*INDENTATION; i++, OutputBuffer++) *OutputBuffer = ' ';
- va_list Args;
- va_start(Args, Format);
- int left = OutputBufferSize - (OutputBuffer - OutputBufferRoot);
- int written = vsnprintf(OutputBuffer, left, Format, Args);
- assert(written < left);
- OutputBuffer += written;
- va_end(Args);
+ int Written;
+ while (1) { // write and potentially resize buffer until we have enough room
+ int Left = LeftInOutputBuffer();
+ va_list Args;
+ va_start(Args, Format);
+ Written = vsnprintf(OutputBuffer, Left, Format, Args);
+ va_end(Args);
+ if (EnsureOutputBuffer(Written)) break;
+ }
+ OutputBuffer += Written;
}
void PutIndented(const char *String) {
assert(OutputBuffer);
- assert(OutputBuffer + Indenter::CurrIndent*INDENTATION - OutputBufferRoot < OutputBufferSize);
+ EnsureOutputBuffer(Indenter::CurrIndent*INDENTATION);
for (int i = 0; i < Indenter::CurrIndent*INDENTATION; i++, OutputBuffer++) *OutputBuffer = ' ';
- int left = OutputBufferSize - (OutputBuffer - OutputBufferRoot);
- int needed = strlen(String)+1;
- assert(needed < left);
+ int Needed = strlen(String)+1;
+ EnsureOutputBuffer(Needed);
strcpy(OutputBuffer, String);
OutputBuffer += strlen(String);
*OutputBuffer++ = '\n';
@@ -1158,11 +1187,18 @@ void Relooper::Render() {
void Relooper::SetOutputBuffer(char *Buffer, int Size) {
OutputBufferRoot = OutputBuffer = Buffer;
OutputBufferSize = Size;
+ OutputBufferOwned = false;
}
void Relooper::MakeOutputBuffer(int Size) {
+ if (OutputBufferRoot && OutputBufferSize >= Size && OutputBufferOwned) return;
OutputBufferRoot = OutputBuffer = (char*)malloc(Size);
OutputBufferSize = Size;
+ OutputBufferOwned = true;
+}
+
+char *Relooper::GetOutputBuffer() {
+ return OutputBufferRoot;
}
void Relooper::SetAsmJSMode(int On) {
diff --git a/src/relooper/Relooper.h b/src/relooper/Relooper.h
index dfabcabb..6b9394db 100644
--- a/src/relooper/Relooper.h
+++ b/src/relooper/Relooper.h
@@ -200,11 +200,16 @@ struct Relooper {
void Render();
// Sets the global buffer all printing goes to. Must call this or MakeOutputBuffer.
+ // XXX: this is deprecated, see MakeOutputBuffer
static void SetOutputBuffer(char *Buffer, int Size);
- // Creates an output buffer. Must call this or SetOutputBuffer.
+ // Creates an internal output buffer. Must call this or SetOutputBuffer. Size is
+ // a hint for the initial size of the buffer; it can be resized later on demand.
+ // For that reason this is recommended over SetOutputBuffer.
static void MakeOutputBuffer(int Size);
+ static char *GetOutputBuffer();
+
// Sets asm.js mode on or off (default is off)
static void SetAsmJSMode(int On);
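The net effect of the header change above is that callers can let the relooper own and grow its output buffer instead of having to size one up front. A minimal sketch of the intended call sequence, mirroring the new test case further down in this diff (error handling omitted):

    #include <cstdio>
    #include "Relooper.h"

    void renderExample(Block *entry) {
      // Let the relooper allocate (and later grow) its own buffer; Size is only an initial hint.
      Relooper::MakeOutputBuffer(1024);

      Relooper r;
      r.AddBlock(entry);
      r.Calculate(entry);
      r.Render();

      // Read the rendered output back from the internally owned buffer.
      puts(Relooper::GetOutputBuffer());
    }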
diff --git a/src/relooper/fuzzer.py b/src/relooper/fuzzer.py
index fa47583e..18db997e 100644
--- a/src/relooper/fuzzer.py
+++ b/src/relooper/fuzzer.py
@@ -47,8 +47,18 @@ while(1) switch(label) {
int main() {
char *buffer = (char*)malloc(10*1024*1024);
+'''
+
+ if random.randint(0, 1) == 0:
+ make = False
+ fast += '''
Relooper::SetOutputBuffer(buffer, 10*1024*1024);
'''
+ else:
+ make = True
+ fast += '''
+ Relooper::MakeOutputBuffer(%d);
+''' % random.randint(1, 1024*1024*10)
for i in range(1, num):
slow += ' case %d: print(%d); state = check(); modded = state %% %d\n' % (i, i, len(branches[i])+1)
@@ -102,11 +112,11 @@ int main() {
printf("\\n\\n");
r.Render();
- puts(buffer);
+ puts(%s);
return 1;
}
-'''
+''' % ('buffer' if not make else 'Relooper::GetOutputBuffer()')
slow += '}'
diff --git a/src/relooper/test.cpp b/src/relooper/test.cpp
index 773f6ee4..b4ce669c 100644
--- a/src/relooper/test.cpp
+++ b/src/relooper/test.cpp
@@ -286,5 +286,33 @@ int main() {
puts(buffer);
}
+
+ if (1) {
+ Relooper::MakeOutputBuffer(10);
+
+ printf("\n\n-- If pattern, emulated, using MakeOutputBuffer --\n\n");
+
+ Block *b_a = new Block("// block A\n", NULL);
+ Block *b_b = new Block("// block B\n", "b_check()");
+ Block *b_c = new Block("// block C\n", NULL);
+
+ b_a->AddBranchTo(b_b, "check == 10", "atob();");
+ b_a->AddBranchTo(b_c, NULL, "atoc();");
+
+ b_b->AddBranchTo(b_c, "case 17:", "btoc();");
+ b_b->AddBranchTo(b_a, NULL, NULL);
+
+ Relooper r;
+ r.SetEmulate(true);
+ r.AddBlock(b_a);
+ r.AddBlock(b_b);
+ r.AddBlock(b_c);
+
+ r.Calculate(b_a);
+ printf("\n\n");
+ r.Render();
+
+ puts(buffer);
+ }
}
diff --git a/src/relooper/test.txt b/src/relooper/test.txt
index 9bdd4093..cb02b867 100644
--- a/src/relooper/test.txt
+++ b/src/relooper/test.txt
@@ -315,3 +315,48 @@
}
}
+
+
+-- If pattern, emulated, using MakeOutputBuffer --
+
+
+
+ label = 1;
+ L0: while(1) {
+ switch(label|0) {
+ case 3: {
+ // block C
+ break;
+ }
+ case 1: {
+ // block A
+ if (check == 10) {
+ atob();
+ label = 2;
+ continue L0;
+ } else {
+ atoc();
+ label = 3;
+ continue L0;
+ }
+ break;
+ }
+ case 2: {
+ // block B
+ switch (b_check()) {
+ case 17: {
+ btoc();
+ label = 3;
+ continue L0;
+ break;
+ }
+ default: {
+ label = 1;
+ continue L0;
+ }
+ }
+ break;
+ }
+ }
+ }
+
diff --git a/tests/cases/breakinthemiddle2.ll b/tests/cases/breakinthemiddle2.ll
index ba96654f..2f8c1c91 100644
--- a/tests/cases/breakinthemiddle2.ll
+++ b/tests/cases/breakinthemiddle2.ll
@@ -11,10 +11,6 @@ define linkonce_odr i32 @main() align 2 {
label555: ; preds = %0
br label %label569 ; branch should ignore all code after it in the block
; No predecessors!
- %aa472 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- %aa473 = extractvalue { i8*, i32 } %aa472, 0
- %aa474 = extractvalue { i8*, i32 } %aa472, 1
br label %label569
label569: ; preds = %0
@@ -23,10 +19,6 @@ label569: ; preds = %0
label990:
ret i32 0 ; ret should ignore all code after it in the block
; No predecessors!
- %a472 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- %a473 = extractvalue { i8*, i32 } %a472, 0
- %a474 = extractvalue { i8*, i32 } %a472, 1
br label %label569
label999: ; preds = %555
diff --git a/tests/cases/breakinthemiddle3.ll b/tests/cases/breakinthemiddle3.ll
deleted file mode 100644
index 38da15ef..00000000
--- a/tests/cases/breakinthemiddle3.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-target datalayout = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-p:32:32:32-v128:32:32"
-target triple = "le32-unknown-nacl"
-
-@.str = private constant [15 x i8] c"hello, world!\0A\00", align 1 ; [#uses=1]
-
-define linkonce_odr i32 @main() align 2 {
- %a333 = call i32 @printf(i8* getelementptr inbounds ([15 x i8]* @.str, i32 0, i32 0)) ; [#uses=0]
- %z199 = trunc i8 1 to i1 ; [#uses=1]
- switch i32 %a333, label %label999 [
- i32 1000, label %label995
- ] ; switch should ignore all code after it in the block
- ; No predecessors!
- %a472 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- %a473 = extractvalue { i8*, i32 } %a472, 0
- %a474 = extractvalue { i8*, i32 } %a472, 1
- br label %label999
-
-label995:
- %b333b = call i32 @printf(i8* getelementptr inbounds ([15 x i8]* @.str, i32 0, i32 0)) ; [#uses=0]
- br label %label999
-
-label999: ; preds = %555
- ret i32 0
-}
-
-declare i32 @printf(i8*)
-declare i32 @__gxx_personality_v0(...)
-
diff --git a/tests/cases/invokeundef.ll b/tests/cases/invokeundef.ll
index be1dd671..2f13e7ab 100644
--- a/tests/cases/invokeundef.ll
+++ b/tests/cases/invokeundef.ll
@@ -31,6 +31,8 @@ invcont33:
ret i32 %retval1
lpad106:
+ %Z = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
ret i32 %retval1
return: ; preds = %entry
@@ -40,3 +42,6 @@ return: ; preds = %entry
; [#uses=1]
declare i32 @puts(i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
diff --git a/tests/printf/output.txt b/tests/printf/output.txt
index 0155f0da..a3baed28 100644
--- a/tests/printf/output.txt
+++ b/tests/printf/output.txt
@@ -8280,4 +8280,5 @@ ffffff8000000000
1
1
+1.234568E+04
no_new_line
diff --git a/tests/printf/output_i64_1.txt b/tests/printf/output_i64_1.txt
index e38fb78f..ea85d302 100644
--- a/tests/printf/output_i64_1.txt
+++ b/tests/printf/output_i64_1.txt
@@ -8280,4 +8280,5 @@ ffffff8000000000
1
1
+1.234568E+04
no_new_line
diff --git a/tests/printf/test.c b/tests/printf/test.c
index 1c8ad9f7..adeb69db 100644
--- a/tests/printf/test.c
+++ b/tests/printf/test.c
@@ -8285,6 +8285,7 @@ int main() {
printf("%hx\n", -0xFFFF);
printf("%x\n", -0xFFFFFFFF);
printf("\n");
+ printf("%*.*E\n", 10, -1, 12345.6789123);
printf("no_new_line");
return 0;
}