path: root/lib
Age | Commit message | Author
2010-06-28 | Change CGCall to handle the "coerce" case where the coerce-to type | Chris Lattner
is an FCA by passing each of the elements as individual scalars. This produces code that fast isel is less likely to reject and is easier on the optimizers.

For example, before we would compile:

struct DeclGroup { long NumDecls; char * Y; };
char * foo(DeclGroup D) { return D.NumDecls+D.Y; }

to:

%struct.DeclGroup = type { i64, i64 }

define i64 @_Z3foo9DeclGroup(%struct.DeclGroup) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
  store %struct.DeclGroup %0, %struct.DeclGroup* %D, align 1
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
  %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
  %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
  ret i64 %add
}

Now we get:

%0 = type { i64, i64 }
%struct.DeclGroup = type { i64, i8* }

define i8* @_Z3foo9DeclGroup(i64, i64) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
  %2 = insertvalue %0 undef, i64 %0, 0 ; <%0> [#uses=1]
  %3 = insertvalue %0 %2, i64 %1, 1 ; <%0> [#uses=1]
  %4 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
  store %0 %3, %0* %4, align 1
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
  %tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
  %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
  ret i8* %add.ptr
}

Elimination of the FCA inside the function is still to come.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107099 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | make the trivial forms of CreateCoerced{Load|Store} trivial. | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107091 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Modify the way sub-statements are stored and retrieved from PCH. | Argyrios Kyrtzidis
Before this commit, sub-stmts were stored as encountered, and when they were placed in the Stmts stack we had to know what index each stmt operand has. This complicated supporting variable sub-stmts and sub-stmts that were contained in TypeSourceInfos; e.g. "x = sizeof(int[1]);" would crash PCH.

Now, sub-stmts are stored in reverse order, from last to first, so that when reading them, getting the next sub-stmt just means popping the last stmt from the stack. This greatly simplified the way stmts are written and read (just use PCHWriter::AddStmt and PCHReader::ReadStmt accordingly) and allowed variable stmt operands and TypeSourceInfo exprs.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107087 91177308-0d34-0410-b5e6-96231b3b80d8
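A rough sketch of that scheme (a toy model only, not the actual PCHWriter/PCHReader code): sub-statements are written from last to first, so the reader can rebuild a statement by popping its operands off the top of a stack, with no operand indices needed.

  #include <string>
  #include <vector>

  // Toy model: the writer pushes operands in reverse order; the reader simply
  // pops to obtain the next sub-statement.
  struct ToyStmtStack {
    std::vector<std::string> Stmts;

    void Push(const std::string &S) { Stmts.push_back(S); }

    std::string Pop() {            // the next sub-stmt is always on top
      std::string Top = Stmts.back();
      Stmts.pop_back();
      return Top;
    }
  };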
2010-06-28 | pass/return structs of char and short as i8/i16 to avoid | Chris Lattner
awful through-memory coercion, just like we do for i32 now. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107078 91177308-0d34-0410-b5e6-96231b3b80d8
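The kind of aggregate this affects, as a hedged C++ illustration (the resulting IR signatures are not reproduced here):

  struct Flag  { char  value; };   // now passed/returned directly as i8
  struct Count { short value; };   // now passed/returned directly as i16

  char  getFlag(Flag f)   { return f.value; }
  short getCount(Count c) { return c.value; }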
2010-06-28 | more tidying up. | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107076 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Remove state assertion. | Ted Kremenek
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107064 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Don't crash in InitializePreprocessor() when there is no valid PTHManager. | Ted Kremenek
Fixes <rdar://problem/8098441>. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107061 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | random acts of tidying. | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107050 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | X86-64: | Chris Lattner
pass/return structs of float/int as float/i32 instead of double/i64 to make the code generated for the ABI cleaner. Passing in the low part of a double is the same as passing in a float.

For example, we now compile:

struct DeclGroup { float NumDecls; };
float foo(DeclGroup D);
void bar(DeclGroup *D) { foo(*D); }

into:

%struct.DeclGroup = type { float }

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
  %0 = load float* %coerce.dive, align 1 ; <float> [#uses=1]
  %call = call float @_Z3foo9DeclGroup(float %0) ; <float> [#uses=0]
  ret void
}

instead of:

%struct.DeclGroup = type { float }

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp3 = alloca double ; <double*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
  %0 = bitcast double* %tmp3 to float* ; <float*> [#uses=1]
  %1 = load float* %coerce.dive ; <float> [#uses=1]
  store float %1, float* %0, align 1
  %2 = load double* %tmp3 ; <double> [#uses=1]
  %call = call float @_Z3foo9DeclGroup(double %2) ; <float> [#uses=0]
  ret void
}

which is this machine code (at -O0):

__Z3barP9DeclGroup:
        subq    $24, %rsp
        movq    %rdi, 16(%rsp)
        movq    16(%rsp), %rdi
        leaq    8(%rsp), %rax
        movl    (%rdi), %ecx
        movl    %ecx, (%rax)
        movss   8(%rsp), %xmm0
        callq   __Z3foo9DeclGroup
        addq    $24, %rsp
        ret

vs this:

__Z3barP9DeclGroup:
        subq    $24, %rsp
        movq    %rdi, 16(%rsp)
        movq    16(%rsp), %rdi
        leaq    8(%rsp), %rax
        movl    (%rdi), %ecx
        movl    %ecx, (%rax)
        movss   8(%rsp), %xmm0
        movss   %xmm0, (%rsp)
        movsd   (%rsp), %xmm0
        callq   __Z3foo9DeclGroup
        addq    $24, %rsp
        ret

At -O3, it is the difference between this now:

__Z3barP9DeclGroup:
        movss   (%rdi), %xmm0
        jmp     __Z3foo9DeclGroup  # TAILCALL

vs this before:

__Z3barP9DeclGroup:
        movl    (%rdi), %eax
        movd    %rax, %xmm0
        jmp     __Z3foo9DeclGroup  # TAILCALL

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107048 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Minor refactoring of my last patch (related to radar 7860965). | Fariborz Jahanian
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107047 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Have __func__ and siblings point to block's implementation function | Fariborz Jahanian
name. Fixes radar 7860965. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107044 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Set the default arch based on the triple. | Rafael Espindola
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107021 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Fix UnitTests/2004-02-02-NegativeZero.c, which regressed when | Chris Lattner
I broke negate of FP values. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107019 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | llvm::errs() is non-buffered, so it doesn't need to be flushed. | Dan Gohman
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107012 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Introduce Expr::Classify and Expr::ClassifyModifiable, which determine the | Sebastian Redl
classification of an expression under the C++0x taxonomy (value category). Reimplement isLvalue and isModifiableLvalue using these functions. No regressions in the test suite from this, and my rough performance check doesn't show any regressions either. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@107007 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Support CXXPseudoDestructorExpr for PCH. | Argyrios Kyrtzidis
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106999 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Support DependentScopeDeclRefExpr for PCH. | Argyrios Kyrtzidis
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106998 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Refactor PCH reading/writing of template arguments passed to expressions. | Argyrios Kyrtzidis
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106997 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Fix PCH emitting/reading for template arguments that contain expressions. | Argyrios Kyrtzidis
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106996 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Fix various bugs in recent commits for C++ PCH. | Argyrios Kyrtzidis
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106995 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-28 | Partial fix for PR7267 based on comments by John McCall on an earlier patch. | Chandler Carruth
This is more targeted, as it simply provides toggle actions for the parser to turn access checking on and off. We then use these to suppress access checking only while we parse the template-id (including its scope specifier) of an explicit instantiation or explicit specialization of a class template. The specialization behavior is an extension, as it seems likely a defect that the standard did not exempt specializations the way it does explicit instantiations. This allows the very common practice of specializing trait classes to work for private, internal types.

This doesn't address instantiating or specializing function templates, although those apparently already partially work. The naming and style for the Action layer isn't my favorite; comments and suggestions would be appreciated there.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106993 91177308-0d34-0410-b5e6-96231b3b80d8
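The trait-class pattern this is meant to support looks roughly like the sketch below (names are invented; naming the private member type in the explicit specialization relies on the extension described above):

  template <typename T>
  struct is_internal { static const bool value = false; };

  class Widget {
    struct Impl {};                // private, internal type
  public:
    Impl *getImpl();
  };

  // Explicit specialization whose template-id names Widget's private type;
  // with access checking suppressed while that template-id is parsed, this
  // common idiom is accepted.
  template <>
  struct is_internal<Widget::Impl> { static const bool value = true; };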
2010-06-28 | Pointer comparisons (and pointer-pointer subtraction). Basically filling in | Jordy Rose
SimpleSValuator::EvalBinOpLL(). git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106992 91177308-0d34-0410-b5e6-96231b3b80d8
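The kinds of expressions this covers, sketched in C++ (purely illustrative of what EvalBinOpLL now models):

  bool contains(const int *begin, const int *end, const int *p) {
    return begin <= p && p < end;        // pointer comparisons
  }

  long span(const int *begin, const int *end) {
    return end - begin;                  // pointer-pointer subtraction
  }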
2010-06-28 | Suppress diagnosing access violations while looking up deallocation functions | Chandler Carruth
much as we already do for allocation function lookup. Explicitly check access for the function we actually select in one case where that check was previously missing but was masked by the blanket diagnostics for all overload candidates. This fixes PR7436. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106986 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Use softfp for linux gnueabi, keep the warning for everything else. | Rafael Espindola
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106984 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Correctly destroy reference temporaries with global storage. Remove | Anders Carlsson
ErrorUnsupported call when binding a global reference to a non-lvalue. Fixes PR7326. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106983 91177308-0d34-0410-b5e6-96231b3b80d8
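The case being fixed, as a hedged example (names invented): a reference with static storage duration bound to a temporary, whose destructor must still run at program termination.

  struct Logger {
    Logger();
    ~Logger();                     // must run when the program exits
  };

  // The temporary's lifetime is extended to that of the reference; its
  // destructor is now registered for end-of-program destruction instead of
  // being dropped (or rejected via ErrorUnsupported for non-lvalues).
  const Logger &globalLog = Logger();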
2010-06-27 | Add a CreateReferenceTemporary that will do the right thing for variables | Anders Carlsson
with global storage. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106982 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Simplify CodeGenFunction::EmitReferenceBindingToExpr as a first step towards | Anders Carlsson
fixing PR7326. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106981 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Reduce indentation. | Anders Carlsson
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106980 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | misc tidying | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106978 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | finally get around to doing a significant cleanup to irgen: | Chris Lattner
have CGF create and make accessible standard int32, int64, and intptr types. This fixes a ton of 80-column violations introduced by LLVMContextification and cleans up stuff a lot. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106977 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | tidy up OrderGlobalInits | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106976 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | If coercing something from int or pointer type to int or pointer type | Chris Lattner
(potentially after unwrapping it from a struct) do it without going through memory. We now compile:

struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D) { return D.NumDecls; }

into:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %coerce.val.ii = trunc i64 %0 to i32 ; <i32> [#uses=1]
  store i32 %coerce.val.ii, i32* %coerce.dive
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp1 = load i32* %tmp ; <i32> [#uses=1]
  ret i32 %tmp1
}

instead of:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to i32* ; <i32*> [#uses=1]
  %2 = load i32* %1, align 1 ; <i32> [#uses=1]
  store i32 %2, i32* %coerce.dive
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

... which is quite a bit less terrifying.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106975 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Same patch as the previous on the store side. Before we compiled this: | Chris Lattner
struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D) { return D.NumDecls; }

to:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to %struct.DeclGroup* ; <%struct.DeclGroup*> [#uses=1]
  %2 = load %struct.DeclGroup* %1, align 1 ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %2, %struct.DeclGroup* %D
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

which caused fast isel bailouts due to the FCA load/store of %2. Now we generate this just blissful code:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to i32* ; <i32*> [#uses=1]
  %2 = load i32* %1, align 1 ; <i32> [#uses=1]
  store i32 %2, i32* %coerce.dive
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

This avoids fastisel bailing out and is groundwork for a future patch. This reduces bailouts on CGStmt.ll to 911 from 935.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106974 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | improve CreateCoercedLoad a bit to generate slightly less awful | Chris Lattner
IR when handling X86-64 by-value struct stuff. For example, we used to compile this:

struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D);
void bar(DeclGroup *D) { foo(*D); }

into:

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) ssp nounwind {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp3 = alloca i64 ; <i64*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %0 = bitcast i64* %tmp3 to %struct.DeclGroup* ; <%struct.DeclGroup*> [#uses=1]
  %1 = load %struct.DeclGroup* %agg.tmp ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %1, %struct.DeclGroup* %0, align 1
  %2 = load i64* %tmp3 ; <i64> [#uses=1]
  call void @_Z3foo9DeclGroup(i64 %2)
  ret void
}

which would cause fastisel to bail out due to the first class aggregate load %1. With this patch we now compile it into the (still awful):

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind ssp noredzone {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp3 = alloca i64 ; <i64*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <i32*> [#uses=1]
  %0 = bitcast i64* %tmp3 to i32* ; <i32*> [#uses=1]
  %1 = load i32* %coerce.dive ; <i32> [#uses=1]
  store i32 %1, i32* %0, align 1
  %2 = load i64* %tmp3 ; <i64> [#uses=1]
  %call = call i32 @_Z3foo9DeclGroup(i64 %2) noredzone ; <i32> [#uses=0]
  ret void
}

which doesn't bail out. On CGStmt.ll, this reduces fastisel bail outs from 958 to 935, and is the precursor of better things to come.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106973 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-27 | Implicitly compare symbolic expressions to zero when they're being used as | Jordy Rose
constraints. Part of PR7491. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106972 91177308-0d34-0410-b5e6-96231b3b80d8
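What "used as constraints" means here, as a small illustration: a symbolic expression appearing directly as a branch condition is treated as an implicit comparison against zero.

  int branchOnSum(int x, int y) {
    if (x + y)       // the analyzer records the constraint (x + y) != 0 here
      return 1;
    return 0;        // and (x + y) == 0 on this path
  }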
2010-06-27 | Change IR generation for return (in the simple case) to avoid doing silly | Chris Lattner
load/store nonsense in the epilog. For example, for:

int foo(int X) { int A[100]; return A[X]; }

we used to generate:

  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
  store i32 %tmp1, i32* %retval
  %0 = load i32* %retval ; <i32> [#uses=1]
  ret i32 %0
}

which codegen'd to this code:

_foo:                                   ## @foo
## BB#0:                                ## %entry
        subq    $408, %rsp              ## imm = 0x198
        movl    %edi, 400(%rsp)
        movl    400(%rsp), %edi
        movslq  %edi, %rax
        movl    (%rsp,%rax,4), %edi
        movl    %edi, 404(%rsp)
        movl    404(%rsp), %eax
        addq    $408, %rsp              ## imm = 0x198
        ret

Now we generate:

  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
  ret i32 %tmp1
}

and:

_foo:                                   ## @foo
## BB#0:                                ## %entry
        subq    $408, %rsp              ## imm = 0x198
        movl    %edi, 404(%rsp)
        movl    404(%rsp), %edi
        movslq  %edi, %rax
        movl    (%rsp,%rax,4), %eax
        addq    $408, %rsp              ## imm = 0x198
        ret

This actually does matter, cutting out 2000 lines of IR from CGStmt.ll for example. Another interesting effect is that altivec.h functions which are dead now get dce'd by the inliner. Hence all the changes to builtins-ppc-altivec.c to ensure the calls aren't dead.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106970 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | reduce indentation | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106967 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Implement rdar://7530813 - collapse multiple GEP instructions in IRgen | Chris Lattner
This avoids generating two gep's for common array operations. Before we would generate something like:

  %tmp = load i32* %X.addr ; <i32> [#uses=1]
  %arraydecay = getelementptr inbounds [100 x i32]* %A, i32 0, i32 0 ; <i32*> [#uses=1]
  %arrayidx = getelementptr inbounds i32* %arraydecay, i32 %tmp ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]

Now we generate:

  %tmp = load i32* %X.addr ; <i32> [#uses=1]
  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i32 %tmp ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]

Less IR is better at -O0.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106966 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Allow '__extension__' to be analyzed in an lvalue context. | Ted Kremenek
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106964 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | minor cleanup: don't emit the base of an array subscript until after | Chris Lattner
we're done diddling around with the index stuff. Use a cheaper type comparison. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106963 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | fix inc/dec to honor -fwrapv and -ftrapv, implementing PR7426. | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106962 91177308-0d34-0410-b5e6-96231b3b80d8
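A small example of what honoring these flags means for ++/-- (illustrative of the flag semantics only):

  int bump(int x) {
    // On signed overflow (x == INT_MAX): wraps around with -fwrapv, traps
    // with -ftrapv, and is emitted as an nsw add (undefined on overflow)
    // by default.
    return ++x;
  }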
2010-06-26 | move scalar inc/dec codegen into ScalarExprEmitter instead | Chris Lattner
of being in CGF. No functionality change. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106961 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | use more efficient type comparison predicates. | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106958 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Fix unary minus to trap on overflow with -ftrapv, refactoring binop | Chris Lattner
code so we can use it from VisitUnaryMinus. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106957 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Implement support for -fwrapv, rdar://7221421 | Chris Lattner
As part of this, pull together trapv handling into the same enum. This also adds support for NSW multiplies. This also makes PCH disagreement on overflow behavior silent, since it really doesn't matter except for warnings and codegen (no macros get defined etc). git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106956 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | implement rdar://7432000 - signed negate should codegen as NSW. | Chris Lattner
While I'm in there, adjust pointer to member adjustments as well. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106955 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Implement support for #pragma message, patch by Michael Spencer! | Chris Lattner
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106950 91177308-0d34-0410-b5e6-96231b3b80d8
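Usage is straightforward; the directive emits its text as a compile-time diagnostic:

  // Prints a note/warning when this translation unit is compiled.
  #pragma message("TODO: drop this workaround once the upstream fix lands")

  int main() { return 0; }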
2010-06-26 | Change EmitReferenceBindingToExpr to take a decl instead of a boolean. | Anders Carlsson
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106949 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Add function for mangling reference temporaries. | Anders Carlsson
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106948 91177308-0d34-0410-b5e6-96231b3b80d8
2010-06-26 | Mangle pointer and (lvalue) reference types in the Microsoft C++ Mangler. | Charles Davis
Also, fix mangling of throw specs. Turns out MSVC totally ignores throw specs when mangling names. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106937 91177308-0d34-0410-b5e6-96231b3b80d8