author    | JF Bastien <jfb@chromium.org> | 2013-07-22 14:02:59 -0700
committer | JF Bastien <jfb@chromium.org> | 2013-07-22 14:02:59 -0700
commit    | ccad851d1493d84019f3372c0033908cdd516f58 (patch)
tree      | 8d73fa6f6e6d38d11b14a83d3a2d0f80c8d0eb6e
parent    | 0eb1be38d149f22aab802958086a295628a3d76f (diff)
Cherrypick upstream volatile _Complex alignment patches
Specifically:
r186564 - Fix volatile _Complex alignment test on platforms where 64-bit floating point isn't 64-bit aligned
r186490 - Propagate alignment for _Complex
These should fix the GCC torture test failures and, more importantly, the incorrect alignment of volatile _Complex accesses in C.
BUG= PNaCl FYI bots red on torture tests
TEST= ./tools/toolchain_tester/torture_test.py pnacl x86-64 --concurrency=32 >& torture-x86-64.log ; ./tools/toolchain_tester/torture_test.py pnacl x86-32 --concurrency=32 >& torture-x86-32.log ; ./tools/toolchain_tester/torture_test.py pnacl arm --concurrency=32 >& torture-arm.log
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/19915003
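
For context, here is a minimal C sketch of the pattern these patches address (illustrative only, in the spirit of the new volatile-complex.c test; it is not part of the commit). With an overaligned volatile _Complex object, the real part may be accessed with the declared alignment, but the imaginary part only gets the element type's natural alignment:

```c
/* Sketch of the affected pattern; identifiers are illustrative only.
 * With aligned(32), the real part sits on a 32-byte boundary, but the
 * imaginary part (at offset 4) is only 4-byte aligned, so the emitted
 * volatile loads and stores must not claim more alignment than that. */
volatile _Complex float cf32 __attribute__((aligned(32)));

void touch(void) {
  cf32 = cf32; /* volatile copy: loads and stores both components */
}
```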
-rw-r--r-- | lib/CodeGen/CGExprComplex.cpp   | 26
-rw-r--r-- | test/CodeGen/volatile-1.c       | 44
-rw-r--r-- | test/CodeGen/volatile-2.c       | 12
-rw-r--r-- | test/CodeGen/volatile-complex.c | 72
4 files changed, 120 insertions, 34 deletions
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 36f974a313..3a3971479d 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -18,6 +18,7 @@
 #include "llvm/ADT/SmallString.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Function.h"
+#include <algorithm>
 
 using namespace clang;
 using namespace CodeGen;
@@ -294,19 +295,26 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue) {
   llvm::Value *SrcPtr = lvalue.getAddress();
   bool isVolatile = lvalue.isVolatileQualified();
+  unsigned AlignR = lvalue.getAlignment().getQuantity();
+  ASTContext &C = CGF.getContext();
+  QualType ComplexTy = lvalue.getType();
+  unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
+  unsigned AlignI = std::min(AlignR, ComplexAlign);
 
   llvm::Value *Real=0, *Imag=0;
 
   if (!IgnoreReal || isVolatile) {
     llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
                                                  SrcPtr->getName() + ".realp");
-    Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+    Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
+                                     SrcPtr->getName() + ".real");
   }
 
   if (!IgnoreImag || isVolatile) {
     llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
                                                  SrcPtr->getName() + ".imagp");
-    Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+    Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
+                                     SrcPtr->getName() + ".imag");
   }
 
   return ComplexPairTy(Real, Imag);
 }
@@ -322,10 +330,16 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val,
   llvm::Value *Ptr = lvalue.getAddress();
   llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
   llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
-
-  // TODO: alignment
-  Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
-  Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
+  unsigned AlignR = lvalue.getAlignment().getQuantity();
+  ASTContext &C = CGF.getContext();
+  QualType ComplexTy = lvalue.getType();
+  unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
+  unsigned AlignI = std::min(AlignR, ComplexAlign);
+
+  Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
+                             lvalue.isVolatileQualified());
+  Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
+                             lvalue.isVolatileQualified());
 }
diff --git a/test/CodeGen/volatile-1.c b/test/CodeGen/volatile-1.c
index 65511593d3..09c75ee83b 100644
--- a/test/CodeGen/volatile-1.c
+++ b/test/CodeGen/volatile-1.c
@@ -26,45 +26,45 @@ int printf(const char *, ...);
 void test() {
   // CHECK: load volatile [[INT]]* @i
   i;
-  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   // CHECK-NEXT: sitofp [[INT]]
   (float)(ci);
-  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   (void)ci;
   // CHECK-NEXT: bitcast
   // CHECK-NEXT: memcpy
   (void)a;
-  // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
-  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   (void)(ci=ci);
   // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* @j
   // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* @i
   (void)(i=j);
-  // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
-  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
+  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   // Not sure why they're ordered this way.
   // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
   // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
-  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   ci+=ci;
-  // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
-  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
+  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]]
   // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
-  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
-  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
+  // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
+  // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4
   // These additions can be elided
   // CHECK-NEXT: add [[INT]] [[R]], [[R2]]
   // CHECK-NEXT: add [[INT]] [[I]], [[I2]]
diff --git a/test/CodeGen/volatile-2.c b/test/CodeGen/volatile-2.c
index 3d342de690..9233b2a167 100644
--- a/test/CodeGen/volatile-2.c
+++ b/test/CodeGen/volatile-2.c
@@ -3,8 +3,8 @@
 void test0() {
   // CHECK: define void @test0()
   // CHECK: [[F:%.*]] = alloca float
-  // CHECK-NEXT: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @test0_v, i32 0, i32 0)
-  // CHECK-NEXT: load volatile float* getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1)
+  // CHECK-NEXT: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @test0_v, i32 0, i32 0), align 4
+  // CHECK-NEXT: load volatile float* getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1), align 4
   // CHECK-NEXT: store float [[REAL]], float* [[F]], align 4
   // CHECK-NEXT: ret void
   extern volatile _Complex float test0_v;
@@ -13,10 +13,10 @@ void test0() {
 
 void test1() {
   // CHECK: define void @test1()
-  // CHECK: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0)
-  // CHECK-NEXT: [[IMAG:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1)
-  // CHECK-NEXT: store volatile float [[REAL]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0)
-  // CHECK-NEXT: store volatile float [[IMAG]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1)
+  // CHECK: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[IMAG:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[REAL]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4
+  // CHECK-NEXT: store volatile float [[IMAG]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
   // CHECK-NEXT: ret void
   extern volatile _Complex float test1_v;
   test1_v = test1_v;
diff --git a/test/CodeGen/volatile-complex.c b/test/CodeGen/volatile-complex.c
new file mode 100644
index 0000000000..92ffd9de45
--- /dev/null
+++ b/test/CodeGen/volatile-complex.c
@@ -0,0 +1,72 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+// Validate that volatile _Complex loads and stores are generated
+// properly, including their alignment (even when overaligned).
+//
+// This test assumes that floats are 32-bit aligned and doubles are
+// 64-bit aligned, and uses x86-64 as a target that should have this
+// datalayout.
+
+// CHECK: target datalayout = "{{.*}}f32:32:32-f64:64:64{{.*}}"
+
+volatile _Complex float cf;
+volatile _Complex double cd;
+volatile _Complex float cf32 __attribute__((aligned(32)));
+volatile _Complex double cd32 __attribute__((aligned(32)));
+
+
+// CHECK: define void @test_cf()
+// CHECK-NEXT: entry:
+void test_cf() {
+  // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4
+  // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4
+  (void)(cf);
+  // CHECK-NEXT: [[R:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4
+  // CHECK-NEXT: [[I:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4
+  // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4
+  (void)(cf=cf);
+  // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_cd()
+// CHECK-NEXT: entry:
+void test_cd() {
+  // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8
+  // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8
+  (void)(cd);
+  // CHECK-NEXT: [[R:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8
+  // CHECK-NEXT: [[I:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8
+  // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8
+  // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8
+  (void)(cd=cd);
+  // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_cf32()
+// CHECK-NEXT: entry:
+void test_cf32() {
+  // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32
+  // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4
+  (void)(cf32);
+  // CHECK-NEXT: [[R:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32
+  // CHECK-NEXT: [[I:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32
+  // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4
+  (void)(cf32=cf32);
+  // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_cd32()
+// CHECK-NEXT: entry:
+void test_cd32() {
+  // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32
+  // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8
+  (void)(cd32);
+  // CHECK-NEXT: [[R:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32
+  // CHECK-NEXT: [[I:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8
+  // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32
+  // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8
+  (void)(cd32=cd32);
+  // CHECK-NEXT: ret void
+}
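
As a rough restatement of the rule r186490 introduces (an editor's sketch, not the actual clang code): the real part inherits the lvalue's alignment, while the imaginary part is clamped to the complex type's natural alignment, since any overalignment only applies to the start of the object.

```c
#include <stdio.h>

/* Illustrative helper, not clang code: the alignments assigned to the
 * real and imaginary halves of a volatile _Complex access. */
static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void) {
  unsigned lvalue_align  = 32; /* e.g. cf32 declared with aligned(32)      */
  unsigned natural_align = 4;  /* natural alignment of _Complex float      */
  unsigned real_align = lvalue_align;                       /* -> align 32 */
  unsigned imag_align = min_u(lvalue_align, natural_align); /* -> align 4  */
  printf("real: align %u, imag: align %u\n", real_align, imag_align);
  return 0;
}
```

This matches the new volatile-complex.c checks: for cf32 the real-part accesses carry align 32 and the imaginary-part accesses carry align 4.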