author     Tim Northover <Tim.Northover@arm.com>   2013-02-08 08:00:13 +0000
committer  Tim Northover <Tim.Northover@arm.com>   2013-02-08 08:00:13 +0000
commit     026ce82649a872c0f5e8ea039341644f25ae53a7 (patch)
tree       aedc010ec4fc948e54a30ac1b6f5631f08c2e823
parent     6bbaa772515e5bd021f38358667746db40ae54d2 (diff)
Improve filechecking of volatile test.
My previous attempt was extremely deficient: it allowed extra volatile operations to be introduced and did not even check all of the ones that are present. This attempt doesn't try to keep track of the values stored or of offsets within particular objects, just that the correct objects are accessed in a correctly volatile manner throughout.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@174700 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--   test/CodeGen/volatile.c   147
1 file changed, 110 insertions(+), 37 deletions(-)
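For background on the FileCheck idiom the rewritten test relies on, here is a minimal sketch. It is not part of the commit: it uses a hypothetical global g, and it assumes the 2013-era typed-pointer IR spellings that appear in the checks below (e.g. "load volatile i32* @g" rather than today's "load volatile i32, ptr @g"). The pattern [[I:%[a-zA-Z0-9_.]+]] captures the SSA name of i's alloca on its first match, and every later [[I]] must match that same name, so each subsequent load/store is tied to the correct object instead of to a bare "load volatile".

// RUN: %clang_cc1 -emit-llvm < %s | FileCheck %s
// Minimal sketch of the capture-and-reuse pattern used in volatile.c,
// with a hypothetical volatile global g.
volatile int g;

int main(void) {
  int i;
// CHECK: [[I:%[a-zA-Z0-9_.]+]] = alloca i32
  i = g;   // volatile load of @g, then a plain store into i
// CHECK: load volatile i32* @g
// CHECK: store i32 {{.*}}, i32* [[I]]
  g = i;   // plain load of i, then a volatile store to @g
// CHECK: load i32* [[I]]
// CHECK: store volatile i32 {{.*}}, i32* @g
}

Binding the alloca's name once and reusing it is what lets the new checks pin each access to the right object and to the right volatility without caring about the particular values stored, which is the trade-off described in the commit message.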
diff --git a/test/CodeGen/volatile.c b/test/CodeGen/volatile.c
index b028635344..0dcdc15c77 100644
--- a/test/CodeGen/volatile.c
+++ b/test/CodeGen/volatile.c
@@ -1,9 +1,5 @@
// RUN: %clang_cc1 -emit-llvm < %s | FileCheck %s
-// The number 28 comes from the current codegen for volatile loads;
-// if this number changes, it's not necessarily something wrong, but
-// something has changed to affect volatile load/store codegen
-
int S;
volatile int vS;
@@ -41,94 +37,171 @@ volatile_int vtS;
int main() {
int i;
-
+// CHECK: [[I:%[a-zA-Z0-9_.]+]] = alloca i32
// load
i=S;
+// CHECK: load i32* @S
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vS;
-// CHECK: load volatile
+// CHECK: load volatile i32* @vS
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=*pS;
+// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pS
+// CHECK: load i32* [[PS_VAL]]
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=*pvS;
-// CHECK: load volatile
+// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pvS
+// CHECK: load volatile i32* [[PVS_VAL]]
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=A[2];
+// CHECK: load i32* getelementptr {{.*}} @A
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vA[2];
-// CHECK: load volatile
+// CHECK: load volatile i32* getelementptr {{.*}} @vA
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=F.x;
+// CHECK: load i32* getelementptr {{.*}} @F
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vF.x;
-// CHECK: load volatile
+// CHECK: load volatile i32* getelementptr {{.*}} @vF
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=F2.x;
+// CHECK: load i32* getelementptr {{.*}} @F2
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vF2.x;
-// CHECK: load volatile
+// CHECK: load volatile i32* getelementptr {{.*}} @vF2
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vpF2->x;
-// CHECK: load volatile
+// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9_.]+}}** @vpF2
+// CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
+// CHECK: load volatile i32* [[ELT]]
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=F3.x.y;
+// CHECK: load i32* getelementptr {{.*}} @F3
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vF3.x.y;
-// CHECK: load volatile
+// CHECK: load volatile i32* getelementptr {{.*}} @vF3
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=BF.x;
+// CHECK: load i8* getelementptr {{.*}} @BF
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vBF.x;
-// CHECK: load volatile
+// CHECK: load volatile i8* getelementptr {{.*}} @vBF
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=V[3];
+// CHECK: load <4 x i32>* @V
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vV[3];
-// CHECK: load volatile
+// CHECK: load volatile <4 x i32>* @vV
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=VE.yx[1];
+// CHECK: load <4 x i32>* @VE
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vVE.zy[1];
-// CHECK: load volatile
+// CHECK: load volatile <4 x i32>* @vVE
+// CHECK: store i32 {{.*}}, i32* [[I]]
i = aggFct().x; // Note: not volatile
+ // N.b. Aggregate return is extremely target specific, all we can
+ // really say here is that there probably shouldn't be a volatile
+ // load.
+// CHECK-NOT: load volatile
+// CHECK: store i32 {{.*}}, i32* [[I]]
i=vtS;
-// CHECK: load volatile
+// CHECK: load volatile i32* @vtS
+// CHECK: store i32 {{.*}}, i32* [[I]]
// store
S=i;
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* @S
vS=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* @vS
*pS=i;
+// CHECK: load i32* [[I]]
+// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pS
+// CHECK: store i32 {{.*}}, i32* [[PS_VAL]]
*pvS=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pvS
+// CHECK: store volatile i32 {{.*}}, i32* [[PVS_VAL]]
A[2]=i;
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @A
vA[2]=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vA
F.x=i;
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F
vF.x=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF
F2.x=i;
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F2
vF2.x=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF2
vpF2->x=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9._]+}}** @vpF2
+// CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
+// CHECK: store volatile i32 {{.*}}, i32* [[ELT]]
vF3.x.y=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF3
BF.x=i;
+// CHECK: load i32* [[I]]
+// CHECK: load i8* getelementptr {{.*}} @BF
+// CHECK: store i8 {{.*}}, i8* getelementptr {{.*}} @BF
vBF.x=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: load volatile i8* getelementptr {{.*}} @vBF
+// CHECK: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF
V[3]=i;
+// CHECK: load i32* [[I]]
+// CHECK: load <4 x i32>* @V
+// CHECK: store <4 x i32> {{.*}}, <4 x i32>* @V
vV[3]=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: load volatile <4 x i32>* @vV
+// CHECK: store volatile <4 x i32> {{.*}}, <4 x i32>* @vV
vtS=i;
-// CHECK: store volatile
+// CHECK: load i32* [[I]]
+// CHECK: store volatile i32 {{.*}}, i32* @vtS
// other ops:
++S;
+// CHECK: load i32* @S
+// CHECK: store i32 {{.*}}, i32* @S
++vS;
-// CHECK: load volatile
-// CHECK: store volatile
+// CHECK: load volatile i32* @vS
+// CHECK: store volatile i32 {{.*}}, i32* @vS
i+=S;
+// CHECK: load i32* @S
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* [[I]]
i+=vS;
-// CHECK: load volatile
+// CHECK: load volatile i32* @vS
+// CHECK: load i32* [[I]]
+// CHECK: store i32 {{.*}}, i32* [[I]]
++vtS;
-// CHECK: load volatile
-// CHECK: store volatile
+// CHECK: load volatile i32* @vtS
+// CHECK: store volatile i32 {{.*}}, i32* @vtS
(void)vF2;
// From vF2 to a temporary
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* {{.*}} @vF2 {{.*}}, i1 true)
vF2 = vF2;
// vF2 to itself
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
vF2 = vF2 = vF2;
// vF2 to itself twice
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
vF2 = (vF2, vF2);
// vF2 to a temporary, then vF2 to itself
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
-// CHECK: call void @llvm.memcpy{{.*}} i1 true
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
}