author     Chris Lattner <sabre@nondot.org>   2011-11-27 06:54:59 +0000
committer  Chris Lattner <sabre@nondot.org>   2011-11-27 06:54:59 +0000
commit     d2bf432b2b6ba02e20958953a237213d48b00f20 (patch)
tree       e7f62c784abde309223ad5206d56fa2d33dd8b12 /test
parent     8ddff91282ec36360677d0febd34803fd9f02153 (diff)
Upgrade syntax of tests using volatile instructions to use 'load volatile' instead of 'volatile load', which is archaic.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145171 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
91 files changed, 300 insertions, 318 deletions
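To make the syntax change concrete, a minimal before/after sketch (illustrative only, using a hypothetical global @g that is not taken from the patch; the pointer-typed operands such as i32* reflect the typed-pointer IR of this era):

    ; archaic form being retired:
    %v = volatile load i32* @g
    volatile store i32 %v, i32* @g

    ; upgraded form, with 'volatile' trailing the opcode:
    %v = load volatile i32* @g
    store volatile i32 %v, i32* @g

The semantics are unchanged; only the position of the 'volatile' keyword moves.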
diff --git a/test/Analysis/BasicAA/constant-over-index.ll b/test/Analysis/BasicAA/constant-over-index.ll
index 8a8ac4f721..48ef2595f2 100644
--- a/test/Analysis/BasicAA/constant-over-index.ll
+++ b/test/Analysis/BasicAA/constant-over-index.ll
@@ -16,8 +16,8 @@ loop:
 %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
- volatile store double 0.0, double* %p3
- volatile store double 0.1, double* %p.0.i.0
+ store volatile double 0.0, double* %p3
+ store volatile double 0.1, double* %p.0.i.0
 %i.next = add i64 %i, 1
 %cmp = icmp slt i64 %i.next, 3
diff --git a/test/Analysis/BasicAA/phi-and-select.ll b/test/Analysis/BasicAA/phi-and-select.ll
index 9bc47ae44a..0ed4a2c5a5 100644
--- a/test/Analysis/BasicAA/phi-and-select.ll
+++ b/test/Analysis/BasicAA/phi-and-select.ll
@@ -17,8 +17,8 @@ false:
 exit:
 %a = phi double* [ %x, %true ], [ %y, %false ]
 %b = phi double* [ %x, %false ], [ %y, %true ]
- volatile store double 0.0, double* %a
- volatile store double 1.0, double* %b
+ store volatile double 0.0, double* %a
+ store volatile double 1.0, double* %b
 ret void
 }
@@ -27,8 +27,8 @@ define void @bar(i1 %m, double* noalias %x, double* noalias %y) {
 entry:
 %a = select i1 %m, double* %x, double* %y
 %b = select i1 %m, double* %y, double* %x
- volatile store double 0.000000e+00, double* %a
- volatile store double 1.000000e+00, double* %b
+ store volatile double 0.000000e+00, double* %a
+ store volatile double 1.000000e+00, double* %b
 ret void
 }
@@ -56,8 +56,8 @@ nfalse:
 nexit:
 %b = phi double* [ %v, %ntrue ], [ %w, %nfalse ]
- volatile store double 0.0, double* %a
- volatile store double 1.0, double* %b
+ store volatile double 0.0, double* %a
+ store volatile double 1.0, double* %b
 ret void
 }
@@ -67,7 +67,7 @@ define void @fin(i1 %m, double* noalias %x, double* noalias %y,
 entry:
 %a = select i1 %m, double* %x, double* %y
 %b = select i1 %n, double* %v, double* %w
- volatile store double 0.000000e+00, double* %a
- volatile store double 1.000000e+00, double* %b
+ store volatile double 0.000000e+00, double* %a
+ store volatile double 1.000000e+00, double* %b
 ret void
 }
diff --git a/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll b/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
index 78c6222375..94c562bf01 100644
--- a/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
+++ b/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
@@ -11,7 +11,7 @@ bb74.i: ; preds = %bb88.i, %bb74.i, %entry
 bb88.i: ; preds = %bb74.i
 br i1 false, label %mandel.exit, label %bb74.i
 mandel.exit: ; preds = %bb88.i
- %tmp2 = volatile load double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; <double> [#uses=1]
+ %tmp2 = load volatile double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; <double> [#uses=1]
 %tmp23 = fptosi double %tmp2 to i32 ; <i32> [#uses=1]
 %tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0), i32 %tmp23 ) ; <i32> [#uses=0]
 ret i32 0
diff --git a/test/CodeGen/ARM/call.ll b/test/CodeGen/ARM/call.ll
index 0f9543f0a1..107e79a9e0 100644
--- a/test/CodeGen/ARM/call.ll
+++ b/test/CodeGen/ARM/call.ll
@@ -26,7 +26,7 @@ define i32* @m_231b(i32, i32, i32*, i32*, i32*) nounwind {
 ; CHECKV4: bx r{{.*}}
 BB0:
 %5 = inttoptr i32 %0 to i32* ; <i32*> [#uses=1]
- %t35 = volatile load i32* %5 ; <i32> [#uses=1]
+ %t35 = load volatile i32* %5 ; <i32> [#uses=1]
 %6 = inttoptr i32 %t35 to i32** ; <i32**> [#uses=1]
 %7 = getelementptr i32** %6, i32 86 ; <i32**> [#uses=1]
 %8 = load i32** %7 ; <i32*> [#uses=1]
diff --git a/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll b/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll
deleted file mode 100644
index dd505af483..0000000000
--- a/test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=c | grep volatile
-
-define void @test(i32* %P) {
- %X = volatile load i32* %P ; <i32> [#uses=1]
- volatile store i32 %X, i32* %P
- ret void
-}
-
diff --git a/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll b/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll
deleted file mode 100644
index 99de837dc7..0000000000
--- a/test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=c | grep {\* *volatile *\*}
-
-@G = external global void ()* ; <void ()**> [#uses=2]
-
-define void @test() {
- volatile store void ()* @test, void ()** @G
- volatile load void ()** @G ; <void ()*>:1 [#uses=0]
- ret void
-}
-
diff --git a/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll b/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
index 6e0cf68292..e6eeba30fa 100644
--- a/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
+++ b/test/CodeGen/CBackend/2008-02-01-UnalignedLoadStore.ll
@@ -8,8 +8,8 @@ define void @test(i32* %P) {
 }
 define void @test2(i32* %P) {
- %X = volatile load i32* %P, align 2
- volatile store i32 %X, i32* %P, align 2
+ %X = load volatile i32* %P, align 2
+ store volatile i32 %X, i32* %P, align 2
 ret void
 }
diff --git a/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll b/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
index f339373ffc..4c7d2d0925 100644
--- a/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
+++ b/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
@@ -7,9 +7,9 @@ target triple = "msp430-unknown-linux-gnu"
 define void @uip_arp_arpin() nounwind {
 entry:
- %tmp = volatile load i16* @uip_len ; <i16> [#uses=1]
+ %tmp = load volatile i16* @uip_len ; <i16> [#uses=1]
 %cmp = icmp ult i16 %tmp, 42 ; <i1> [#uses=1]
- volatile store i16 0, i16* @uip_len
+ store volatile i16 0, i16* @uip_len
 br i1 %cmp, label %if.then, label %if.end
 if.then: ; preds = %entry
diff --git a/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll b/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
index 088d3e1e7b..e8c0d14afd 100644
--- a/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
+++ b/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
@@ -6,8 +6,8 @@ target triple = "msp430-generic-generic"
 define i16 @foo() nounwind readnone {
 entry:
 %result = alloca i16, align 1 ; <i16*> [#uses=2]
- volatile store i16 0, i16* %result
- %tmp = volatile load i16* %result ; <i16> [#uses=1]
+ store volatile i16 0, i16* %result
+ %tmp = load volatile i16* %result ; <i16> [#uses=1]
 ret i16 %tmp
 }
@@ -22,8 +22,8 @@ while.cond: ; preds = %while.cond, %entry
 while.end: ; preds = %while.cond
 %result.i = alloca i16, align 1 ; <i16*> [#uses=2]
- volatile store i16 0, i16* %result.i
- %tmp.i = volatile load i16* %result.i ; <i16> [#uses=0]
+ store volatile i16 0, i16* %result.i
+ %tmp.i = load volatile i16* %result.i ; <i16> [#uses=0]
 ret i16 0
 }
diff --git a/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll b/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
index 4d7d9b96c7..9fab4826e0 100644
--- a/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
+++ b/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
@@ -11,10 +11,10 @@ entry:
 %x.addr = alloca i8 ; <i8*> [#uses=2]
 %tmp = alloca i8, align 1 ; <i8*> [#uses=2]
 store i8 %x, i8* %x.addr
- %tmp1 = volatile load i8* @"\010x0021" ; <i8> [#uses=1]
+ %tmp1 = load volatile i8* @"\010x0021" ; <i8> [#uses=1]
 store i8 %tmp1, i8* %tmp
 %tmp2 = load i8* %x.addr ; <i8> [#uses=1]
- volatile store i8 %tmp2, i8* @"\010x0021"
+ store volatile i8 %tmp2, i8* @"\010x0021"
 %tmp3 = load i8* %tmp ; <i8> [#uses=1]
 store i8 %tmp3, i8* %retval
 %0 = load i8* %retval ; <i8> [#uses=1]
diff --git a/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll b/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
index 856eb9db3f..c1a186a637 100644
--- a/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
+++ b/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
@@ -4,9 +4,9 @@ define void @foo() nounwind {
 entry:
 %r = alloca i8 ; <i8*> [#uses=2]
 %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- volatile load i8* %r, align 1 ; <i8>:0 [#uses=1]
+ load volatile i8* %r, align 1 ; <i8>:0 [#uses=1]
 or i8 %0, 1 ; <i8>:1 [#uses=1]
- volatile store i8 %1, i8* %r, align 1
+ store volatile i8 %1, i8* %r, align 1
 br label %return
 return: ; preds = %entry
diff --git a/test/CodeGen/MSP430/AddrMode-bis-rx.ll b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
index 4f9a7248bb..c7ecb5ab85 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
@@ -32,7 +32,7 @@ define i8 @am3(i8 %x, i16 %n) nounwind {
 ; CHECK: bis.b bar(r14), r15
 define i16 @am4(i16 %x) nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
 %2 = or i16 %1,%x
 ret i16 %2
 }
diff --git a/test/CodeGen/MSP430/AddrMode-bis-xr.ll b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
index 17ebd87368..727c29fc08 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
@@ -35,9 +35,9 @@ define void @am3(i16 %i, i8 %x) nounwind {
 ; CHECK: bis.b r14, bar(r15)
 define void @am4(i16 %x) nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
 %2 = or i16 %x, %1
- volatile store i16 %2, i16* inttoptr(i16 32 to i16*)
+ store volatile i16 %2, i16* inttoptr(i16 32 to i16*)
 ret void
 }
 ; CHECK: am4:
diff --git a/test/CodeGen/MSP430/AddrMode-mov-rx.ll b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
index 6676b88cd1..7cd345bd8f 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
@@ -29,7 +29,7 @@ define i8 @am3(i16 %n) nounwind {
 ; CHECK: mov.b bar(r15), r15
 define i16 @am4() nounwind {
- %1 = volatile load i16* inttoptr(i16 32 to i16*)
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
 ret i16 %1
 }
 ; CHECK: am4:
diff --git a/test/CodeGen/MSP430/AddrMode-mov-xr.ll b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
index 4b327b0578..5eeb02f729 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
@@ -29,7 +29,7 @@ define void @am3(i16 %i, i8 %a) nounwind {
 ; CHECK: mov.b r14, bar(r15)
 define void @am4(i16 %a) nounwind {
- volatile store i16 %a, i16* inttoptr(i16 32 to i16*)
+ store volatile i16 %a, i16* inttoptr(i16 32 to i16*)
 ret void
 }
 ; CHECK: am4:
diff --git a/test/CodeGen/Mips/2010-07-20-Switch.ll b/test/CodeGen/Mips/2010-07-20-Switch.ll
index 5425bdf4f8..83b8a250c2 100644
--- a/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -3,8 +3,8 @@ define i32 @main() nounwind readnone {
 entry:
 %x = alloca i32, align 4 ; <i32*> [#uses=2]
- volatile store i32 2, i32* %x, align 4
- %0 = volatile load i32* %x, align 4 ; <i32> [#uses=1]
+ store volatile i32 2, i32* %x, align 4
+ %0 = load volatile i32* %x, align 4 ; <i32> [#uses=1]
 ; CHECK: lui $3, %hi($JTI0_0)
 ; CHECK: addiu $3, $3, %lo($JTI0_0)
 ; CHECK: sll $2, $2, 2
diff --git a/test/CodeGen/Mips/mipslopat.ll b/test/CodeGen/Mips/mipslopat.ll
index 02798285b4..1f433b9870 100644
--- a/test/CodeGen/Mips/mipslopat.ll
+++ b/test/CodeGen/Mips/mipslopat.ll
@@ -6,7 +6,7 @@ define void @simple_vol_file() nounwind {
 entry:
- %tmp = volatile load i32** @stat_vol_ptr_int, align 4
+ %tmp = load volatile i32** @stat_vol_ptr_int, align 4
 %0 = bitcast i32* %tmp to i8*
 call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
 %tmp1 = load i32** @stat_ptr_vol_int, align 4
diff --git a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
index 7b6d4916c1..e7a1cf69c6 100644
--- a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
+++ b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
@@ -12,7 +12,7 @@ declare void @IODelay(i32)
 define i32 @_Z14ProgramByWordsPvyy(i8* %buffer, i64 %Offset, i64 %bufferSize) nounwind {
 entry:
- volatile store i8 -1, i8* null, align 1
+ store volatile i8 -1, i8* null, align 1
 %tmp28 = icmp eq i8 0, 0 ; <i1> [#uses=1]
 br i1 %tmp28, label %bb107, label %bb
@@ -43,7 +43,7 @@ bb68: ; preds = %bb31
 %tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; <i32> [#uses=1]
 %tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; <i8*> [#uses=1]
 tail call void @IODelay( i32 500 ) nounwind
- %tmp53.i = volatile load i16* null, align 2 ; <i16> [#uses=2]
+ %tmp53.i = load volatile i16* null, align 2 ; <i16> [#uses=2]
 %tmp5455.i = zext i16 %tmp53.i to i32 ; <i32> [#uses=1]
 br i1 false, label %bb.i, label %bb65.i
@@ -59,7 +59,7 @@ bb70.i: ; preds = %bb65.i
 ret i32 0
 _Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
- volatile store i8 -1, i8* %tmp202122.i, align 1
+ store volatile i8 -1, i8* %tmp202122.i, align 1
 %tmp93 = add i64 0, %Pos.0.reg2mem.0 ; <i64> [#uses=2]
 %tmp98 = add i64 0, %Offset ; <i64> [#uses=1]
 %tmp100 = icmp ugt i64 %tmp98, %tmp93 ; <i1> [#uses=1]
diff --git a/test/CodeGen/Thumb/vargs.ll b/test/CodeGen/Thumb/vargs.ll
index c2ba208e4a..50a1a07288 100644
--- a/test/CodeGen/Thumb/vargs.ll
+++ b/test/CodeGen/Thumb/vargs.ll
@@ -13,9 +13,9 @@ entry:
 bb: ; preds = %bb, %entry
 %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; <i32> [#uses=2]
- %tmp = volatile load i8** %va ; <i8*> [#uses=2]
+ %tmp = load volatile i8** %va ; <i8*> [#uses=2]
 %tmp2 = getelementptr i8* %tmp, i32 4 ; <i8*> [#uses=1]
- volatile store i8* %tmp2, i8** %va
+ store volatile i8* %tmp2, i8** %va
 %tmp5 = add i32 %a_addr.0, -1 ; <i32> [#uses=1]
 %tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; <i1> [#uses=1]
 br i1 %tmp.upgrd.2, label %bb7, label %bb
diff --git a/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
index bb734aca4e..fcf1bae796 100644
--- a/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
+++ b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
@@ -21,7 +21,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
 define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
 entry:
 %tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
- volatile store i32 1, i32* %tmp1, align 4
+ store volatile i32 1, i32* %tmp1, align 4
 %tmp12 = getelementptr inbounds %s1* %this, i32 0, i32 1
 store i32 %levels, i32* %tmp12, align 4
 %tmp13 = getelementptr inbounds %s1* %this, i32 0, i32 3
@@ -46,7 +46,7 @@ entry:
 %tmp24 = shl i32 %flags.0, 16
 %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
 %tmp25 = getelementptr inbounds %s1* %this, i32 0, i32 2, i32 0, i32 0
- volatile store i32 1, i32* %tmp25, align 4
+ store volatile i32 1, i32* %tmp25, align 4
 %tmp26 = icmp eq i32 %levels, 0
 br i1 %tmp26, label %return, label %bb4
diff --git a/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll b/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
index 90af3870bd..a6234d377d 100644
--- a/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
+++ b/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=x86 | not grep movsd
 ; RUN: llc < %s -march=x86 | grep movw
 ; RUN: llc < %s -march=x86 | grep addw
-; These transforms are turned off for volatile loads and stores.
+; These transforms are turned off for load volatiles and stores.
 ; Check that they weren't turned off for all loads and stores!
 @atomic = global double 0.000000e+00 ; <double*> [#uses=1]
diff --git a/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll b/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
index 86652826ae..037559edaf 100644
--- a/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
+++ b/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
@@ -8,13 +8,13 @@ define i16 @f(i64 %x, double %y) {
 %b = bitcast i64 %x to double ; <double> [#uses=1]
- volatile store double %b, double* @atomic ; one processor operation only
- volatile store double 0.000000e+00, double* @atomic2 ; one processor operation only
+ store volatile double %b, double* @atomic ; one processor operation only
+ store volatile double 0.000000e+00, double* @atomic2 ; one processor operation only
 %b2 = bitcast double %y to i64 ; <i64> [#uses=1]
- volatile store i64 %b2, i64* @anything ; may transform to store of double
- %l = volatile load i32* @ioport ; must not narrow
+ store volatile i64 %b2, i64* @anything ; may transform to store of double
+ %l = load volatile i32* @ioport ; must not narrow
 %t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = volatile load i32* @ioport ; must not narrow
+ %l2 = load volatile i32* @ioport ; must not narrow
 %tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
 %t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
 %f = add i16 %t, %t2 ; <i16> [#uses=1]
diff --git a/test/CodeGen/X86/2008-09-29-VolatileBug.ll b/test/CodeGen/X86/2008-09-29-VolatileBug.ll
index 935c4c55f0..f35245bb2a 100644
--- a/test/CodeGen/X86/2008-09-29-VolatileBug.ll
+++ b/test/CodeGen/X86/2008-09-29-VolatileBug.ll
@@ -6,7 +6,7 @@ define i32 @main() nounwind {
 entry:
- %0 = volatile load i32* @g_407, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32* @g_407, align 4 ; <i32> [#uses=1]
 %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
 %2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; <i32> [#uses=0]
 ret i32 0
diff --git a/test/CodeGen/X86/2009-01-31-BigShift2.ll b/test/CodeGen/X86/2009-01-31-BigShift2.ll
index 9d240844af..3e425536d1 100644
--- a/test/CodeGen/X86/2009-01-31-BigShift2.ll
+++ b/test/CodeGen/X86/2009-01-31-BigShift2.ll
@@ -6,6 +6,6 @@ define void @test(<8 x double>* %P, i64* %Q) nounwind {
 %B = bitcast <8 x double> %A to i512 ; <i512> [#uses=1]
 %C = lshr i512 %B, 448 ; <i512> [#uses=1]
 %D = trunc i512 %C to i64 ; <i64> [#uses=1]
- volatile store i64 %D, i64* %Q
+ store volatile i64 %D, i64* %Q
 ret void
 }
diff --git a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
index 90dabb8ab6..8bbdb0e82f 100644
--- a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
+++ b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
@@ -9,30 +9,30 @@
 @X = external global i64 ; <i64*> [#uses=25]
 define fastcc i64 @foo() nounwind {
- %tmp = volatile load i64* @X ; <i64> [#uses=7]
- %tmp1 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp2 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp3 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp4 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp5 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp6 = volatile load i64* @X ; <i64> [#uses=2]
- %tmp7 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp8 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp9 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp10 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp11 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp12 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp13 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp14 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp15 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp16 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp17 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp18 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp19 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp20 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp21 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp22 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp23 = volatile load i64* @X ; <i64> [#uses=1]
+ %tmp = load volatile i64* @X ; <i64> [#uses=7]
+ %tmp1 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp2 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp3 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp4 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp5 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp6 = load volatile i64* @X ; <i64> [#uses=2]
+ %tmp7 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp8 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp9 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp10 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp11 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp12 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp13 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp14 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp15 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp16 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp17 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp18 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp19 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp20 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp21 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp22 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp23 = load volatile i64* @X ; <i64> [#uses=1]
 %tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
 %tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
 %tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
@@ -229,7 +229,7 @@ define fastcc i64 @foo() nounwind {
 %tmp217 = add i64 %tmp205, %tmp215 ; <i64> [#uses=1]
 %tmp218 = add i64 %tmp217, %tmp211 ; <i64> [#uses=1]
 %tmp219 = call i64 @llvm.bswap.i64(i64 %tmp23) ; <i64> [#uses=2]
- volatile store i64 %tmp219, i64* @X, align 8
+ store volatile i64 %tmp219, i64* @X, align 8
 %tmp220 = add i64 %tmp203, %tmp190 ; <i64> [#uses=1]
 %tmp221 = add i64 %tmp220, %tmp216 ; <i64> [#uses=1]
 %tmp222 = add i64 %tmp219, %tmp177 ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll b/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
index a5e28c0748..c2cd89c33e 100644
--- a/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
+++ b/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
@@ -12,7 +12,7 @@ entry:
 br label %bb
 bb: ; preds = %bb.i, %bb, %entry
- %2 = volatile load i32* @g_9, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32* @g_9, align 4 ; <i32> [#uses=2]
 %3 = icmp sgt i32 %2, 1 ; <i1> [#uses=1]
 %4 = and i1 %3, %1 ; <i1> [#uses=1]
 br i1 %4, label %bb.i, label %bb
diff --git a/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll b/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
index 790fd88c46..410a42a428 100644
--- a/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
+++ b/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
@@ -41,18 +41,18 @@ bb3: ; preds = %bb2, %bb
 br i1 undef, label %bb5, label %bb4
 bb4: ; preds = %bb3
- %17 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %17 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
 br label %bb5
 bb5: ; preds = %bb4, %bb3
- %18 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %18 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
 %19 = sext i8 undef to i16 ; <i16> [#uses=1]
 %20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; <i32> [#uses=0]
 br i1 undef, label %return, label %bb6.preheader
 bb6.preheader: ; preds = %bb5
 %21 = sext i8 %p_52 to i32 ; <i32> [#uses=1]
- %22 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
+ %22 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
 %23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; <i32> [#uses=0]
 unreachable
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index d0afee6ada..66def49e4b 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -690,199 +690,199 @@ define void @many_unanalyzable_branches() {
 entry:
 br label %0
- %val0 = volatile load float* undef
+ %val0 = load volatile float* undef
 %cmp0 = fcmp une float %val0, undef
 br i1 %cmp0, label %1, label %0
- %val1 = volatile load float* undef
+ %val1 = load volatile float* undef
 %cmp1 = fcmp une float %val1, undef
 br i1 %cmp1, label %2, label %1
- %val2 = volatile load float* undef
+ %val2 = load volatile float* undef
 %cmp2 = fcmp une float %val2, undef
 br i1 %cmp2, label %3, label %2
- %val3 = volatile load float* undef
+ %val3 = load volatile float* undef
 %cmp3 = fcmp une float %val3, undef
 br i1 %cmp3, label %4, label %3
- %val4 = volatile load float* undef
+ %val4 = load volatile float* undef
 %cmp4 = fcmp une float %val4, undef
 br i1 %cmp4, label %5, label %4
- %val5 = volatile load float* undef
+ %val5 = load volatile float* undef
 %cmp5 = fcmp une float %val5, undef
 br i1 %cmp5, label %6, label %5
- %val6 = volatile load float* undef
+ %val6 = load volatile float* undef
 %cmp6 = fcmp une float %val6, undef
 br i1 %cmp6, label %7, label %6
- %val7 = volatile load float* undef
+ %val7 = load volatile float* undef
 %cmp7 = fcmp une float %val7, undef
 br i1 %cmp7, label %8, label %7
- %val8 = volatile load float* undef
+ %val8 = load volatile float* undef
 %cmp8 = fcmp une float %val8, undef
 br i1 %cmp8, label %9, label %8
- %val9 = volatile load float* undef
+ %val9 = load volatile float* undef
 %cmp9 = fcmp une float %val9, undef
 br i1 %cmp9, label %10, label %9
- %val10 = volatile load float* undef
+ %val10 = load volatile float* undef
 %cmp10 = fcmp une float %val10, undef
 br i1 %cmp10, label %11, label %10
- %val11 = volatile load float* undef
+ %val11 = load volatile float* undef
 %cmp11 = fcmp une float %val11, undef
 br i1 %cmp11, label %12, label %11
- %val12 = volatile load float* undef
+ %val12 = load volatile float* undef
 %cmp12 = fcmp une float %val12, undef
 br i1 %cmp12, label %13, label %12
- %val13 = volatile load float* undef
+ %val13 = load volatile float* undef
 %cmp13 = fcmp une float %val13, undef
 br i1 %cmp13, label %14, label %13
- %val14 = volatile load float* undef
+ %val14 = load volatile float* undef
 %cmp14 = fcmp une float %val14, undef
 br i1 %cmp14, label %15, label %14
- %val15 = volatile load float* undef
+ %val15 = load volatile float* undef
 %cmp15 = fcmp une float %val15, undef
 br i1 %cmp15, label %16, label %15
- %val16 = volatile load float* undef
+ %val16 = load volatile float* undef
 %cmp16 = fcmp une float %val16, undef
 br i1 %cmp16, label %17, label %16
- %val17 = volatile load float* undef
+ %val17 = load volatile float* undef
 %cmp17 = fcmp une float %val17, undef
 br i1 %cmp17, label %18, label %17
- %val18 = volatile load float* undef
+ %val18 = load volatile float* undef
 %cmp18 = fcmp une float %val18, undef
 br i1 %cmp18, label %19, label %18
- %val19 = volatile load float* undef
+ %val19 = load volatile float* undef
 %cmp19 = fcmp une float %val19, undef
 br i1 %cmp19, label %20, label %19
- %val20 = volatile load float* undef
+ %val20 = load volatile float* undef
 %cmp20 = fcmp une float %val20, undef
 br i1 %cmp20, label %21, label %20
- %val21 = volatile load float* undef
+ %val21 = load volatile float* undef
 %cmp21 = fcmp une float %val21, undef
 br i1 %cmp21, label %22, label %21
- %val22 = volatile load float* undef
+ %val22 = load volatile float* undef
 %cmp22 = fcmp une float %val22, undef
 br i1 %cmp22, label %23, label %22
- %val23 = volatile load float* undef
+ %val23 = load volatile float* undef
 %cmp23 = fcmp une float %val23, undef
 br i1 %cmp23, label %24, label %23
- %val24 = volatile load float* undef
+ %val24 = load volatile float* undef
 %cmp24 = fcmp une float %val24, undef
 br i1 %cmp24, label %25, label %24
- %val25 = volatile load float* undef
+ %val25 = load volatile float* undef
 %cmp25 = fcmp une float %val25, undef
 br i1 %cmp25, label %26, label %25
- %val26 = volatile load float* undef
+ %val26 = load volatile float* undef
 %cmp26 = fcmp une float %val26, undef
 br i1 %cmp26, label %27, label %26
- %val27 = volatile load float* undef
+ %val27 = load volatile float* undef
 %cmp27 = fcmp une float %val27, undef
 br i1 %cmp27, label %28, label %27
- %val28 = volatile load float* undef
+ %val28 = load volatile float* undef
 %cmp28 = fcmp une float %val28, undef
 br i1 %cmp28, label %29, label %28
- %val29 = volatile load float* undef
+ %val29 = load volatile float* undef
 %cmp29 = fcmp une float %val29, undef
 br i1 %cmp29, label %30, label %29
- %val30 = volatile load float* undef
+ %val30 = load volatile float* undef
 %cmp30 = fcmp une float %val30, undef
 br i1 %cmp30, label %31, label %30
- %val31 = volatile load float* undef
+ %val31 = load volatile float* undef
 %cmp31 = fcmp une float %val31, undef
 br i1 %cmp31, label %32, label %31
- %val32 = volatile load float* undef
+ %val32 = load volatile float* undef
 %cmp32 = fcmp une float %val32, undef
 br i1 %cmp32, label %33, label %32
- %val33 = volatile load float* undef
+ %val33 = load volatile float* undef
 %cmp33 = fcmp une float %val33, undef
 br i1 %cmp33, label %34, label %33
- %val34 = volatile load float* undef
+ %val34 = load volatile float* undef
 %cmp34 = fcmp une float %val34, undef
 br i1 %cmp34, label %35, label %34
- %val35 = volatile load float* undef
+ %val35 = load volatile float* undef
 %cmp35 = fcmp une float %val35, undef
 br i1 %cmp35, label %36, label %35
- %val36 = volatile load float* undef
+ %val36 = load volatile float* undef
 %cmp36 = fcmp une float %val36, undef
 br i1 %cmp36, label %37, label %36
- %val37 = volatile load float* undef
+ %val37 = load volatile float* undef
 %cmp37 = fcmp une float %val37, undef
 br i1 %cmp37, label %38, label %37
- %val38 = volatile load float* undef
+ %val38 = load volatile float* undef
 %cmp38 = fcmp une float %val38, undef
 br i1 %cmp38, label %39, label %38
- %val39 = volatile load float* undef
+ %val39 = load volatile float* undef
 %cmp39 = fcmp une float %val39, undef
 br i1 %cmp39, label %40, label %39
- %val40 = volatile load float* undef
+ %val40 = load volatile float* undef
 %cmp40 = fcmp une float %val40, undef
 br i1 %cmp40, label %41, label %40
- %val41 = volatile load float* undef
+ %val41 = load volatile float* undef
 %cmp41 = fcmp une float %val41, undef
 br i1 %cmp41, label %42, label %41
- %val42 = volatile load float* undef
+ %val42 = load volatile float* undef
 %cmp42 = fcmp une float %val42, undef
 br i1 %cmp42, label %43, label %42
- %val43 = volatile load float* undef
+ %val43 = load volatile float* undef
 %cmp43 = fcmp une float %val43, undef
 br i1 %cmp43, label %44, label %43
- %val44 = volatile load float* undef
+ %val44 = load volatile float* undef
 %cmp44 = fcmp une float %val44, undef
 br i1 %cmp44, label %45, label %44
- %val45 = volatile load float* undef
+ %val45 = load volatile float* undef
 %cmp45 = fcmp une float %val45, undef
 br i1 %cmp45, label %46, label %45
- %val46 = volatile load float* undef
+ %val46 = load volatile float* undef
 %cmp46 = fcmp une float %val46, undef
 br i1 %cmp46, label %47, label %46
- %val47 = volatile load float* undef
+ %val47 = load volatile float* undef
 %cmp47 = fcmp une float %val47, undef
 br i1 %cmp47, label %48, label %47
- %val48 = volatile load float* undef
+ %val48 = load volatile float* undef
 %cmp48 = fcmp une float %val48, undef
 br i1 %cmp48, label %49, label %48
- %val49 = volatile load float* undef
+ %val49 = load volatile float* undef
 %cmp49 = fcmp une float %val49, undef
 br i1 %cmp49, label %50, label %49
- %val50 = volatile load float* undef
+ %val50 = load volatile float* undef
 %cmp50 = fcmp une float %val50, undef
 br i1 %cmp50, label %51, label %50
- %val51 = volatile load float* undef
+ %val51 = load volatile float* undef
 %cmp51 = fcmp une float %val51, undef
 br i1 %cmp51, label %52, label %51
- %val52 = volatile load float* undef
+ %val52 = load volatile float* undef
 %cmp52 = fcmp une float %val52, undef
 br i1 %cmp52, label %53, label %52
- %val53 = volatile load float* undef
+ %val53 = load volatile float* undef
 %cmp53 = fcmp une float %val53, undef
 br i1 %cmp53, label %54, label %53
- %val54 = volatile load float* undef
+ %val54 = load volatile float* undef
 %cmp54 = fcmp une float %val54, undef
 br i1 %cmp54, label %55, label %54
- %val55 = volatile load float* undef
+ %val55 = load volatile float* undef
 %cmp55 = fcmp une float %val55, undef
 br i1 %cmp55, label %56, label %55
- %val56 = volatile load float* undef
+ %val56 = load volatile float* undef
 %cmp56 = fcmp une float %val56, undef
 br i1 %cmp56, label %57, label %56
- %val57 = volatile load float* undef
+ %val57 = load volatile float* undef
 %cmp57 = fcmp une float %val57, undef
 br i1 %cmp57, label %58, label %57
- %val58 = volatile load float* undef
+ %val58 = load volatile float* undef
 %cmp58 = fcmp une float %val58, undef
 br i1 %cmp58, label %59, label %58
- %val59 = volatile load float* undef
+ %val59 = load volatile float* undef
 %cmp59 = fcmp une float %val59, undef
 br i1 %cmp59, label %60, label %59
- %val60 = volatile load float* undef
+ %val60 = load volatile float* undef
 %cmp60 = fcmp une float %val60, undef
 br i1 %cmp60, label %61, label %60
- %val61 = volatile load float* undef
+ %val61 = load volatile float* undef
 %cmp61 = fcmp une float %val61, undef
 br i1 %cmp61, label %62, label %61
- %val62 = volatile load float* undef
+ %val62 = load volatile float* undef
 %cmp62 = fcmp une float %val62, undef
 br i1 %cmp62, label %63, label %62
- %val63 = volatile load float* undef
+ %val63 = load volatile float* undef
 %cmp63 = fcmp une float %val63, undef
 br i1 %cmp63, label %64, label %63
- %val64 = volatile load float* undef
+ %val64 = load volatile float* undef
 %cmp64 = fcmp une float %val64, undef
 br i1 %cmp64, label %65, label %64
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 7a8d6e6a8a..2e7ffbfd54 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -84,7 +84,7 @@ entry:
 br i1 %3, label %func_4.exit.i, label %bb.i.i.i
 bb.i.i.i: ; preds = %entry
- %4 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
+ %4 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
 br label %func_4.exit.i
 ; CHECK: test4:
@@ -101,7 +101,7 @@ func_4.exit.i: ; preds = %bb.i.i.i, %entry
 br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
 bb.i.i: ; preds = %func_4.exit.i
- %5 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
+ %5 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
 br label %func_1.exit
 func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
diff --git a/test/CodeGen/X86/coalescer-commute1.ll b/test/CodeGen/X86/coalescer-commute1.ll
index 8aa0bfdd51..d9e0778102 100644
--- a/test/CodeGen/X86/coalescer-commute1.ll
+++ b/test/CodeGen/X86/coalescer-commute1.ll
@@ -21,6 +21,6 @@ bb: ; preds = %bb, %entry
 br i1 %exitcond, label %bb13, label %bb
 bb13: ; preds = %bb
- volatile store float %tmp6, float* @G, align 4
+ store volatile float %tmp6, float* @G, align 4
 ret void
 }
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index 153145728f..cf6e27d159 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -6,16 +6,16 @@
 ; Chain and flag folding issues.
 define i32 @test1() nounwind ssp {
 entry:
- %tmp5.i = volatile load i32* undef ; <i32> [#uses=1]
+ %tmp5.i = load volatile i32* undef ; <i32> [#uses=1]
 %conv.i = zext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- %tmp12.i = volatile load i32* undef ; <i32> [#uses=1]
+ %tmp12.i = load volatile i32* undef ; <i32> [#uses=1]
 %conv13.i = zext i32 %tmp12.i to i64 ; <i64> [#uses=1]
 %shl.i = shl i64 %conv13.i, 32 ; <i64> [#uses=1]
 %or.i = or i64 %shl.i, %conv.i ; <i64> [#uses=1]
 %add16.i = add i64 %or.i, 256 ; <i64> [#uses=1]
 %shr.i = lshr i64 %add16.i, 8 ; <i64> [#uses=1]
 %conv19.i = trunc i64 %shr.i to i32 ; <i32> [#uses=1]
- volatile store i32 %conv19.i, i32* undef
+ store volatile i32 %conv19.i, i32* undef
 ret i32 undef
 }
diff --git a/test/CodeGen/X86/fp-stack-ret-conv.ll b/test/CodeGen/X86/fp-stack-ret-conv.ll
index f220b24f90..3e26141eca 100644
--- a/test/CodeGen/X86/fp-stack-ret-conv.ll
+++ b/test/CodeGen/X86/fp-stack-ret-conv.ll
@@ -10,7 +10,7 @@ entry:
 %tmp13 = tail call double @foo()
 %tmp1314 = fptrunc double %tmp13 to float ; <float> [#uses=1]
 %tmp3940 = fpext float %tmp1314 to double ; <double> [#uses=1]
- volatile store double %tmp3940, double* %b
+ store volatile double %tmp3940, double* %b
 ret void
 }
diff --git a/test/CodeGen/X86/loop-strength-reduce5.ll b/test/CodeGen/X86/loop-strength-reduce5.ll
index b07eeb6759..d50a66805d 100644
--- a/test/CodeGen/X86/loop-strength-reduce5.ll
+++ b/test/CodeGen/X86/loop-strength-reduce5.ll
@@ -11,9 +11,9 @@ entry:
 bb: ; preds = %bb, %entry
 %i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
 %tmp1 = trunc i32 %i.014.0 to i16 ; <i16> [#uses=2]
- volatile store i16 %tmp1, i16* @X, align 2
+ store volatile i16 %tmp1, i16* @X, align 2
 %tmp34 = shl i16 %tmp1, 2 ; <i16> [#uses=1]
- volatile store i16 %tmp34, i16* @Y, align 2
+ store volatile i16 %tmp34, i16* @Y, align 2
 %indvar.next = add i32 %i.014.0, 1 ; <i32> [#uses=2]
 %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
 br i1 %exitcond, label %return, label %bb
diff --git a/test/CodeGen/X86/lsr-nonaffine.ll b/test/CodeGen/X86/lsr-nonaffine.ll
index d0d2bbd67c..d825b5a76c 100644
--- a/test/CodeGen/X86/lsr-nonaffine.ll
+++ b/test/CodeGen/X86/lsr-nonaffine.ll
@@ -19,7 +19,7 @@ entry:
 loop:
 %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- volatile store i64 %i, i64* %p
+ store volatile i64 %i, i64* %p
 %i.next = add i64 %i, %s
 %c = icmp slt i64 %i.next, %n
 br i1 %c, label %loop, label %exit
diff --git a/test/CodeGen/X86/lsr-sort.ll b/test/CodeGen/X86/lsr-sort.ll
index 1f3b59a905..b85ddeb13b 100644
--- a/test/CodeGen/X86/lsr-sort.ll
+++ b/test/CodeGen/X86/lsr-sort.ll
@@ -12,7 +12,7 @@ entry:
 bb: ; preds = %bb, %entry
 %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
 %1 = trunc i32 %i.03 to i16 ; <i16> [#uses=1]
- volatile store i16 %1, i16* @X, align 2
+ store volatile i16 %1, i16* @X, align 2
 %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
 %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
 br i1 %exitcond, label %return, label %bb
diff --git a/test/CodeGen/X86/nancvt.ll b/test/CodeGen/X86/nancvt.ll
index 82b73319ad..8036710b22 100644
--- a/test/CodeGen/X86/nancvt.ll
+++ b/test/CodeGen/X86/nancvt.ll
@@ -52,8 +52,8 @@ bb: ; preds = %bb23
 %tmp17 = ashr i64 %tmp16, %.cast ; <i64> [#uses=1]
 %tmp1718 = trunc i64 %tmp17 to i32 ; <i32> [#uses=1]
 %tmp19 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp1718, i32* @var
- volatile store i32 %tmp13, i32* @var
+ store volatile i32 %tmp1718, i32* @var
+ store volatile i32 %tmp13, i32* @var
 %tmp21 = load i32* %i, align 4 ; <i32> [#uses=1]
 %tmp22 = add i32 %tmp21, 1 ; <i32> [#uses=1]
 store i32 %tmp22, i32* %i, align 4
@@ -86,7 +86,7 @@ bb28: ; preds = %bb46
 %tmp3940 = bitcast float* %tmp39 to i32* ; <i32*> [#uses=1]
 %tmp41 = load i32* %tmp3940, align 4 ; <i32> [#uses=1]
 %tmp42 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp41, i32* @var
+ store volatile i32 %tmp41, i32* @var
 %tmp44 = load i32* %i, align 4 ; <i32> [#uses=1]
 %tmp45 = add i32 %tmp44, 1 ; <i32> [#uses=1]
 store i32 %tmp45, i32* %i, align 4
@@ -127,8 +127,8 @@ bb52: ; preds = %bb78
 %tmp72 = ashr i64 %tmp70, %.cast71 ; <i64> [#uses=1]
 %tmp7273 = trunc i64 %tmp72 to i32 ; <i32> [#uses=1]
 %tmp74 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp7273, i32* @var
- volatile store i32 %tmp66, i32* @var
+ store volatile i32 %tmp7273, i32* @var
+ store volatile i32 %tmp66, i32* @var
 %tmp76 = load i32* %i, align 4 ; <i32> [#uses=1]
 %tmp77 = add i32 %tmp76, 1 ; <i32> [#uses=1]
 store i32 %tmp77, i32* %i, align 4
@@ -161,7 +161,7 @@ bb84: ; preds = %bb101
 %tmp9495 = bitcast float* %tmp94 to i32* ; <i32*> [#uses=1]
 %tmp96 = load i32* %tmp9495, align 4 ; <i32> [#uses=1]
 %tmp97 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp96, i32* @var
+ store volatile i32 %tmp96, i32* @var
 %tmp99 = load i32* %i, align 4 ; <i32> [#uses=1]
 %tmp100 = add i32 %tmp99, 1 ; <i32> [#uses=1]
 store i32 %tmp100, i32* %i, align 4
diff --git a/test/CodeGen/X86/narrow-shl-load.ll b/test/CodeGen/X86/narrow-shl-load.ll
index ef27cbc341..7822453add 100644
--- a/test/CodeGen/X86/narrow-shl-load.ll
+++ b/test/CodeGen/X86/narrow-shl-load.ll
@@ -67,7 +67,7 @@ declare void @exit(i32) noreturn
 ; DAG Combiner can't fold this into a load of the 1'th byte.
 ; PR8757
 define i32 @test3(i32 *%P) nounwind ssp {
- volatile store i32 128, i32* %P
+ store volatile i32 128, i32* %P
 %tmp4.pre = load i32* %P
 %phitmp = trunc i32 %tmp4.pre to i16
 %phitmp13 = shl i16 %phitmp, 8
diff --git a/test/CodeGen/X86/overlap-shift.ll b/test/CodeGen/X86/overlap-shift.ll
index c1fc041e7d..d185af16b9 100644
--- a/test/CodeGen/X86/overlap-shift.ll
+++ b/test/CodeGen/X86/overlap-shift.ll
@@ -13,7 +13,7 @@ define i32 @test1(i32 %X) {
 %Z = shl i32 %X, 2 ; <i32> [#uses=1]
- volatile store i32 %Z, i32* @G
+ store volatile i32 %Z, i32* @G
 ret i32 %X
 }
diff --git a/test/CodeGen/X86/pr1505b.ll b/test/CodeGen/X86/pr1505b.ll
index 945ec4c6b6..9b0ef83ab0 100644
--- a/test/CodeGen/X86/pr1505b.ll
+++ b/test/CodeGen/X86/pr1505b.ll
@@ -33,7 +33,7 @@ declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
 define i32 @main() {
 entry:
 ; CHECK: flds
- %tmp6 = volatile load float* @a ; <float> [#uses=1]
+ %tmp6 = load volatile float* @a ; <float> [#uses=1]
 ; CHECK: fstps (%esp)
 ; CHECK: tanf
 %tmp9 = tail call float @tanf( float %tmp6 ) ; <float> [#uses=1]
@@ -41,7 +41,7 @@ entry:
 ; CHECK: fstp
 ; CHECK: fldl
- %tmp12 = volatile load double* @b ; <double> [#uses=1]
+ %tmp12 = load volatile double* @b ; <double> [#uses=1]
 ; CHECK: fstpl (%esp)
 ; CHECK: tan
 %tmp13 = tail call double @tan( double %tmp12 ) ; <double> [#uses=1]
diff --git a/test/CodeGen/X86/pr2182.ll b/test/CodeGen/X86/pr2182.ll
index 2a8bb35801..02a36054d8 100644
--- a/test/CodeGen/X86/pr2182.ll
+++ b/test/CodeGen/X86/pr2182.ll
@@ -15,17 +15,17 @@ define void @loop_2() nounwind {
 ; CHECK-NEXT: addl $3, (%{{.*}})
 ; CHECK-NEXT: ret
- %tmp = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp = load volatile i32* @x, align 4 ; <i32> [#uses=1]
 %tmp1 = add i32 %tmp, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1, i32* @x, align 4
- %tmp.1 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1, i32* @x, align 4
+ %tmp.1 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
 %tmp1.1 = add i32 %tmp.1, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.1, i32* @x, align 4
- %tmp.2 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1.1, i32* @x, align 4
+ %tmp.2 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
 %tmp1.2 = add i32 %tmp.2, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.2, i32* @x, align 4
- %tmp.3 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ store volatile i32 %tmp1.2, i32* @x, align 4
+ %tmp.3 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
 %tmp1.3 = add i32 %tmp.3, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.3, i32* @x, align 4
+ store volatile i32 %tmp1.3, i32* @x, align 4
 ret void
 }
diff --git a/test/CodeGen/X86/sext-subreg.ll b/test/CodeGen/X86/sext-subreg.ll
index b2b9f8121f..a128af9950 100644
--- a/test/CodeGen/X86/sext-subreg.ll
+++ b/test/CodeGen/X86/sext-subreg.ll
@@ -8,10 +8,10 @@ define i64 @t(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
 ; CHECK: movl %eax
 %C = add i64 %A, %B
 %D = trunc i64 %C to i32
- volatile store i32 %D, i32* %P
+ store volatile i32 %D, i32* %P
 %E = shl i64 %C, 32
 %F = ashr i64 %E, 32
- volatile store i64 %F, i64 *%P2
- volatile store i32 %D, i32* %P
+ store volatile i64 %F, i64 *%P2
+ store volatile i32 %D, i32* %P
 ret i64 undef
 }
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 793c026712..f6c13ec0ad 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -11,13 +11,13 @@ define void @test({ double, double }* byval %z, double* %P) nounwind {
 entry:
 %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
 %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
- volatile store double %tmp4, double* %P
+ store volatile double %tmp4, double* %P
 %tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1 = volatile load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
 %tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
 ; CHECK: andpd{{.*}}4(%esp), %xmm
 %tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
- volatile store double %tmp6, double* %P, align 8
+ store volatile double %tmp6, double* %P, align 8
 ret void
 }
diff --git a/test/CodeGen/X86/store-empty-member.ll b/test/CodeGen/X86/store-empty-member.ll
index 37f86c60fa..aea85b94d4 100644
--- a/test/CodeGen/X86/store-empty-member.ll
+++ b/test/CodeGen/X86/store-empty-member.ll
@@ -9,6 +9,6 @@ define void @foo() nounwind {
 %1 = alloca %testType
- volatile store %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
+ store volatile %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
 ret void
 }
diff --git a/test/CodeGen/X86/tail-opts.ll b/test/CodeGen/X86/tail-opts.ll
index d6c16ca007..f1b9f20082 100644
--- a/test/CodeGen/X86/tail-opts.ll
+++ b/test/CodeGen/X86/tail-opts.ll
@@ -314,7 +314,7 @@ bby:
 ]
 bb7:
- volatile store i32 0, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
 unreachable
 bbx:
@@ -323,7 +323,7 @@ bbx:
 ]
 bb12:
- volatile store i32 0, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
 unreachable
 return:
@@ -352,8 +352,8 @@ bby:
 ]
 bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
 unreachable
 bbx:
@@ -362,8 +362,8 @@ bbx:
 ]
 bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
 unreachable
 return:
@@ -390,8 +390,8 @@ bby:
 ]
 bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
 unreachable
 bbx:
@@ -400,8 +400,8 @@ bbx:
 ]
 bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
+ store volatile i32 0, i32* @XYZ
+ store volatile i32 1, i32* @XYZ
 unreachable
 return:
diff --git a/test/CodeGen/X86/twoaddr-lea.ll b/test/CodeGen/X86/twoaddr-lea.ll
index a1d797feea..b7fe039b67 100644
--- a/test/CodeGen/X86/twoaddr-lea.ll
+++ b/test/CodeGen/X86/twoaddr-lea.ll
@@ -14,7 +14,7 @@ define i32 @test1(i32 %X) nounwind {
 ; CHECK-NOT: mov
 ; CHECK: leal 1(%rdi)
 %Z = add i32 %X, 1
- volatile store i32 %Z, i32* @G
+ store volatile i32 %Z, i32* @G
 ret i32 %X
 }
diff --git a/test/CodeGen/X86/vec_shuffle-23.ll b/test/CodeGen/X86/vec_shuffle-23.ll
index 05a3a1e9d2..24687359cc 100644
--- a/test/CodeGen/X86/vec_shuffle-23.ll
+++ b/test/CodeGen/X86/vec_shuffle-23.ll
@@ -5,7 +5,7 @@ define i32 @t() nounwind {
 entry:
 %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
 %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
+ store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
 %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
 store <4 x i32> %tmp, <4 x i32>* %b
 %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/vec_shuffle-24.ll b/test/CodeGen/X86/vec_shuffle-24.ll
index 1b104deb30..d038dafaf2 100644
--- a/test/CodeGen/X86/vec_shuffle-24.ll
+++ b/test/CodeGen/X86/vec_shuffle-24.ll
@@ -5,7 +5,7 @@ entry:
 ; CHECK: punpckldq
 %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
 %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
+ store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
 %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
 store <4 x i32> %tmp, <4 x i32>* %b
 %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/volatile.ll b/test/CodeGen/X86/volatile.ll
index 2e5742afdf..1a82014536 100644
--- a/test/CodeGen/X86/volatile.ll
+++ b/test/CodeGen/X86/volatile.ll
@@ -4,14 +4,14 @@
 @x = external global double
 define void @foo() nounwind {
- %a = volatile load double* @x
- volatile store double 0.0, double* @x
- volatile store double 0.0, double* @x
- %b = volatile load double* @x
+ %a = load volatile double* @x
+ store volatile double 0.0, double* @x
+ store volatile double 0.0, double* @x
+ %b = load volatile double* @x
 ret void
 }
 define void @bar() nounwind {
- %c = volatile load double* @x
+ %c = load volatile double* @x
 ret void
 }
diff --git a/test/CodeGen/XCore/licm-ldwcp.ll b/test/CodeGen/XCore/licm-ldwcp.ll
index 4884f70e73..794c6bb64e 100644
--- a/test/CodeGen/XCore/licm-ldwcp.ll
+++ b/test/CodeGen/XCore/licm-ldwcp.ll
@@ -13,6 +13,6 @@ entry:
 br label %bb
 bb: ; preds = %bb, %entry
- volatile store i32 525509670, i32* %p, align 4
+ store volatile i32 525509670, i32* %p, align 4
 br label %bb
 }
diff --git a/test/CodeGen/XCore/scavenging.ll b/test/CodeGen/XCore/scavenging.ll
index 3181e96116..5b612d0f9b 100644
--- a/test/CodeGen/XCore/scavenging.ll
+++ b/test/CodeGen/XCore/scavenging.ll
@@ -18,32 +18,32 @@ entry:
 %x = alloca [100 x i32], align 4 ; <[100 x i32]*> [#uses=2]
 %0 = load i32* @size, align 4 ; <i32> [#uses=1]
 %1 = alloca i32, i32 %0, align 4 ; <i32*> [#uses=1]
- %2 = volatile load i32* @g0, align 4 ; <i32> [#uses=1]
- %3 = volatile load i32* @g1, align 4 ; <i32> [#uses=1]
- %4 = volatile load i32* @g2, align 4 ; <i32> [#uses=1]
- %5 = volatile load i32* @g3, align 4 ; <i32> [#uses=1]
- %6 = volatile load i32* @g4, align 4 ; <i32> [#uses=1]
- %7 = volatile load i32* @g5, align 4 ; <i32> [#uses=1]
- %8 = volatile load i32* @g6, align 4 ; <i32> [#uses=1]
- %9 = volatile load i32* @g7, align 4 ; <i32> [#uses=1]
- %10 = volatile load i32* @g8, align 4 ; <i32> [#uses=1]
- %11 = volatile load i32* @g9, align 4 ; <i32> [#uses=1]
- %12 = volatile load i32* @g10, align 4 ; <i32> [#uses=1]
- %13 = volatile load i32* @g11, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32* @g0, align 4 ; <i32> [#uses=1]
+ %3 = load volatile i32* @g1, align 4 ; <i32> [#uses=1]
+ %4 = load volatile i32* @g2, align 4 ; <i32> [#uses=1]
+ %5 = load volatile i32* @g3, align 4 ; <i32> [#uses=1]
+ %6 = load volatile i32* @g4, align 4 ; <i32> [#uses=1]
+ %7 = load volatile i32* @g5, align 4 ; <i32> [#uses=1]
+ %8 = load volatile i32* @g6, align 4 ; <i32> [#uses=1]
+ %9 = load volatile i32* @g7, align 4 ; <i32> [#uses=1]
+ %10 = load volatile i32* @g8, align 4 ; <i32> [#uses=1]
+ %11 = load volatile i32* @g9, align 4 ; <i32> [#uses=1]
+ %12 = load volatile i32* @g10, align 4 ; <i32> [#uses=1]
+ %13 = load volatile i32* @g11, align 4 ; <i32> [#uses=2]
 %14 = getelementptr [100 x i32]* %x, i32 0, i32 50 ; <i32*> [#uses=1]
 store i32 %13, i32* %14, align 4
- volatile store i32 %13, i32* @g11, align 4
- volatile store i32 %12, i32* @g10, align 4
- volatile store i32 %11, i32* @g9, align 4
- volatile store i32 %10, i32* @g8, align 4
- volatile store i32 %9, i32* @g7, align 4
- volatile store i32 %8, i32* @g6, align 4
- volatile store i32 %7, i32* @g5, align 4
- volatile store i32 %6, i32* @g4, align 4
- volatile store i32 %5, i32* @g3, align 4
- volatile store i32 %4, i32* @g2, align 4
- volatile store i32 %3, i32* @g1, align 4
- volatile store i32 %2, i32* @g0, align 4
+ store volatile i32 %13, i32* @g11, align 4
+ store volatile i32 %12, i32* @g10, align 4
+ store volatile i32 %11, i32* @g9, align 4
+ store volatile i32 %10, i32* @g8, align 4
+ store volatile i32 %9, i32* @g7, align 4
+ store volatile i32 %8, i32* @g6, align 4
+ store volatile i32 %7, i32* @g5, align 4
+ store volatile i32 %6, i32* @g4, align 4
+ store volatile i32 %5, i32* @g3, align 4
+ store volatile i32 %4, i32* @g2, align 4
+ store volatile i32 %3, i32* @g1, align 4
+ store volatile i32 %2, i32* @g0, align 4
 %x1 = getelementptr [100 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
 call void @g(i32* %x1, i32* %1) nounwind
 ret void
diff --git a/test/DebugInfo/2010-05-03-OriginDIE.ll b/test/DebugInfo/2010-05-03-OriginDIE.ll
index 0e1d1fddc4..94bddc092f 100644
--- a/test/DebugInfo/2010-05-03-OriginDIE.ll
+++ b/test/DebugInfo/2010-05-03-OriginDIE.ll
@@ -19,7 +19,7 @@ entry:
 %0 = getelementptr inbounds %struct.gpm_t* %gpm, i32 0, i32 2, i32 0 ; <i8*> [#uses=1]
 %1 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 9, i32 0 ; <i8*> [#uses=1]
 call void @uuid_LtoB(i8* %0, i8* %1) nounwind, !dbg !0
- %a9 = volatile load i64* %data_addr.i18, align 8 ; <i64> [#uses=1]
+ %a9 = load volatile i64* %data_addr.i18, align 8 ; <i64> [#uses=1]
 %a10 = call i64 @llvm.bswap.i64(i64 %a9) nounwind ; <i64> [#uses=1]
 %a11 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 8, !dbg !7 ; <i64*> [#uses=1]
 %a12 = load i64* %a11, align 4, !dbg !7 ; <i64> [#uses=1]
@@ -29,7 +29,7 @@ entry:
 call void @llvm.dbg.value(metadata !18, i64 0, metadata !19) nounwind
 call void @llvm.dbg.declare(metadata !6, metadata !23) nounwind
 call void @llvm.dbg.value(metadata !{i64* %data_addr.i17}, i64 0, metadata !34) nounwind
- %a13 = volatile load i64* %data_addr.i17, align 8 ; <i64> [#uses=1]
+ %a13 = load volatile i64* %data_addr.i17, align 8 ; <i64> [#uses=1]
 %a14 = call i64 @llvm.bswap.i64(i64 %a13) nounwind ; <i64> [#uses=2]
 %a15 = add i64 %a10, %a14, !dbg !7 ; <i64> [#uses=1]
 %a16 = sub i64 %a15, %a14 ; <i64> [#uses=1]
diff --git a/test/Linker/2004-05-07-TypeResolution1.ll b/test/Linker/2004-05-07-TypeResolution1.ll
index f0ade33713..4cff9ace43 100644
--- a/test/Linker/2004-05-07-TypeResolution1.ll
+++ b/test/Linker/2004-05-07-TypeResolution1.ll
@@ -30,6 +30,6 @@ declare void @func(%struct2*)
 define void @tty_init() {
 entry:
- volatile store void (%struct2*)* @func, void (%struct2*)** getelementptr (%struct1* @driver1, i64 0, i32 1)
+ store volatile void (%struct2*)* @func, void (%struct2*)** getelementptr (%struct1* @driver1, i64 0, i32 1)
 ret void
 }
diff --git a/test/Linker/2004-05-07-TypeResolution2.ll b/test/Linker/2004-05-07-TypeResolution2.ll
index 74fe39f4d9..3807127178 100644
--- a/test/Linker/2004-05-07-TypeResolution2.ll
+++ b/test/Linker/2004-05-07-TypeResolution2.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:32:32"
 define internal void @f1(%struct1* %tty) {
 loopentry.preheader:
 %tmp.2.i.i = getelementptr %struct1* %tty, i64 0, i32 1 ; <void (%struct2*)**> [#uses=1]
- %tmp.3.i.i = volatile load void (%struct2*)** %tmp.2.i.i ; <void (%struct2*)*> [#uses=0]
+ %tmp.3.i.i = load volatile void (%struct2*)** %tmp.2.i.i ; <void (%struct2*)*> [#uses=0]
 ret void
 }
diff --git a/test/MC/ARM/elf-reloc-01.ll b/test/MC/ARM/elf-reloc-01.ll
index e6efe7eb94..6899d92b50 100644
--- a/test/MC/ARM/elf-reloc-01.ll
+++ b/test/MC/ARM/elf-reloc-01.ll
@@ -42,12 +42,12 @@ entry:
 ]
 bb: ; preds = %entry
- volatile store i32 11, i32* @var_tls, align 4
- volatile store double 2.200000e+01, double* @var_tls_double, align 8
- volatile store i32 33, i32* @var_static, align 4
- volatile store double 4.400000e+01, double* @var_static_double, align 8
- volatile store i32 55, i32* @var_global, align 4
- volatile store double 6.600000e+01, double* @var_global_double, align 8
+ store volatile i32 11, i32* @var_tls, align 4
+ store volatile double 2.200000e+01, double* @var_tls_double, align 8
+ store volatile i32 33, i32* @var_static, align 4
+ store volatile double 4.400000e+01, double* @var_static_double, align 8
+ store volatile i32 55, i32* @var_global, align 4
+ store volatile double 6.600000e+01, double* @var_global_double, align 8
 br label %bb3
 bb2: ; preds = %entry
diff --git a/test/Other/lint.ll b/test/Other/lint.ll
index 4aa984e2e1..ca2b1a336a 100644
--- a/test/Other/lint.ll
+++ b/test/Other/lint.ll
@@ -151,7 +151,7 @@ entry:
 exit:
 %t3 = phi i32* [ %t4, %exit ]
 %t4 = bitcast i32* %t3 to i32*
- %x = volatile load i32* %t3
+ %x = load volatile i32* %t3
 br label %exit
 }
diff --git a/test/Transforms/DeadArgElim/deadexternal.ll b/test/Transforms/DeadArgElim/deadexternal.ll
index b2d63ec772..e3fe1bbb54 100644
--- a/test/Transforms/DeadArgElim/deadexternal.ll
+++ b/test/Transforms/DeadArgElim/deadexternal.ll
@@ -30,10 +30,10 @@ entry:
 define void @h() {
 entry:
 %i = alloca i32, align 4
- volatile store i32 10, i32* %i, align 4
+ store volatile i32 10, i32* %i, align 4
 ; CHECK: %tmp = load volatile i32* %i, align 4
 ; CHECK-next: call void @f(i32 undef)
- %tmp = volatile load i32* %i, align 4
+ %tmp = load volatile i32* %i, align 4
 call void @f(i32 %tmp)
 ret void
 }
diff --git a/test/Transforms/EarlyCSE/basic.ll b/test/Transforms/EarlyCSE/basic.ll
index 57b1697ff4..32c302c920 100644
--- a/test/Transforms/EarlyCSE/basic.ll
+++ b/test/Transforms/EarlyCSE/basic.ll
@@ -10,22 +10,22 @@ define void @test1(i8 %V, i32 *%P) {
 %C = zext i8 %V to i32
 %D = zext i8 %V to i32 ;; CSE
- volatile store i32 %C, i32* %P
- volatile store i32 %D, i32* %P
+ store volatile i32 %C, i32* %P
+ store volatile i32 %D, i32* %P
 ; CHECK-NEXT: %C = zext i8 %V to i32
 ; CHECK-NEXT: store volatile i32 %C
 ; CHECK-NEXT: store volatile i32 %C
 %E = add i32 %C, %C
 %F = add i32 %C, %C
- volatile store i32 %E, i32* %P
- volatile store i32 %F, i32* %P
+ store volatile i32 %E, i32* %P
+ store volatile i32 %F, i32* %P
 ; CHECK-NEXT: %E = add i32 %C, %C
 ; CHECK-NEXT: store volatile i32 %E
 ; CHECK-NEXT: store volatile i32 %E
 %G = add nuw i32 %C, %C ;; not a CSE with E
- volatile store i32 %G, i32* %P
+ store volatile i32 %G, i32* %P
 ; CHECK-NEXT: %G = add nuw i32 %C, %C
 ; CHECK-NEXT: store volatile i32 %G
 ret void
diff --git a/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll b/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
index 85df09ebd7..b7e4d1f872 100644
--- a/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
+++ b/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
@@ -4,6 +4,6 @@ @g = global i32 0 ; <i32*> [#uses=1]
 define i32 @f() {
- %t = volatile load i32* @g ; <i32> [#uses=1]
+ %t = load volatile i32* @g ; <i32> [#uses=1]
 ret i32 %t
 }
diff --git a/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll b/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
index f21fabc493..93991d21a1 100644
--- a/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
+++ b/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
@@ -5,6 +5,6 @@ define void @foo() {
 ; CHECK: void @foo() {
- %tmp = volatile load i32* @g
+ %tmp = load volatile i32* @g
 ret void
 }
diff --git a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index a6803abc5d..588d5c9a68 100644
--- a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -3,7 +3,7 @@ define double @foo() nounwind {
 entry:
- %tmp1 = volatile load double* @t0.1441, align 8 ; <double> [#uses=2]
+ %tmp1 = load volatile double* @t0.1441, align 8 ; <double> [#uses=2]
 %tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
 ret double %tmp4
 }
diff --git a/test/Transforms/IPConstantProp/dangling-block-address.ll b/test/Transforms/IPConstantProp/dangling-block-address.ll
index 0489dfa796..bb101333f8 100644
--- a/test/Transforms/IPConstantProp/dangling-block-address.ll
+++ b/test/Transforms/IPConstantProp/dangling-block-address.ll
@@ -12,7 +12,7 @@ define void @foo(i32 %x) nounwind readnone {
 entry:
 %b = alloca i32, align 4 ; <i32*> [#uses=1]
- volatile store i32 -1, i32* %b
+ store volatile i32 -1, i32* %b
 ret void
 }
diff --git a/test/Transforms/IndVarSimplify/avoid-i0.ll b/test/Transforms/IndVarSimplify/avoid-i0.ll
index 59661fa2e8..22f2e4b718 100644
--- a/test/Transforms/IndVarSimplify/avoid-i0.ll
+++ b/test/Transforms/IndVarSimplify/avoid-i0.ll
@@ -90,7 +90,7 @@ entry:
 br label %bb4
 bb: ; preds = %bb4
- %0 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
 store i32 %0, i32* %vol.0, align 4
 store i32 0, i32* %l_52, align 4
 br label %bb2
diff --git a/test/Transforms/IndVarSimplify/preserve-gep-remainder.ll b/test/Transforms/IndVarSimplify/preserve-gep-remainder.ll
index 2f3100fcaf..a62943d0a5 100644
--- a/test/Transforms/IndVarSimplify/preserve-gep-remainder.ll
+++ b/test/Transforms/IndVarSimplify/preserve-gep-remainder.ll
@@ -14,7 +14,7 @@ loop:
 %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
 %ip = add i64 %i, 1
 %p.2.ip.1 = getelementptr [3 x [3 x double]]* %p, i64 2, i64 %ip, i64 1
- volatile store double 0.0, double* %p.2.ip.1
+ store volatile double 0.0, double* %p.2.ip.1
 %i.next = add i64 %i, 1
 br label %loop
 }
diff --git a/test/Transforms/IndVarSimplify/sink-alloca.ll b/test/Transforms/IndVarSimplify/sink-alloca.ll
index e7d642c9b3..64207d823d 100644
--- a/test/Transforms/IndVarSimplify/sink-alloca.ll
+++ b/test/Transforms/IndVarSimplify/sink-alloca.ll
@@ -18,8 +18,8 @@ while.cond: ; preds = %while.cond, %entry
 br i1 %tobool, label %while.end, label %while.cond
 while.end: ; preds = %while.cond
- volatile store i32 0, i32* %result.i
- %tmp.i = volatile load i32* %result.i ; <i32> [#uses=0]
+ store volatile i32 0, i32* %result.i
+ %tmp.i = load volatile i32* %result.i ; <i32> [#uses=0]
 ret i32 0
 }
 declare i32 @bar()
diff --git a/test/Transforms/Inline/noinline-recursive-fn.ll b/test/Transforms/Inline/noinline-recursive-fn.ll
index 1d5ebbbf0f..d56b39069e 100644
--- a/test/Transforms/Inline/noinline-recursive-fn.ll
+++ b/test/Transforms/Inline/noinline-recursive-fn.ll
@@ -17,7 +17,7 @@ entry:
 bb: ; preds = %entry
 %1 = sub nsw i32 %x, 1 ; <i32> [#uses=1]
 call void @foo(i32 %1) nounwind ssp
- volatile store i32 1, i32* @g, align 4
+ store volatile i32 1, i32* @g, align 4
 ret void
 return: ; preds = %entry
@@ -42,7 +42,7 @@ entry:
 %0 = bitcast i8* %Bar to void (i32, i8*, i8*)*
 %1 = sub nsw i32 %x, 1
 call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind
- volatile store i32 42, i32* @g, align 4 + store volatile i32 42, i32* @g, align 4 ret void } @@ -54,7 +54,7 @@ entry: bb: ; preds = %entry %1 = bitcast i8* %Foo to void (i32, i8*, i8*)* ; <void (i32, i8*, i8*)*> [#uses=1] call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind - volatile store i32 13, i32* @g, align 4 + store volatile i32 13, i32* @g, align 4 ret void return: ; preds = %entry diff --git a/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll index 32979191f8..7f7390809c 100644 --- a/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll +++ b/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll @@ -2,6 +2,6 @@ define void @test(i32* %P) { ; Dead but not deletable! - %X = volatile load i32* %P ; <i32> [#uses=0] + %X = load volatile i32* %P ; <i32> [#uses=0] ret void } diff --git a/test/Transforms/InstCombine/2007-10-28-stacksave.ll b/test/Transforms/InstCombine/2007-10-28-stacksave.ll index 76bceb6879..4c5c367bcf 100644 --- a/test/Transforms/InstCombine/2007-10-28-stacksave.ll +++ b/test/Transforms/InstCombine/2007-10-28-stacksave.ll @@ -26,7 +26,7 @@ lab: ; preds = %cleanup31, %entry %tmp21 = getelementptr i32* %tmp1819, i32 0 ; <i32*> [#uses=1] store i32 1, i32* %tmp21, align 4 %tmp2223 = bitcast i32* %tmp1819 to i8* ; <i8*> [#uses=1] - volatile store i8* %tmp2223, i8** @p, align 4 + store volatile i8* %tmp2223, i8** @p, align 4 %tmp25 = add i32 %n.0, 1 ; <i32> [#uses=2] %tmp27 = icmp sle i32 %tmp25, 999999 ; <i1> [#uses=1] %tmp2728 = zext i1 %tmp27 to i8 ; <i8> [#uses=1] diff --git a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll index 6847f5ed05..de08c32fb4 100644 --- a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll +++ b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll @@ -2,7 +2,7 @@ define void @test() { %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1] - volatile store <4 x float> zeroinitializer, <4 x float>* %votf, align 16 + store volatile <4 x float> zeroinitializer, <4 x float>* %votf, align 16 ret void } diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll index a24f3071c9..1286e3d63b 100644 --- a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll +++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll @@ -6,17 +6,17 @@ target triple = "i386-apple-darwin8" define i32 @main() nounwind { entry: %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0] - %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br label %bb bb: ; preds = %bb, %entry %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1] %tmp3.reg2mem.0 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1] %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1] - volatile store i32 %tmp4, i32* @g_1, align 4 + store volatile i32 %tmp4, i32* @g_1, align 4 %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2] %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1] - %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br i1 %tmp9, label %bb, label %bb11 bb11: ; preds = %bb diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll index 5fb11ffb32..ebbd3a743f 100644 --- 
a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll +++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll @@ -7,11 +7,11 @@ target triple = "i386-apple-darwin8" define i32 @main(i32 %i) nounwind { entry: %tmp93 = icmp slt i32 %i, 10 ; <i1> [#uses=0] - %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br i1 %tmp93, label %bb11, label %bb bb: ; preds = %bb, %entry - %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br label %bb11 bb11: ; preds = %bb diff --git a/test/Transforms/InstCombine/2008-06-24-StackRestore.ll b/test/Transforms/InstCombine/2008-06-24-StackRestore.ll index 8307834551..4f4709b6f2 100644 --- a/test/Transforms/InstCombine/2008-06-24-StackRestore.ll +++ b/test/Transforms/InstCombine/2008-06-24-StackRestore.ll @@ -10,7 +10,7 @@ entry: %tmp2752 = alloca i32 ; <i32*> [#uses=2] %tmpcast53 = bitcast i32* %tmp2752 to i8* ; <i8*> [#uses=1] store i32 2, i32* %tmp2752, align 4 - volatile store i8* %tmpcast53, i8** @p, align 4 + store volatile i8* %tmpcast53, i8** @p, align 4 br label %bb44 bb: ; preds = %bb44 @@ -29,7 +29,7 @@ bb44: ; preds = %bb44, %entry store i32 1, i32* %tmp27, align 4 %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1] store i32 2, i32* %tmp34, align 4 - volatile store i8* %tmpcast, i8** @p, align 4 + store volatile i8* %tmpcast, i8** @p, align 4 %exitcond = icmp eq i32 %tmp3857, 999999 ; <i1> [#uses=1] br i1 %exitcond, label %bb, label %bb44 } diff --git a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll index 81044083c6..1ed53237aa 100644 --- a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll +++ b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll @@ -7,17 +7,17 @@ target triple = "i386-apple-darwin8" define i32 @main() nounwind { entry: %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0] - %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br label %bb bb: ; preds = %bb, %entry %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1] %tmp3.reg2mem.0 = phi i32 [ %tmp3, %bb ], [ %tmp34, %entry ] %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1] - volatile store i32 %tmp4, i32* @g_1, align 4 + store volatile i32 %tmp4, i32* @g_1, align 4 %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2] %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1] - %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1] + %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1] br i1 %tmp9, label %bb, label %bb11 bb11: ; preds = %bb diff --git a/test/Transforms/InstCombine/extractvalue.ll b/test/Transforms/InstCombine/extractvalue.ll index cf36b8f237..5e4c677782 100644 --- a/test/Transforms/InstCombine/extractvalue.ll +++ b/test/Transforms/InstCombine/extractvalue.ll @@ -88,7 +88,7 @@ define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) { ; CHECK-NEXT: ret define i32 @nogep-multiuse({i32, i32}* %pair) { ; The load should be left unchanged since both parts are needed. 
- %L = volatile load {i32, i32}* %pair + %L = load volatile {i32, i32}* %pair %LHS = extractvalue {i32, i32} %L, 0 %RHS = extractvalue {i32, i32} %L, 1 %R = add i32 %LHS, %RHS @@ -100,8 +100,8 @@ define i32 @nogep-multiuse({i32, i32}* %pair) { ; CHECK-NEXT: extractvalue ; CHECK-NEXT: ret define i32 @nogep-volatile({i32, i32}* %pair) { - ; The volatile load should be left unchanged. - %L = volatile load {i32, i32}* %pair + ; The volatile load should be left unchanged. + %L = load volatile {i32, i32}* %pair %E = extractvalue {i32, i32} %L, 1 ret i32 %E } diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll index f033e51036..fb57a190aa 100644 --- a/test/Transforms/InstCombine/intrinsics.ll +++ b/test/Transforms/InstCombine/intrinsics.ll @@ -142,13 +142,13 @@ define i32 @umultest4(i32 %n) nounwind { define void @powi(double %V, double *%P) { entry: %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind - volatile store double %A, double* %P + store volatile double %A, double* %P %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind - volatile store double %B, double* %P + store volatile double %B, double* %P %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind - volatile store double %C, double* %P + store volatile double %C, double* %P ret void ; CHECK: @powi ; CHECK: %A = fdiv double 1.0{{.*}}, %V @@ -183,13 +183,13 @@ define void @cmp.simplify(i32 %a, i32 %b, i1* %c) { entry: %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone %lz.cmp = icmp eq i32 %lz, 32 - volatile store i1 %lz.cmp, i1* %c + store volatile i1 %lz.cmp, i1* %c %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone %tz.cmp = icmp ne i32 %tz, 32 - volatile store i1 %tz.cmp, i1* %c + store volatile i1 %tz.cmp, i1* %c %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone %pop.cmp = icmp eq i32 %pop, 0 - volatile store i1 %pop.cmp, i1* %c + store volatile i1 %pop.cmp, i1* %c ret void ; CHECK: @cmp.simplify ; CHECK-NEXT: entry: diff --git a/test/Transforms/InstCombine/volatile_store.ll b/test/Transforms/InstCombine/volatile_store.ll index 0518e5aa02..2256678118 100644 --- a/test/Transforms/InstCombine/volatile_store.ll +++ b/test/Transforms/InstCombine/volatile_store.ll @@ -5,8 +5,8 @@ define void @self_assign_1() { entry: - %tmp = volatile load i32* @x ; <i32> [#uses=1] - volatile store i32 %tmp, i32* @x + %tmp = load volatile i32* @x ; <i32> [#uses=1] + store volatile i32 %tmp, i32* @x br label %return return: ; preds = %entry diff --git a/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll b/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll index 46aaa00380..e80bae578a 100644 --- a/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll +++ b/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll @@ -15,7 +15,7 @@ for.cond1177: br i1 %cmp1179, label %for.cond1177, label %land.rhs1320 land.rhs1320: - %tmp1324 = volatile load i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1, !tbaa !0 + %tmp1324 = load volatile i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1, !tbaa !0 br label %if.end.i if.end.i: diff --git a/test/Transforms/JumpThreading/crash.ll b/test/Transforms/JumpThreading/crash.ll index 2115dd3840..b9c03544db 100644 --- a/test/Transforms/JumpThreading/crash.ll +++ b/test/Transforms/JumpThreading/crash.ll @@ -399,7 +399,7 @@ if.then237: br label %lbl_664 lbl_596: ; preds = %lbl_664, %for.end37 - volatile store i64 undef, i64* undef, align 4 + store volatile i64 undef, i64* undef, align 4 br label
%for.cond111 for.cond111: ; preds = %safe_sub_func_int64_t_s_s.exit, %lbl_596 diff --git a/test/Transforms/JumpThreading/no-irreducible-loops.ll b/test/Transforms/JumpThreading/no-irreducible-loops.ll index 7c7fe3929a..a4914f9634 100644 --- a/test/Transforms/JumpThreading/no-irreducible-loops.ll +++ b/test/Transforms/JumpThreading/no-irreducible-loops.ll @@ -17,11 +17,11 @@ bb: ; preds = %bb4 br i1 %0, label %bb1, label %bb2 bb1: ; preds = %bb - volatile store i32 1000, i32* @v1, align 4 + store volatile i32 1000, i32* @v1, align 4 br label %bb3 bb2: ; preds = %bb - volatile store i32 1001, i32* @v1, align 4 + store volatile i32 1001, i32* @v1, align 4 br label %bb3 bb3: ; preds = %bb2, %bb1 diff --git a/test/Transforms/LICM/2007-05-22-VolatileSink.ll b/test/Transforms/LICM/2007-05-22-VolatileSink.ll index 17383c2ebb..4df6ea7581 100644 --- a/test/Transforms/LICM/2007-05-22-VolatileSink.ll +++ b/test/Transforms/LICM/2007-05-22-VolatileSink.ll @@ -10,7 +10,7 @@ entry: br label %bb6 bb: ; preds = %bb6 - %tmp2 = volatile load i32* %DataIn ; <i32> [#uses=1] + %tmp2 = load volatile i32* %DataIn ; <i32> [#uses=1] %tmp3 = getelementptr [64 x i32]* %buffer, i32 0, i32 %i.0 ; <i32*> [#uses=1] store i32 %tmp2, i32* %tmp3 %tmp5 = add i32 %i.0, 1 ; <i32> [#uses=1] @@ -28,7 +28,7 @@ bb12: ; preds = %bb22 %tmp16 = add i32 %tmp14, %i.1 ; <i32> [#uses=1] %tmp17 = getelementptr [64 x i32]* %buffer, i32 0, i32 %tmp16 ; <i32*> [#uses=1] %tmp18 = load i32* %tmp17 ; <i32> [#uses=1] - volatile store i32 %tmp18, i32* %DataOut + store volatile i32 %tmp18, i32* %DataOut %tmp21 = add i32 %j.1, 1 ; <i32> [#uses=1] br label %bb22 diff --git a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll index fd114f4ccc..2bbc6ab041 100644 --- a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll +++ b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll @@ -19,7 +19,7 @@ for.body4.lr.ph: for.body4: %l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ] - %tmp7 = volatile load i16* @g_39, align 2 + %tmp7 = load volatile i16* @g_39, align 2 %call = call i32** @func_108(i32*** undef) %call19 = call i32* @func_84(i32** %call) br i1 false, label %for.body4, label %for.cond.loopexit diff --git a/test/Transforms/LICM/crash.ll b/test/Transforms/LICM/crash.ll index ff7fa0b19a..de41d008a7 100644 --- a/test/Transforms/LICM/crash.ll +++ b/test/Transforms/LICM/crash.ll @@ -68,7 +68,7 @@ define void @test4() noreturn nounwind { br label %1 ; <label>:1 ; preds = %1, %0 - volatile store i32* @g_47, i32** undef, align 8 + store volatile i32* @g_47, i32** undef, align 8 store i32 undef, i32* @g_47, align 4 br label %1 } diff --git a/test/Transforms/LICM/scalar_promote.ll b/test/Transforms/LICM/scalar_promote.ll index 9aefc4f87e..05a64d6322 100644 --- a/test/Transforms/LICM/scalar_promote.ll +++ b/test/Transforms/LICM/scalar_promote.ll @@ -59,7 +59,7 @@ define void @test3(i32 %i) { br label %Loop Loop: ; Should not promote this to a register - %x = volatile load i32* @X + %x = load volatile i32* @X %x2 = add i32 %x, 1 store i32 %x2, i32* @X br i1 true, label %Out, label %Loop @@ -133,7 +133,7 @@ Loop: ; preds = %Loop, %0 %x2 = add i32 %x, 1 ; <i32> [#uses=1] store i32 %x2, i32* @X - volatile store i32* @X, i32** %P2 + store volatile i32* @X, i32** %P2 %Next = add i32 %j, 1 ; <i32> [#uses=2] %cond = icmp eq i32 %Next, 0 ; <i1> [#uses=1] diff --git a/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll 
b/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll index 90477d1069..ce56bd3101 100644 --- a/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll +++ b/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll @@ -10,7 +10,7 @@ entry: bb: ; preds = %bb, %entry %l_2.0.reg2mem.0 = phi i16 [ 0, %entry ], [ %t1, %bb ] ; <i16> [#uses=2] %t0 = shl i16 %l_2.0.reg2mem.0, 1 ; <i16>:0 [#uses=1] - volatile store i16 %t0, i16* @g_3, align 2 + store volatile i16 %t0, i16* @g_3, align 2 %t1 = add i16 %l_2.0.reg2mem.0, -3 ; <i16>:1 [#uses=2] %t2 = icmp slt i16 %t1, 1 ; <i1>:2 [#uses=1] br i1 %t2, label %bb, label %return @@ -22,7 +22,7 @@ return: ; preds = %bb define i32 @main() nounwind { entry: tail call void @func_1( ) nounwind - volatile load i16* @g_3, align 2 ; <i16>:0 [#uses=1] + load volatile i16* @g_3, align 2 ; <i16>:0 [#uses=1] zext i16 %0 to i32 ; <i32>:1 [#uses=1] tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %1 ) nounwind ; <i32>:2 [#uses=0] ret i32 0 diff --git a/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll b/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll index abbfda6e92..ad4959be34 100644 --- a/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll +++ b/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll @@ -9,7 +9,7 @@ entry: br label %no_exit no_exit: ; preds = %no_exit, %entry %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; <i32> [#uses=1] - volatile store float 0.000000e+00, float* %D + store volatile float 0.000000e+00, float* %D %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2] ; CHECK: icmp ; CHECK-NEXT: br i1 diff --git a/test/Transforms/LoopStrengthReduce/pr3399.ll b/test/Transforms/LoopStrengthReduce/pr3399.ll index b809007fea..26c5002fde 100644 --- a/test/Transforms/LoopStrengthReduce/pr3399.ll +++ b/test/Transforms/LoopStrengthReduce/pr3399.ll @@ -13,7 +13,7 @@ bb: ; preds = %bb5, %bb5.thread bb1: ; preds = %bb %l_2.0.reg2mem.0 = sub i32 0, %indvar ; <i32> [#uses=1] - %0 = volatile load i32* @g_53, align 4 ; <i32> [#uses=1] + %0 = load volatile i32* @g_53, align 4 ; <i32> [#uses=1] %1 = trunc i32 %l_2.0.reg2mem.0 to i16 ; <i16> [#uses=1] %2 = trunc i32 %0 to i16 ; <i16> [#uses=1] %3 = mul i16 %2, %1 ; <i16> [#uses=1] diff --git a/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll b/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll index 52a8375312..ea0d515498 100644 --- a/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll +++ b/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -std-compile-opts -S | grep volatile | count 3 ; PR1520 -; Don't promote volatile loads/stores. This is really needed to handle setjmp/lonjmp properly. +; Don't promote volatile loads/stores. This is really needed to handle setjmp/longjmp properly.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32" target triple = "i686-pc-linux-gnu" @@ -14,7 +14,7 @@ entry: %v = alloca i32, align 4 ; <i32*> [#uses=3] %tmp = alloca i32, align 4 ; <i32*> [#uses=3] %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - volatile store i32 0, i32* %v, align 4 + store volatile i32 0, i32* %v, align 4 %tmp1 = call i32 @_setjmp( %struct.__jmp_buf_tag* getelementptr ([1 x %struct.__jmp_buf_tag]* @j, i32 0, i32 0) ) ; <i32> [#uses=1] %tmp2 = icmp ne i32 %tmp1, 0 ; <i1> [#uses=1] %tmp23 = zext i1 %tmp2 to i8 ; <i8> [#uses=1] @@ -22,12 +22,12 @@ entry: br i1 %toBool, label %bb, label %bb5 bb: ; preds = %entry - %tmp4 = volatile load i32* %v, align 4 ; <i32> [#uses=1] + %tmp4 = load volatile i32* %v, align 4 ; <i32> [#uses=1] store i32 %tmp4, i32* %tmp, align 4 br label %bb6 bb5: ; preds = %entry - volatile store i32 1, i32* %v, align 4 + store volatile i32 1, i32* %v, align 4 call void @g( ) store i32 0, i32* %tmp, align 4 br label %bb6 diff --git a/test/Transforms/ObjCARC/basic.ll b/test/Transforms/ObjCARC/basic.ll index 861173b9c9..c466fa0e25 100644 --- a/test/Transforms/ObjCARC/basic.ll +++ b/test/Transforms/ObjCARC/basic.ll @@ -167,7 +167,7 @@ entry: loop: %c = bitcast i32* %x to i8* call void @objc_release(i8* %c) nounwind - %j = volatile load i1* %q + %j = load volatile i1* %q br i1 %j, label %loop, label %return return: @@ -190,7 +190,7 @@ entry: loop: %a = bitcast i32* %x to i8* %0 = call i8* @objc_retain(i8* %a) nounwind - %j = volatile load i1* %q + %j = load volatile i1* %q br i1 %j, label %loop, label %return return: diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll index 25c93f411c..fda2ff4cbf 100644 --- a/test/Transforms/ObjCARC/contract-storestrong.ll +++ b/test/Transforms/ObjCARC/contract-storestrong.ll @@ -33,7 +33,7 @@ entry: define void @test1(i8* %p) { entry: %0 = tail call i8* @objc_retain(i8* %p) nounwind - %tmp = volatile load i8** @x, align 8 + %tmp = load volatile i8** @x, align 8 store i8* %0, i8** @x, align 8 tail call void @objc_release(i8* %tmp) nounwind ret void @@ -53,7 +53,7 @@ define void @test2(i8* %p) { entry: %0 = tail call i8* @objc_retain(i8* %p) nounwind %tmp = load i8** @x, align 8 - volatile store i8* %0, i8** @x, align 8 + store volatile i8* %0, i8** @x, align 8 tail call void @objc_release(i8* %tmp) nounwind ret void } diff --git a/test/Transforms/ScalarRepl/volatile.ll b/test/Transforms/ScalarRepl/volatile.ll index ab276b043e..fadf1aa276 100644 --- a/test/Transforms/ScalarRepl/volatile.ll +++ b/test/Transforms/ScalarRepl/volatile.ll @@ -4,9 +4,9 @@ define i32 @voltest(i32 %T) { %A = alloca {i32, i32} %B = getelementptr {i32,i32}* %A, i32 0, i32 0 - volatile store i32 %T, i32* %B + store volatile i32 %T, i32* %B %C = getelementptr {i32,i32}* %A, i32 0, i32 1 - %X = volatile load i32* %C + %X = load volatile i32* %C ret i32 %X } diff --git a/test/Transforms/Sink/basic.ll b/test/Transforms/Sink/basic.ll index 23433728c1..4c531d82e6 100644 --- a/test/Transforms/Sink/basic.ll +++ b/test/Transforms/Sink/basic.ll @@ -21,7 +21,7 @@ false: ret i32 0 } -; But don't sink volatile loads... +; But don't sink load volatiles... 
; CHECK: @foo2 ; CHECK: load volatile diff --git a/test/Transforms/TailCallElim/dont_reorder_load.ll b/test/Transforms/TailCallElim/dont_reorder_load.ll index 899e115966..a29b72e942 100644 --- a/test/Transforms/TailCallElim/dont_reorder_load.ll +++ b/test/Transforms/TailCallElim/dont_reorder_load.ll @@ -46,7 +46,7 @@ else: ; preds = %entry } ; This load can't be safely moved above the call because that would change the -; order in which the volatile loads are performed. +; order in which the volatile loads are performed. define fastcc i32 @no_tailrecelim_3(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) nounwind { entry: %tmp2 = icmp sge i32 %start_arg, %a_len_arg ; <i1> [#uses=1] br i1 %tmp2, label %if, label %else @@ -58,7 +58,7 @@ if: ; preds = %entry else: ; preds = %entry %tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1] %tmp8 = call fastcc i32 @no_tailrecelim_3(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1] - %tmp9 = volatile load i32* %a_arg ; <i32> [#uses=1] + %tmp9 = load volatile i32* %a_arg ; <i32> [#uses=1] %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1] ret i32 %tmp10 } |
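
As a quick reference for the pattern repeated throughout the hunks above, here is a minimal before/after sketch in the pointer-typed IR syntax of this era; @example and %p are illustrative names, not taken from any test in this patch. Only the position of the volatile keyword changes; the opcode's operand list is identical in both spellings.

; old spelling (the form on the '-' lines):
define i32 @example(i32* %p) {
entry:
  %v = volatile load i32* %p
  volatile store i32 %v, i32* %p
  ret i32 %v
}

; new spelling (the form on the '+' lines):
define i32 @example(i32* %p) {
entry:
  %v = load volatile i32* %p
  store volatile i32 %v, i32* %p
  ret i32 %v
}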