-rw-r--r--  lib/Target/X86/X86Subtarget.cpp            |  9 ---------
-rw-r--r--  lib/Target/X86/X86Subtarget.h              |  6 ------
-rw-r--r--  test/CodeGen/X86/2007-01-08-InstrSched.ll  | 16 ++++++++--------
-rw-r--r--  test/CodeGen/X86/lsr-reuse.ll              | 20 ++++++++++----------
-rw-r--r--  test/CodeGen/X86/sse2.ll                   |  8 ++++----
-rw-r--r--  test/CodeGen/X86/sse3.ll                   | 10 +++++-----
6 files changed, 27 insertions(+), 42 deletions(-)
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index f907614c86..cd568164b2 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -366,12 +366,3 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
   if (StackAlignment)
     stackAlignment = StackAlignment;
 }
-
-bool X86Subtarget::enablePostRAScheduler(
-            CodeGenOpt::Level OptLevel,
-            TargetSubtarget::AntiDepBreakMode& Mode,
-            RegClassVector& CriticalPathRCs) const {
-  Mode = TargetSubtarget::ANTIDEP_CRITICAL;
-  CriticalPathRCs.clear();
-  return OptLevel >= CodeGenOpt::Aggressive;
-}
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 50338d33ac..56220db3b2 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -230,12 +230,6 @@ public:
   /// indicating the number of scheduling cycles of backscheduling that
   /// should be attempted.
   unsigned getSpecialAddressLatency() const;
-
-  /// enablePostRAScheduler - X86 target is enabling post-alloc scheduling
-  /// at 'More' optimization level.
-  bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
-                             TargetSubtarget::AntiDepBreakMode& Mode,
-                             RegClassVector& CriticalPathRCs) const;
 };
 
 } // End llvm namespace
diff --git a/test/CodeGen/X86/2007-01-08-InstrSched.ll b/test/CodeGen/X86/2007-01-08-InstrSched.ll
index 317ed0a4f7..58e186b4c4 100644
--- a/test/CodeGen/X86/2007-01-08-InstrSched.ll
+++ b/test/CodeGen/X86/2007-01-08-InstrSched.ll
@@ -11,12 +11,12 @@ define float @foo(float %x) nounwind {
   %tmp14 = fadd float %tmp12, %tmp7
   ret float %tmp14
 
-; CHECK: mulss LCPI1_3(%rip)
-; CHECK-NEXT: mulss LCPI1_0(%rip)
-; CHECK-NEXT: mulss LCPI1_1(%rip)
-; CHECK-NEXT: mulss LCPI1_2(%rip)
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: ret
+; CHECK: mulss LCPI1_0(%rip)
+; CHECK: mulss LCPI1_1(%rip)
+; CHECK: addss
+; CHECK: mulss LCPI1_2(%rip)
+; CHECK: addss
+; CHECK: mulss LCPI1_3(%rip)
+; CHECK: addss
+; CHECK: ret
 }
diff --git a/test/CodeGen/X86/lsr-reuse.ll b/test/CodeGen/X86/lsr-reuse.ll
index 2f6fb3fa8b..ab71555950 100644
--- a/test/CodeGen/X86/lsr-reuse.ll
+++ b/test/CodeGen/X86/lsr-reuse.ll
@@ -8,10 +8,10 @@ target triple = "x86_64-unknown-unknown"
 
 ; CHECK: full_me_0:
 ; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
 ; CHECK: mulsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
 ; CHECK: movsd %xmm0, (%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
 ; CHECK: addq $8, %rdi
 ; CHECK: decq %rcx
 ; CHECK: jne
@@ -53,10 +53,10 @@ return:
 ; CHECK: mulsd -2048(%rdx), %xmm0
 ; CHECK: movsd %xmm0, -2048(%rdi)
 ; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
 ; CHECK: divsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
 ; CHECK: movsd %xmm0, (%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
 ; CHECK: addq $8, %rdi
 ; CHECK: decq %rcx
 ; CHECK: jne
@@ -99,10 +99,10 @@ return:
 ; CHECK: mulsd (%rdx), %xmm0
 ; CHECK: movsd %xmm0, (%rdi)
 ; CHECK: movsd -2048(%rsi), %xmm0
-; CHECK: addq $8, %rsi
 ; CHECK: divsd -2048(%rdx), %xmm0
-; CHECK: addq $8, %rdx
 ; CHECK: movsd %xmm0, -2048(%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
 ; CHECK: addq $8, %rdi
 ; CHECK: decq %rcx
 ; CHECK: jne
@@ -144,10 +144,10 @@ return:
 ; CHECK: mulsd (%rdx), %xmm0
 ; CHECK: movsd %xmm0, (%rdi)
 ; CHECK: movsd -4096(%rsi), %xmm0
-; CHECK: addq $8, %rsi
 ; CHECK: divsd -4096(%rdx), %xmm0
-; CHECK: addq $8, %rdx
 ; CHECK: movsd %xmm0, -4096(%rdi)
+; CHECK: addq $8, %rsi
+; CHECK: addq $8, %rdx
 ; CHECK: addq $8, %rdi
 ; CHECK: decq %rcx
 ; CHECK: jne
@@ -310,10 +310,10 @@ return:
 ; CHECK: addsd (%rsi), %xmm0
 ; CHECK: movsd %xmm0, (%rdx)
 ; CHECK: movsd 40(%rdi), %xmm0
-; CHECK: addq $8, %rdi
 ; CHECK: subsd 40(%rsi), %xmm0
-; CHECK: addq $8, %rsi
 ; CHECK: movsd %xmm0, 40(%rdx)
+; CHECK: addq $8, %rdi
+; CHECK: addq $8, %rsi
 ; CHECK: addq $8, %rdx
 ; CHECK: decq %rcx
 ; CHECK: jne
diff --git a/test/CodeGen/X86/sse2.ll b/test/CodeGen/X86/sse2.ll
index f2b8010d41..20b8eac9c8 100644
--- a/test/CodeGen/X86/sse2.ll
+++ b/test/CodeGen/X86/sse2.ll
@@ -10,10 +10,10 @@ define void @t1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
 
 ; CHECK: t1:
 ; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
 ; CHECK-NEXT: movapd (%eax), %xmm0
 ; CHECK-NEXT: movlpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
+; CHECK-NEXT: movl 4(%esp), %eax
+; CHECK-NEXT: movapd %xmm0, (%eax)
 ; CHECK-NEXT: ret
 }
 
@@ -26,9 +26,9 @@ define void @t2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
 
 ; CHECK: t2:
 ; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
 ; CHECK-NEXT: movapd (%eax), %xmm0
 ; CHECK-NEXT: movhpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
+; CHECK-NEXT: movl 4(%esp), %eax
+; CHECK-NEXT: movapd %xmm0, (%eax)
 ; CHECK-NEXT: ret
 }
diff --git a/test/CodeGen/X86/sse3.ll b/test/CodeGen/X86/sse3.ll
index 921161e4a1..e9c2c01a9e 100644
--- a/test/CodeGen/X86/sse3.ll
+++ b/test/CodeGen/X86/sse3.ll
@@ -17,8 +17,8 @@ entry:
 
 ; X64: t0:
 ; X64: movddup (%rsi), %xmm0
-; X64: xorl %eax, %eax
 ; X64: pshuflw $0, %xmm0, %xmm0
+; X64: xorl %eax, %eax
 ; X64: pinsrw $0, %eax, %xmm0
 ; X64: movaps %xmm0, (%rdi)
 ; X64: ret
@@ -169,11 +169,11 @@ define internal void @t10() nounwind {
   ret void
 ; X64: t10:
 ; X64: pextrw $4, %xmm0, %eax
-; X64: pextrw $6, %xmm0, %edx
 ; X64: movlhps %xmm1, %xmm1
 ; X64: pshuflw $8, %xmm1, %xmm1
 ; X64: pinsrw $2, %eax, %xmm1
-; X64: pinsrw $3, %edx, %xmm1
+; X64: pextrw $6, %xmm0, %eax
+; X64: pinsrw $3, %eax, %xmm1
 }
 
 
@@ -184,8 +184,8 @@ entry:
   ret <8 x i16> %tmp7
 
 ; X64: t11:
-; X64: movlhps %xmm0, %xmm0
 ; X64: movd %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
 ; X64: pshuflw $1, %xmm0, %xmm0
 ; X64: pinsrw $1, %eax, %xmm0
 ; X64: ret
@@ -198,8 +198,8 @@ entry:
   ret <8 x i16> %tmp9
 
 ; X64: t12:
-; X64: movlhps %xmm0, %xmm0
 ; X64: pextrw $3, %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
 ; X64: pshufhw $3, %xmm0, %xmm0
 ; X64: pinsrw $5, %eax, %xmm0
 ; X64: ret
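
Net effect of the change: X86 no longer overrides TargetSubtarget::enablePostRAScheduler, so x86 falls back to the default TargetSubtarget behavior for post-register-allocation scheduling, and the test expectations are relaxed (CHECK instead of CHECK-NEXT) or reordered to match the unscheduled instruction order. The sketch below restates the deleted hook with explanatory comments; the comments, and the assumption that the inherited base-class default declines post-RA scheduling, are annotations rather than part of this diff.

    // Sketch: the hook deleted from X86Subtarget.cpp above, annotated.
    // A target overrides this TargetSubtarget virtual to opt in to post-RA
    // scheduling; with the override removed, X86 inherits the base-class
    // default, which is assumed here to return false.
    bool X86Subtarget::enablePostRAScheduler(
                           CodeGenOpt::Level OptLevel,
                           TargetSubtarget::AntiDepBreakMode &Mode,
                           RegClassVector &CriticalPathRCs) const {
      // Break anti-dependences only along the critical path.
      Mode = TargetSubtarget::ANTIDEP_CRITICAL;
      // Do not restrict critical-path anti-dependence breaking to any
      // particular register classes.
      CriticalPathRCs.clear();
      // Request the post-RA scheduler only at CodeGenOpt::Aggressive (-O3).
      return OptLevel >= CodeGenOpt::Aggressive;
    }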