diff options
author | Dan Gohman <gohman@apple.com> | 2010-08-24 15:55:12 +0000 |
---|---|---|
committer | Dan Gohman <gohman@apple.com> | 2010-08-24 15:55:12 +0000 |
commit | 92b651fb199557b9e54c7263e83c34ab39eb644f (patch) | |
tree | 72b6e581c96f5da87956a442876bb09e85fc5388 | |
parent | 2426668562623cf94809309c93b41ecea856b19d (diff) |
Fix X86's isLegalAddressingMode to recognize that static addresses
need not be RIP-relative in small mode.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@111917 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 4 | ||||
-rw-r--r-- | test/CodeGen/X86/lsr-static-addr.ll | 31 |
2 files changed, 34 insertions, 1 deletion
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e186058aa5..ff884a62a2 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8171,6 +8171,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const { // X86 supports extremely general addressing modes. CodeModel::Model M = getTargetMachine().getCodeModel(); + Reloc::Model R = getTargetMachine().getRelocationModel(); // X86 allows a sign-extended 32-bit immediate field as a displacement. if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) return false; // If lower 4G is not available, then we must use rip-relative addressing. - if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) + if ((M != CodeModel::Small || R != Reloc::Static) && + Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) return false; } diff --git a/test/CodeGen/X86/lsr-static-addr.ll b/test/CodeGen/X86/lsr-static-addr.ll new file mode 100644 index 0000000000..c9ed3e553a --- /dev/null +++ b/test/CodeGen/X86/lsr-static-addr.ll @@ -0,0 +1,31 @@ +; RUN: llc -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -relocation-model=static -asm-verbose=false < %s | FileCheck %s + +; CHECK: xorl %eax, %eax +; CHECK: movsd .LCPI0_0(%rip), %xmm0 +; CHECK: align +; CHECK-NEXT: BB0_2: +; CHECK-NEXT: movsd A(,%rax,8) +; CHECK-NEXT: mulsd +; CHECK-NEXT: movsd +; CHECK-NEXT: incq %rax + +@A = external global [0 x double] + +define void @foo(i64 %n) nounwind { +entry: + %cmp5 = icmp sgt i64 %n, 0 + br i1 %cmp5, label %for.body, label %for.end + +for.body: + %i.06 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr [0 x double]* @A, i64 0, i64 %i.06 + %tmp3 = load double* %arrayidx, align 8 + %mul = fmul double %tmp3, 2.300000e+00 + store double %mul, double* %arrayidx, align 8 + %inc = add nsw i64 %i.06, 1 + %exitcond = icmp eq i64 %inc, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: + ret void +}