author     Chad Rosier <mcrosier@apple.com>    2011-11-14 22:34:48 +0000
committer  Chad Rosier <mcrosier@apple.com>    2011-11-14 22:34:48 +0000
commit     e489af8dce12249be26ac0c8e371557378886bc2 (patch)
tree       f37a03ac917deddae57256542e1e9deeee89872f /lib/Target
parent     02e3d9268fe456ebe4fe6ae277507bb7933ec3df (diff)
Fix a performance regression from r144565. Positive offsets were being lowered
into registers, rather than encoded directly in the load/store.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144576 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 81a93b1ed3..0b728a94fa 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -874,9 +874,9 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
// Integer loads/stores handle 12-bit offsets.
needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
// Handle negative offsets.
- if (isThumb2)
- needsLowering = !(needsLowering && Subtarget->hasV6T2Ops() &&
- Addr.Offset < 0 && Addr.Offset > -256);
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
} else {
// ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
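Why this fixes the regression: a minimal standalone sketch of the two predicates,
not the actual FastISel code. `needsLoweringOld`/`needsLoweringNew` and the plain
bool `hasV6T2Ops` parameter are hypothetical stand-ins for ARMSimplifyAddress and
the Subtarget query.

#include <cassert>

// Integer loads/stores take a 12-bit unsigned immediate; Thumb2 with
// v6T2 additionally encodes small negative offsets (-255..-1) directly.

// Predicate as of r144565 (before this commit).
static bool needsLoweringOld(int Offset, bool isThumb2, bool hasV6T2Ops) {
  bool needsLowering = ((Offset & 0xfff) != Offset);
  if (isThumb2)
    // Bug: when needsLowering is already false (offset fits in imm12),
    // the negation flips it to true, pushing the offset into a register.
    needsLowering = !(needsLowering && hasV6T2Ops &&
                      Offset < 0 && Offset > -256);
  return needsLowering;
}

// Predicate after this commit.
static bool needsLoweringNew(int Offset, bool isThumb2, bool hasV6T2Ops) {
  bool needsLowering = ((Offset & 0xfff) != Offset);
  // Only reconsider offsets that failed the imm12 check; small negative
  // offsets remain directly encodable on Thumb2.
  if (needsLowering && isThumb2)
    needsLowering = !(hasV6T2Ops && Offset < 0 && Offset > -256);
  return needsLowering;
}

int main() {
  // A positive offset that fits in imm12: the old predicate spuriously
  // demanded lowering, the new one encodes it directly.
  assert(needsLoweringOld(8, /*isThumb2=*/true, /*hasV6T2Ops=*/true));
  assert(!needsLoweringNew(8, /*isThumb2=*/true, /*hasV6T2Ops=*/true));
  // A small negative offset: both versions leave it in the instruction.
  assert(!needsLoweringOld(-4, /*isThumb2=*/true, /*hasV6T2Ops=*/true));
  assert(!needsLoweringNew(-4, /*isThumb2=*/true, /*hasV6T2Ops=*/true));
  return 0;
}

The double negative in the old form applied `!` on every Thumb2 address, so any
offset that had already passed the imm12 check came back as "needs lowering";
guarding with `needsLowering && isThumb2` restricts the special case to offsets
that actually failed the first check.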