author     Ralf Baechle <ralf@linux-mips.org>    2010-10-29 19:08:24 +0100
committer  Ralf Baechle <ralf@linux-mips.org>    2010-10-29 19:08:24 +0100
commit     7837314d141c661c70bc13c5050694413ecfe14a (patch)
tree       de137b1d2945d2490bc1dcdf6d76eac6006f7ab0 /arch/mips/include/asm/system.h
parent     18cb657ca1bafe635f368346a1676fb04c512edf (diff)
MIPS: Get rid of branches to .subsections.
It was a nice optimization - on paper at least. In practice it results in
branches that may exceed the maximum legal range for a branch: a MIPS
conditional branch encodes a 16-bit signed word offset, so its reach is only
about +/-128 KiB. We can fight that problem with -ffunction-sections, but
-ffunction-sections in turn is incompatible with -pg, which the function
tracer depends on.
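For reference, here is the idiom being dropped, condensed from the diff below
into a standalone sketch (MIPS-only, GNU C extended asm; the function name is
illustrative, the asm body is the removed code). The assembler emits every
.subsection 2 fragment after all other code in the section, so in a large
object file the retry stub can end up beyond the branch's reach:

static inline unsigned int xchg_u32_old_sketch(volatile int *m, unsigned int val)
{
	unsigned int retval;
	unsigned long dummy;

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"1:	ll	%0, %3		# load-linked old value	\n"
	"	move	%2, %z4		# stage the new value	\n"
	"	sc	%2, %1		# store-conditional	\n"
	"	beqz	%2, 2f		# failed? go to stub	\n"
	"	.subsection 2					\n"
	"2:	b	1b		# retry stub, far away	\n"
	"	.previous					\n"
	"	.set	mips0					\n"
	: "=&r" (retval), "=m" (*m), "=&r" (dummy)
	: "R" (*m), "Jr" (val)
	: "memory");

	return retval;
}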
By rewriting the retry loop around all simple LL/SC blocks in C we reduce
the amount of inline assembler and at the same time allow GCC to fill the
branch delay slots with something sensible, or to apply whatever other
clever optimization it may have up its sleeve.
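A minimal sketch of the resulting pattern (hypothetical helper, MIPS-only,
GNU C extended asm): only the LL/SC pair lives in the asm block and the retry
loop is ordinary C, so no branch ever targets a distant subsection and GCC
gets to schedule the loop branch and its delay slot itself:

static inline void atomic_inc_sketch(volatile int *m)
{
	int tmp;

	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	ll	%0, %2		# load-linked	\n"
		"	addiu	%0, %0, 1	# increment	\n"
		"	sc	%0, %1		# store-cond.	\n"
		"	.set	mips0				\n"
		: "=&r" (tmp), "=m" (*m)
		: "R" (*m)
		: "memory");
	} while (__builtin_expect(!tmp, 0));	/* retry if sc failed */
}

Here __builtin_expect stands in for the kernel's unlikely(); GCC keeps the
rarely-taken retry path out of the hot path without any assembler tricks.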
With this optimization gone we also no longer need -ffunction-sections,
so drop it.
This optimization was originally introduced in 2.6.21, as commit
5999eca25c1fd4b9b9aca7833b04d10fe4bc877d on linux-mips.org and
f65e4fa8e0c6022ad58dc88d1b11b12589ed7f9f on kernel.org.
The original fix for the issues that caused me to pull this optimization
came from Paul Gortmaker <paul.gortmaker@windriver.com>.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/system.h')
-rw-r--r--   arch/mips/include/asm/system.h | 52
1 file changed, 24 insertions(+), 28 deletions(-)
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index bb937ccfba1..6018c80ce37 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -115,21 +115,19 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %3			# xchg_u32	\n"
-		"	.set	mips0					\n"
-		"	move	%2, %z4					\n"
-		"	.set	mips3					\n"
-		"	sc	%2, %1					\n"
-		"	beqz	%2, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-		: "R" (*m), "Jr" (val)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%0, %3		# xchg_u32	\n"
+			"	.set	mips0				\n"
+			"	move	%2, %z4				\n"
+			"	.set	mips3				\n"
+			"	sc	%2, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+			: "R" (*m), "Jr" (val)
+			: "memory");
+		} while (unlikely(!dummy));
 	} else {
 		unsigned long flags;
 
@@ -167,19 +165,17 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %3			# xchg_u64	\n"
-		"	move	%2, %z4					\n"
-		"	scd	%2, %1					\n"
-		"	beqz	%2, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-		: "R" (*m), "Jr" (val)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%0, %3		# xchg_u64	\n"
+			"	move	%2, %z4				\n"
+			"	scd	%2, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+			: "R" (*m), "Jr" (val)
+			: "memory");
+		} while (unlikely(!dummy));
 	} else {
 		unsigned long flags;