Diffstat (limited to 'arch/mn10300/include/asm/atomic.h')
-rw-r--r--   arch/mn10300/include/asm/atomic.h   157
1 files changed, 120 insertions, 37 deletions
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index bc064825f9b..cadeb1e2cdf 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -11,9 +11,13 @@
 #ifndef _ASM_ATOMIC_H
 #define _ASM_ATOMIC_H
 
-#ifdef CONFIG_SMP
-#error not SMP safe
-#endif
+#include <asm/irqflags.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#ifndef CONFIG_SMP
+#include <asm-generic/atomic.h>
+#else
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -31,7 +35,7 @@
  * Atomically reads the value of @v. Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (ACCESS_ONCE((v)->counter))
 
 /**
  * atomic_set - set atomic variable
@@ -43,8 +47,6 @@
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#include <asm/system.h>
-
 /**
  * atomic_add_return - add integer to atomic variable
@@ -55,16 +57,33 @@
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
+        int retval;
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1: mov %4,(_AAR,%3) \n"
+                " mov (_ADR,%3),%1 \n"
+                " add %5,%1 \n"
+                " mov %1,(_ADR,%3) \n"
+                " mov (_ADR,%3),%0 \n" /* flush */
+                " mov (_ASR,%3),%0 \n"
+                " or %0,%0 \n"
+                " bne 1b \n"
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+                : "memory", "cc");
+
+#else
         unsigned long flags;
-        int temp;
 
-        local_irq_save(flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        local_irq_restore(flags);
-
-        return temp;
+        flags = arch_local_cli_save();
+        retval = v->counter;
+        retval += i;
+        v->counter = retval;
+        arch_local_irq_restore(flags);
+#endif
+        return retval;
 }
 
 /**
@@ -77,16 +96,32 @@ static inline int atomic_add_return(int i, atomic_t *v)
  */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
+        int retval;
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1: mov %4,(_AAR,%3) \n"
+                " mov (_ADR,%3),%1 \n"
+                " sub %5,%1 \n"
+                " mov %1,(_ADR,%3) \n"
+                " mov (_ADR,%3),%0 \n" /* flush */
+                " mov (_ASR,%3),%0 \n"
+                " or %0,%0 \n"
+                " bne 1b \n"
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+                : "memory", "cc");
+
+#else
         unsigned long flags;
-        int temp;
-
-        local_irq_save(flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        local_irq_restore(flags);
-
-        return temp;
+        flags = arch_local_cli_save();
+        retval = v->counter;
+        retval -= i;
+        v->counter = retval;
+        arch_local_irq_restore(flags);
+#endif
+        return retval;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
@@ -121,37 +156,85 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
 
-#define atomic_add_unless(v, a, u) \
+#define __atomic_add_unless(v, a, u) \
 ({ \
         int c, old; \
         c = atomic_read(v); \
         while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                 c = old; \
-        c != (u); \
+        c; \
 })
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1: mov %3,(_AAR,%2) \n"
+                " mov (_ADR,%2),%0 \n"
+                " and %4,%0 \n"
+                " mov %0,(_ADR,%2) \n"
+                " mov (_ADR,%2),%0 \n" /* flush */
+                " mov (_ASR,%2),%0 \n"
+                " or %0,%0 \n"
+                " bne 1b \n"
+                : "=&r"(status), "=m"(*addr)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+                : "memory", "cc");
+#else
         unsigned long flags;
 
         mask = ~mask;
-        local_irq_save(flags);
+        flags = arch_local_cli_save();
         *addr &= mask;
-        local_irq_restore(flags);
+        arch_local_irq_restore(flags);
+#endif
 }
 
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-/* Atomic operations are already serializing on MN10300??? */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1: mov %3,(_AAR,%2) \n"
+                " mov (_ADR,%2),%0 \n"
+                " or %4,%0 \n"
+                " mov %0,(_ADR,%2) \n"
+                " mov (_ADR,%2),%0 \n" /* flush */
+                " mov (_ASR,%2),%0 \n"
+                " or %0,%0 \n"
+                " bne 1b \n"
+                : "=&r"(status), "=m"(*addr)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+                : "memory", "cc");
+#else
+        unsigned long flags;
 
-#include <asm-generic/atomic.h>
+        flags = arch_local_cli_save();
+        *addr |= mask;
+        arch_local_irq_restore(flags);
+#endif
+}
 
 #endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */
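Every SMP path added above follows the same shape: the target address is written to the atomic-operations-unit address register (_AAR), the value is fetched and modified through the data register (_ADR), the store is flushed, and the status register (_ASR) is tested; a non-zero status means another CPU interfered, so the sequence branches back to label 1 and retries. As a rough, host-compilable model of that retry loop (not part of the patch, and not how MN10300 hardware is actually driven), the sketch below uses GCC/Clang __atomic builtins, with a compare-and-swap failure standing in for the _ASR status check; the function name and main() harness are purely illustrative.

/*
 * Model of the retry loop in the SMP atomic_add_return() above, using
 * compiler builtins instead of the MN10300 _AAR/_ADR/_ASR unit.
 */
#include <stdio.h>

static int model_atomic_add_return(int i, int *counter)
{
        int old, new;

        do {
                old = __atomic_load_n(counter, __ATOMIC_RELAXED); /* mov (_ADR),%1 */
                new = old + i;                                    /* add %5,%1     */
                /* store back; retry if another CPU got in first (bne 1b) */
        } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        return new;
}

int main(void)
{
        int counter = 40;

        printf("%d\n", model_atomic_add_return(2, &counter)); /* prints 42 */
        return 0;
}

On the real hardware the retry is driven by the atomic-ops unit's status report rather than by a value comparison; the CAS here only mimics the control flow.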
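One behavioural detail in the hunk above is easy to miss: the old atomic_add_unless() macro evaluated to a boolean ("c != (u);"), while the renamed __atomic_add_unless() evaluates to the value the counter held before the attempted add ("c;"). The boolean form is rebuilt by the architecture-independent wrapper; the sketch below mirrors the generic include/linux/atomic.h definitions of that era and is shown only to illustrate how callers such as atomic_inc_not_zero() recover the old semantics, it is not part of this diff.

/* Generic-wrapper sketch (assumed, not from this patch): the add happened
 * iff the value seen by __atomic_add_unless() was not the forbidden @u. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        return __atomic_add_unless(v, a, u) != u;
}

/* e.g. take a reference only if the count has not already dropped to zero */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)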
