diff options
Diffstat (limited to 'arch/x86/include/asm/rwsem.h')
| -rw-r--r-- | arch/x86/include/asm/rwsem.h | 110 | 
1 file changed, 28 insertions, 82 deletions
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index d1e41b0f9b6..cad82c9c2fd 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -37,26 +37,9 @@  #endif  #ifdef __KERNEL__ - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/lockdep.h>  #include <asm/asm.h> -struct rwsem_waiter; - -extern asmregparm struct rw_semaphore * - rwsem_down_read_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_down_write_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_wake(struct rw_semaphore *); -extern asmregparm struct rw_semaphore * - rwsem_downgrade_wake(struct rw_semaphore *sem); -  /* - * the semaphore definition - *   * The bias values and the counter type limits the number of   * potential readers/writers to 32767 for 32 bits and 2147483647   * for 64 bits. @@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *  #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS  #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -typedef signed long rwsem_count_t; - -struct rw_semaphore { -	rwsem_count_t		count; -	spinlock_t		wait_lock; -	struct list_head	wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -	struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - - -#define __RWSEM_INITIALIZER(name)				\ -{								\ -	RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ -	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ -} - -#define DECLARE_RWSEM(name)					\ -	struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, -			 struct lock_class_key *key); - -#define init_rwsem(sem)						\ -do {								\ -	static struct lock_class_key __key;			\ -								\ -	__init_rwsem((sem), #sem, &__key);			\ -} 
while (0) -  /*   * lock for reading   */ @@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)   */  static inline int __down_read_trylock(struct rw_semaphore *sem)  { -	rwsem_count_t result, tmp; +	long result, tmp;  	asm volatile("# beginning __down_read_trylock\n\t"  		     "  mov          %0,%1\n\t"  		     "1:\n\t" @@ -155,12 +101,12 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)   */  static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)  { -	rwsem_count_t tmp; +	long tmp;  	asm volatile("# beginning down_write\n\t"  		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"  		     /* adds 0xffff0001, returns the old value */ -		     "  test      %1,%1\n\t" -		     /* was the count 0 before? */ +		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" +		     /* was the active mask 0 before? */  		     "  jz        1f\n"  		     "  call call_rwsem_down_write_failed\n"  		     "1:\n" @@ -180,12 +126,25 @@ static inline void __down_write(struct rw_semaphore *sem)   */  static inline int __down_write_trylock(struct rw_semaphore *sem)  { -	rwsem_count_t ret = cmpxchg(&sem->count, -				    RWSEM_UNLOCKED_VALUE, -				    RWSEM_ACTIVE_WRITE_BIAS); -	if (ret == RWSEM_UNLOCKED_VALUE) -		return 1; -	return 0; +	long result, tmp; +	asm volatile("# beginning __down_write_trylock\n\t" +		     "  mov          %0,%1\n\t" +		     "1:\n\t" +		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" +		     /* was the active mask 0 before? 
*/ +		     "  jnz          2f\n\t" +		     "  mov          %1,%2\n\t" +		     "  add          %3,%2\n\t" +		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t" +		     "  jnz	     1b\n\t" +		     "2:\n\t" +		     "  sete         %b1\n\t" +		     "  movzbl       %b1, %k1\n\t" +		     "# ending __down_write_trylock\n\t" +		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp) +		     : "er" (RWSEM_ACTIVE_WRITE_BIAS) +		     : "memory", "cc"); +	return result;  }  /* @@ -193,7 +152,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)   */  static inline void __up_read(struct rw_semaphore *sem)  { -	rwsem_count_t tmp; +	long tmp;  	asm volatile("# beginning __up_read\n\t"  		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"  		     /* subtracts 1, returns the old value */ @@ -211,7 +170,7 @@ static inline void __up_read(struct rw_semaphore *sem)   */  static inline void __up_write(struct rw_semaphore *sem)  { -	rwsem_count_t tmp; +	long tmp;  	asm volatile("# beginning __up_write\n\t"  		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"  		     /* subtracts 0xffff0001, returns the old value */ @@ -247,8 +206,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)  /*   * implement atomic add functionality   */ -static inline void rwsem_atomic_add(rwsem_count_t delta, -				    struct rw_semaphore *sem) +static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)  {  	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"  		     : "+m" (sem->count) @@ -258,21 +216,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,  /*   * implement exchange and add functionality   */ -static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, -						struct rw_semaphore *sem) -{ -	rwsem_count_t tmp = delta; - -	asm volatile(LOCK_PREFIX "xadd %0,%1" -		     : "+r" (tmp), "+m" (sem->count) -		     : : "memory"); - -	return tmp + delta; -} - -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline long rwsem_atomic_update(long delta, struct 
rw_semaphore *sem)  { -	return (sem->count != 0); +	return delta + xadd(&sem->count, delta);  }  #endif /* __KERNEL__ */  | 
