-rw-r--r--  arch/x86/kernel/paravirt.c        | 10
-rw-r--r--  include/asm-x86/paravirt.h        | 37
-rw-r--r--  include/asm-x86/spinlock.h        | 55
-rw-r--r--  include/asm-x86/spinlock_types.h  |  2
4 files changed, 88 insertions, 16 deletions
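
In brief, this patch wires spinlocks into the paravirt-ops machinery. The ticket-lock routines in include/asm-x86/spinlock.h are renamed from __raw_spin_* to __ticket_spin_*; a new pv_lock_ops structure, defaulting to those ticket routines in arch/x86/kernel/paravirt.c, is added to the paravirt patch template; and under CONFIG_PARAVIRT the __raw_spin_* entry points become PVOP calls through pv_lock_ops, while non-paravirt builds keep direct inline wrappers around the ticket code. The raw_spinlock typedef gains a struct tag so paravirt.h can forward-declare it.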
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 2963ab5d91e..f3381686870 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -124,6 +124,7 @@ static void *get_call_destination(u8 type)
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+		.pv_lock_ops = pv_lock_ops,
 	};
 	return *((void **)&tmpl + type);
 }
@@ -450,6 +451,15 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
+struct pv_lock_ops pv_lock_ops = {
+	.spin_is_locked = __ticket_spin_is_locked,
+	.spin_is_contended = __ticket_spin_is_contended,
+
+	.spin_lock = __ticket_spin_lock,
+	.spin_trylock = __ticket_spin_trylock,
+	.spin_unlock = __ticket_spin_unlock,
+};
+
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index eef8095a09d..feb6bb66c5e 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -326,6 +326,15 @@ struct pv_mmu_ops {
 			   unsigned long phys, pgprot_t flags);
 };
 
+struct raw_spinlock;
+struct pv_lock_ops {
+	int (*spin_is_locked)(struct raw_spinlock *lock);
+	int (*spin_is_contended)(struct raw_spinlock *lock);
+	void (*spin_lock)(struct raw_spinlock *lock);
+	int (*spin_trylock)(struct raw_spinlock *lock);
+	void (*spin_unlock)(struct raw_spinlock *lock);
+};
+
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
@@ -336,6 +345,7 @@ struct paravirt_patch_template {
 	struct pv_irq_ops pv_irq_ops;
 	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
+	struct pv_lock_ops pv_lock_ops;
 };
 
 extern struct pv_info pv_info;
@@ -345,6 +355,7 @@ extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
 extern struct pv_apic_ops pv_apic_ops;
 extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
 
 #define PARAVIRT_PATCH(x)					\
 	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -1374,6 +1385,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
 
+static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+}
+
+static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
+}
+
+static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+{
+	return PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
+}
+
+static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+}
+
+static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+{
+	return PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+}
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr; /* original instructions */
@@ -1458,6 +1494,7 @@ static inline unsigned long __raw_local_irq_save(void)
 	return f;
 }
 
+
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
 #undef __PVOP_CALL
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 21e89bf92f1..9726144cdab 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -6,7 +6,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
-
+#include <asm/paravirt.h>
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -54,21 +54,21 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -87,9 +87,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
@@ -110,7 +108,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -118,21 +116,21 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
 
@@ -153,9 +151,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -177,7 +173,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -186,6 +182,35 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+#ifndef CONFIG_PARAVIRT
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_contended(lock);
+}
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__ticket_spin_unlock(lock);
+}
+#endif	/* CONFIG_PARAVIRT */
+
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 9029cf78cf5..06c071c9eee 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -5,7 +5,7 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct {
+typedef struct raw_spinlock {
 	unsigned int slock;
 } raw_spinlock_t;
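
For context, the sketch below shows how a hypervisor backend might use the new hook: overwrite the pv_lock_ops defaults at boot with its own lock implementation. This is a minimal illustration under stated assumptions, not code from this patch; every hv_* identifier is hypothetical, and the byte-style lock is simply the least that fits the ops signatures (Xen's real replacement locks came in follow-up patches).

/*
 * Hypothetical paravirt backend: replace the default ticket lock with a
 * simple exchange-based lock.  All hv_* names are illustrative only and
 * not part of this patch; treating slock == 0 as "unlocked" is likewise
 * an assumption of this sketch.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/paravirt.h>
#include <asm/system.h>		/* xchg() */
#include <asm/processor.h>	/* cpu_relax() */

static int hv_spin_is_locked(struct raw_spinlock *lock)
{
	return lock->slock != 0;
}

static int hv_spin_is_contended(struct raw_spinlock *lock)
{
	/* This scheme keeps no waiter count, so contention is unknowable. */
	return 0;
}

static void hv_spin_lock(struct raw_spinlock *lock)
{
	/*
	 * Claim the lock with an atomic exchange; a nonzero return means
	 * it was already held, so spin on plain reads until it looks free.
	 * A real backend would yield to the hypervisor here instead of
	 * burning the virtual CPU.
	 */
	while (xchg(&lock->slock, 1) != 0)
		while (lock->slock != 0)
			cpu_relax();
}

static int hv_spin_trylock(struct raw_spinlock *lock)
{
	return xchg(&lock->slock, 1) == 0;
}

static void hv_spin_unlock(struct raw_spinlock *lock)
{
	barrier();		/* keep the critical section above the store */
	lock->slock = 0;	/* plain store: x86 stores have release semantics */
}

void __init hv_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = hv_spin_is_locked;
	pv_lock_ops.spin_is_contended = hv_spin_is_contended;
	pv_lock_ops.spin_lock = hv_spin_lock;
	pv_lock_ops.spin_trylock = hv_spin_trylock;
	pv_lock_ops.spin_unlock = hv_spin_unlock;
}

A guest-aware setup path would call hv_init_spinlocks() during early boot, before the patched call sites get hot; kernels without such a backend keep the ticket-lock defaults installed in paravirt.c above.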