author     Christoph Lameter <cl@linux.com>    2010-12-06 11:16:29 -0600
committer  Tejun Heo <tj@kernel.org>           2010-12-17 15:07:19 +0100
commit     780f36d8b3fa9572f731d4fb85067b2e45e6f993 (patch)
tree       26c2f80122cd863232de2e295e8435f06c9e2527 /arch/x86/xen/spinlock.c
parent     c7b92516a9c68fa5403879225a5a19974a801ef6 (diff)
xen: Use this_cpu_ops
Use this_cpu_ops to reduce code size and simplify things in various places.
V3->V4:
Move the instance of this_cpu_inc_return to a later patchset so that
this patch can be applied without infrastructure changes.
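For reference, a minimal sketch of the conversion pattern this patch applies; the per-CPU variable demo_state and the demo_swap_* helpers are hypothetical, not part of the patch. __get_cpu_var() exposes the current CPU's slot as an lvalue, while __this_cpu_read()/__this_cpu_write() are dedicated accessors that on x86 typically compile down to a single segment-prefixed instruction, which is where the code-size reduction comes from.

/*
 * Minimal sketch of the conversion pattern (hypothetical per-CPU
 * variable "demo_state" and helpers; not part of this patch).
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_state);

/* Old style: __get_cpu_var() yields the current CPU's slot as an lvalue. */
static int demo_swap_old(int new)
{
	int prev = __get_cpu_var(demo_state);

	__get_cpu_var(demo_state) = new;
	return prev;
}

/* New style: dedicated this_cpu accessors, as used by this patch. */
static int demo_swap_new(int new)
{
	int prev = __this_cpu_read(demo_state);

	__this_cpu_write(demo_state, new);
	return prev;
}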
Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/xen/spinlock.c')
-rw-r--r--  arch/x86/xen/spinlock.c  8
1 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23e061b9327..cc9b1e182fc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
 	struct xen_spinlock *prev;
 
-	prev = __get_cpu_var(lock_spinners);
-	__get_cpu_var(lock_spinners) = xl;
+	prev = __this_cpu_read(lock_spinners);
+	__this_cpu_write(lock_spinners, xl);
 
 	wmb();			/* set lock of interest before count */
 
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	asm(LOCK_PREFIX " decw %0"
 	    : "+m" (xl->spinners) : : "memory");
 	wmb();			/* decrement count before restoring lock */
-	__get_cpu_var(lock_spinners) = prev;
+	__this_cpu_write(lock_spinners, prev);
 }
 
 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
-	int irq = __get_cpu_var(lock_kicker_irq);
+	int irq = __this_cpu_read(lock_kicker_irq);
 	int ret;
 	u64 start;
 