author | Tony Luck <tony.luck@intel.com> | 2009-10-09 10:52:39 -0700 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2009-10-09 10:52:39 -0700 |
commit | 883a3acf5b0d4782ac35981227a0d094e8b44850 (patch) | |
tree | a5ad44c09e36298749f0b37dbd5695bad040620c | |
parent | 36a07902c2134649c4af7f07980413ffb1a56085 (diff) |
[IA64] Re-implement spinaphores using ticket lock concepts
Bound the wait time for the ptcg_sem by using an idea similar to the
ticket spin locks. In this case we have only one instance of a
spinaphore, so make it 8 bytes rather than try to squeeze it into
4 bytes, to keep the code simpler (and shorter).
Signed-off-by: Tony Luck <tony.luck@intel.com>
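The ticket idea works as follows: each arriving CPU takes a ticket, and a "serve" counter admits waiters strictly in order, so the wait is bounded by the number of earlier tickets. Below is a minimal, illustrative sketch of that concept in portable C11 atomics; it is not the patch's code, and all names (ticket_sem, ticket_sem_down, ...) are hypothetical.

```c
#include <stdatomic.h>

struct ticket_sem {
        atomic_ulong ticket;    /* next ticket number to hand out */
        atomic_ulong serve;     /* tickets below this value may proceed */
};

static void ticket_sem_init(struct ticket_sem *s, unsigned long val)
{
        atomic_init(&s->ticket, 0);
        atomic_init(&s->serve, val);    /* "val" slots start out available */
}

static void ticket_sem_down(struct ticket_sem *s)
{
        unsigned long t = atomic_fetch_add_explicit(&s->ticket, 1,
                                                    memory_order_acquire);

        /* Spin until the serve counter passes our ticket (wrap-safe). */
        while ((long)(atomic_load_explicit(&s->serve,
                                           memory_order_acquire) - t) <= 0)
                ;       /* a real implementation would cpu_relax() here */
}

static void ticket_sem_up(struct ticket_sem *s)
{
        /* Advancing "serve" admits the next waiting ticket holder. */
        atomic_fetch_add_explicit(&s->serve, 1, memory_order_release);
}
```

Unlike the old atomic_add_unless() loop, where any spinning CPU could grab a freed slot, the ticket scheme serves waiters in FIFO order, which is what bounds the wait time.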
-rw-r--r-- | arch/ia64/mm/tlb.c | 24 |
1 file changed, 18 insertions, 6 deletions
```diff
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index f426dc78d95..ee09d261f2e 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
  * this primitive it can be moved up to a spinaphore.h header.
  */
 struct spinaphore {
-        atomic_t cur;
+        unsigned long ticket;
+        unsigned long serve;
 };
 
 static inline void spinaphore_init(struct spinaphore *ss, int val)
 {
-        atomic_set(&ss->cur, val);
+        ss->ticket = 0;
+        ss->serve = val;
 }
 
 static inline void down_spin(struct spinaphore *ss)
 {
-        while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
-                while (atomic_read(&ss->cur) == 0)
-                        cpu_relax();
+        unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+        if (time_before(t, ss->serve))
+                return;
+
+        ia64_invala();
+
+        for (;;) {
+                asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+                if (time_before(t, serve))
+                        return;
+                cpu_relax();
+        }
 }
 
 static inline void up_spin(struct spinaphore *ss)
 {
-        atomic_add(1, &ss->cur);
+        ia64_fetchadd(1, &ss->serve, rel);
 }
 
 static struct spinaphore ptcg_sem;
```
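For context (not part of the patch): the ia64_invala()/ld4.c.nc check-load in the new down_spin() is the same spin-wait pattern the IA-64 ticket spinlocks use, so a waiting CPU does not have to keep re-fetching the serve cache line; a remote store to serve forces the check load to perform a fresh read. Below is a hypothetical usage sketch of the primitive as a throttle; the function name is made up, and ptcg_sem is assumed to have been initialized elsewhere (e.g. spinaphore_init(&ptcg_sem, nptcg), as the existing caller in this file does).

```c
/*
 * Hypothetical usage sketch (not part of the patch): ptcg_sem limits
 * how many CPUs may issue a global TLB purge at once.
 */
static void issue_global_purge_throttled(void)
{
        down_spin(&ptcg_sem);   /* take a ticket; wait until it is served */
        /* ... issue the ptc.ga broadcast purge here ... */
        up_spin(&ptcg_sem);     /* advance "serve", admitting the next waiter */
}
```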