author     Cliff Wickman <cpw@sgi.com>    2010-06-02 16:22:02 -0500
committer  Ingo Molnar <mingo@elte.hu>    2010-06-08 21:13:47 +0200
commit     90cc7d944981a6d06b49bb26fde1b490e28c90e5 (patch)
tree       21e2d202c168e8b0ff17907954a7106f018a5f26 /arch/x86/kernel/tlb_uv.c
parent     a8328ee58c15c9d763a67607a35bb987b38950fa (diff)
x86, UV: Remove BAU check for stay-busy
Remove the faulty assumption that a long-running BAU request has
encountered a hardware problem and will never finish.

Numalink congestion can make a request appear to have
encountered such a problem, but it is not safe to cancel the
request. If such a cancel is done but a reply is later received,
we can miss a TLB shootdown.

We depend upon the max_bau_concurrent 'throttle' to prevent the
stay-busy case from happening (see the sketch below).
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ad-BV@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
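
For readers unfamiliar with the BAU code, the throttle mentioned above caps
the number of broadcast requests a hub allows in flight at once, so a
descriptor is not expected to sit in the BUSY state behind a pile of
competing requests. What follows is a minimal userspace C sketch of that
general pattern, not the kernel's implementation; MAX_CONCURRENT,
uvhub_active, throttle_acquire() and throttle_release() are illustrative
names only.

/*
 * Minimal userspace sketch (not the kernel code) of a per-hub
 * concurrency throttle: cap the number of broadcast requests that
 * may be outstanding at once.  All names here are illustrative.
 */
#include <stdatomic.h>
#include <sched.h>

#define MAX_CONCURRENT 3          /* hypothetical per-hub limit */

static atomic_int uvhub_active;   /* requests currently in flight */

static void throttle_acquire(void)
{
        int cur;

        for (;;) {
                cur = atomic_load(&uvhub_active);
                if (cur < MAX_CONCURRENT &&
                    atomic_compare_exchange_weak(&uvhub_active, &cur, cur + 1))
                        return;   /* slot claimed; issue the broadcast */
                sched_yield();    /* back off instead of timing out */
        }
}

static void throttle_release(void)
{
        atomic_fetch_sub(&uvhub_active, 1);  /* reply arrived; free the slot */
}

In this sketch a sender would call throttle_acquire() before writing its
broadcast descriptor and throttle_release() when the reply arrives, so the
BUSY status is expected to clear on its own without a give-up path like the
one this patch removes.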
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--   arch/x86/kernel/tlb_uv.c   23
1 file changed, 0 insertions, 23 deletions
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index ab929e97650..dc962b5ac87 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -405,12 +405,10 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 	unsigned long mmr;
 	unsigned long mask;
 	cycles_t ttime;
-	cycles_t timeout_time;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
-	timeout_time = get_cycles() + bcp->timeout_interval;
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while ((descriptor_status = (((unsigned long)
@@ -450,26 +448,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 			 * descriptor_status is still BUSY
 			 */
 			cpu_relax();
-			relaxes++;
-			if (relaxes >= 10000) {
-				relaxes = 0;
-				if (get_cycles() > timeout_time) {
-					quiesce_local_uvhub(hmaster);
-
-					/* single-thread the register change */
-					spin_lock(&hmaster->masks_lock);
-					mmr = uv_read_local_mmr(mmr_offset);
-					mask = 0UL;
-					mask |= (3UL < right_shift);
-					mask = ~mask;
-					mmr &= mask;
-					uv_write_local_mmr(mmr_offset, mmr);
-					spin_unlock(&hmaster->masks_lock);
-					end_uvhub_quiesce(hmaster);
-					stat->s_busy++;
-					return FLUSH_GIVEUP;
-				}
-			}
 		}
 	}
 	bcp->conseccompletes++;
@@ -1580,7 +1558,6 @@ static void uv_init_per_cpu(int nuvhubs)
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
-		spin_lock_init(&bcp->masks_lock);
 		pnode = uv_cpu_hub_info(cpu)->pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		uvhub_mask |= (1 << uvhub);