Diffstat (limited to 'arch/ia64/kernel/smp.c')
-rw-r--r--  arch/ia64/kernel/smp.c  23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 5230eaafd83..9fcd4e63048 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -32,7 +32,7 @@
#include <linux/bitops.h>
#include <linux/kexec.h>

-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
@@ -44,7 +44,6 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
-#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
@@ -58,7 +57,8 @@ static struct local_tlb_flush_counts {
unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
+ shadow_flush_counts);

#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cachelin
#define IPI_KDUMP_CPU_STOP 3

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);

extern void cpu_halt (void);
@@ -76,7 +76,7 @@ stop_this_cpu(void)
/*
* Remove this CPU:
*/
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
max_xtp();
local_irq_disable();
cpu_halt();
@@ -292,6 +292,7 @@ smp_flush_tlb_all (void)
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
+ cpumask_var_t cpus;
preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -300,9 +301,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
preempt_enable();
return;
}
-
- smp_call_function_mask(mm->cpu_vm_mask,
- (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+ if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+ smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+ mm, 1);
+ } else {
+ cpumask_copy(cpus, mm_cpumask(mm));
+ smp_call_function_many(cpus,
+ (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+ free_cpumask_var(cpus);
+ }
local_irq_disable();
local_finish_flush_tlb_mm(mm);
local_irq_enable();
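The rewritten smp_flush_tlb_mm() above uses a common cpumask fallback pattern: allocate a temporary mask with GFP_ATOMIC, and if that fails, fall back to sending the IPI to every online CPU via smp_call_function(). A minimal standalone sketch of the remote-flush half of that pattern follows; the helper name flush_mm_remote() is hypothetical (not a kernel symbol), and only the kernel APIs already used in the diff are assumed.

/*
 * Sketch of the alloc_cpumask_var() fallback pattern used in the hunk
 * above. flush_mm_remote() is a hypothetical helper, not an existing
 * kernel function; the caller is assumed to hold preempt_disable(),
 * as smp_flush_tlb_mm() does.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void flush_mm_remote(struct mm_struct *mm, void (*flush)(void *))
{
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		/* No memory for a private mask: IPI every online CPU. */
		smp_call_function(flush, mm, 1);
	} else {
		/* Snapshot mm_cpumask(mm) so the IPI targets a stable set. */
		cpumask_copy(cpus, mm_cpumask(mm));
		smp_call_function_many(cpus, flush, mm, 1);
		free_cpumask_var(cpus);
	}
}

With CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is an on-stack array and alloc_cpumask_var() always succeeds, so the fallback branch only matters on large-NR_CPUS builds where the mask is heap-allocated; GFP_ATOMIC is used because the caller cannot sleep while preemption is disabled.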