Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--  arch/mips/kernel/smp.c | 409
1 file changed, 210 insertions(+), 199 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index fcacf1aae98..9bad52ede90 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
@@ -29,52 +30,67 @@
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/ftrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/idle.h>
+#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/setup.h>
-cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
-cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
+
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
+/* Number of TCs (or siblings in Intel speak) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
-static void smp_tune_scheduling (void)
-{
- struct cache_desc *cd = &current_cpu_data.scache;
- unsigned long cachesize; /* kB */
- unsigned long cpu_khz;
+/* representing the TCs (or siblings in Intel speak) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
- /*
- * Crude estimate until we actually measure ...
- */
- cpu_khz = loops_per_jiffy * 2 * HZ / 1000;
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
- /*
- * Rough estimation for SMP scheduling, this is the number of
- * cycles it takes for a fully memory-limited process to flush
- * the SMP-local cache.
- *
- * (For a P5 this pretty much means we will choose another idle
- * CPU almost always at wakeup time (this is due to the small
- * L1 cache), on PIIs it's around 50-100 usecs, depending on
- * the cache size)
- */
- if (!cpu_khz)
- return;
+cpumask_t cpu_coherent_mask;
- cachesize = cd->linesz * cd->sets * cd->ways;
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpu_set(cpu, cpu_sibling_setup_map);
+
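+ /*
+ * Mark every already-set-up CPU that shares this CPU's physical core
+ * as a sibling, in both directions of the map.
+ */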
+ if (smp_num_siblings > 1) {
+ for_each_cpu_mask(i, cpu_sibling_setup_map) {
+ if (cpu_data[cpu].core == cpu_data[i].core) {
+ cpu_set(i, cpu_sibling_map[cpu]);
+ cpu_set(cpu, cpu_sibling_map[i]);
+ }
+ }
+ } else
+ cpu_set(cpu, cpu_sibling_map[cpu]);
}
-extern void __init calibrate_delay(void);
-extern ATTRIB_NORET void cpu_idle(void);
+struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
+
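+/*
+ * Platform code calls this early during boot to install its SMP operations
+ * (prepare_cpus, boot_secondary, init_secondary, smp_finish,
+ * send_ipi_single, ...); a later registration overrides the first with a
+ * warning.
+ */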
+void register_smp_ops(struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
/*
* First C code run on the secondary CPUs after being started up by
@@ -82,12 +98,13 @@ extern ATTRIB_NORET void cpu_idle(void);
*/
asmlinkage void start_secondary(void)
{
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
cpu_probe();
cpu_report();
- per_cpu_trap_init();
- prom_init_secondary();
+ per_cpu_trap_init(false);
+ mips_clockevent_init();
+ mp_ops->init_secondary();
/*
* XXX parity protection should be folded in here when it's converted
@@ -95,116 +112,39 @@ asmlinkage void start_secondary(void)
*/
calibrate_delay();
+ preempt_disable();
+ cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
- prom_smp_finish();
-
- cpu_set(cpu, cpu_callin_map);
+ cpu_set(cpu, cpu_coherent_mask);
+ notify_cpu_starting(cpu);
- cpu_idle();
-}
+ set_cpu_online(cpu, true);
-DEFINE_SPINLOCK(smp_call_lock);
+ set_cpu_sibling_map(cpu);
-struct call_data_struct *call_data;
+ cpu_set(cpu, cpu_callin_map);
-/*
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A CPU B
- * Disable interrupts
- * smp_call_function()
- * Take call_lock
- * Send IPIs
- * Wait for all cpus to acknowledge IPI
- * CPU A has not responded, spin waiting
- * for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock Deadlock
- */
-int smp_call_function (void (*func) (void *info), void *info, int retry,
- int wait)
-{
- struct call_data_struct data;
- int i, cpus = num_online_cpus() - 1;
- int cpu = smp_processor_id();
+ synchronise_count_slave(cpu);
/*
- * Can die spectacularly if this CPU isn't yet marked online
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
*/
- BUG_ON(!cpu_online(cpu));
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
- if (!cpus)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i) && i != cpu)
- core_send_ipi(i, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- spin_unlock(&smp_call_lock);
-
- return 0;
+ cpu_startup_entry(CPUHP_ONLINE);
}
-void smp_call_function_interrupt(void)
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
+void __irq_entry smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function.
- */
- mb();
- atomic_inc(&call_data->started);
-
- /*
- * At this point the info structure may be out of scope unless wait==1.
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
irq_exit();
-
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
}
static void stop_this_cpu(void *dummy)
@@ -212,19 +152,20 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_enable(); /* May need to service _machine_restart IPI */
- for (;;); /* Wait if available. */
+ set_cpu_online(smp_processor_id(), false);
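+ /*
+ * Park this CPU for good, using the platform's wait instruction when
+ * one is available so it idles at low power.
+ */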
+ for (;;) {
+ if (cpu_wait)
+ (*cpu_wait)(); /* Wait if available. */
+ }
}
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
- prom_cpus_done();
}
/* called from main before smp_init() */
@@ -232,43 +173,25 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
- smp_tune_scheduling();
- prom_prepare_cpus(max_cpus);
+ mp_ops->prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+ cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
{
- /*
- * This assumes that bootup is always handled by the processor
- * with the logic and physical number 0.
- */
- __cpu_number_map[0] = 0;
- __cpu_logical_map[0] = 0;
- cpu_set(0, phys_cpu_present_map);
- cpu_set(0, cpu_online_map);
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
cpu_set(0, cpu_callin_map);
}
-/*
- * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set. Note: cpu is
- * physical, not logical.
- */
-int __devinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- struct task_struct *idle;
-
- /*
- * Processor goes to start_secondary(), sets online flag
- * The following code is purely to make sure
- * Linux can schedule processes on this slave.
- */
- idle = fork_idle(cpu);
- if (IS_ERR(idle))
- panic(KERN_ERR "Fork failed for CPU %d", cpu);
-
- prom_boot_secondary(cpu, idle);
+ mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...
@@ -276,8 +199,7 @@ int __devinit __cpu_up(unsigned int cpu)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
- cpu_set(cpu, cpu_online_map);
-
+ synchronise_count_master(cpu);
return 0;
}
@@ -294,7 +216,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -303,6 +225,29 @@ static void flush_tlb_mm_ipi(void *mm)
}
/*
+ * Special Variant of smp_call_function for use by TLB functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+ smp_call_function(func, info, 1);
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
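+ /* Run func on all other CPUs and then locally, without migrating in between. */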
+ preempt_disable();
+
+ smp_on_other_tlbs(func, info);
+ func(info);
+
+ preempt_enable();
+}
+
+/*
* The following tlb flush calls are invoked when old translations are
* being torn down, or pte attributes are changing. For single threaded
* address spaces, a new context is obtained on the current cpu, and tlb
@@ -320,12 +265,14 @@ void flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_mm(mm);
@@ -340,7 +287,7 @@ struct flush_tlb_data {
static void flush_tlb_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
@@ -351,17 +298,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.vma = vma;
- fd.addr1 = start;
- fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
@@ -369,23 +319,24 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
static void flush_tlb_kernel_range_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
- fd.addr1 = start;
- fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
- struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
@@ -394,16 +345,19 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
- struct flush_tlb_data fd;
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
- fd.vma = vma;
- fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, vma->vm_mm) = 0;
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
+ }
}
local_flush_tlb_page(vma, page);
preempt_enable();
@@ -418,11 +372,68 @@ static void flush_tlb_one_ipi(void *info)
void flush_tlb_one(unsigned long vaddr)
{
- smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
- local_flush_tlb_one(vaddr);
+ smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(synchronize_irq);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ dump_ipi_function_ptr = dump_ipi_callback;
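+ /* Make sure the callback pointer is visible before the IPIs go out. */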
+ smp_mb();
+ for_each_online_cpu(i)
+ if (i != cpu)
+ mp_ops->send_ipi_single(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+ atomic_t *count;
+ struct call_single_data *csd;
+ int cpu;
+
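+ /*
+ * The per-cpu count collapses back-to-back broadcasts into at most one
+ * pending IPI per target CPU.
+ */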
+ for_each_cpu(cpu, mask) {
+ count = &per_cpu(tick_broadcast_count, cpu);
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+
+ if (atomic_inc_return(count) == 1)
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+static void tick_broadcast_callee(void *info)
+{
+ int cpu = smp_processor_id();
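+ /* Deliver the tick, then clear the count so the next broadcast sends a fresh IPI. */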
+ tick_receive_broadcast();
+ atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+ struct call_single_data *csd;
+ int cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ csd->func = tick_broadcast_callee;
+ }
+
+ return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */