Diffstat (limited to 'kernel/smp.c')
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | kernel/smp.c | 833 |
1 file changed, 547 insertions(+), 286 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c index f362a855377..80c33f8de14 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -2,196 +2,254 @@ * Generic helpers for smp ipi calls * * (C) Jens Axboe <jens.axboe@oracle.com> 2008 - * */ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/rculist.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/percpu.h> +#include <linux/init.h> +#include <linux/gfp.h> #include <linux/smp.h> +#include <linux/cpu.h> -static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); -static LIST_HEAD(call_function_queue); -__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); +#include "smpboot.h" enum { - CSD_FLAG_WAIT = 0x01, - CSD_FLAG_ALLOC = 0x02, + CSD_FLAG_LOCK = 0x01, + CSD_FLAG_WAIT = 0x02, }; struct call_function_data { - struct call_single_data csd; - spinlock_t lock; - unsigned int refs; - cpumask_t cpumask; - struct rcu_head rcu_head; + struct call_single_data __percpu *csd; + cpumask_var_t cpumask; }; -struct call_single_queue { - struct list_head list; - spinlock_t lock; -}; +static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); -static int __cpuinit init_call_single_data(void) +static void flush_smp_call_function_queue(bool warn_cpu_offline); + +static int +hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) { - int i; + long cpu = (long)hcpu; + struct call_function_data *cfd = &per_cpu(cfd_data, cpu); + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, + cpu_to_node(cpu))) + return notifier_from_errno(-ENOMEM); + cfd->csd = alloc_percpu(struct call_single_data); + if (!cfd->csd) { + free_cpumask_var(cfd->cpumask); + return notifier_from_errno(-ENOMEM); + } + break; - for_each_possible_cpu(i) { - struct call_single_queue *q = &per_cpu(call_single_queue, i); +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: + /* Fall-through to the CPU_DEAD[_FROZEN] case. */ - spin_lock_init(&q->lock); - INIT_LIST_HEAD(&q->list); - } - return 0; -} -early_initcall(init_call_single_data); + case CPU_DEAD: + case CPU_DEAD_FROZEN: + free_cpumask_var(cfd->cpumask); + free_percpu(cfd->csd); + break; -static void csd_flag_wait(struct call_single_data *data) -{ - /* Wait for response */ - do { + case CPU_DYING: + case CPU_DYING_FROZEN: /* - * We need to see the flags store in the IPI handler + * The IPIs for the smp-call-function callbacks queued by other + * CPUs might arrive late, either due to hardware latencies or + * because this CPU disabled interrupts (inside stop-machine) + * before the IPIs were sent. So flush out any pending callbacks + * explicitly (without waiting for the IPIs to arrive), to + * ensure that the outgoing CPU doesn't go offline with work + * still pending. 
*/ - smp_mb(); - if (!(data->flags & CSD_FLAG_WAIT)) - break; - cpu_relax(); - } while (1); + flush_smp_call_function_queue(false); + break; +#endif + }; + + return NOTIFY_OK; +} + +static struct notifier_block hotplug_cfd_notifier = { + .notifier_call = hotplug_cfd, +}; + +void __init call_function_init(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + int i; + + for_each_possible_cpu(i) + init_llist_head(&per_cpu(call_single_queue, i)); + + hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); + register_cpu_notifier(&hotplug_cfd_notifier); } /* - * Insert a previously allocated call_single_data element for execution - * on the given CPU. data must already have ->func, ->info, and ->flags set. + * csd_lock/csd_unlock used to serialize access to per-cpu csd resources + * + * For non-synchronous ipi calls the csd can still be in use by the + * previous function call. For multi-cpu calls its even more interesting + * as we'll have to ensure no other cpu is observing our csd. */ -static void generic_exec_single(int cpu, struct call_single_data *data) +static void csd_lock_wait(struct call_single_data *csd) { - struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); - int wait = data->flags & CSD_FLAG_WAIT, ipi; - unsigned long flags; - - spin_lock_irqsave(&dst->lock, flags); - ipi = list_empty(&dst->list); - list_add_tail(&data->list, &dst->list); - spin_unlock_irqrestore(&dst->lock, flags); + while (csd->flags & CSD_FLAG_LOCK) + cpu_relax(); +} - if (ipi) - arch_send_call_function_single_ipi(cpu); +static void csd_lock(struct call_single_data *csd) +{ + csd_lock_wait(csd); + csd->flags |= CSD_FLAG_LOCK; - if (wait) - csd_flag_wait(data); + /* + * prevent CPU from reordering the above assignment + * to ->flags with any subsequent assignments to other + * fields of the specified call_single_data structure: + */ + smp_mb(); } -static void rcu_free_call_data(struct rcu_head *head) +static void csd_unlock(struct call_single_data *csd) { - struct call_function_data *data; + WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK)); - data = container_of(head, struct call_function_data, rcu_head); + /* + * ensure we're all done before releasing data: + */ + smp_mb(); - kfree(data); + csd->flags &= ~CSD_FLAG_LOCK; } +static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); + /* - * Invoked by arch to handle an IPI for call function. Must be called with - * interrupts disabled. + * Insert a previously allocated call_single_data element + * for execution on the given CPU. data must already have + * ->func, ->info, and ->flags set. 
*/ -void generic_smp_call_function_interrupt(void) +static int generic_exec_single(int cpu, struct call_single_data *csd, + smp_call_func_t func, void *info, int wait) { - struct call_function_data *data; - int cpu = get_cpu(); + struct call_single_data csd_stack = { .flags = 0 }; + unsigned long flags; + + + if (cpu == smp_processor_id()) { + local_irq_save(flags); + func(info); + local_irq_restore(flags); + return 0; + } + + + if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) + return -ENXIO; + + + if (!csd) { + csd = &csd_stack; + if (!wait) + csd = &__get_cpu_var(csd_data); + } + + csd_lock(csd); + + csd->func = func; + csd->info = info; + + if (wait) + csd->flags |= CSD_FLAG_WAIT; /* - * It's ok to use list_for_each_rcu() here even though we may delete - * 'pos', since list_del_rcu() doesn't clear ->next + * The list addition should be visible before sending the IPI + * handler locks the list to pull the entry off it because of + * normal cache coherency rules implied by spinlocks. + * + * If IPIs can go out of order to the cache coherency protocol + * in an architecture, sufficient synchronisation should be added + * to arch code to make it appear to obey cache coherency WRT + * locking and barrier primitives. Generic code isn't really + * equipped to do the right thing... */ - rcu_read_lock(); - list_for_each_entry_rcu(data, &call_function_queue, csd.list) { - int refs; - - if (!cpu_isset(cpu, data->cpumask)) - continue; - - data->csd.func(data->csd.info); - - spin_lock(&data->lock); - cpu_clear(cpu, data->cpumask); - WARN_ON(data->refs == 0); - data->refs--; - refs = data->refs; - spin_unlock(&data->lock); - - if (refs) - continue; - - spin_lock(&call_function_lock); - list_del_rcu(&data->csd.list); - spin_unlock(&call_function_lock); - - if (data->csd.flags & CSD_FLAG_WAIT) { - /* - * serialize stores to data with the flag clear - * and wakeup - */ - smp_wmb(); - data->csd.flags &= ~CSD_FLAG_WAIT; - } - if (data->csd.flags & CSD_FLAG_ALLOC) - call_rcu(&data->rcu_head, rcu_free_call_data); - } - rcu_read_unlock(); + if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) + arch_send_call_function_single_ipi(cpu); - put_cpu(); + if (wait) + csd_lock_wait(csd); + + return 0; } -/* - * Invoked by arch to handle an IPI for call function single. Must be called - * from the arch with interrupts disabled. +/** + * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks + * + * Invoked by arch to handle an IPI for call function single. + * Must be called with interrupts disabled. */ void generic_smp_call_function_single_interrupt(void) { - struct call_single_queue *q = &__get_cpu_var(call_single_queue); - LIST_HEAD(list); + flush_smp_call_function_queue(true); +} + +/** + * flush_smp_call_function_queue - Flush pending smp-call-function callbacks + * + * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an + * offline CPU. Skip this check if set to 'false'. + * + * Flush any pending smp-call-function callbacks queued on this CPU. This is + * invoked by the generic IPI handler, as well as by a CPU about to go offline, + * to ensure that all pending IPI callbacks are run before it goes completely + * offline. + * + * Loop through the call_single_queue and run all the queued callbacks. + * Must be called with interrupts disabled. 
+ */ +static void flush_smp_call_function_queue(bool warn_cpu_offline) +{ + struct llist_head *head; + struct llist_node *entry; + struct call_single_data *csd, *csd_next; + static bool warned; + + WARN_ON(!irqs_disabled()); + + head = &__get_cpu_var(call_single_queue); + entry = llist_del_all(head); + entry = llist_reverse_order(entry); + + /* There shouldn't be any pending callbacks on an offline CPU. */ + if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && + !warned && !llist_empty(head))) { + warned = true; + WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); - /* - * Need to see other stores to list head for checking whether - * list is empty without holding q->lock - */ - smp_mb(); - while (!list_empty(&q->list)) { - unsigned int data_flags; - - spin_lock(&q->lock); - list_replace_init(&q->list, &list); - spin_unlock(&q->lock); - - while (!list_empty(&list)) { - struct call_single_data *data; - - data = list_entry(list.next, struct call_single_data, - list); - list_del(&data->list); - - /* - * 'data' can be invalid after this call if - * flags == 0 (when called through - * generic_exec_single(), so save them away before - * making the call. - */ - data_flags = data->flags; - - data->func(data->info); - - if (data_flags & CSD_FLAG_WAIT) { - smp_wmb(); - data->flags &= ~CSD_FLAG_WAIT; - } else if (data_flags & CSD_FLAG_ALLOC) - kfree(data); - } /* - * See comment on outer loop + * We don't have to use the _safe() variant here + * because we are not invoking the IPI handlers yet. */ - smp_mb(); + llist_for_each_entry(csd, entry, llist) + pr_warn("IPI callback %pS sent to offline CPU\n", + csd->func); + } + + llist_for_each_entry_safe(csd, csd_next, entry, llist) { + csd->func(csd->info); + csd_unlock(csd); } } @@ -201,231 +259,434 @@ void generic_smp_call_function_single_interrupt(void) * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed on other CPUs. * - * Returns 0 on success, else a negative status code. Note that @wait - * will be implicitly turned on in case of allocation failures, since - * we fall back to on-stack allocation. + * Returns 0 on success, else a negative status code. */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, +int smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait) { - struct call_single_data d; - unsigned long flags; - /* prevent preemption and reschedule on another processor, - as well as CPU removal */ - int me = get_cpu(); - int err = 0; + int this_cpu; + int err; - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - if (cpu == me) { - local_irq_save(flags); - func(info); - local_irq_restore(flags); - } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { - struct call_single_data *data = NULL; + /* + * prevent preemption and reschedule on another processor, + * as well as CPU removal + */ + this_cpu = get_cpu(); - if (!wait) { - data = kmalloc(sizeof(*data), GFP_ATOMIC); - if (data) - data->flags = CSD_FLAG_ALLOC; - } - if (!data) { - data = &d; - data->flags = CSD_FLAG_WAIT; - } + /* + * Can deadlock when called with interrupts disabled. + * We allow cpu's that are not yet online though, as no one else can + * send smp call function interrupt to this cpu and as such deadlocks + * can't happen. 
+ */ + WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() + && !oops_in_progress); - data->func = func; - data->info = info; - generic_exec_single(cpu, data); - } else { - err = -ENXIO; /* CPU not online */ - } + err = generic_exec_single(cpu, NULL, func, info, wait); put_cpu(); + return err; } EXPORT_SYMBOL(smp_call_function_single); /** - * __smp_call_function_single(): Run a function on another CPU + * smp_call_function_single_async(): Run an asynchronous function on a + * specific CPU. * @cpu: The CPU to run on. - * @data: Pre-allocated and setup data structure + * @csd: Pre-allocated and setup data structure * - * Like smp_call_function_single(), but allow caller to pass in a pre-allocated - * data structure. Useful for embedding @data inside other structures, for - * instance. + * Like smp_call_function_single(), but the call is asynchonous and + * can thus be done from contexts with disabled interrupts. * + * The caller passes his own pre-allocated data structure + * (ie: embedded in an object) and is responsible for synchronizing it + * such that the IPIs performed on the @csd are strictly serialized. + * + * NOTE: Be careful, there is unfortunately no current debugging facility to + * validate the correctness of this serialization. */ -void __smp_call_function_single(int cpu, struct call_single_data *data) +int smp_call_function_single_async(int cpu, struct call_single_data *csd) { - /* Can deadlock when called with interrupts disabled */ - WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled()); + int err = 0; - generic_exec_single(cpu, data); -} + preempt_disable(); + err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); + preempt_enable(); -/* Dummy function */ -static void quiesce_dummy(void *unused) -{ + return err; } +EXPORT_SYMBOL_GPL(smp_call_function_single_async); /* - * Ensure stack based data used in call function mask is safe to free. - * - * This is needed by smp_call_function_mask when using on-stack data, because - * a single call function queue is shared by all CPUs, and any CPU may pick up - * the data item on the queue at any time before it is deleted. So we need to - * ensure that all CPUs have transitioned through a quiescent state after - * this call. + * smp_call_function_any - Run a function on any of the given cpus + * @mask: The mask of cpus it can run on. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait until function has completed. * - * This is a very slow function, implemented by sending synchronous IPIs to - * all possible CPUs. For this reason, we have to alloc data rather than use - * stack based data even in the case of synchronous calls. The stack based - * data is then just used for deadlock/oom fallback which will be very rare. + * Returns 0 on success, else a negative status code (if no cpus were online). * - * If a faster scheme can be made, we could go back to preferring stack based - * data -- the data allocation/free is non-zero cost. 
+ * Selection preference: + * 1) current cpu if in @mask + * 2) any cpu of current node if in @mask + * 3) any other online cpu in @mask */ -static void smp_call_function_mask_quiesce_stack(cpumask_t mask) +int smp_call_function_any(const struct cpumask *mask, + smp_call_func_t func, void *info, int wait) { - struct call_single_data data; - int cpu; - - data.func = quiesce_dummy; - data.info = NULL; + unsigned int cpu; + const struct cpumask *nodemask; + int ret; - for_each_cpu_mask(cpu, mask) { - data.flags = CSD_FLAG_WAIT; - generic_exec_single(cpu, &data); + /* Try for same CPU (cheapest) */ + cpu = get_cpu(); + if (cpumask_test_cpu(cpu, mask)) + goto call; + + /* Try for same node. */ + nodemask = cpumask_of_node(cpu_to_node(cpu)); + for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; + cpu = cpumask_next_and(cpu, nodemask, mask)) { + if (cpu_online(cpu)) + goto call; } + + /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ + cpu = cpumask_any_and(mask, cpu_online_mask); +call: + ret = smp_call_function_single(cpu, func, info, wait); + put_cpu(); + return ret; } +EXPORT_SYMBOL_GPL(smp_call_function_any); /** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * @mask: The set of cpus to run on. + * smp_call_function_many(): Run a function on a set of other CPUs. + * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. + * @wait: If true, wait (atomically) until function has completed + * on other CPUs. * - * If @wait is true, then returns once @func has returned. Note that @wait - * will be implicitly turned on in case of allocation failures, since - * we fall back to on-stack allocation. + * If @wait is true, then returns once @func has returned. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. Preemption * must be disabled when calling this function. */ -int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, - int wait) +void smp_call_function_many(const struct cpumask *mask, + smp_call_func_t func, void *info, bool wait) { - struct call_function_data d; - struct call_function_data *data = NULL; - cpumask_t allbutself; - unsigned long flags; - int cpu, num_cpus; - int slowpath = 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - cpu = smp_processor_id(); - allbutself = cpu_online_map; - cpu_clear(cpu, allbutself); - cpus_and(mask, mask, allbutself); - num_cpus = cpus_weight(mask); + struct call_function_data *cfd; + int cpu, next_cpu, this_cpu = smp_processor_id(); /* - * If zero CPUs, return. If just a single CPU, turn this request - * into a targetted single call instead since it's faster. + * Can deadlock when called with interrupts disabled. + * We allow cpu's that are not yet online though, as no one else can + * send smp call function interrupt to this cpu and as such deadlocks + * can't happen. */ - if (!num_cpus) - return 0; - else if (num_cpus == 1) { - cpu = first_cpu(mask); - return smp_call_function_single(cpu, func, info, wait); + WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() + && !oops_in_progress && !early_boot_irqs_disabled); + + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. 
*/ + cpu = cpumask_first_and(mask, cpu_online_mask); + if (cpu == this_cpu) + cpu = cpumask_next_and(cpu, mask, cpu_online_mask); + + /* No online cpus? We're done. */ + if (cpu >= nr_cpu_ids) + return; + + /* Do we have another CPU which isn't us? */ + next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); + if (next_cpu == this_cpu) + next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); + + /* Fastpath: do that cpu by itself. */ + if (next_cpu >= nr_cpu_ids) { + smp_call_function_single(cpu, func, info, wait); + return; } - data = kmalloc(sizeof(*data), GFP_ATOMIC); - if (data) { - data->csd.flags = CSD_FLAG_ALLOC; - if (wait) - data->csd.flags |= CSD_FLAG_WAIT; - } else { - data = &d; - data->csd.flags = CSD_FLAG_WAIT; - wait = 1; - slowpath = 1; - } + cfd = &__get_cpu_var(cfd_data); + + cpumask_and(cfd->cpumask, mask, cpu_online_mask); + cpumask_clear_cpu(this_cpu, cfd->cpumask); - spin_lock_init(&data->lock); - data->csd.func = func; - data->csd.info = info; - data->refs = num_cpus; - data->cpumask = mask; + /* Some callers race with other cpus changing the passed mask */ + if (unlikely(!cpumask_weight(cfd->cpumask))) + return; - spin_lock_irqsave(&call_function_lock, flags); - list_add_tail_rcu(&data->csd.list, &call_function_queue); - spin_unlock_irqrestore(&call_function_lock, flags); + for_each_cpu(cpu, cfd->cpumask) { + struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); + + csd_lock(csd); + csd->func = func; + csd->info = info; + llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); + } /* Send a message to all CPUs in the map */ - arch_send_call_function_ipi(mask); + arch_send_call_function_ipi_mask(cfd->cpumask); - /* optionally wait for the CPUs to complete */ if (wait) { - csd_flag_wait(&data->csd); - if (unlikely(slowpath)) - smp_call_function_mask_quiesce_stack(mask); - } + for_each_cpu(cpu, cfd->cpumask) { + struct call_single_data *csd; - return 0; + csd = per_cpu_ptr(cfd->csd, cpu); + csd_lock_wait(csd); + } + } } -EXPORT_SYMBOL(smp_call_function_mask); +EXPORT_SYMBOL(smp_call_function_many); /** * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. + * @wait: If true, wait (atomically) until function has completed + * on other CPUs. * - * Returns 0 on success, else a negative status code. + * Returns 0. * * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. In case of allocation - * failure, @wait will be implicitly turned on. + * it returns just before the target cpu calls @func. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ -int smp_call_function(void (*func)(void *), void *info, int wait) +int smp_call_function(smp_call_func_t func, void *info, int wait) { - int ret; + preempt_disable(); + smp_call_function_many(cpu_online_mask, func, info, wait); + preempt_enable(); + + return 0; +} +EXPORT_SYMBOL(smp_call_function); + +/* Setup configured maximum number of CPUs to activate */ +unsigned int setup_max_cpus = NR_CPUS; +EXPORT_SYMBOL(setup_max_cpus); + + +/* + * Setup routine for controlling SMP activation + * + * Command-line option of "nosmp" or "maxcpus=0" will disable SMP + * activation entirely (the MPS table probe still happens, though). 
+ * + * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer + * greater than 0, limits the maximum number of CPUs activated in + * SMP mode to <NUM>. + */ + +void __weak arch_disable_smp_support(void) { } + +static int __init nosmp(char *str) +{ + setup_max_cpus = 0; + arch_disable_smp_support(); + + return 0; +} + +early_param("nosmp", nosmp); + +/* this is hard limit */ +static int __init nrcpus(char *str) +{ + int nr_cpus; + + get_option(&str, &nr_cpus); + if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) + nr_cpu_ids = nr_cpus; + + return 0; +} + +early_param("nr_cpus", nrcpus); + +static int __init maxcpus(char *str) +{ + get_option(&str, &setup_max_cpus); + if (setup_max_cpus == 0) + arch_disable_smp_support(); + + return 0; +} + +early_param("maxcpus", maxcpus); + +/* Setup number of possible processor ids */ +int nr_cpu_ids __read_mostly = NR_CPUS; +EXPORT_SYMBOL(nr_cpu_ids); + +/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ +void __init setup_nr_cpu_ids(void) +{ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; +} + +void __weak smp_announce(void) +{ + printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus()); +} + +/* Called by boot processor to activate the rest. */ +void __init smp_init(void) +{ + unsigned int cpu; + + idle_threads_init(); + + /* FIXME: This should be done in userspace --RR */ + for_each_present_cpu(cpu) { + if (num_online_cpus() >= setup_max_cpus) + break; + if (!cpu_online(cpu)) + cpu_up(cpu); + } + + /* Any cleanup work */ + smp_announce(); + smp_cpus_done(setup_max_cpus); +} + +/* + * Call a function on all processors. May be used during early boot while + * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead + * of local_irq_disable/enable(). + */ +int on_each_cpu(void (*func) (void *info), void *info, int wait) +{ + unsigned long flags; + int ret = 0; preempt_disable(); - ret = smp_call_function_mask(cpu_online_map, func, info, wait); + ret = smp_call_function(func, info, wait); + local_irq_save(flags); + func(info); + local_irq_restore(flags); preempt_enable(); return ret; } -EXPORT_SYMBOL(smp_call_function); +EXPORT_SYMBOL(on_each_cpu); -void ipi_call_lock(void) +/** + * on_each_cpu_mask(): Run a function on processors specified by + * cpumask, which may include the local processor. + * @mask: The set of cpus to run on (only runs on online subset). + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed + * on other CPUs. + * + * If @wait is true, then returns once @func has returned. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. The + * exception is that it may be used during early boot while + * early_boot_irqs_disabled is set. + */ +void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, + void *info, bool wait) { - spin_lock(&call_function_lock); + int cpu = get_cpu(); + + smp_call_function_many(mask, func, info, wait); + if (cpumask_test_cpu(cpu, mask)) { + unsigned long flags; + local_irq_save(flags); + func(info); + local_irq_restore(flags); + } + put_cpu(); } +EXPORT_SYMBOL(on_each_cpu_mask); -void ipi_call_unlock(void) +/* + * on_each_cpu_cond(): Call a function on each processor for which + * the supplied function cond_func returns true, optionally waiting + * for all the required CPUs to finish. 
This may include the local + * processor. + * @cond_func: A callback function that is passed a cpu id and + * the the info parameter. The function is called + * with preemption disabled. The function should + * return a blooean value indicating whether to IPI + * the specified CPU. + * @func: The function to run on all applicable CPUs. + * This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to both functions. + * @wait: If true, wait (atomically) until function has + * completed on other CPUs. + * @gfp_flags: GFP flags to use when allocating the cpumask + * used internally by the function. + * + * The function might sleep if the GFP flags indicates a non + * atomic allocation is allowed. + * + * Preemption is disabled to protect against CPUs going offline but not online. + * CPUs going online during the call will not be seen or sent an IPI. + * + * You must not call this function with disabled interrupts or + * from a hardware interrupt handler or from a bottom half handler. + */ +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags) { - spin_unlock(&call_function_lock); + cpumask_var_t cpus; + int cpu, ret; + + might_sleep_if(gfp_flags & __GFP_WAIT); + + if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) { + preempt_disable(); + for_each_online_cpu(cpu) + if (cond_func(cpu, info)) + cpumask_set_cpu(cpu, cpus); + on_each_cpu_mask(cpus, func, info, wait); + preempt_enable(); + free_cpumask_var(cpus); + } else { + /* + * No free cpumask, bother. No matter, we'll + * just have to IPI them one by one. + */ + preempt_disable(); + for_each_online_cpu(cpu) + if (cond_func(cpu, info)) { + ret = smp_call_function_single(cpu, func, + info, wait); + WARN_ON_ONCE(!ret); + } + preempt_enable(); + } } +EXPORT_SYMBOL(on_each_cpu_cond); -void ipi_call_lock_irq(void) +static void do_nothing(void *unused) { - spin_lock_irq(&call_function_lock); } -void ipi_call_unlock_irq(void) +/** + * kick_all_cpus_sync - Force all cpus out of idle + * + * Used to synchronize the update of pm_idle function pointer. It's + * called after the pointer is updated and returns after the dummy + * callback function has been executed on all cpus. The execution of + * the function can only happen on the remote cpus after they have + * left the idle function which had been called via pm_idle function + * pointer. So it's guaranteed that nothing uses the previous pointer + * anymore. + */ +void kick_all_cpus_sync(void) { - spin_unlock_irq(&call_function_lock); + /* Make sure the change is visible before we kick the cpus */ + smp_mb(); + smp_call_function(do_nothing, NULL, 1); } +EXPORT_SYMBOL_GPL(kick_all_cpus_sync); |
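
The kernel-doc for smp_call_function_single() in the diff above spells out its contract: the callback runs in hardirq context on the target CPU, must be fast and non-blocking, and with @wait set the call returns only after the callback has finished. The fragment below is a minimal, hypothetical module-style sketch of a caller against that interface; `query_info`, `read_remote_jiffies` and the choice of CPU 1 are illustrative, not part of kernel/smp.c.

```c
/*
 * Hypothetical module-style caller: run a fast callback on one specific
 * CPU and wait for it. query_info, read_remote_jiffies and the target
 * CPU number are made up for the example.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

struct query_info {
	unsigned long when;		/* filled in by the remote CPU */
};

/* Runs in hardirq context on the target CPU: must be fast, no sleeping. */
static void read_remote_jiffies(void *info)
{
	struct query_info *q = info;

	q->when = jiffies;
}

static int __init single_demo_init(void)
{
	struct query_info q = { 0 };
	int err;

	/* wait=1: return only after the callback has run on CPU 1 */
	err = smp_call_function_single(1, read_remote_jiffies, &q, 1);
	if (err)
		pr_info("CPU 1 not usable: %d\n", err);
	else
		pr_info("CPU 1 saw jiffies=%lu\n", q.when);

	return 0;
}

static void __exit single_demo_exit(void)
{
}

module_init(single_demo_init);
module_exit(single_demo_exit);
MODULE_LICENSE("GPL");
```

Because the reworked generic_exec_single() short-circuits the cpu == smp_processor_id() case, the same call is also safe when the requested CPU happens to be the current one.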
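
smp_call_function_single_async() shifts the synchronization burden to the caller: the call_single_data is caller-owned and, as the comment above warns, IPIs on it must be strictly serialized with no debugging facility to catch mistakes. Below is one hedged sketch of how a caller might serialize reuse; the `poke_state` wrapper and its busy flag are invented for the example and are not an interface provided by smp.c.

```c
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/errno.h>

struct poke_state {
	struct call_single_data csd;
	atomic_t busy;			/* 1 while an IPI is in flight */
};

/* static => zero-initialized, so csd.flags starts out unlocked */
static struct poke_state demo_poke;

/* Runs in hardirq context on the target CPU. */
static void poke_done(void *info)
{
	struct poke_state *ps = info;

	/* ... do the remote work here ... */
	atomic_set(&ps->busy, 0);	/* caller-side "csd may be reused" signal */
}

static int poke_cpu(int cpu)
{
	struct poke_state *ps = &demo_poke;

	/* the caller, not smp.c, must serialize reuse of the csd */
	if (atomic_cmpxchg(&ps->busy, 0, 1) != 0)
		return -EBUSY;

	ps->csd.func = poke_done;
	ps->csd.info = ps;
	return smp_call_function_single_async(cpu, &ps->csd);
}
```

The busy flag here is only a simplified stand-in for whatever completion tracking the embedding object really uses.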
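
smp_call_function_many() and the on_each_cpu_mask() wrapper documented above take a cpumask; on_each_cpu_mask() additionally runs the callback on the local CPU when it is in the mask. A hypothetical sketch of a caller (the per-CPU `demo_hits` counter and the `bump_node_cpus()` helper are made up):

```c
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);

/* Runs in hardirq context on each targeted remote CPU. */
static void bump_counter(void *info)
{
	this_cpu_inc(demo_hits);
}

static void bump_node_cpus(int node)
{
	/* wait=true: return only after every targeted online CPU ran it */
	on_each_cpu_mask(cpumask_of_node(node), bump_counter, NULL, true);
}
```

on_each_cpu(bump_counter, NULL, 1) would do the same for every online CPU, including the caller.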
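
on_each_cpu_cond() adds a per-CPU predicate so that only CPUs that actually need the work are sent an IPI, and with a sleeping GFP mask it may sleep while allocating its internal cpumask. Another hedged sketch, again with invented per-CPU state (`demo_dirty`):

```c
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(bool, demo_dirty);	/* invented per-CPU state */

/* Called with preemption disabled on the local CPU, once per online CPU. */
static bool cpu_is_dirty(int cpu, void *info)
{
	return per_cpu(demo_dirty, cpu);
}

/* Runs in hardirq context on each CPU that cpu_is_dirty() selected. */
static void clean_cpu(void *info)
{
	this_cpu_write(demo_dirty, false);
	/* ... flush whatever made this CPU dirty ... */
}

static void clean_dirty_cpus(void)
{
	/*
	 * GFP_KERNEL: the internal cpumask allocation may sleep, so this
	 * must be called from process context with interrupts enabled.
	 */
	on_each_cpu_cond(cpu_is_dirty, clean_cpu, NULL, true, GFP_KERNEL);
}
```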
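
Finally, kick_all_cpus_sync() is documented above as a way to guarantee that no CPU is still executing through a function pointer that is about to be retired (its original user being the pm_idle pointer update). A sketch of that usage pattern, with a hypothetical `demo_idle_hook`:

```c
#include <linux/smp.h>

static void (*demo_idle_hook)(void);	/* hypothetical hook pointer */

static void replace_idle_hook(void (*new_hook)(void))
{
	demo_idle_hook = new_hook;
	/*
	 * kick_all_cpus_sync() issues a full barrier and a dummy IPI to
	 * every CPU, so once it returns no CPU can still be running
	 * code reached through the old pointer.
	 */
	kick_all_cpus_sync();
	/* the old hook's code/module may now be released safely */
}
```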
