Diffstat (limited to 'include/linux/smp.h')
 include/linux/smp.h | 99
 1 file changed, 30 insertions(+), 69 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index dd6f06be3c9..34347f26be9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,16 +11,14 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-
-extern void cpu_idle(void);
+#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- struct list_head list;
+ struct llist_node llist;
smp_call_func_t func;
void *info;
u16 flags;
- u16 priv;
};
/* total number of cpus in this system (may exceed NR_CPUS) */
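The hunk above switches struct call_single_data from a list_head to a lock-free llist_node, drops the unused u16 priv field, and removes the stale cpu_idle() prototype. A minimal sketch of how a caller might set up such a csd after this change (my_ipi_func and my_csd are illustrative names, not part of this patch):

	#include <linux/smp.h>
	#include <linux/printk.h>

	static void my_ipi_func(void *info)	/* runs on the target CPU */
	{
		pr_info("csd callback on CPU %d\n", smp_processor_id());
	}

	/*
	 * A csd suitable for an asynchronous cross call; it is queued via
	 * its llist node and must not be reused while still pending.
	 */
	static struct call_single_data my_csd = {
		.func	= my_ipi_func,
		.info	= NULL,
	};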
@@ -29,6 +27,29 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
+int smp_call_function_single_async(int cpu, struct call_single_data *csd);
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
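This hunk moves the on_each_cpu(), on_each_cpu_mask() and on_each_cpu_cond() prototypes out of the CONFIG_SMP-only region, so SMP and UP builds share one set of declarations, and it adds smp_call_function_single_async(). A hedged usage sketch of the four calls (do_flush, cpu_needs_flush and flush_csd are invented names; CPU 1 is assumed to be online):

	#include <linux/smp.h>
	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static void do_flush(void *info)
	{
		/* Runs on each targeted CPU, typically with IRQs disabled. */
	}

	static bool cpu_needs_flush(int cpu, void *info)
	{
		return true;	/* per-CPU decision, made on the calling CPU */
	}

	static struct call_single_data flush_csd = { .func = do_flush };

	static void flush_everywhere(void)
	{
		/* All CPUs, wait for completion. */
		on_each_cpu(do_flush, NULL, 1);

		/* Only the CPUs in a caller-supplied mask. */
		on_each_cpu_mask(cpu_online_mask, do_flush, NULL, true);

		/*
		 * Only CPUs for which cond_func returns true; gfp_flags is
		 * for the temporary cpumask the implementation may allocate.
		 */
		on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, true,
				 GFP_KERNEL);

		/*
		 * Fire-and-forget cross call to CPU 1; the csd must stay
		 * valid and unused until its callback has run.
		 */
		smp_call_function_single_async(1, &flush_csd);
	}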
@@ -75,9 +96,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
-void __smp_call_function_single(int cpuid, struct call_single_data *data,
- int wait);
-
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
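Here __smp_call_function_single() disappears from the public interface; the mask-based helpers that remain are unchanged. A short sketch of the surviving calls (do_sync and sync_others are illustrative names):

	#include <linux/smp.h>
	#include <linux/cpumask.h>
	#include <linux/preempt.h>

	static void do_sync(void *info)
	{
	}

	static void sync_others(const struct cpumask *mask)
	{
		/*
		 * Run on every CPU in @mask except the calling one;
		 * preemption must be disabled around the call.
		 */
		preempt_disable();
		smp_call_function_many(mask, do_sync, NULL, true);
		preempt_enable();

		/* Run on any single CPU in @mask, preferring the local one. */
		smp_call_function_any(mask, do_sync, NULL, 1);
	}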
@@ -86,34 +104,10 @@ void kick_all_cpus_sync(void);
/*
* Generic and arch helpers
*/
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
-void generic_smp_call_function_interrupt(void);
-#else
-static inline void call_function_init(void) { }
-#endif
-
-/*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
+#define generic_smp_call_function_interrupt \
+ generic_smp_call_function_single_interrupt
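With a single llist-based queue there is no separate "many" interrupt handler any more: generic_smp_call_function_interrupt becomes an alias for generic_smp_call_function_single_interrupt, and the CONFIG_USE_GENERIC_SMP_HELPERS gate around call_function_init() goes away. Roughly how an architecture's IPI entry point ends up using it (arch_handle_call_function_ipi is an invented stand-in for the arch-specific vector handler):

	#include <linux/smp.h>

	/* Invented stand-in for an arch's "call function" IPI handler. */
	static void arch_handle_call_function_ipi(void)
	{
		/*
		 * After this patch this expands to
		 * generic_smp_call_function_single_interrupt(): both names
		 * drain the same per-cpu llist of pending csd entries.
		 */
		generic_smp_call_function_interrupt();
	}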
/*
* Mark the boot cpu "online" so that it can call console drivers in
@@ -139,42 +133,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,wait) \
- ({ \
- local_irq_disable(); \
- func(info); \
- local_irq_enable(); \
- 0; \
- })
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
- do { \
- if (cpumask_test_cpu(0, (mask))) { \
- local_irq_disable(); \
- (func)(info); \
- local_irq_enable(); \
- } \
- } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
- do { \
- void *__info = (info); \
- preempt_disable(); \
- if ((cond_func)(0, __info)) { \
- local_irq_disable(); \
- (func)(__info); \
- local_irq_enable(); \
- } \
- preempt_enable(); \
- } while (0)
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
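The hunk above deletes the open-coded UP macro versions of on_each_cpu(), on_each_cpu_mask() and on_each_cpu_cond(); uniprocessor kernels now use out-of-line implementations shared with the declarations added earlier in this header. Functionally, the UP on_each_cpu() still amounts to something like the sketch below (a paraphrase for illustration, not the actual out-of-line code):

	#include <linux/irqflags.h>
	#include <linux/smp.h>

	/* UP semantics: "every CPU" is just the one CPU, called IRQs-off. */
	static int up_on_each_cpu_equivalent(smp_call_func_t func, void *info)
	{
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}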
@@ -224,6 +182,9 @@ static inline void kick_all_cpus_sync(void) { }
*/
extern void arch_disable_smp_support(void);
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
void smp_setup_processor_id(void);
#endif /* __LINUX_SMP_H */
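The final hunk adds prototypes for arch_enable_nonboot_cpus_begin() and arch_enable_nonboot_cpus_end(), hooks an architecture can override around bringing secondary CPUs back online after suspend. A hedged sketch of what such an override might look like (the bodies are placeholders, not taken from any architecture):

	#include <linux/smp.h>

	/*
	 * Illustrative overrides: invoked by the common resume path before
	 * and after the non-boot CPUs are brought back up.
	 */
	void arch_enable_nonboot_cpus_begin(void)
	{
		/* e.g. put shared hardware state into a bring-up-safe mode */
	}

	void arch_enable_nonboot_cpus_end(void)
	{
		/* e.g. restore normal operation once all CPUs are online */
	}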