Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--	kernel/stop_machine.c	532
1 files changed, 346 insertions, 186 deletions
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2df820b03be..695f0c6cd16 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -12,14 +12,15 @@
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
-#include <asm/atomic.h>
+#include <linux/smpboot.h>
+#include <linux/atomic.h>
+#include <linux/lglock.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
@@ -37,10 +38,19 @@ struct cpu_stopper {
 	spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
-	struct task_struct	*thread;	/* stopper thread */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
+static bool stop_machine_initialized = false;
+
+/*
+ * Avoids a race between stop_two_cpus and global stop_cpus, where
+ * the stoppers could get queued up in reverse order, leading to
+ * system deadlock. Using an lglock means stop_two_cpus remains
+ * relatively cheap.
+ */
+DEFINE_STATIC_LGLOCK(stop_cpus_lock);
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 {
@@ -61,16 +71,18 @@
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(struct cpu_stopper *stopper,
-				struct cpu_stop_work *work)
+static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+
 	unsigned long flags;
 
 	spin_lock_irqsave(&stopper->lock, flags);
 
 	if (stopper->enabled) {
 		list_add_tail(&work->list, &stopper->works);
-		wake_up_process(stopper->thread);
+		wake_up_process(p);
 	} else
 		cpu_stop_signal_done(work->done, false);
 
@@ -107,16 +119,195 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
 	cpu_stop_init_done(&done, 1);
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+	cpu_stop_queue_work(cpu, &work);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
 
+/* This controls the threads on each CPU. */
+enum multi_stop_state {
+	/* Dummy starting state for thread. */
+	MULTI_STOP_NONE,
+	/* Awaiting everyone to be scheduled. */
+	MULTI_STOP_PREPARE,
+	/* Disable interrupts. */
+	MULTI_STOP_DISABLE_IRQ,
+	/* Run the function */
+	MULTI_STOP_RUN,
+	/* Exit */
+	MULTI_STOP_EXIT,
+};
+
+struct multi_stop_data {
+	int			(*fn)(void *);
+	void			*data;
+	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+	unsigned int		num_threads;
+	const struct cpumask	*active_cpus;
+
+	enum multi_stop_state	state;
+	atomic_t		thread_ack;
+};
+
+static void set_state(struct multi_stop_data *msdata,
+		      enum multi_stop_state newstate)
+{
+	/* Reset ack counter. */
+	atomic_set(&msdata->thread_ack, msdata->num_threads);
+	smp_wmb();
+	msdata->state = newstate;
+}
+
+/* Last one to ack a state moves to the next state. */
+static void ack_state(struct multi_stop_data *msdata)
+{
+	if (atomic_dec_and_test(&msdata->thread_ack))
+		set_state(msdata, msdata->state + 1);
+}
+
+/* This is the cpu_stop function which stops the CPU. */
+static int multi_cpu_stop(void *data)
+{
+	struct multi_stop_data *msdata = data;
+	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	int cpu = smp_processor_id(), err = 0;
+	unsigned long flags;
+	bool is_active;
+
+	/*
+	 * When called from stop_machine_from_inactive_cpu(), irq might
+	 * already be disabled.  Save the state and restore it on exit.
+	 */
+	local_save_flags(flags);
+
+	if (!msdata->active_cpus)
+		is_active = cpu == cpumask_first(cpu_online_mask);
+	else
+		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+
+	/* Simple state machine */
+	do {
+		/* Chill out and ensure we re-read multi_stop_state. */
+		cpu_relax();
+		if (msdata->state != curstate) {
+			curstate = msdata->state;
+			switch (curstate) {
+			case MULTI_STOP_DISABLE_IRQ:
+				local_irq_disable();
+				hard_irq_disable();
+				break;
+			case MULTI_STOP_RUN:
+				if (is_active)
+					err = msdata->fn(msdata->data);
+				break;
+			default:
+				break;
+			}
+			ack_state(msdata);
+		}
+	} while (curstate != MULTI_STOP_EXIT);
+
+	local_irq_restore(flags);
+	return err;
+}
+
+struct irq_cpu_stop_queue_work_info {
+	int cpu1;
+	int cpu2;
+	struct cpu_stop_work *work1;
+	struct cpu_stop_work *work2;
+};
+
+/*
+ * This function is always run with irqs and preemption disabled.
+ * This guarantees that both work1 and work2 get queued, before
+ * our local migrate thread gets the chance to preempt us.
+ */
+static void irq_cpu_stop_queue_work(void *arg)
+{
+	struct irq_cpu_stop_queue_work_info *info = arg;
+	cpu_stop_queue_work(info->cpu1, info->work1);
+	cpu_stop_queue_work(info->cpu2, info->work2);
+}
+
+/**
+ * stop_two_cpus - stops two cpus
+ * @cpu1: the cpu to stop
+ * @cpu2: the other cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Stops both the current and specified CPU and runs @fn on one of them.
+ *
+ * returns when both are completed.
+ */
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+	struct cpu_stop_work work1, work2;
+	struct irq_cpu_stop_queue_work_info call_args;
+	struct multi_stop_data msdata;
+
+	preempt_disable();
+	msdata = (struct multi_stop_data){
+		.fn = fn,
+		.data = arg,
+		.num_threads = 2,
+		.active_cpus = cpumask_of(cpu1),
+	};
+
+	work1 = work2 = (struct cpu_stop_work){
+		.fn = multi_cpu_stop,
+		.arg = &msdata,
+		.done = &done
+	};
+
+	call_args = (struct irq_cpu_stop_queue_work_info){
+		.cpu1 = cpu1,
+		.cpu2 = cpu2,
+		.work1 = &work1,
+		.work2 = &work2,
+	};
+
+	cpu_stop_init_done(&done, 2);
+	set_state(&msdata, MULTI_STOP_PREPARE);
+
+	/*
+	 * If we observe both CPUs active we know _cpu_down() cannot yet have
+	 * queued its stop_machine works and therefore ours will get executed
+	 * first. Or its not either one of our CPUs that's getting unplugged,
+	 * in which case we don't care.
+	 *
+	 * This relies on the stopper workqueues to be FIFO.
+	 */
+	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+		preempt_enable();
+		return -ENOENT;
+	}
+
+	lg_local_lock(&stop_cpus_lock);
+	/*
+	 * Queuing needs to be done by the lowest numbered CPU, to ensure
+	 * that works are always queued in the same order on every CPU.
+	 * This prevents deadlocks.
+	 */
+	smp_call_function_single(min(cpu1, cpu2),
+				 &irq_cpu_stop_queue_work,
+				 &call_args, 1);
+	lg_local_unlock(&stop_cpus_lock);
+	preempt_enable();
+
+	wait_for_completion(&done.completion);
+
+	return done.executed ? done.ret : -ENOENT;
+}
+
 /**
  * stop_one_cpu_nowait - stop a cpu but don't wait for completion
  * @cpu: cpu to stop
  * @fn: function to execute
  * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
  *
  * Similar to stop_one_cpu() but doesn't wait for completion.  The
  * caller is responsible for ensuring @work_buf is currently unused
@@ -129,17 +320,18 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			struct cpu_stop_work *work_buf)
 {
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+	cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static void queue_stop_cpus_work(const struct cpumask *cpumask,
+				 cpu_stop_fn_t fn, void *arg,
+				 struct cpu_stop_done *done)
 {
 	struct cpu_stop_work *work;
-	struct cpu_stop_done done;
 	unsigned int cpu;
 
 	/* initialize works and done */
@@ -147,21 +339,27 @@
 		work = &per_cpu(stop_cpus_work, cpu);
 		work->fn = fn;
 		work->arg = arg;
-		work->done = &done;
+		work->done = done;
 	}
-	cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
 	/*
 	 * Disable preemption while queueing to avoid getting
 	 * preempted by a stopper which might wait for other stoppers
 	 * to enter @fn which can lead to deadlock.
	 */
-	preempt_disable();
+	lg_global_lock(&stop_cpus_lock);
 	for_each_cpu(cpu, cpumask)
-		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
-				    &per_cpu(stop_cpus_work, cpu));
-	preempt_enable();
+		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
+	lg_global_unlock(&stop_cpus_lock);
+}
+
+static int __stop_cpus(const struct cpumask *cpumask,
+		       cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+	cpu_stop_init_done(&done, cpumask_weight(cpumask));
+	queue_stop_cpus_work(cpumask, fn, arg, &done);
 
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -235,20 +433,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
 {
-	struct cpu_stopper *stopper = data;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	unsigned long flags;
+	int run;
+
+	spin_lock_irqsave(&stopper->lock, flags);
+	run = !list_empty(&stopper->works);
+	spin_unlock_irqrestore(&stopper->lock, flags);
+	return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
 	int ret;
 
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
-
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		return 0;
-	}
-
 	work = NULL;
 	spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
@@ -264,8 +467,6 @@ repeat:
 		struct cpu_stop_done *done = work->done;
 		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-		__set_current_state(TASK_RUNNING);
-
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
@@ -281,86 +482,55 @@
 					  ksym_buf), arg);
 
 		cpu_stop_signal_done(done, true);
-	} else
-		schedule();
-
-	goto repeat;
+		goto repeat;
+	}
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-					   unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		BUG_ON(stopper->thread || stopper->enabled ||
-		       !list_empty(&stopper->works));
-		p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
-				   cpu);
-		if (IS_ERR(p))
-			return notifier_from_errno(PTR_ERR(p));
-		get_task_struct(p);
-		kthread_bind(p, cpu);
-		sched_set_stop_task(cpu, p);
-		stopper->thread = p;
-		break;
-
-	case CPU_ONLINE:
-		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(stopper->thread);
-		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
-		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-	{
-		struct cpu_stop_work *work;
-
-		sched_set_stop_task(cpu, NULL);
-		/* kill the stopper */
-		kthread_stop(stopper->thread);
-		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
-		list_for_each_entry(work, &stopper->works, list)
-			cpu_stop_signal_done(work->done, false);
-		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
-		/* release the stopper */
-		put_task_struct(stopper->thread);
-		stopper->thread = NULL;
-		break;
-	}
-#endif
-	}
+	struct cpu_stop_work *work;
+	unsigned long flags;
 
-	return NOTIFY_OK;
+	/* drain remaining works */
+	spin_lock_irqsave(&stopper->lock, flags);
+	list_for_each_entry(work, &stopper->works, list)
+		cpu_stop_signal_done(work->done, false);
+	stopper->enabled = false;
+	spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers.  It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-	.notifier_call	= cpu_stop_cpu_callback,
-	.priority	= 10,
+static void cpu_stop_unpark(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+	spin_lock_irq(&stopper->lock);
+	stopper->enabled = true;
+	spin_unlock_irq(&stopper->lock);
+}
+
+static struct smp_hotplug_thread cpu_stop_threads = {
+	.store			= &cpu_stopper_task,
+	.thread_should_run	= cpu_stop_should_run,
+	.thread_fn		= cpu_stopper_thread,
+	.thread_comm		= "migration/%u",
+	.create			= cpu_stop_create,
+	.setup			= cpu_stop_unpark,
+	.park			= cpu_stop_park,
+	.pre_unpark		= cpu_stop_unpark,
+	.selfparking		= true,
 };
 
 static int __init cpu_stop_init(void)
 {
-	void *bcpu = (void *)(long)smp_processor_id();
 	unsigned int cpu;
-	int err;
 
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -369,108 +539,45 @@ static int __init cpu_stop_init(void)
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
-	/* start one for the boot cpu */
-	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-				    bcpu);
-	BUG_ON(err != NOTIFY_OK);
-	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-	register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+	stop_machine_initialized = true;
 	return 0;
 }
 early_initcall(cpu_stop_init);
 
 #ifdef CONFIG_STOP_MACHINE
 
-/* This controls the threads on each CPU. */
-enum stopmachine_state {
-	/* Dummy starting state for thread. */
-	STOPMACHINE_NONE,
-	/* Awaiting everyone to be scheduled. */
-	STOPMACHINE_PREPARE,
-	/* Disable interrupts. */
-	STOPMACHINE_DISABLE_IRQ,
-	/* Run the function */
-	STOPMACHINE_RUN,
-	/* Exit */
-	STOPMACHINE_EXIT,
-};
-
-struct stop_machine_data {
-	int			(*fn)(void *);
-	void			*data;
-	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
-	unsigned int		num_threads;
-	const struct cpumask	*active_cpus;
-
-	enum stopmachine_state	state;
-	atomic_t		thread_ack;
-};
-
-static void set_state(struct stop_machine_data *smdata,
-		      enum stopmachine_state newstate)
-{
-	/* Reset ack counter. */
-	atomic_set(&smdata->thread_ack, smdata->num_threads);
-	smp_wmb();
-	smdata->state = newstate;
-}
-
-/* Last one to ack a state moves to the next state. */
-static void ack_state(struct stop_machine_data *smdata)
-{
-	if (atomic_dec_and_test(&smdata->thread_ack))
-		set_state(smdata, smdata->state + 1);
-}
-
-/* This is the cpu_stop function which stops the CPU. */
-static int stop_machine_cpu_stop(void *data)
-{
-	struct stop_machine_data *smdata = data;
-	enum stopmachine_state curstate = STOPMACHINE_NONE;
-	int cpu = smp_processor_id(), err = 0;
-	bool is_active;
-
-	if (!smdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
-
-	/* Simple state machine */
-	do {
-		/* Chill out and ensure we re-read stopmachine_state. */
-		cpu_relax();
-		if (smdata->state != curstate) {
-			curstate = smdata->state;
-			switch (curstate) {
-			case STOPMACHINE_DISABLE_IRQ:
-				local_irq_disable();
-				hard_irq_disable();
-				break;
-			case STOPMACHINE_RUN:
-				if (is_active)
-					err = smdata->fn(smdata->data);
-				break;
-			default:
-				break;
-			}
-			ack_state(smdata);
-		}
-	} while (curstate != STOPMACHINE_EXIT);
-
-	local_irq_enable();
-	return err;
-}
-
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
-	struct stop_machine_data smdata = { .fn = fn, .data = data,
-					    .num_threads = num_online_cpus(),
-					    .active_cpus = cpus };
+	struct multi_stop_data msdata = {
+		.fn = fn,
+		.data = data,
+		.num_threads = num_online_cpus(),
+		.active_cpus = cpus,
+	};
+
+	if (!stop_machine_initialized) {
+		/*
+		 * Handle the case where stop_machine() is called
+		 * early in boot before stop_machine() has been
+		 * initialized.
+		 */
+		unsigned long flags;
+		int ret;
+
+		WARN_ON_ONCE(msdata.num_threads != 1);
+
+		local_irq_save(flags);
+		hard_irq_disable();
+		ret = (*fn)(data);
+		local_irq_restore(flags);
+
+		return ret;
+	}
 
 	/* Set the initial state and stop all online cpus. */
-	set_state(&smdata, STOPMACHINE_PREPARE);
-	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
+	set_state(&msdata, MULTI_STOP_PREPARE);
+	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 }
 
 int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
@@ -485,4 +592,57 @@
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active.  The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive.  Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+				  const struct cpumask *cpus)
+{
+	struct multi_stop_data msdata = { .fn = fn, .data = data,
+					    .active_cpus = cpus };
+	struct cpu_stop_done done;
+	int ret;
+
+	/* Local CPU must be inactive and CPU hotplug in progress. */
+	BUG_ON(cpu_active(raw_smp_processor_id()));
+	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
+
+	/* No proper task established and can't sleep - busy wait for lock. */
+	while (!mutex_trylock(&stop_cpus_mutex))
+		cpu_relax();
+
+	/* Schedule work on other CPUs and execute directly for local CPU */
+	set_state(&msdata, MULTI_STOP_PREPARE);
+	cpu_stop_init_done(&done, num_active_cpus());
+	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
+			     &done);
+	ret = multi_cpu_stop(&msdata);
+
+	/* Busy wait for completion. */
+	while (!completion_done(&done.completion))
+		cpu_relax();
+
+	mutex_unlock(&stop_cpus_mutex);
+	return ret ?: done.ret;
+}
+
 #endif	/* CONFIG_STOP_MACHINE */
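As context for readers of this patch, here is a minimal, hypothetical caller sketch (not part of the commit; update_shared_table(), example_update() and example_pairwise() are made-up names) showing how the interfaces touched above are typically used. stop_machine() rendezvous all online CPUs through the multi_cpu_stop() state machine and runs the callback on one of them while every CPU sits with interrupts disabled, so the callback must not sleep or take sleeping locks; the new stop_two_cpus() does the same for exactly two CPUs, running the callback on cpu1.

/*
 * Hypothetical usage sketch -- not part of the patch above.
 */
#include <linux/stop_machine.h>
#include <linux/errno.h>

/*
 * Invoked via multi_cpu_stop(): runs on one CPU while the other stopped
 * CPUs busy-wait with IRQs off, so it must be short and non-sleeping.
 */
static int update_shared_table(void *arg)
{
	int *new_entries = arg;

	/* ... publish a change that must appear atomic machine-wide ... */
	return *new_entries >= 0 ? 0 : -EINVAL;
}

static int example_update(void)
{
	int entries = 16;

	/* NULL cpumask: the callback runs on the first online CPU. */
	return stop_machine(update_shared_table, &entries, NULL);
}

static int example_pairwise(unsigned int src_cpu, unsigned int dst_cpu)
{
	int entries = 2;

	/* Stops only these two CPUs; the callback runs on src_cpu. */
	return stop_two_cpus(src_cpu, dst_cpu, update_shared_table, &entries);
}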
