Diffstat (limited to 'arch/mn10300/kernel/smp.c')
-rw-r--r--	arch/mn10300/kernel/smp.c	177
1 file changed, 105 insertions, 72 deletions
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 0dcd1c686ba..f984193718b 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -24,8 +24,8 @@
 #include <linux/sched.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/bitops.h>
 #include <asm/processor.h>
 #include <asm/bug.h>
@@ -39,7 +39,6 @@
 #include "internal.h"
 
 #ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpu.h>
 #include <asm/cacheflush.h>
 
 static unsigned long sleep_mode[NR_CPUS];
@@ -113,15 +112,17 @@ static void init_ipi(void);
  */
 static void mn10300_ipi_disable(unsigned int irq);
 static void mn10300_ipi_enable(unsigned int irq);
-static void mn10300_ipi_ack(unsigned int irq);
-static void mn10300_ipi_nop(unsigned int irq);
+static void mn10300_ipi_chip_disable(struct irq_data *d);
+static void mn10300_ipi_chip_enable(struct irq_data *d);
+static void mn10300_ipi_ack(struct irq_data *d);
+static void mn10300_ipi_nop(struct irq_data *d);
 
 static struct irq_chip mn10300_ipi_type = {
 	.name		= "cpu_ipi",
-	.disable	= mn10300_ipi_disable,
-	.enable		= mn10300_ipi_enable,
-	.ack		= mn10300_ipi_ack,
-	.eoi		= mn10300_ipi_nop
+	.irq_disable	= mn10300_ipi_chip_disable,
+	.irq_enable	= mn10300_ipi_chip_enable,
+	.irq_ack	= mn10300_ipi_ack,
+	.irq_eoi	= mn10300_ipi_nop
 };
 
 static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
@@ -129,10 +130,12 @@ static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
 
 static struct irqaction reschedule_ipi = {
 	.handler	= smp_reschedule_interrupt,
+	.flags		= IRQF_NOBALANCING,
 	.name		= "smp reschedule IPI"
 };
 
 static struct irqaction call_function_ipi = {
 	.handler	= smp_call_function_interrupt,
+	.flags		= IRQF_NOBALANCING,
 	.name		= "smp call function IPI"
 };
 
@@ -140,7 +143,7 @@ static struct irqaction call_function_ipi = {
 static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
 static struct irqaction local_timer_ipi = {
 	.handler	= smp_ipi_timer_interrupt,
-	.flags		= IRQF_DISABLED,
+	.flags		= IRQF_NOBALANCING,
 	.name		= "smp local timer IPI"
 };
 #endif
@@ -154,15 +157,15 @@ static void init_ipi(void)
 	u16 tmp16;
 
 	/* set up the reschedule IPI */
-	set_irq_chip_and_handler(RESCHEDULE_IPI,
-				 &mn10300_ipi_type, handle_percpu_irq);
+	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
+				 handle_percpu_irq);
 	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
 	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
 	mn10300_ipi_enable(RESCHEDULE_IPI);
 
 	/* set up the call function IPI */
-	set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
-				 &mn10300_ipi_type, handle_percpu_irq);
+	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
+				 handle_percpu_irq);
 	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
 	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
 	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
@@ -170,8 +173,8 @@ static void init_ipi(void)
 	/* set up the local timer IPI */
 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
     defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
-	set_irq_chip_and_handler(LOCAL_TIMER_IPI,
-				 &mn10300_ipi_type, handle_percpu_irq);
+	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
+				 handle_percpu_irq);
 	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
 	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
 	mn10300_ipi_enable(LOCAL_TIMER_IPI);
@@ -179,6 +182,7 @@ static void init_ipi(void)
 
 #ifdef CONFIG_MN10300_CACHE_ENABLED
 	/* set up the cache flush IPI */
+	irq_set_chip(FLUSH_CACHE_IPI, &mn10300_ipi_type);
 	flags = arch_local_cli_save();
 	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
 			mn10300_low_ipi_handler);
@@ -188,6 +192,7 @@ static void init_ipi(void)
 #endif
 
 	/* set up the NMI call function IPI */
+	irq_set_chip(CALL_FUNCTION_NMI_IPI, &mn10300_ipi_type);
 	flags = arch_local_cli_save();
 	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
 	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
@@ -198,6 +203,10 @@ static void init_ipi(void)
 	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
 			mn10300_low_ipi_handler);
 	arch_local_irq_restore(flags);
+
+#ifdef CONFIG_KERNEL_DEBUGGER
+	irq_set_chip(DEBUGGER_NMI_IPI, &mn10300_ipi_type);
+#endif
 }
 
 /**
@@ -236,6 +245,11 @@ static void mn10300_ipi_enable(unsigned int irq)
 	arch_local_irq_restore(flags);
 }
 
+static void mn10300_ipi_chip_enable(struct irq_data *d)
+{
+	mn10300_ipi_enable(d->irq);
+}
+
 /**
  * mn10300_ipi_disable - Disable an IPI
  * @irq: The IPI to be disabled.
@@ -254,6 +268,12 @@ static void mn10300_ipi_disable(unsigned int irq)
 	arch_local_irq_restore(flags);
 }
 
+static void mn10300_ipi_chip_disable(struct irq_data *d)
+{
+	mn10300_ipi_disable(d->irq);
+}
+
+
 /**
  * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
  * @irq: The IPI to be acknowledged.
@@ -261,8 +281,9 @@ static void mn10300_ipi_disable(unsigned int irq)
  * Clear the interrupt detection flag for the IPI on the appropriate interrupt
  * channel in the PIC.
  */
-static void mn10300_ipi_ack(unsigned int irq)
+static void mn10300_ipi_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long flags;
 	u16 tmp;
 
@@ -276,7 +297,7 @@ static void mn10300_ipi_ack(unsigned int irq)
  * mn10300_ipi_nop - Dummy IPI action
  * @irq: The IPI to be acted upon.
  */
-static void mn10300_ipi_nop(unsigned int irq)
+static void mn10300_ipi_nop(struct irq_data *d)
 {
 }
 
@@ -295,7 +316,7 @@ static void send_IPI_mask(const cpumask_t *cpumask, int irq)
 	u16 tmp;
 
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_isset(i, *cpumask)) {
+		if (cpumask_test_cpu(i, cpumask)) {
 			/* send IPI */
 			tmp = CROSS_GxICR(irq, i);
 			CROSS_GxICR(irq, i) =
@@ -328,8 +349,8 @@ void send_IPI_allbutself(int irq)
 {
 	cpumask_t cpumask;
 
-	cpumask = cpu_online_map;
-	cpu_clear(smp_processor_id(), cpumask);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
 	send_IPI_mask(&cpumask, irq);
 }
@@ -379,8 +400,8 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 	data.func = func;
 	data.info = info;
-	data.started = cpu_online_map;
-	cpu_clear(smp_processor_id(), data.started);
+	cpumask_copy(&data.started, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &data.started);
 	data.wait = wait;
 	if (wait)
 		data.finished = data.started;
@@ -396,14 +417,14 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
 		for (cnt = 0;
 		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
-			     !cpus_empty(data.started);
+			     !cpumask_empty(&data.started);
 		     cnt++)
 			mdelay(1);
 
 		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
 			for (cnt = 0;
 			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
-				     !cpus_empty(data.finished);
+				     !cpumask_empty(&data.finished);
 			     cnt++)
 				mdelay(1);
 		}
@@ -414,10 +435,10 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 	} else {
 		/* If timeout value is zero, wait until cpumask has been
 		 * cleared */
-		while (!cpus_empty(data.started))
+		while (!cpumask_empty(&data.started))
 			barrier();
 
 		if (wait)
-			while (!cpus_empty(data.finished))
+			while (!cpumask_empty(&data.finished))
 				barrier();
 	}
@@ -426,6 +447,22 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 }
 
 /**
+ * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
+ *
+ * Send a non-maskable request to all other CPUs in the system, instructing
+ * them to jump into the debugger.  The caller is responsible for checking that
+ * the other CPUs responded to the instruction.
+ *
+ * The caller should make sure that this CPU's debugger IPI is disabled.
+ */
+void smp_jump_to_debugger(void)
+{
+	if (num_online_cpus() > 1)
+		/* Send a message to all other CPUs */
+		send_IPI_allbutself(DEBUGGER_NMI_IPI);
+}
+
+/**
  * stop_this_cpu - Callback to stop a CPU.
  * @unused: Callback context (ignored).
  */
@@ -442,12 +479,12 @@
 #endif	/* CONFIG_GDBSTUB */
 
 	flags = arch_local_cli_save();
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 
 	while (!stopflag)
 		cpu_relax();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	arch_local_irq_restore(flags);
 }
@@ -464,14 +501,11 @@ void smp_send_stop(void)
  * @irq: The interrupt number.
  * @dev_id: The device ID.
  *
- * We need do nothing here, since the scheduling will be effected on our way
- * back through entry.S.
- *
  * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
  */
 static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
 {
-	/* do nothing */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
@@ -502,12 +536,13 @@ void smp_nmi_call_function_interrupt(void)
 	 * execute the function
 	 */
 	smp_mb();
-	cpu_clear(smp_processor_id(), nmi_call_data->started);
+	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
 	(*func)(info);
 
 	if (wait) {
 		smp_mb();
-		cpu_clear(smp_processor_id(), nmi_call_data->finished);
+		cpumask_clear_cpu(smp_processor_id(),
+				  &nmi_call_data->finished);
 	}
 }
@@ -589,7 +624,7 @@ static void __init smp_cpu_init(void)
 /**
  * smp_prepare_cpu_init - Initialise CPU in startup_secondary
  *
- * Set interrupt level 0-6 setting and init ICR of gdbstub.
+ * Set interrupt level 0-6 setting and init ICR of the kernel debugger.
  */
 void smp_prepare_cpu_init(void)
 {
@@ -608,15 +643,15 @@ void smp_prepare_cpu_init(void)
 	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
 		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
 
-#ifdef CONFIG_GDBSTUB
-	/* initialise GDB-stub */
+#ifdef CONFIG_KERNEL_DEBUGGER
+	/* initialise the kernel debugger interrupt */
 	do {
 		unsigned long flags;
 		u16 tmp16;
 
 		flags = arch_local_cli_save();
-		GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
-		tmp16 = GxICR(GDB_NMI_IPI);
+		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+		tmp16 = GxICR(DEBUGGER_NMI_IPI);
 		arch_local_irq_restore(flags);
 	} while (0);
 #endif
@@ -630,7 +665,7 @@ int __init start_secondary(void *unused)
 {
 	smp_cpu_init();
 	smp_callin();
-	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
 		cpu_relax();
 
 	local_flush_tlb();
@@ -640,7 +675,7 @@ int __init start_secondary(void *unused)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 	init_clockevents();
 #endif
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 	return 0;
 }
@@ -753,13 +788,14 @@ static int __init do_boot_cpu(int phy_id)
 
 	if (send_status == 0) {
 		/* Allow AP to start initializing */
-		cpu_set(cpu_id, cpu_callout_map);
+		cpumask_set_cpu(cpu_id, &cpu_callout_map);
 
 		/* Wait for setting cpu_callin_map */
 		timeout = 0;
 		do {
 			udelay(1000);
-			callin_status = cpu_isset(cpu_id, cpu_callin_map);
+			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
 		} while (callin_status == 0 && timeout++ < 5000);
 
 		if (callin_status == 0)
@@ -769,9 +805,9 @@ static int __init do_boot_cpu(int phy_id)
 	}
 
 	if (send_status == GxICR_REQUEST || callin_status == 0) {
-		cpu_clear(cpu_id, cpu_callout_map);
-		cpu_clear(cpu_id, cpu_callin_map);
-		cpu_clear(cpu_id, cpu_initialized);
+		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+		cpumask_clear_cpu(cpu_id, &cpu_initialized);
 		cpucount--;
 		return 1;
 	}
@@ -806,7 +842,7 @@ static void __init smp_callin(void)
 	cpu = smp_processor_id();
 	timeout = jiffies + (2 * HZ);
 
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "CPU#%d already present.\n", cpu);
 		BUG();
 	}
@@ -814,7 +850,7 @@ static void __init smp_callin(void)
 	/* Wait for AP startup 2s total */
 	while (time_before(jiffies, timeout)) {
-		if (cpu_isset(cpu, cpu_callout_map))
+		if (cpumask_test_cpu(cpu, &cpu_callout_map))
 			break;
 		cpu_relax();
 	}
@@ -834,11 +870,11 @@ static void __init smp_callin(void)
 	smp_store_cpu_info(cpu);
 
 	/* Allow the boot processor to continue */
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 }
 
 /**
- * smp_online - Set cpu_online_map
+ * smp_online - Set cpu_online_mask
  */
 static void __init smp_online(void)
 {
@@ -846,10 +882,11 @@
 
 	cpu = smp_processor_id();
 
-	local_irq_enable();
+	notify_cpu_starting(cpu);
 
-	cpu_set(cpu, cpu_online_map);
-	smp_wmb();
+	set_cpu_online(cpu, true);
+
+	local_irq_enable();
 }
 
 /**
@@ -865,13 +902,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 /*
  * smp_prepare_boot_cpu - Set up stuff for the boot processor.
  *
- * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
  * processor (CPU 0).
  */
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
-	cpu_set(0, cpu_callout_map);
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callout_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 	current_thread_info()->cpu = 0;
 }
@@ -893,27 +930,25 @@ void initialize_secondary(void)
  * __cpu_up - Set smp_commenced_mask for the nominated CPU
  * @cpu: The target CPU.
  */
-int __devinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int timeout;
 
 #ifdef CONFIG_HOTPLUG_CPU
-	if (num_online_cpus() == 1)
-		disable_hlt();
 	if (sleep_mode[cpu])
 		run_wakeup_cpu(cpu);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-	cpu_set(cpu, smp_commenced_mask);
+	cpumask_set_cpu(cpu, &smp_commenced_mask);
 
 	/* Wait 5s total for a response */
 	for (timeout = 0 ; timeout < 5000 ; timeout++) {
-		if (cpu_isset(cpu, cpu_online_map))
+		if (cpu_online(cpu))
 			break;
 		udelay(1000);
 	}
 
-	BUG_ON(!cpu_isset(cpu, cpu_online_map));
+	BUG_ON(!cpu_online(cpu));
 
 	return 0;
 }
@@ -959,16 +994,13 @@ int __cpu_disable(void)
 		return -EBUSY;
 
 	migrate_irqs();
-	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
 	return 0;
 }
 
 void __cpu_die(unsigned int cpu)
 {
 	run_sleep_cpu(cpu);
-
-	if (num_online_cpus() == 1)
-		enable_hlt();
 }
 
 #ifdef CONFIG_MN10300_CACHE_ENABLED
@@ -1064,13 +1096,13 @@ static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
 	do {
 		mn10300_local_dcache_inv_range(start, end);
 		barrier();
-	} while (!cpus_empty(nmi_call_func_mask_data.started));
+	} while (!cpumask_empty(&nmi_call_func_mask_data.started));
 
 	if (wait) {
 		do {
 			mn10300_local_dcache_inv_range(start, end);
 			barrier();
-		} while (!cpus_empty(nmi_call_func_mask_data.finished));
+		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
 	}
 
 	spin_unlock(&smp_nmi_call_lock);
@@ -1081,9 +1113,9 @@ static void restart_wakeup_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 	local_flush_tlb();
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	smp_wmb();
 }
@@ -1114,8 +1146,9 @@ static void sleep_cpu(void *unused)
 static void run_sleep_cpu(unsigned int cpu)
 {
 	unsigned long flags;
-	cpumask_t cpumask = cpumask_of(cpu);
+	cpumask_t cpumask;
 
+	cpumask_copy(&cpumask, &cpumask_of(cpu));
 	flags = arch_local_cli_save();
 	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
 	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
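
The recurring change in this patch is the move from irq_chip callbacks that take a bare IRQ number to callbacks that take a struct irq_data pointer. Below is a minimal, self-contained sketch of that pattern only; it is not taken from the patch, and the example_* names and the IRQ number argument are hypothetical. It uses the generic irq_set_chip_and_handler() and handle_percpu_irq() interfaces that init_ipi() above also uses.

#include <linux/init.h>
#include <linux/irq.h>

/* Legacy-style helper that works on a bare IRQ number. */
static void example_ipi_enable(unsigned int irq)
{
	/* program the interrupt controller for 'irq' here */
}

/* New-style chip method: receives irq_data and extracts the IRQ number. */
static void example_ipi_chip_enable(struct irq_data *d)
{
	example_ipi_enable(d->irq);
}

static struct irq_chip example_ipi_chip = {
	.name		= "example_ipi",
	.irq_enable	= example_ipi_chip_enable,
};

/* Bind the chip and a per-CPU flow handler, as init_ipi() does. */
static void __init example_register_ipi(unsigned int irq)
{
	irq_set_chip_and_handler(irq, &example_ipi_chip, handle_percpu_irq);
}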
