Diffstat (limited to 'arch/tile/kernel/smp.c')
-rw-r--r--  arch/tile/kernel/smp.c  90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 9575b37a8b7..01e8ab29f43 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -20,8 +20,13 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <asm/cacheflush.h>
+#include <asm/homecache.h>
 
-HV_Topology smp_topology __write_once;
+/*
+ * We write to width and height with a single store in head_NN.S,
+ * so make the variable aligned to "long".
+ */
+HV_Topology smp_topology __write_once __aligned(sizeof(long));
 EXPORT_SYMBOL(smp_topology);
 
 #if CHIP_HAS_IPI()
@@ -36,6 +41,22 @@ static unsigned long __iomem *ipi_mappings[NR_CPUS];
 /* Set by smp_send_stop() to avoid recursive panics. */
 static int stopping_cpus;
 
+static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
+{
+	int sent = 0;
+	while (sent < nrecip) {
+		int rc = hv_send_message(recip, nrecip,
+					 (HV_VirtAddr)&tag, sizeof(tag));
+		if (rc < 0) {
+			if (!stopping_cpus)  /* avoid recursive panic */
+				panic("hv_send_message returned %d", rc);
+			break;
+		}
+		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
+		sent += rc;
+	}
+}
+
 void send_IPI_single(int cpu, int tag)
 {
 	HV_Recipient recip = {
@@ -43,14 +64,13 @@ void send_IPI_single(int cpu, int tag)
 		.x = cpu % smp_width,
 		.state = HV_TO_BE_SENT
 	};
-	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
-	BUG_ON(rc <= 0);
+	__send_IPI_many(&recip, 1, tag);
 }
 
 void send_IPI_many(const struct cpumask *mask, int tag)
 {
 	HV_Recipient recip[NR_CPUS];
-	int cpu, sent;
+	int cpu;
 	int nrecip = 0;
 	int my_cpu = smp_processor_id();
 	for_each_cpu(cpu, mask) {
@@ -61,17 +81,7 @@ void send_IPI_many(const struct cpumask *mask, int tag)
 		r->x = cpu % smp_width;
 		r->state = HV_TO_BE_SENT;
 	}
-	sent = 0;
-	while (sent < nrecip) {
-		int rc = hv_send_message(recip, nrecip,
-					 (HV_VirtAddr)&tag, sizeof(tag));
-		if (rc <= 0) {
-			if (!stopping_cpus)  /* avoid recursive panic */
-				panic("hv_send_message returned %d", rc);
-			break;
-		}
-		sent += rc;
-	}
+	__send_IPI_many(recip, nrecip, tag);
 }
 
 void send_IPI_allbutself(int tag)
@@ -82,25 +92,6 @@ void send_IPI_allbutself(int tag)
 	send_IPI_many(&mask, tag);
 }
 
-
-/*
- * Provide smp_call_function_mask, but also run function locally
- * if specified in the mask.
- */
-void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
-		      void *info, bool wait)
-{
-	int cpu = get_cpu();
-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-	put_cpu();
-}
-
 /*
  * Functions related to starting/stopping cpus.
  */
@@ -114,10 +105,10 @@ static void smp_start_cpu_interrupt(void)
 /* Handler to stop the current cpu. */
 static void smp_stop_cpu_interrupt(void)
 {
-	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
+	set_cpu_online(smp_processor_id(), 0);
 	for (;;)
-		asm("nap");
+		asm("nap; nop");
 }
 
 /* This function calls the 'stop' function on all other CPUs in the system. */
@@ -127,6 +118,12 @@ void smp_send_stop(void)
 	send_IPI_allbutself(MSG_TAG_STOP_CPU);
 }
 
+/* On panic, just wait; we may get an smp_send_stop() later on. */
+void panic_smp_self_stop(void)
+{
+	while (1)
+		asm("nap; nop");
+}
 
 /*
  * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
@@ -175,21 +172,24 @@ static void ipi_flush_icache_range(void *info)
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 	struct ipi_flush flush = { start, end };
-	preempt_disable();
-	on_each_cpu(ipi_flush_icache_range, &flush, 1);
-	preempt_enable();
+
+	/* If invoked with irqs disabled, we can not issue IPIs. */
+	if (irqs_disabled())
+		flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
+			NULL, NULL, 0);
+	else {
+		preempt_disable();
+		on_each_cpu(ipi_flush_icache_range, &flush, 1);
+		preempt_enable();
+	}
 }
 
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	/*
-	 * Nothing to do here; when we return from interrupt, the
-	 * rescheduling will occur there. But do bump the interrupt
-	 * profiler count in the meantime.
-	 */
 	__get_cpu_var(irq_stat).irq_resched_count++;
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
@@ -215,7 +215,7 @@ void __init ipi_init(void)
 		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		offset = PFN_PHYS(pte_pfn(pte));
 		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
 	}
 #endif
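Note on the __send_IPI_many() consolidation: hv_send_message() may make only
partial progress per call (its return value counts recipients handled), so the
helper retries until the running total covers every recipient; the old
send_IPI_single() instead BUG()ed on any return <= 0. Below is a minimal
user-space sketch of that retry pattern. It is an illustration only, not the
tile code: fake_send_message(), send_to_all(), and struct recipient are
hypothetical stand-ins for hv_send_message(), __send_IPI_many(), and
HV_Recipient.

#include <stdio.h>
#include <stdlib.h>

enum { TO_BE_SENT, SENT };

struct recipient {
	int cpu;
	int state;
};

/* Hypothetical stand-in for hv_send_message(): like the hypervisor
 * call, it may make only partial progress per invocation.  It scans
 * the array, delivers to at most two still-pending recipients, marks
 * them SENT, and returns how many it handled this time. */
static int fake_send_message(struct recipient *recip, int nrecip)
{
	int done = 0;
	for (int i = 0; i < nrecip && done < 2; i++) {
		if (recip[i].state == TO_BE_SENT) {
			printf("delivered tag to cpu %d\n", recip[i].cpu);
			recip[i].state = SENT;
			done++;
		}
	}
	return done;
}

/* Retry loop in the style of __send_IPI_many(): keep calling with the
 * same array until the running total covers every recipient, and treat
 * a negative return as a hard error (the kernel version panics unless
 * it is already stopping cpus). */
static void send_to_all(struct recipient *recip, int nrecip)
{
	int sent = 0;
	while (sent < nrecip) {
		int rc = fake_send_message(recip, nrecip);
		if (rc < 0) {
			fprintf(stderr, "send failed: %d\n", rc);
			exit(1);
		}
		sent += rc;
	}
}

int main(void)
{
	struct recipient recip[5];
	for (int i = 0; i < 5; i++)
		recip[i] = (struct recipient){ .cpu = i, .state = TO_BE_SENT };
	send_to_all(recip, 5);
	return 0;
}

Passing the same array back in works because each recipient's state field
records whether it has already been serviced, which is also how the real
HV_Recipient.state (HV_TO_BE_SENT) is used.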

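The flush_icache_range() change is a context-sensitive dispatch: on_each_cpu()
sends IPIs and waits for the other cpus to service them, which can deadlock if
the caller already has interrupts disabled, so in that case the code falls
back to the hypervisor-assisted flush_remote() evict. A rough user-space
illustration of the shape of that dispatch follows; irqs_off, ipi_flush(), and
hv_flush() are hypothetical stand-ins for irqs_disabled(), the
on_each_cpu(ipi_flush_icache_range, ...) path, and flush_remote().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for irqs_disabled(): true when the "caller" cannot take
 * the IPI-based path. */
static bool irqs_off;

static void ipi_flush(unsigned long start, unsigned long end)
{
	printf("IPI flush of [%#lx, %#lx)\n", start, end);
}

static void hv_flush(void)
{
	printf("hypervisor-assisted L1I evict\n");
}

/* Pick the flush mechanism based on context: the IPI path needs the
 * other cpus to respond, so with interrupts off we must use the
 * mechanism that does not wait on them. */
static void demo_flush_icache_range(unsigned long start, unsigned long end)
{
	if (irqs_off)
		hv_flush();
	else
		ipi_flush(start, end);
}

int main(void)
{
	demo_flush_icache_range(0x1000, 0x2000);
	irqs_off = true;
	demo_flush_icache_range(0x1000, 0x2000);
	return 0;
}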