Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r--  arch/sparc/kernel/smp_32.c | 311
1 file changed, 139 insertions, 172 deletions
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb7085..7958242d63c 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -20,9 +20,11 @@
 #include <linux/seq_file.h>
 #include <linux/cache.h>
 #include <linux/delay.h>
+#include <linux/profile.h>
+#include <linux/cpu.h>
 
 #include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #include <asm/irq.h>
 #include <asm/page.h>
@@ -32,16 +34,18 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/cpudata.h>
+#include <asm/timer.h>
 #include <asm/leon.h>
 
+#include "kernel.h"
 #include "irq.h"
 
-volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
-unsigned char boot_cpu_id = 0;
-unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
 
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
+const struct sparc32_ipi_ops *sparc32_ipi_ops;
+
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
  * places the current byte at the effective address into dest_reg and
@@ -50,9 +54,10 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
  * instruction which is much better...
  */
 
-void __cpuinit smp_store_cpu_info(int id)
+void smp_store_cpu_info(int id)
 {
 	int cpu_node;
+	int mid;
 
 	cpu_data(id).udelay_val = loops_per_jiffy;
 
@@ -60,16 +65,17 @@ void __cpuinit smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 	cpu_data(id).prom_node = cpu_node;
-	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+	mid = cpu_get_hwmid(cpu_node);
 
-	if (cpu_data(id).mid < 0)
-		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+	if (mid < 0) {
+		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+		mid = 0;
+	}
+	cpu_data(id).mid = mid;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	extern void smp4m_smp_done(void);
-	extern void smp4d_smp_done(void);
 	unsigned long bogosum = 0;
 	int cpu, num = 0;
 
@@ -83,14 +89,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		(bogosum/(5000/HZ))%100);
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
 		smp4m_smp_done();
 		break;
@@ -112,7 +110,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 }
 
 void cpu_panic(void)
@@ -121,165 +119,69 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
 
-struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
+struct linux_prom_registers smp_penguin_ctable = { 0 };
 
 void smp_send_reschedule(int cpu)
 {
-	/* See sparc64 */
+	/*
+	 * CPU model dependent way of implementing IPI generation targeting
+	 * a single CPU. The trap handler needs only to do trap entry/return
+	 * to call schedule.
+	 */
+	sparc32_ipi_ops->resched(cpu);
 }
 
 void smp_send_stop(void)
 {
 }
 
-void smp_flush_cache_all(void)
-{
-	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
-	local_flush_cache_all();
-}
-
-void smp_flush_tlb_all(void)
-{
-	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
-	local_flush_tlb_all();
-}
-
-void smp_flush_cache_mm(struct mm_struct *mm)
-{
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
-			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
-		local_flush_cache_mm(mm);
-	}
-}
-
-void smp_flush_tlb_mm(struct mm_struct *mm)
-{
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask)) {
-			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
-			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-				cpumask_copy(mm_cpumask(mm),
-					     cpumask_of(smp_processor_id()));
-		}
-		local_flush_tlb_mm(mm);
-	}
-}
-
-void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			   unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
-			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
-		local_flush_cache_range(vma, start, end);
-	}
-}
-
-void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			 unsigned long end)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
-			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
-		local_flush_tlb_range(vma, start, end);
-	}
+	/* trigger one IPI single call on one CPU */
+	sparc32_ipi_ops->single(cpu);
 }
 
-void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	struct mm_struct *mm = vma->vm_mm;
-
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
-			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
-		local_flush_cache_page(vma, page);
-	}
-}
+	int cpu;
 
-void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
-			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
-		local_flush_tlb_page(vma, page);
-	}
+	/* trigger IPI mask call on each CPU */
+	for_each_cpu(cpu, mask)
+		sparc32_ipi_ops->mask_one(cpu);
 }
 
-void smp_reschedule_irq(void)
+void smp_resched_interrupt(void)
 {
-	set_need_resched();
+	irq_enter();
+	scheduler_ipi();
+	local_cpu_data().irq_resched_count++;
+	irq_exit();
+	/* re-schedule routine called by interrupt return code. */
}
 
-void smp_flush_page_to_ram(unsigned long page)
+void smp_call_function_single_interrupt(void)
 {
-	/* Current theory is that those who call this are the one's
-	 * who have just dirtied their cache with the pages contents
-	 * in kernel space, therefore we only run this on local cpu.
-	 *
-	 * XXX This experiment failed, research further... -DaveM
-	 */
-#if 1
-	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
-#endif
-	local_flush_page_to_ram(page);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
 }
 
-void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+void smp_call_function_interrupt(void)
 {
-	cpumask_t cpu_mask = *mm_cpumask(mm);
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask))
-		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
-	local_flush_sig_insns(mm, insn_addr);
+	irq_enter();
+	generic_smp_call_function_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
 }
 
-extern unsigned int lvl14_resolution;
-
-/* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-	int i;
-	unsigned long flags;
-
-	/* Prevent level14 ticker IRQ flooding. */
-	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
-		return -EINVAL;
-
-	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_possible_cpu(i) {
-		load_profile_irq(i, lvl14_resolution / multiplier);
-		prof_multiplier(i) = multiplier;
-	}
-	spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-	return 0;
+	return -EINVAL;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	extern void __init smp4m_boot_cpus(void);
-	extern void __init smp4d_boot_cpus(void);
 	int i, cpuid, extra;
 
 	printk("Entering SMP Mode...\n");
@@ -296,14 +198,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpu_id);
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
 		smp4m_boot_cpus();
 		break;
@@ -325,7 +219,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 }
 
 /* Set this up early so that things like the scheduler can init
@@ -362,29 +256,19 @@ void __init smp_prepare_boot_cpu(void)
 	set_cpu_possible(cpuid, true);
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	extern int __cpuinit smp4m_boot_one_cpu(int);
-	extern int __cpuinit smp4d_boot_one_cpu(int);
 	int ret=0;
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
-		ret = smp4m_boot_one_cpu(cpu);
+		ret = smp4m_boot_one_cpu(cpu, tidle);
 		break;
 	case sun4d:
-		ret = smp4d_boot_one_cpu(cpu);
+		ret = smp4d_boot_one_cpu(cpu, tidle);
 		break;
 	case sparc_leon:
-		ret = leon_boot_one_cpu(cpu);
+		ret = leon_boot_one_cpu(cpu, tidle);
 		break;
 	case sun4e:
 		printk("SUN4E\n");
@@ -398,16 +282,99 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 
 	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
 		while (!cpu_online(cpu))
 			mb();
 	}
 	return ret;
 }
 
+static void arch_cpu_pre_starting(void *arg)
+{
+	local_ops->cache_all();
+	local_ops->tlb_all();
+
+	switch(sparc_cpu_model) {
+	case sun4m:
+		sun4m_cpu_pre_starting(arg);
+		break;
+	case sun4d:
+		sun4d_cpu_pre_starting(arg);
+		break;
+	case sparc_leon:
+		leon_cpu_pre_starting(arg);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void arch_cpu_pre_online(void *arg)
+{
+	unsigned int cpuid = hard_smp_processor_id();
+
+	register_percpu_ce(cpuid);
+
+	calibrate_delay();
+	smp_store_cpu_info(cpuid);
+
+	local_ops->cache_all();
+	local_ops->tlb_all();
+
+	switch(sparc_cpu_model) {
+	case sun4m:
+		sun4m_cpu_pre_online(arg);
+		break;
+	case sun4d:
+		sun4d_cpu_pre_online(arg);
+		break;
+	case sparc_leon:
+		leon_cpu_pre_online(arg);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void sparc_start_secondary(void *arg)
+{
+	unsigned int cpu;
+
+	/*
+	 * SMP booting is extremely fragile in some architectures. So run
+	 * the cpu initialization code first before anything else.
+	 */
+	arch_cpu_pre_starting(arg);
+
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	/* Invoke the CPU_STARTING notifier callbacks */
+	notify_cpu_starting(cpu);
+
+	arch_cpu_pre_online(arg);
+
+	/* Set the CPU in the cpu_online_mask */
+	set_cpu_online(cpu, true);
+
+	/* Enable local interrupts now */
+	local_irq_enable();
+
+	wmb();
+	cpu_startup_entry(CPUHP_ONLINE);
+
+	/* We should never reach here! */
+	BUG();
+}
+
+void smp_callin(void)
+{
+	sparc_start_secondary(NULL);
+}
+
 void smp_bogo(struct seq_file *m)
 {
 	int i;
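The new IPI paths above (smp_send_reschedule(), arch_send_call_function_single_ipi(), arch_send_call_function_ipi_mask()) no longer hard-code a platform mechanism; they all dispatch through the sparc32_ipi_ops pointer that each platform variant (sun4m, sun4d, leon) installs at boot. The standalone C sketch below models that ops-table pattern in user space so the control flow is easy to follow. The member names mirror the callers visible in this diff (resched, single, mask_one); the demo backend, the installer in main(), and any members the real kernel header may declare beyond these are assumptions for illustration only.

#include <stdio.h>

/* Ops table modelled on the sparc32_ipi_ops usage in the diff above:
 * one hook per IPI kind, filled in by the platform at boot.
 * (Member names follow the callers shown here; anything else the real
 * kernel header declares is omitted.)
 */
struct sparc32_ipi_ops {
	void (*resched)(int cpu);   /* reschedule IPI to one CPU */
	void (*single)(int cpu);    /* single-function-call IPI */
	void (*mask_one)(int cpu);  /* per-CPU part of a mask call */
};

static const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* Hypothetical "platform" backend standing in for sun4m/sun4d/leon code. */
static void demo_resched(int cpu)  { printf("resched IPI -> CPU%d\n", cpu); }
static void demo_single(int cpu)   { printf("single  IPI -> CPU%d\n", cpu); }
static void demo_mask_one(int cpu) { printf("mask    IPI -> CPU%d\n", cpu); }

static const struct sparc32_ipi_ops demo_ipi_ops = {
	.resched  = demo_resched,
	.single   = demo_single,
	.mask_one = demo_mask_one,
};

/* Generic wrappers in the style of smp_send_reschedule() and
 * arch_send_call_function_single_ipi() from the diff: they only
 * indirect through the ops table.
 */
static void send_reschedule(int cpu)
{
	sparc32_ipi_ops->resched(cpu);
}

static void send_call_function_single_ipi(int cpu)
{
	sparc32_ipi_ops->single(cpu);
}

int main(void)
{
	sparc32_ipi_ops = &demo_ipi_ops;   /* platform selects its ops at boot */
	send_reschedule(1);
	send_call_function_single_ipi(2);
	return 0;
}

The same split shows up in the CPU bring-up code added at the end of the diff: generic logic (sparc_start_secondary()) calls fixed hooks, and the per-model differences stay behind the ops table or the sparc_cpu_model switch.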
