Diffstat (limited to 'arch/mips/kernel/smp-mt.c')
| -rw-r--r-- | arch/mips/kernel/smp-mt.c | 401 |
1 file changed, 176 insertions(+), 225 deletions(-)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 93429a4d301..3babf6e4f89 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -22,247 +22,165 @@
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/compiler.h>
+#include <linux/smp.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
-#include <asm/mips-boards/maltaint.h>	/* This is f*cking wrong */
+#include <asm/gic.h>
 
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
+static void __init smvp_copy_vpe_config(void)
+{
+	write_vpe_c0_status(
+		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
 
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+	/* set config to be the same as vpe0, particularly kseg0 coherency alg */
+	write_vpe_c0_config( read_c0_config());
 
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-	printk("vpe %d tc %d\n", vpe, tc);
+	/* make sure there are no software interrupts pending */
+	write_vpe_c0_cause(0);
 
-	settc(tc);
+	/* Propagate Config7 */
+	write_vpe_c0_config7(read_c0_config7());
 
-	printk(" c0 status 0x%lx\n", read_vpe_c0_status());
-	printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
-	printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-	printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
-	printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
+	write_vpe_c0_count(read_c0_count());
 }
-#endif
 
-void __init sanitize_tlb_entries(void)
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+					 unsigned int ncpu)
 {
-	int i, tlbsiz;
-	unsigned long mvpconf0, ncpu;
-
-	if (!cpu_has_mipsmt)
-		return;
-
-	/* Enable VPC */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	back_to_back_c0_hazard();
-
-	/* Disable TLB sharing */
-	clear_c0_mvpcontrol(MVPCONTROL_STLB);
+	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+		return ncpu;
 
-	mvpconf0 = read_c0_mvpconf0();
-
-	printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
-		(mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
-		(mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
-	tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
-	ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-	printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
-	if (tlbsiz > 0) {
-		/* share them out across the vpe's */
-		tlbsiz /= ncpu;
+	/* Deactivate all but VPE 0 */
+	if (tc != 0) {
+		unsigned long tmp = read_vpe_c0_vpeconf0();
 
-		printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
+		tmp &= ~VPECONF0_VPA;
 
-		for (i = 0; i < ncpu; i++) {
-			settc(i);
+		/* master VPE */
+		tmp |= VPECONF0_MVP;
+		write_vpe_c0_vpeconf0(tmp);
 
-			if (i == 0)
-				write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
-			else
-				write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
-						(tlbsiz << 25));
-		}
+		/* Record this as available CPU */
+		set_cpu_possible(tc, true);
+		set_cpu_present(tc, true);
+		__cpu_number_map[tc] = ++ncpu;
+		__cpu_logical_map[ncpu] = tc;
 	}
 
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch (struct pt_regs *regs)
-{
-	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
-}
-
-static void ipi_call_dispatch (struct pt_regs *regs)
-{
-	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
-}
-
-irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	return IRQ_HANDLED;
-}
+	/* Disable multi-threading with TC's */
+	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
-irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	smp_call_function_interrupt();
+	if (tc != 0)
+		smvp_copy_vpe_config();
 
-	return IRQ_HANDLED;
+	return ncpu;
 }
 
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_DISABLED,
-	.name		= "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_DISABLED,
-	.name		= "IPI_call"
-};
-
-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
- */
-void plat_smp_setup(void)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
 {
-	unsigned long val;
-	int i, num;
+	unsigned long tmp;
 
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-	if (!cpu_has_mipsmt)
+	if (!tc)
 		return;
 
-	/* disable MT so we can configure */
-	dvpe();
-	dmt();
-
-	mips_mt_set_cpuoptions();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	val = read_c0_mvpconf0();
-
-	/* we'll always have more TC's than VPE's, so loop setting everything
-	   to a sensible state */
-	for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
-		settc(i);
-
-		/* VPE's */
-		if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
-
-			/* deactivate all but vpe0 */
-			if (i != 0) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-
-				/* Record this as available CPU */
-				cpu_set(i, phys_cpu_present_map);
-				__cpu_number_map[i] = ++num;
-				__cpu_logical_map[num] = i;
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (i != 0) {
-				write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+	/* bind a TC to each VPE, May as well put all excess TC's
+	   on the last VPE */
+	if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+	else {
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
 
-				/* set config to be the same as vpe0, particularly kseg0 coherency alg */
-				write_vpe_c0_config( read_c0_config());
-
-				/* make sure there are no software interrupts pending */
-				write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
-
-				/* Propagate Config7 */
-				write_vpe_c0_config7(read_c0_config7());
-			}
+		/* and set XTC */
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+	}
 
-		}
+	tmp = read_tc_c0_tcstatus();
 
-		/* TC's */
+	/* mark not allocated and not dynamically allocatable */
+	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+	tmp |= TCSTATUS_IXMT;		/* interrupt exempt */
+	write_tc_c0_tcstatus(tmp);
 
-		if (i != 0) {
-			unsigned long tmp;
+	write_tc_c0_tchalt(TCHALT_H);
+}
 
-			/* bind a TC to each VPE, May as well put all excess TC's
-			   on the last VPE */
-			if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
-				write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
-			else {
-				write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
+static void vsmp_send_ipi_single(int cpu, unsigned int action)
+{
+	int i;
+	unsigned long flags;
+	int vpflags;
 
-				/* and set XTC */
-				write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
-			}
+#ifdef CONFIG_IRQ_GIC
+	if (gic_present) {
+		gic_send_ipi_single(cpu, action);
+		return;
+	}
+#endif
+	local_irq_save(flags);
 
-			tmp = read_tc_c0_tcstatus();
+	vpflags = dvpe();	/* can't access the other CPU's registers whilst MVPE enabled */
 
-			/* mark not allocated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;		/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		i = C_SW1;
+		break;
 
-			write_tc_c0_tchalt(TCHALT_H);
-		}
+	case SMP_RESCHEDULE_YOURSELF:
+	default:
+		i = C_SW0;
+		break;
 	}
 
-	/* Release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	/* 1:1 mapping of vpe and tc... */
+	settc(cpu);
+	write_vpe_c0_cause(read_vpe_c0_cause() | i);
+	evpe(vpflags);
 
-	/* We'll wait until starting the secondaries before starting MVPE */
+	local_irq_restore(flags);
+}
+
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	unsigned int i;
 
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+	for_each_cpu(i, mask)
+		vsmp_send_ipi_single(i, action);
 }
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void vsmp_init_secondary(void)
 {
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
+#ifdef CONFIG_IRQ_GIC
+	/* This is Malta specific: IPI,performance and timer interrupts */
+	if (gic_present)
+		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+				 STATUSF_IP6 | STATUSF_IP7);
+	else
+#endif
+		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+				 STATUSF_IP6 | STATUSF_IP7);
+}
 
-	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
+static void vsmp_smp_finish(void)
+{
+	/* CDFIXME: remove this? */
+	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
 
-	/* need to mark IPI's as IRQ_PER_CPU */
-	irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
-	irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+	local_irq_enable();
 }
 
 /*
@@ -273,7 +191,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
  * (unsigned long)idle->thread_info the gp
  * assumes a 1:1 mapping of TC => VPE
  */
-void prom_boot_secondary(int cpu, struct task_struct *idle)
+static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
 {
 	struct thread_info *gp = task_thread_info(idle);
 	dvpe();
@@ -299,7 +217,7 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 	write_tc_gpr_gp((unsigned long)gp);
 
 	flush_icache_range((unsigned long)gp,
-			   (unsigned long)(gp + sizeof(struct thread_info)));
+			(unsigned long)(gp + sizeof(struct thread_info)));
 
 	/* finally out of configuration and into chaos */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -307,54 +225,87 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 	evpe(EVPE_ENABLE);
 }
 
-void prom_init_secondary(void)
-{
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
-}
-
-void prom_smp_finish(void)
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondaries
+ */
+static void __init vsmp_smp_setup(void)
 {
-	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
+	unsigned int nvpe;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpu_set(0, mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
+	if (!cpu_has_mipsmt)
+		return;
 
-	local_irq_enable();
-}
+	/* disable MT so we can configure */
+	dvpe();
+	dmt();
 
-void prom_cpus_done(void)
-{
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	smp_num_siblings = nvpe;
+
+	/* we'll always have more TC's than VPE's, so loop setting everything
+	   to a sensible state */
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
+
+		smvp_tc_init(tc, mvpconf0);
+		ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
+	}
+
+	/* Release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/* We'll wait until starting the secondaries before starting MVPE */
+
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
 }
 
-void core_send_ipi(int cpu, unsigned int action)
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
 {
-	int i;
-	unsigned long flags;
-	int vpflags;
+	mips_mt_set_cpuoptions();
+}
 
-	local_irq_save (flags);
+struct plat_smp_ops vsmp_smp_ops = {
	.send_ipi_single	= vsmp_send_ipi_single,
	.send_ipi_mask		= vsmp_send_ipi_mask,
	.init_secondary		= vsmp_init_secondary,
	.smp_finish		= vsmp_smp_finish,
	.boot_secondary		= vsmp_boot_secondary,
	.smp_setup		= vsmp_smp_setup,
	.prepare_cpus		= vsmp_prepare_cpus,
+};
 
-	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+	unsigned long action_unused, void *data)
+{
+	struct proc_cpuinfo_notifier_args *pcn = data;
+	struct seq_file *m = pcn->m;
+	unsigned long n = pcn->n;
 
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		i = C_SW1;
-		break;
+	if (!cpu_has_mipsmt)
+		return NOTIFY_OK;
 
-	case SMP_RESCHEDULE_YOURSELF:
-	default:
-		i = C_SW0;
-		break;
-	}
+	seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
 
-	/* 1:1 mapping of vpe and tc... */
-	settc(cpu);
-	write_vpe_c0_cause(read_vpe_c0_cause() | i);
-	evpe(vpflags);
+	return NOTIFY_OK;
+}
 
-	local_irq_restore(flags);
+static int __init proc_cpuinfo_notifier_init(void)
+{
+	return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
 }
+
+subsys_initcall(proc_cpuinfo_notifier_init);
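
For context, the new vsmp_smp_ops structure plugs into the MIPS plat_smp_ops mechanism: board code selects an SMP implementation during early boot by passing one of these ops structures to register_smp_ops(). Below is a minimal sketch of how a Malta-style board might register them, assuming the register_vsmp_smp_ops() and register_up_smp_ops() helpers from asm/smp-ops.h as found in kernels of this vintage; the prom_init() body itself is hypothetical, not code from this commit.

/*
 * Sketch: selecting the VSMP (MT) SMP ops at boot. Assumes the
 * helpers declared in <asm/smp-ops.h>; the prom_init() shown here
 * is hypothetical board code, not part of this diff.
 */
#include <asm/smp-ops.h>

void __init prom_init(void)
{
	/* ... board-specific setup elided ... */

	/*
	 * register_vsmp_smp_ops() registers &vsmp_smp_ops when
	 * CONFIG_MIPS_MT_SMP is enabled and returns an error code
	 * otherwise, in which case we fall back to the
	 * uniprocessor ops.
	 */
	if (register_vsmp_smp_ops())
		register_up_smp_ops();
}

Once registered, the generic MIPS SMP code drives everything through these hooks: vsmp_smp_setup() during platform SMP setup, vsmp_boot_secondary() to launch each secondary VPE, and vsmp_send_ipi_single()/vsmp_send_ipi_mask() for cross-CPU interrupts.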
