51 files changed, 3720 insertions, 176 deletions
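The recurring SMTC pattern in the diff below: because MIPS interrupts are level-sensitive and the CLI in the vectored handler clears EXL, except_vec_vi_handler first clears the Status.IM bit of the line being serviced and parks it in CP0 TCContext; the controller ack/EOI paths (i8259.c, irq-msc01.c) then re-enable that bit using the irq_hwmask[] table added in irq.c. Below is a minimal C sketch of that bookkeeping, not code from the patch: irq_hwmask[], ST0_IM and set_c0_status() are as in the diff, while setup_irq_smtc() (only named in the patch's comments) and the smtc_im_ack_irq() helper are assumed shapes standing in for the open-coded logic.

	/*
	 * Sketch only, under the assumptions stated above.
	 */
	#include <linux/irq.h>
	#include <asm/mipsregs.h>	/* ST0_IM, STATUSF_IPx, set_c0_status() */

	extern unsigned long irq_hwmask[NR_IRQS];	/* added in arch/mips/kernel/irq.c */

	/* Record which Status.IM bit the platform routed this irq to. */
	static inline void setup_irq_smtc(unsigned int irq, unsigned long imask)
	{
		irq_hwmask[irq] = imask;	/* e.g. STATUSF_IP2 for a source wired to IP2 */
	}

	/*
	 * Hypothetical helper equivalent to the open-coded re-arm in
	 * i8259.c/irq-msc01.c: once the controller has been acked, re-set
	 * the IM bit that except_vec_vi_handler cleared (and logged in
	 * TCContext), so a still-pending level-triggered source raises a
	 * fresh exception instead of re-entering while EXL is clear.
	 */
	static inline void smtc_im_ack_irq(unsigned int irq)
	{
		if (irq_hwmask[irq] & ST0_IM)
			set_c0_status(irq_hwmask[irq] & ST0_IM);
	}

The assembly side in genex.S does the inverse at entry: the ori immediate at except_vec_vi_mori supplies a0 with the IM bits to clear, and those bits are ORed into TCContext so that the restore_all path in entry.S can re-arm anything the service routine did not explicitly ack.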
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a7bac0459f9..f9be549645e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1447,6 +1447,10 @@ choice prompt "MIPS MT options" depends on MIPS_MT +config MIPS_MT_SMTC + bool "SMTC: Use all TCs on all VPEs for SMP" + select SMP + config MIPS_MT_SMP bool "Use 1 TC on each available VPE for SMP" select SMP @@ -1613,7 +1617,7 @@ source "mm/Kconfig" config SMP bool "Multi-Processing support" - depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP + depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 9ec01de81c0..34e8a256765 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o +obj-$(CONFIG_MIPS_MT) += mips-mt.o +obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o +obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index ca6b03c773b..92b28b674d6 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -69,6 +69,9 @@ void output_ptreg_defines(void) offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); offset("#define PT_STATUS ", struct pt_regs, cp0_status); offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); +#ifdef CONFIG_MIPS_MT_SMTC + offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus); +#endif /* CONFIG_MIPS_MT_SMTC */ size("#define PT_SIZE ", struct pt_regs); linefeed; } diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index b1939a486d2..d101d2fb24c 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -17,6 +17,9 @@ #include <asm/isadep.h> #include <asm/thread_info.h> #include <asm/war.h> +#ifdef CONFIG_MIPS_MT_SMTC +#include <asm/mipsmtregs.h> +#endif #ifdef CONFIG_PREEMPT .macro preempt_stop @@ -75,6 +78,37 @@ FEXPORT(syscall_exit) bnez t0, syscall_exit_work FEXPORT(restore_all) # restore full frame +#ifdef CONFIG_MIPS_MT_SMTC +/* Detect and execute deferred IPI "interrupts" */ + move a0,sp + jal deferred_smtc_ipi +/* Re-arm any temporarily masked interrupts not explicitly "acked" */ + mfc0 v0, CP0_TCSTATUS + ori v1, v0, TCSTATUS_IXMT + mtc0 v1, CP0_TCSTATUS + andi v0, TCSTATUS_IXMT + ehb + mfc0 t0, CP0_TCCONTEXT + DMT 9 # dmt t1 + jal mips_ihb + mfc0 t2, CP0_STATUS + andi t3, t0, 0xff00 + or t2, t2, t3 + mtc0 t2, CP0_STATUS + ehb + andi t1, t1, VPECONTROL_TE + beqz t1, 1f + EMT +1: + mfc0 v1, CP0_TCSTATUS + /* We set IXMT above, XOR should cler it here */ + xori v1, v1, TCSTATUS_IXMT + or v1, v0, v1 + mtc0 v1, CP0_TCSTATUS + ehb + xor t0, t0, t3 + mtc0 t0, CP0_TCCONTEXT +#endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP RESTORE_AT diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S index 235ad9f6bd3..10f28fb9f00 100644 --- a/arch/mips/kernel/gdb-low.S +++ b/arch/mips/kernel/gdb-low.S @@ -283,11 +283,33 @@ */ 3: +#ifdef CONFIG_MIPS_MT_SMTC + /* Read-modify write of Status must be atomic */ + mfc0 t2, CP0_TCSTATUS + ori t1, t2, 
TCSTATUS_IXMT + mtc0 t1, CP0_TCSTATUS + andi t2, t2, TCSTATUS_IXMT + ehb + DMT 9 # dmt t1 + jal mips_ihb + nop +#endif /* CONFIG_MIPS_MT_SMTC */ mfc0 t0, CP0_STATUS ori t0, 0x1f xori t0, 0x1f mtc0 t0, CP0_STATUS - +#ifdef CONFIG_MIPS_MT_SMTC + andi t1, t1, VPECONTROL_TE + beqz t1, 9f + nop + EMT # emt +9: + mfc0 t1, CP0_TCSTATUS + xori t1, t1, TCSTATUS_IXMT + or t1, t1, t2 + mtc0 t1, CP0_TCSTATUS + ehb +#endif /* CONFIG_MIPS_MT_SMTC */ LONG_L v0, GDB_FR_STATUS(sp) LONG_L v1, GDB_FR_EPC(sp) mtc0 v0, CP0_STATUS diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c index d4f88e0af24..6ecbdc1fefd 100644 --- a/arch/mips/kernel/gdb-stub.c +++ b/arch/mips/kernel/gdb-stub.c @@ -140,6 +140,7 @@ #include <asm/system.h> #include <asm/gdb-stub.h> #include <asm/inst.h> +#include <asm/smp.h> /* * external low-level support routines @@ -669,6 +670,64 @@ static void kgdb_wait(void *arg) local_irq_restore(flags); } +/* + * GDB stub needs to call kgdb_wait on all processor with interrupts + * disabled, so it uses it's own special variant. + */ +static int kgdb_smp_call_kgdb_wait(void) +{ +#ifdef CONFIG_SMP + struct call_data_struct data; + int i, cpus = num_online_cpus() - 1; + int cpu = smp_processor_id(); + + /* + * Can die spectacularly if this CPU isn't yet marked online + */ + BUG_ON(!cpu_online(cpu)); + + if (!cpus) + return 0; + + if (spin_is_locked(&smp_call_lock)) { + /* + * Some other processor is trying to make us do something + * but we're not going to respond... give up + */ + return -1; + } + + /* + * We will continue here, accepting the fact that + * the kernel may deadlock if another CPU attempts + * to call smp_call_function now... + */ + + data.func = kgdb_wait; + data.info = NULL; + atomic_set(&data.started, 0); + data.wait = 0; + + spin_lock(&smp_call_lock); + call_data = &data; + mb(); + + /* Send a message to all other CPUs and wait for them to respond */ + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i) && i != cpu) + core_send_ipi(i, SMP_CALL_FUNCTION); + + /* Wait for response */ + /* FIXME: lock-up detection, backtrace on lock-up */ + while (atomic_read(&data.started) != cpus) + barrier(); + + call_data = NULL; + spin_unlock(&smp_call_lock); +#endif + + return 0; +} /* * This function does all command processing for interfacing to gdb. It @@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs) /* * force other cpus to enter kgdb */ - smp_call_function(kgdb_wait, NULL, 0, 0); + kgdb_smp_call_kgdb_wait(); /* * If we're in breakpoint() increment the PC diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 04418b6568b..ff7af369f28 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -12,6 +12,7 @@ #include <linux/init.h> #include <asm/asm.h> +#include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/regdef.h> #include <asm/fpregdef.h> @@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp) SAVE_AT .set push .set noreorder +#ifdef CONFIG_MIPS_MT_SMTC + /* + * To keep from blindly blocking *all* interrupts + * during service by SMTC kernel, we also want to + * pass the IM value to be cleared. 
+ */ +EXPORT(except_vec_vi_mori) + ori a0, $0, 0 +#endif /* CONFIG_MIPS_MT_SMTC */ EXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ j except_vec_vi_handler @@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end) NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP SAVE_STATIC +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC has an interesting problem that interrupts are level-triggered, + * and the CLI macro will clear EXL, potentially causing a duplicate + * interrupt service invocation. So we need to clear the associated + * IM bit of Status prior to doing CLI, and restore it after the + * service routine has been invoked - we must assume that the + * service routine will have cleared the state, and any active + * level represents a new or otherwised unserviced event... + */ + mfc0 t1, CP0_STATUS + and t0, a0, t1 + mfc0 t2, CP0_TCCONTEXT + or t0, t0, t2 + mtc0 t0, CP0_TCCONTEXT + xor t1, t1, t0 + mtc0 t1, CP0_STATUS + ehb +#endif /* CONFIG_MIPS_MT_SMTC */ CLI move a0, sp jalr v0 diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 2e9122a4213..bdf6f6eff72 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -18,6 +18,7 @@ #include <linux/threads.h> #include <asm/asm.h> +#include <asm/asmmacro.h> #include <asm/regdef.h> #include <asm/page.h> #include <asm/mipsregs.h> @@ -82,12 +83,33 @@ */ .macro setup_c0_status set clr .set push +#ifdef CONFIG_MIPS_MT_SMTC + /* + * For SMTC, we need to set privilege and disable interrupts only for + * the current TC, using the TCStatus register. + */ + mfc0 t0, CP0_TCSTATUS + /* Fortunately CU 0 is in the same place in both registers */ + /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ + li t1, ST0_CU0 | 0x08001c00 + or t0, t1 + /* Clear TKSU, leave IXMT */ + xori t0, 0x00001800 + mtc0 t0, CP0_TCSTATUS + ehb + /* We need to leave the global IE bit set, but clear EXL...*/ + mfc0 t0, CP0_STATUS + or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr + xor t0, ST0_EXL | ST0_ERL | \clr + mtc0 t0, CP0_STATUS +#else mfc0 t0, CP0_STATUS or t0, ST0_CU0|\set|0x1f|\clr xor t0, 0x1f|\clr mtc0 t0, CP0_STATUS .set noreorder sll zero,3 # ehb +#endif .set pop .endm @@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point ARC64_TWIDDLE_PC +#ifdef CONFIG_MIPS_MT_SMTC + /* + * In SMTC kernel, "CLI" is thread-specific, in TCStatus. + * We still need to enable interrupts globally in Status, + * and clear EXL/ERL. + * + * TCContext is used to track interrupt levels under + * service in SMTC kernel. Clear for boot TC before + * allowing any interrupts. + */ + mtc0 zero, CP0_TCCONTEXT + + mfc0 t0, CP0_STATUS + ori t0, t0, 0xff1f + xori t0, t0, 0x001e + mtc0 t0, CP0_STATUS +#endif /* CONFIG_MIPS_MT_SMTC */ + PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE @@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point * function after setting up the stack and gp registers. */ NESTED(smp_bootstrap, 16, sp) +#ifdef CONFIG_MIPS_MT_SMTC + /* + * Read-modify-writes of Status must be atomic, and this + * is one case where CLI is invoked without EXL being + * necessarily set. The CLI and setup_c0_status will + * in fact be redundant for all but the first TC of + * each VPE being booted. 
+ */ + DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ + jal mips_ihb +#endif /* CONFIG_MIPS_MT_SMTC */ setup_c0_status_sec smp_slave_setup +#ifdef CONFIG_MIPS_MT_SMTC + andi t2, t2, VPECONTROL_TE + beqz t2, 2f + EMT # emt +2: +#endif /* CONFIG_MIPS_MT_SMTC */ j start_secondary END(smp_bootstrap) #endif /* CONFIG_SMP */ diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index b974ac9057f..2125ba5f1d9 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -187,6 +187,10 @@ handle_real_irq: outb(cached_21,0x21); outb(0x60+irq,0x20); /* 'Specific EOI' to master */ } +#ifdef CONFIG_MIPS_MT_SMTC + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ spin_unlock_irqrestore(&i8259A_lock, flags); return; diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 3f653c7cfbf..97ebdc754b9 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) mask_msc_irq(irq); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); +#ifdef CONFIG_MIPS_MT_SMTC + /* This actually needs to be a call into platform code */ + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ } /* @@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq) MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); } +#ifdef CONFIG_MIPS_MT_SMTC + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ } /* diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index e0efc4f2f93..3dce742e716 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq) atomic_t irq_err_count; +#ifdef CONFIG_MIPS_MT_SMTC +/* + * SMTC Kernel needs to manipulate low-level CPU interrupt mask + * in do_IRQ. These are passed in setup_irq_smtc() and stored + * in this table. 
+ */ +unsigned long irq_hwmask[NR_IRQS]; +#endif /* CONFIG_MIPS_MT_SMTC */ + #undef do_IRQ /* @@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs) { irq_enter(); + __DO_IRQ_SMTC_HOOK(); __do_IRQ(irq, regs); irq_exit(); @@ -129,6 +139,9 @@ void __init init_IRQ(void) irq_desc[i].depth = 1; irq_desc[i].handler = &no_irq_type; spin_lock_init(&irq_desc[i].lock); +#ifdef CONFIG_MIPS_MT_SMTC + irq_hwmask[i] = 0; +#endif /* CONFIG_MIPS_MT_SMTC */ } arch_init_irq(); diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c new file mode 100644 index 00000000000..02237a685ec --- /dev/null +++ b/arch/mips/kernel/mips-mt.c @@ -0,0 +1,449 @@ +/* + * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * Copyright (C) 2005 Mips Technologies, Inc + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> + +#include <asm/cpu.h> +#include <asm/processor.h> +#include <asm/atomic.h> +#include <asm/system.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/smp.h> +#include <asm/mipsmtregs.h> +#include <asm/r4kcache.h> +#include <asm/cacheflush.h> + +/* + * CPU mask used to set process affinity for MT VPEs/TCs with FPUs + */ + +cpumask_t mt_fpu_cpumask; + +#ifdef CONFIG_MIPS_MT_FPAFF + +#include <linux/cpu.h> +#include <linux/delay.h> +#include <asm/uaccess.h> + +unsigned long mt_fpemul_threshold = 0; + +/* + * Replacement functions for the sys_sched_setaffinity() and + * sys_sched_getaffinity() system calls, so that we can integrate + * FPU affinity with the user's requested processor affinity. + * This code is 98% identical with the sys_sched_setaffinity() + * and sys_sched_getaffinity() system calls, and should be + * updated when kernel/sched.c changes. + */ + +/* + * find_process_by_pid - find a process with a matching PID value. + * used in sys_sched_set/getaffinity() in kernel/sched.c, so + * cloned here. + */ +static inline task_t *find_process_by_pid(pid_t pid) +{ + return pid ? find_task_by_pid(pid) : current; +} + + +/* + * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process + */ +asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + cpumask_t new_mask; + cpumask_t effective_mask; + int retval; + task_t *p; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + lock_cpu_hotplug(); + read_lock(&tasklist_lock); + + p = find_process_by_pid(pid); + if (!p) { + read_unlock(&tasklist_lock); + unlock_cpu_hotplug(); + return -ESRCH; + } + + /* + * It is not safe to call set_cpus_allowed with the + * tasklist_lock held. We will bump the task_struct's + * usage count and drop tasklist_lock before invoking + * set_cpus_allowed. 
+ */ + get_task_struct(p); + + retval = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_NICE)) { + read_unlock(&tasklist_lock); + goto out_unlock; + } + + /* Record new user-specified CPU set for future reference */ + p->thread.user_cpus_allowed = new_mask; + + /* Unlock the task list */ + read_unlock(&tasklist_lock); + + /* Compute new global allowed CPU set if necessary */ + if( (p->thread.mflags & MF_FPUBOUND) + && cpus_intersects(new_mask, mt_fpu_cpumask)) { + cpus_and(effective_mask, new_mask, mt_fpu_cpumask); + retval = set_cpus_allowed(p, effective_mask); + } else { + p->thread.mflags &= ~MF_FPUBOUND; + retval = set_cpus_allowed(p, new_mask); + } + + +out_unlock: + put_task_struct(p); + unlock_cpu_hotplug(); + return retval; +} + +/* + * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process + */ +asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) +{ + unsigned int real_len; + cpumask_t mask; + int retval; + task_t *p; + + real_len = sizeof(mask); + if (len < real_len) + return -EINVAL; + + lock_cpu_hotplug(); + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = 0; + + cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); + +out_unlock: + read_unlock(&tasklist_lock); + unlock_cpu_hotplug(); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + +#endif /* CONFIG_MIPS_MT_FPAFF */ + +/* + * Dump new MIPS MT state for the core. Does not leave TCs halted. + * Takes an argument which taken to be a pre-call MVPControl value. + */ + +void mips_mt_regdump(unsigned long mvpctl) +{ + unsigned long flags; + unsigned long vpflags; + unsigned long mvpconf0; + int nvpe; + int ntc; + int i; + int tc; + unsigned long haltval; + unsigned long tcstatval; +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_soft_dump(void); +#endif /* CONFIG_MIPT_MT_SMTC */ + + local_irq_save(flags); + vpflags = dvpe(); + printk("=== MIPS MT State Dump ===\n"); + printk("-- Global State --\n"); + printk(" MVPControl Passed: %08lx\n", mvpctl); + printk(" MVPControl Read: %08lx\n", vpflags); + printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); + nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; + printk("-- per-VPE State --\n"); + for(i = 0; i < nvpe; i++) { + for(tc = 0; tc < ntc; tc++) { + settc(tc); + if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { + printk(" VPE %d\n", i); + printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol()); + printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0()); + printk(" VPE%d.Status : %08lx\n", + i, read_vpe_c0_status()); + printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc()); + printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause()); + printk(" VPE%d.Config7 : %08lx\n", + i, read_vpe_c0_config7()); + break; /* Next VPE */ + } + } + } + printk("-- per-TC State --\n"); + for(tc = 0; tc < ntc; tc++) { + settc(tc); + if(read_tc_c0_tcbind() == read_c0_tcbind()) { + /* Are we dumping ourself? 
*/ + haltval = 0; /* Then we're not halted, and mustn't be */ + tcstatval = flags; /* And pre-dump TCStatus is flags */ + printk(" TC %d (current TC with VPE EPC above)\n", tc); + } else { + haltval = read_tc_c0_tchalt(); + write_tc_c0_tchalt(1); + tcstatval = read_tc_c0_tcstatus(); + printk(" TC %d\n", tc); + } + printk(" TCStatus : %08lx\n", tcstatval); + printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); + printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart()); + printk(" TCHalt : %08lx\n", haltval); + printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); + if (!haltval) + write_tc_c0_tchalt(0); + } +#ifdef CONFIG_MIPS_MT_SMTC + smtc_soft_dump(); +#endif /* CONFIG_MIPT_MT_SMTC */ + printk("===========================\n"); + evpe(vpflags); + local_irq_restore(flags); +} + +static int mt_opt_norps = 0; +static int mt_opt_rpsctl = -1; +static int mt_opt_nblsu = -1; +static int mt_opt_forceconfig7 = 0; +static int mt_opt_config7 = -1; + +static int __init rps_disable(char *s) +{ + mt_opt_norps = 1; + return 1; +} +__setup("norps", rps_disable); + +static int __init rpsctl_set(char *str) +{ + get_option(&str, &mt_opt_rpsctl); + return 1; +} +__setup("rpsctl=", rpsctl_set); + +static int __init nblsu_set(char *str) +{ + get_option(&str, &mt_opt_nblsu); + return 1; +} +__setup("nblsu=", nblsu_set); + +static int __init config7_set(char *str) +{ + get_option(&str, &mt_opt_config7); + mt_opt_forceconfig7 = 1; + return 1; +} +__setup("config7=", config7_set); + +/* Experimental cache flush control parameters that should go away some day */ +int mt_protiflush = 0; +int mt_protdflush = 0; +int mt_n_iflushes = 1; +int mt_n_dflushes = 1; + +static int __init set_protiflush(char *s) +{ + mt_protiflush = 1; + return 1; +} +__setup("protiflush", set_protiflush); + +static int __init set_protdflush(char *s) +{ + mt_protdflush = 1; + return 1; +} +__setup("protdflush", set_protdflush); + +static int |