Diffstat (limited to 'arch/mips/kernel/r4k_switch.S')
-rw-r--r--	arch/mips/kernel/r4k_switch.S	| 143
1 file changed, 104 insertions(+), 39 deletions(-)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd19a9c..81ca3f70fe2 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -4,19 +4,17 @@
  * for more details.
  *
  * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1994, 1995, 1996, by Andreas Busse
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2000 MIPS Technologies, Inc.
  *    written by Carsten Langgaard, carstenl@mips.com
  */
-#include <linux/config.h>
 #include <asm/asm.h>
 #include <asm/cachectl.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
-#include <asm/page.h>
 #include <asm/pgtable-bits.h>
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
@@ -30,54 +28,56 @@
  */
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by _TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
+#ifndef USE_ALTERNATE_RESUME_IMPL
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, s32 fp_save)
  */
 	.align	5
 	LEAF(resume)
-#ifndef CONFIG_CPU_HAS_LLSC
-	sw	zero, ll_bit
-#endif
 	mfc0	t1, CP0_STATUS
 	LONG_S	t1, THREAD_STATUS(a0)
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
 	/*
-	 * check if we need to save FPU registers
+	 * Check whether we need to save any FP context. FP context is saved
+	 * iff the process has used the context with the scalar FPU or the MSA
+	 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
+	 * _TIF_USEDMSA respectively. switch_to will have set fp_save
+	 * accordingly to an FP_SAVE_ enum value.
 	 */
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-	LONG_L	t0, TI_FLAGS(t3)
-	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
-	nor	t1, zero, t1
-
-	and	t0, t0, t1
-	LONG_S	t0, TI_FLAGS(t3)
+	beqz	a3, 2f
 
 	/*
-	 * clear saved user stack CU1 bit
+	 * We do. Clear the saved CU1 bit for prev, such that next time it is
+	 * scheduled it will start in userland with the FPU disabled. If the
+	 * task uses the FPU then it will be enabled again via the do_cpu trap.
+	 * This allows us to lazily restore the FP context.
 	 */
+	PTR_L	t3, TASK_THREAD_INFO(a0)
 	LONG_L	t0, ST_OFF(t3)
 	li	t1, ~ST0_CU1
 	and	t0, t0, t1
 	LONG_S	t0, ST_OFF(t3)
 
-	fpu_save_double a0 t1 t0 t2		# c0_status passed in t1
-						# clobbers t0 and t2
-1:
+	/* Check whether we're saving scalar or vector context. */
+	bgtz	a3, 1f
+
+	/* Save 128b MSA vector context. */
+	msa_save_all	a0
+	b	2f
+
+1:	/* Save 32b/64b scalar FP context. */
+	fpu_save_double a0 t0 t1		# c0_status passed in t0
+						# clobbers t1
+2:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	PTR_LA	t8, __stack_chk_guard
+	LONG_L	t9, TASK_STACK_CANARY(a1)
+	LONG_S	t9, 0(t8)
+#endif
 
 	/*
 	 * The order of restoring the registers takes care of the race
@@ -86,9 +86,8 @@
 	move	$28, a2
 	cpu_restore_nonscratch a1
 
-	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
+	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
 	set_saved_sp	t0, t1, t2
-
 	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 	li	a3, 0xff01
 	and	t1, a3
@@ -101,14 +100,16 @@
 	jr	ra
 	END(resume)
 
+#endif /* USE_ALTERNATE_RESUME_IMPL */
+
 /*
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#ifdef CONFIG_64BIT
-	mfc0	t1, CP0_STATUS
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	mfc0	t0, CP0_STATUS
 #endif
-	fpu_save_double a0 t1 t0 t2		# clobbers t1
+	fpu_save_double a0 t0 t1		# clobbers t1
 	jr	ra
 	END(_save_fp)
 
@@ -116,10 +117,33 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-	fpu_restore_double a0, t1		# clobbers t1
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	mfc0	t0, CP0_STATUS
+#endif
+	fpu_restore_double a0 t0 t1		# clobbers t1
 	jr	ra
 	END(_restore_fp)
 
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+	msa_save_all	a0
+	jr	ra
+	END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+	msa_restore_all	a0
+	jr	ra
+	END(_restore_msa)
+
+#endif
+
 /*
  * Load the FPU with signalling NANS.  This bit pattern we're using has
  * the property that no matter whether considered as single or as double
@@ -135,7 +159,7 @@ LEAF(_init_fpu)
 	li	t1, ST0_CU1
 	or	t0, t1
 	mtc0	t0, CP0_STATUS
-	fpu_enable_hazard
+	enable_fpu_hazard
 
 	li	t1, FPU_DEFAULT
 	ctc1	t1, fcr31
@@ -198,8 +222,49 @@ LEAF(_init_fpu)
 	mtc1	t1, $f29
 	mtc1	t1, $f30
 	mtc1	t1, $f31
+
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	push
+	.set	mips64r2
+	sll	t0, t0, 5			# is Status.FR set?
+	bgez	t0, 1f				# no: skip setting upper 32b
+
+	mthc1	t1, $f0
+	mthc1	t1, $f1
+	mthc1	t1, $f2
+	mthc1	t1, $f3
+	mthc1	t1, $f4
+	mthc1	t1, $f5
+	mthc1	t1, $f6
+	mthc1	t1, $f7
+	mthc1	t1, $f8
+	mthc1	t1, $f9
+	mthc1	t1, $f10
+	mthc1	t1, $f11
+	mthc1	t1, $f12
+	mthc1	t1, $f13
+	mthc1	t1, $f14
+	mthc1	t1, $f15
+	mthc1	t1, $f16
+	mthc1	t1, $f17
+	mthc1	t1, $f18
+	mthc1	t1, $f19
+	mthc1	t1, $f20
+	mthc1	t1, $f21
+	mthc1	t1, $f22
+	mthc1	t1, $f23
+	mthc1	t1, $f24
+	mthc1	t1, $f25
+	mthc1	t1, $f26
+	mthc1	t1, $f27
+	mthc1	t1, $f28
+	mthc1	t1, $f29
+	mthc1	t1, $f30
+	mthc1	t1, $f31
+1:	.set	pop
+#endif /* CONFIG_CPU_MIPS32_R2 */
 #else
-	.set	mips3
+	.set	arch=r4000
 	dmtc1	t1, $f0
 	dmtc1	t1, $f2
 	dmtc1	t1, $f4
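
Note on the new fp_save argument: resume() dispatches on a3 with "beqz a3, 2f" (nothing to save), "bgtz a3, 1f" (scalar FP), and otherwise falls through to the MSA save. A minimal C sketch of the caller-side selection this implies is below; it is an illustration, not the kernel's code. The FP_SAVE_* names extend the FP_SAVE_ prefix mentioned in the new comment, and the values are assumptions chosen only so the three branch tests work out (none == 0, scalar > 0, vector < 0).

	/* Hypothetical sketch: pick the fp_save value that switch_to would
	 * pass to resume(), based on the thread flags named in the diff
	 * (_TIF_USEDFPU, _TIF_USEDMSA). Values match the asm branch tests. */
	enum fp_save_mode {
		FP_SAVE_VECTOR = -1,	/* save full 128b MSA vector context */
		FP_SAVE_NONE   =  0,	/* no FP use in this time slice */
		FP_SAVE_SCALAR =  1,	/* save 32b/64b scalar FP context */
	};

	static inline s32 choose_fp_save(unsigned long ti_flags)
	{
		if (ti_flags & _TIF_USEDMSA)	/* vector save covers the FPU regs too */
			return FP_SAVE_VECTOR;
		if (ti_flags & _TIF_USEDFPU)
			return FP_SAVE_SCALAR;
		return FP_SAVE_NONE;
	}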
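Likewise, the new CONFIG_CC_STACKPROTECTOR hunk applies only to !CONFIG_SMP because there the kernel keeps a single global canary rather than a per-CPU copy, so resume() must repoint it at the incoming task. A rough C equivalent of those three instructions (a sketch; __stack_chk_guard and task_struct's stack_canary field are real kernel symbols, the helper name is made up):

	extern unsigned long __stack_chk_guard;

	/* Equivalent of: PTR_LA t8, __stack_chk_guard;
	 * LONG_L t9, TASK_STACK_CANARY(a1); LONG_S t9, 0(t8) */
	static inline void update_stack_canary(struct task_struct *next)
	{
		__stack_chk_guard = next->stack_canary;
	}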
