Diffstat (limited to 'arch/mips/include/asm/switch_to.h')
-rw-r--r--   arch/mips/include/asm/switch_to.h   53
1 file changed, 45 insertions, 8 deletions
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 4f8ddba8c36..495c1041a2c 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -15,14 +15,30 @@
 #include <asm/cpu-features.h>
 #include <asm/watch.h>
 #include <asm/dsp.h>
+#include <asm/cop2.h>
+#include <asm/msa.h>
 
 struct task_struct;
 
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
+enum {
+	FP_SAVE_NONE	= 0,
+	FP_SAVE_VECTOR	= -1,
+	FP_SAVE_SCALAR	= 1,
+};
+
+/**
+ * resume - resume execution of a task
+ * @prev:	The task previously executed.
+ * @next:	The task to begin executing.
+ * @next_ti:	task_thread_info(next).
+ * @fp_save:	Which, if any, FP context to save for prev.
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
  */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
+extern asmlinkage struct task_struct *resume(struct task_struct *prev,
+		struct task_struct *next, struct thread_info *next_ti,
+		s32 fp_save);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -30,7 +46,7 @@ extern struct task_struct *ll_task;
 #ifdef CONFIG_MIPS_MT_FPAFF
 
 /*
- * Handle the scheduler resume end of FPU affinity management.	 We do this
+ * Handle the scheduler resume end of FPU affinity management.  We do this
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
@@ -66,17 +82,38 @@ do {									\
 
 #define switch_to(prev, next, last)					\
 do {									\
-	u32 __usedfpu;							\
+	u32 __c0_stat;							\
+	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
 	if (cpu_has_dsp)						\
 		__save_dsp(prev);					\
+	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
+		if (cop2_lazy_restore)					\
+			KSTK_STATUS(prev) &= ~ST0_CU2;			\
+		__c0_stat = read_c0_status();				\
+		write_c0_status(__c0_stat | ST0_CU2);			\
+		cop2_save(&prev->thread.cp2);				\
+		write_c0_status(__c0_stat & ~ST0_CU2);			\
+	}								\
 	__clear_software_ll_bit();					\
-	__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);	\
-	(last) = resume(prev, next, task_thread_info(next), __usedfpu);	\
+	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
+		__fpsave = FP_SAVE_SCALAR;				\
+	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
+		__fpsave = FP_SAVE_VECTOR;				\
+	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
+	disable_msa();							\
 } while (0)
 
 #define finish_arch_switch(prev)					\
 do {									\
+	u32 __c0_stat;							\
+	if (cop2_present && !cop2_lazy_restore &&			\
+	    (KSTK_STATUS(current) & ST0_CU2)) {				\
+		__c0_stat = read_c0_status();				\
+		write_c0_status(__c0_stat | ST0_CU2);			\
+		cop2_restore(&current->thread.cp2);			\
+		write_c0_status(__c0_stat & ~ST0_CU2);			\
+	}								\
 	if (cpu_has_dsp)						\
 		__restore_dsp(current);					\
 	if (cpu_has_userlocal)						\
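Note: the new switch_to() path replaces the old __usedfpu flag with a single fp_save argument to resume(), where a task that used MSA (vector) context takes precedence over one that only used the scalar FPU. Below is a minimal, standalone C sketch of that selection priority only; the fake_task structure and its fields are hypothetical stand-ins for the real TIF_USEDFPU/TIF_USEDMSA thread_info flags, not kernel code.

	#include <stdio.h>

	/* Mirrors the enum introduced by this patch. */
	enum {
		FP_SAVE_NONE   = 0,
		FP_SAVE_VECTOR = -1,
		FP_SAVE_SCALAR = 1,
	};

	/*
	 * Hypothetical stand-ins for TIF_USEDFPU/TIF_USEDMSA; in the kernel
	 * these are per-task thread_info flags queried (and cleared) with
	 * test_and_clear_tsk_thread_flag().
	 */
	struct fake_task {
		int used_fpu;
		int used_msa;
	};

	/* Same priority ordering as switch_to(): MSA overrides scalar FPU. */
	static int pick_fp_save(const struct fake_task *prev)
	{
		int fpsave = FP_SAVE_NONE;

		if (prev->used_fpu)
			fpsave = FP_SAVE_SCALAR;
		if (prev->used_msa)
			fpsave = FP_SAVE_VECTOR;
		return fpsave;
	}

	int main(void)
	{
		struct fake_task t = { .used_fpu = 1, .used_msa = 1 };

		/* Prints -1 (FP_SAVE_VECTOR): saving the vector context
		 * covers the scalar FP registers as well. */
		printf("%d\n", pick_fp_save(&t));
		return 0;
	}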
