Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r-- | arch/powerpc/mm/slb.c | 69 |
1 file changed, 32 insertions, 37 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a7357..a73d2d70097 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,28 +62,16 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
-					unsigned long entry)
+static inline void slb_shadow_clear(unsigned long entry)
 {
-	/*
-	 * Updating the shadow buffer before writing the SLB ensures
-	 * we don't get a stale entry here if we get preempted by PHYP
-	 * between these two statements.
-	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
-
-	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
-		     : "memory" );
+	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
 void slb_flush_and_rebolt(void)
@@ -100,12 +89,13 @@ void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
-
-	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+		slb_shadow_clear(2);
+	} else {
+		/* Update stack entry; others don't change */
+		slb_shadow_update(get_paca()->kstack, lflags, 2);
+	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +113,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
/* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -227,16 +226,12 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
-
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
-
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
-	asm volatile("isync":::"memory");
+	slb_shadow_update(PAGE_OFFSET, lflags, 0);
+	asm volatile("isync; slbia; sync; slbmte  %0,%1; isync" ::
+		     "r" (get_slb_shadow()->save_area[0].vsid),
+		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
+
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+
+	slb_flush_and_rebolt();
 }
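The core of the change is visible in slb_shadow_update(): the SLB shadow buffer can be read by the hypervisor at any instant, so each bolted entry is published by first invalidating its ESID and then ordering every store with smp_wmb() rather than a compiler-only barrier(). At no point can an observer see a valid ESID paired with a stale VSID. The standalone C11 sketch below mirrors that publication protocol outside the kernel; it is an illustrative analogue, not kernel code: shadow_entry, shadow_set and shadow_clear are hypothetical names, and release fences stand in for smp_wmb().

	/* Minimal userspace analogue of the slb_shadow_update() ordering
	 * above; the names and values here are hypothetical. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct shadow_entry {
		_Atomic uint64_t esid;	/* nonzero => entry is valid */
		_Atomic uint64_t vsid;
	};

	/* Invariant for an observer that may read the pair at any instant
	 * (as the hypervisor may read the SLB shadow): esid is either 0,
	 * or it is paired with the vsid written in the same update --
	 * never with a stale one. */
	static void shadow_set(struct shadow_entry *e, uint64_t esid,
			       uint64_t vsid)
	{
		/* 1. Invalidate, so the entry is skipped mid-update. */
		atomic_store_explicit(&e->esid, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* smp_wmb() */
		/* 2. Write the payload while the entry is invalid. */
		atomic_store_explicit(&e->vsid, vsid, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* smp_wmb() */
		/* 3. Revalidate; the new vsid is visible before the new esid. */
		atomic_store_explicit(&e->esid, esid, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	}

	/* Mirrors slb_shadow_clear(): invalidation is a single store,
	 * so no barrier is needed. */
	static void shadow_clear(struct shadow_entry *e)
	{
		atomic_store_explicit(&e->esid, 0, memory_order_relaxed);
	}

	int main(void)
	{
		struct shadow_entry e = { 0, 0 };

		shadow_set(&e, 0xc000000000000001ULL, 0x40004ULL);	/* bolt */
		shadow_clear(&e);					/* unbolt */
		printf("esid=%llx\n", (unsigned long long)e.esid);
		return 0;
	}

This is also why the patch swaps barrier() for smp_wmb(): barrier() only stops the compiler from reordering, while the reader here is another agent entirely, so a real store-ordering instruction is needed on a weakly ordered Power CPU for the stores to become visible in program order.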