Diffstat (limited to 'arch/sh/mm/tlb-sh5.c')
| -rw-r--r-- | arch/sh/mm/tlb-sh5.c | 122 |
1 file changed, 91 insertions, 31 deletions
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index f34274a1ded..e4bb2a8e0a6 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -15,11 +15,9 @@
 #include <asm/mmu_context.h>
 
 /**
- * sh64_tlb_init
- *
- * Perform initial setup for the DTLB and ITLB.
+ * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
  */
-int __init sh64_tlb_init(void)
+int sh64_tlb_init(void)
 {
 	/* Assign some sane DTLB defaults */
 	cpu_data->dtlb.entries	= 64;
@@ -46,9 +44,7 @@ int __init sh64_tlb_init(void)
 }
 
 /**
- * sh64_next_free_dtlb_entry
- *
- * Find the next available DTLB entry
+ * sh64_next_free_dtlb_entry - Find the next available DTLB entry
  */
 unsigned long long sh64_next_free_dtlb_entry(void)
 {
@@ -56,9 +52,7 @@ unsigned long long sh64_next_free_dtlb_entry(void)
 }
 
 /**
- * sh64_get_wired_dtlb_entry
- *
- * Allocate a wired (locked-in) entry in the DTLB
+ * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
  */
 unsigned long long sh64_get_wired_dtlb_entry(void)
 {
@@ -71,12 +65,10 @@ unsigned long long sh64_get_wired_dtlb_entry(void)
 }
 
 /**
- * sh64_put_wired_dtlb_entry
+ * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
  *
  * @entry:	Address of TLB slot.
  *
- * Free a wired (locked-in) entry in the DTLB.
- *
  * Works like a stack, last one to allocate must be first one to free.
  */
 int sh64_put_wired_dtlb_entry(unsigned long long entry)
@@ -115,7 +107,7 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry)
 }
 
 /**
- * sh64_setup_tlb_slot
+ * sh64_setup_tlb_slot - Load up a translation in a wired slot.
  *
  * @config_addr:	Address of TLB slot.
  * @eaddr:		Virtual address.
@@ -125,26 +117,15 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry)
  * Load up a virtual<->physical translation for @eaddr<->@paddr in the
  * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
  */
-inline void sh64_setup_tlb_slot(unsigned long long config_addr,
-				unsigned long eaddr,
-				unsigned long asid,
-				unsigned long paddr)
+void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
+			 unsigned long asid, unsigned long paddr)
 {
 	unsigned long long pteh, ptel;
 
-	/* Sign extension */
-#if (NEFF == 32)
-	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
-#else
-#error "Can't sign extend more than 32 bits yet"
-#endif
+	pteh = neff_sign_extend(eaddr);
 	pteh &= PAGE_MASK;
 	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 
-#if (NEFF == 32)
-	ptel = (unsigned long long)(signed long long)(signed long) paddr;
-#else
-#error "Can't sign extend more than 32 bits yet"
-#endif
+	ptel = neff_sign_extend(paddr);
 	ptel &= PAGE_MASK;
 	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
@@ -154,11 +135,90 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr,
 }
 
 /**
- * sh64_teardown_tlb_slot
+ * sh64_teardown_tlb_slot - Teardown a translation.
  *
  * @config_addr:	Address of TLB slot.
  *
  * Teardown any existing mapping in the TLB slot @config_addr.
  */
-inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
+void sh64_teardown_tlb_slot(unsigned long long config_addr)
 	__attribute__ ((alias("__flush_tlb_slot")));
+
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long long entry;
+	unsigned long paddr, flags;
+
+	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
+
+	local_irq_save(flags);
+
+	entry = sh64_get_wired_dtlb_entry();
+	dtlb_entries[dtlb_entry++] = entry;
+
+	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+	paddr &= ~PAGE_MASK;
+
+	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+	local_irq_restore(flags);
+}
+
+void tlb_unwire_entry(void)
+{
+	unsigned long long entry;
+	unsigned long flags;
+
+	BUG_ON(!dtlb_entry);
+
+	local_irq_save(flags);
+	entry = dtlb_entries[--dtlb_entry];
+
+	sh64_teardown_tlb_slot(entry);
+	sh64_put_wired_dtlb_entry(entry);
+
+	local_irq_restore(flags);
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	unsigned long long ptel;
+	unsigned long long pteh = 0;
+	struct tlb_info *tlbp;
+	unsigned long long next;
+	unsigned int fault_code = get_thread_fault_code();
+
+	/* Get PTEL first */
+	ptel = pte.pte_low;
+
+	/*
+	 * Set PTEH register
+	 */
+	pteh = neff_sign_extend(address & MMU_VPN_MASK);
+
+	/* Set the ASID. */
+	pteh |= get_asid() << PTEH_ASID_SHIFT;
+	pteh |= PTEH_VALID;
+
+	/* Set PTEL register, set_pte has performed the sign extension */
+	ptel &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */
+
+	if (fault_code & FAULT_CODE_ITLB)
+		tlbp = &cpu_data->itlb;
+	else
+		tlbp = &cpu_data->dtlb;
+
+	next = tlbp->next;
+	__flush_tlb_slot(next);
+	asm volatile ("putcfg %0,1,%2\n\n\t"
+		      "putcfg %0,0,%1\n"
+		      : : "r" (next), "r" (pteh), "r" (ptel));
+
+	next += TLB_STEP;
+	if (next > tlbp->last)
+		next = tlbp->first;
+	tlbp->next = next;
+}
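
The main cleanup in sh64_setup_tlb_slot() is replacing the open-coded #if (NEFF == 32) cast chains with neff_sign_extend(). The standalone program below is a user-space model of what that helper does; the NEFF constants mirror the ones defined alongside the helper in the SH pgtable headers, but the program itself and its test addresses are illustrative only.

#include <assert.h>
#include <stdio.h>

/*
 * User-space model of neff_sign_extend(): on SH-5 only the low NEFF (32)
 * effective-address bits are implemented, and values written to PTEH/PTEL
 * must be sign-extended from bit NEFF-1 to the full 64 bits. The constants
 * mirror the kernel's; the test addresses below are made up.
 */
#define NEFF		32
#define NEFF_SIGN	(1ULL << (NEFF - 1))
#define NEFF_MASK	(~0ULL << NEFF)

static unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;

	/* Replicate bit NEFF-1 into bits NEFF..63. */
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

int main(void)
{
	/* An address with bit 31 set picks up the high ones... */
	assert(neff_sign_extend(0x80000000UL) == 0xffffffff80000000ULL);

	/* ...while a low address passes through unchanged. */
	assert(neff_sign_extend(0x00400000UL) == 0x0000000000400000ULL);

	/*
	 * Same result as the removed cast chain; 'signed int' stands in
	 * for the kernel's 'signed long', which is 32-bit on sh64.
	 */
	assert(neff_sign_extend(0x80000000UL) ==
	       (unsigned long long)(signed long long)(signed int)0x80000000UL);

	puts("NEFF sign extension OK");
	return 0;
}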
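The new tlb_wire_entry()/tlb_unwire_entry() pair keeps the wired slots in a small array used as a stack: dtlb_entry always indexes the first free slot, so wiring pushes with dtlb_entry++ and unwiring must pop with --dtlb_entry (a post-decrement would read one slot past the last wired entry). This matches the "last one to allocate must be first one to free" rule documented on sh64_put_wired_dtlb_entry(). A minimal user-space model of that bookkeeping, with invented slot addresses standing in for the handles returned by the sh64_*_wired_dtlb_entry() calls:

#include <assert.h>
#include <stdio.h>

#define NR_WIRED 64

static int dtlb_entry;				/* index of first free slot */
static unsigned long long dtlb_entries[NR_WIRED];

static unsigned long long next_handle = 0x100;	/* fake TLB slot addresses */

static void wire_entry(void)
{
	assert(dtlb_entry < NR_WIRED);		/* kernel: BUG_ON(== ARRAY_SIZE) */
	dtlb_entries[dtlb_entry++] = next_handle;
	next_handle += 0x10;
}

static unsigned long long unwire_entry(void)
{
	assert(dtlb_entry > 0);			/* kernel: BUG_ON(!dtlb_entry) */
	return dtlb_entries[--dtlb_entry];	/* pop: last wired, first freed */
}

int main(void)
{
	wire_entry();		/* slot 0x100 */
	wire_entry();		/* slot 0x110 */

	/* LIFO: handles come back in reverse order of allocation. */
	assert(unwire_entry() == 0x110);
	assert(unwire_entry() == 0x100);

	puts("wired-entry stack behaves LIFO");
	return 0;
}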
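For ordinary refills, __update_tlb() picks victims round-robin: it flushes the slot at tlbp->next, writes PTEH/PTEL into it via putcfg, then advances the cursor by TLB_STEP, wrapping from last back to first. The sketch below isolates just that cursor arithmetic, with invented geometry (16 slots, step 0x10) in place of the real cpu_data->dtlb/itlb values; in the kernel, wired slots are kept outside the first..last window so they are never chosen as victims.

#include <assert.h>
#include <stdio.h>

/* Invented config-space geometry, for illustration only. */
#define TLB_FIRST	0x100ULL
#define TLB_LAST	0x1f0ULL
#define TLB_STEP	0x10ULL

struct tlb_info {
	unsigned long long first, last, next;
};

/* Pick the victim slot and advance the cursor, as __update_tlb() does. */
static unsigned long long next_victim(struct tlb_info *tlbp)
{
	unsigned long long slot = tlbp->next;
	unsigned long long next = slot + TLB_STEP;

	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;

	return slot;
}

int main(void)
{
	struct tlb_info dtlb = { TLB_FIRST, TLB_LAST, TLB_FIRST };
	int i;

	/* Walk all 16 slots; the 17th refill wraps back to the first. */
	for (i = 0; i < 16; i++)
		assert(next_victim(&dtlb) == TLB_FIRST + i * TLB_STEP);
	assert(next_victim(&dtlb) == TLB_FIRST);

	puts("round-robin cursor wraps correctly");
	return 0;
}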
