Diffstat (limited to 'arch/cris/arch-v10/mm')
-rw-r--r--	arch/cris/arch-v10/mm/fault.c	18
-rw-r--r--	arch/cris/arch-v10/mm/init.c	 6
-rw-r--r--	arch/cris/arch-v10/mm/tlb.c	89
3 files changed, 44 insertions(+), 69 deletions(-)
diff --git a/arch/cris/arch-v10/mm/fault.c b/arch/cris/arch-v10/mm/fault.c
index fe2615022b9..ed60588f846 100644
--- a/arch/cris/arch-v10/mm/fault.c
+++ b/arch/cris/arch-v10/mm/fault.c
@@ -4,16 +4,16 @@
  * Low level bus fault handler
  *
  *
- * Copyright (C) 2000, 2001 Axis Communications AB
+ * Copyright (C) 2000-2007 Axis Communications AB
+ *
+ * Authors: Bjorn Wesen
  *
- * Authors: Bjorn Wesen
- *
  */

 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
-#include <asm/arch/svinto.h>
+#include <arch/svinto.h>
 #include <asm/mmu_context.h>

 /* debug of low-level TLB reload */
@@ -60,7 +60,7 @@ handle_mmu_bus_fault(struct pt_regs *regs)
 #ifdef DEBUG
 	page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
 	acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
-	inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
+	inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
 	index = IO_EXTRACT(R_TLB_SELECT, index, select);
 #endif
 	miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
@@ -80,16 +80,16 @@ handle_mmu_bus_fault(struct pt_regs *regs)
 	 * do_page_fault may have flushed the TLB so we have to restore
 	 * the MMU registers.
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	pmd = (pmd_t *)(pgd + pgd_index(address));
 	if (pmd_none(*pmd))
-		return;
+		goto exit;
 	pte = *pte_offset_kernel(pmd, address);
 	if (!pte_present(pte))
-		return;
+		goto exit;
 	*R_TLB_SELECT = select;
 	*R_TLB_HI = cause;
 	*R_TLB_LO = pte_val(pte);
+exit:
 	local_irq_restore(flags);
 }
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index e0fcd1a9bfd..e7f8066105a 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -12,7 +12,7 @@
 #include <asm/mmu.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/arch/svinto.h>
+#include <arch/svinto.h>

 extern void tlb_init(void);

@@ -182,7 +182,7 @@ paging_init(void)
 	 * mem_map page array.
 	 */

-	free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
+	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
 }

 /* Initialize remaps of some I/O-ports. It is important that this
@@ -241,7 +241,7 @@ flush_etrax_cacherange(void *startadr, int length)
 }

 /* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
- * will occationally corrupt certain CPU writes if the DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
 * happen to be hot in the cache.
 *
 * As a workaround, we have to flush the relevant parts of the cache
diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
index 70a5523eff7..21d78c599ba 100644
--- a/arch/cris/arch-v10/mm/tlb.c
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -4,15 +4,15 @@
 * Low level TLB handling
 *
 *
- * Copyright (C) 2000-2002 Axis Communications AB
- *
+ * Copyright (C) 2000-2007 Axis Communications AB
+ *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
-#include <asm/arch/svinto.h>
+#include <arch/svinto.h>

 #define D(x)

@@ -39,16 +39,15 @@ flush_tlb_all(void)
 	unsigned long flags;

 	/* the vpn of i & 0xf is so we dont write similar TLB entries
-	 * in the same 4-way entry group. details..
+	 * in the same 4-way entry group. details...
 	 */

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
 		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
 		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
-
+
 		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 			      IO_STATE(R_TLB_LO, valid, no ) |
 			      IO_STATE(R_TLB_LO, kernel,no ) |
@@ -72,20 +71,19 @@ flush_tlb_mm(struct mm_struct *mm)

 	if(page_id == NO_CONTEXT)
 		return;
-
+
 	/* mark the TLB entries that match the page_id as invalid.
 	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
-	 * global pages. is it worth the extra I/O ?
+	 * global pages. is it worth the extra I/O ?
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
 		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
 		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
 			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
-
+
 			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 				      IO_STATE(R_TLB_LO, valid, no ) |
 				      IO_STATE(R_TLB_LO, kernel,no ) |
@@ -98,9 +96,7 @@ flush_tlb_mm(struct mm_struct *mm)

 /* invalidate a single page */

-void
-flush_tlb_page(struct vm_area_struct *vma,
-	       unsigned long addr)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int page_id = mm->context.page_id;
@@ -115,11 +111,10 @@ flush_tlb_page(struct vm_area_struct *vma,

 	addr &= PAGE_MASK; /* perhaps not necessary */

 	/* invalidate those TLB entries that match both the mm context
-	 * and the virtual address requested
+	 * and the virtual address requested
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
 		unsigned long tlb_hi;
 		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
@@ -128,7 +123,7 @@ flush_tlb_page(struct vm_area_struct *vma,
 		    (tlb_hi & PAGE_MASK) == addr) {
 			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 				addr; /* same addr as before works. */
-
+
 			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 				      IO_STATE(R_TLB_LO, valid, no ) |
 				      IO_STATE(R_TLB_LO, kernel,no ) |
@@ -139,28 +134,6 @@ flush_tlb_page(struct vm_area_struct *vma,
 	local_irq_restore(flags);
 }

-/* dump the entire TLB for debug purposes */
-
-#if 0
-void
-dump_tlb_all(void)
-{
-	int i;
-	unsigned long flags;
-
-	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");
-
-	local_save_flags(flags);
-	local_irq_disable();
-	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
-		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
-		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
-		       i, *R_TLB_HI, *R_TLB_LO);
-	}
-	local_irq_restore(flags);
-}
-#endif
-
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
@@ -175,27 +148,29 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)

 /* called in schedule() just before actually doing the switch_to */

-void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *tsk)
 {
-	/* make sure we have a context */
+	if (prev != next) {
+		/* make sure we have a context */
+		get_mmu_context(next);

-	get_mmu_context(next);
+		/* remember the pgd for the fault handlers
+		 * this is similar to the pgd register in some other CPU's.
+		 * we need our own copy of it because current and active_mm
+		 * might be invalid at points where we still need to derefer
+		 * the pgd.
+		 */

-	/* remember the pgd for the fault handlers
-	 * this is similar to the pgd register in some other CPU's.
-	 * we need our own copy of it because current and active_mm
-	 * might be invalid at points where we still need to derefer
-	 * the pgd.
-	 */
+		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

-	per_cpu(current_pgd, smp_processor_id()) = next->pgd;
+		/* switch context in the MMU */

-	/* switch context in the MMU */
-
-	D(printk("switching mmu_context to %d (%p)\n", next->context, next));
+		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
+			next->context, next));

-	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
+		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
+					  page_id, next->context.page_id);
+	}
 }
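Two patterns recur in the fault.c hunks above: the open-coded local_save_flags()/local_irq_disable() pair becomes the equivalent single call local_irq_save(), and the early return statements become goto exit so that every path out of the critical section passes through local_irq_restore(). A minimal sketch of the before/after shape; entry_present() and write_tlb_regs() are hypothetical helpers standing in for the real pmd/pte walk and TLB register writes, not kernel interfaces:

#include <linux/irqflags.h>
#include <linux/types.h>

/* Hypothetical stand-ins, so the sketch is self-contained. */
static bool entry_present(unsigned long addr) { return addr != 0; }
static void write_tlb_regs(unsigned long addr) { (void)addr; }

/* Old shape: two calls to enter the critical section, and an early
 * return that skips local_irq_restore(). */
static void reload_old(unsigned long addr)
{
	unsigned long flags;

	local_save_flags(flags);	/* save the IRQ state...        */
	local_irq_disable();		/* ...then disable interrupts   */
	if (!entry_present(addr))
		return;			/* leaves the restore unexecuted */
	write_tlb_regs(addr);
	local_irq_restore(flags);
}

/* New shape: one save-and-disable call, one exit path. */
static void reload_new(unsigned long addr)
{
	unsigned long flags;

	local_irq_save(flags);		/* save and disable in one step */
	if (!entry_present(addr))
		goto exit;		/* every path ends at the restore */
	write_tlb_regs(addr);
exit:
	local_irq_restore(flags);
}

The same local_irq_save() conversion is applied to all three flush routines in tlb.c.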
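The flush routines in tlb.c share one idiom: sweep every TLB entry and overwrite the matching ones with a tag no live context uses (INVALID_PAGEID) and valid = no. The "i & 0xf" the comment alludes to is about placement: assuming the ETRAX 100 TLB is 4-way set-associative with the set chosen by the low four VPN bits, giving dummy entry i the vpn i & 0xf spreads the 64 invalid entries evenly, four per set, instead of piling identical entries into one 4-way group. A compact sketch under those assumptions; the array and constants below stand in for the R_TLB_* register interface and are not the svinto.h definitions:

/* Assumed geometry: 64 entries, 4 ways x 16 sets, set = vpn & 0xf. */
#define TLB_ENTRIES	64
#define INVALID_PID	63	/* assumed: a page_id never given to a context */

struct tlb_entry {
	unsigned int page_id;	/* owning context's tag */
	unsigned int vpn;	/* virtual page number  */
	unsigned int valid;
};
static struct tlb_entry tlb[TLB_ENTRIES];	/* stand-in for R_TLB_* */

static void flush_all_sketch(void)
{
	int i;

	for (i = 0; i < TLB_ENTRIES; i++) {
		tlb[i].page_id = INVALID_PID;
		/* vpn = i & 0xf gives consecutive dummy entries distinct
		 * set indices, so each 4-way set ends up with exactly
		 * four invalid entries rather than duplicates in one set */
		tlb[i].vpn = i & 0xf;
		tlb[i].valid = 0;
	}
}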
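The switch_mm() change wraps the whole body in if (prev != next): when the scheduler switches between threads that share one address space, the MMU already holds the right page_id and the per-CPU current_pgd is already correct, so reloading them is pure overhead. Because TLB entries stay tagged with their context's page_id, even a real switch needs no flush here, only the pgd publish and the R_MMU_CONTEXT write. A minimal sketch of that guard, with hypothetical stand-ins for the per-CPU slot and the MMU register, and with get_mmu_context() omitted:

/* Hypothetical simplifications of the CRIS-specific pieces. */
struct mm_sketch {
	unsigned int page_id;	/* MMU context tag for this mm */
	void *pgd;		/* top-level page table        */
};

static void *current_pgd_cache;		/* stands in for per_cpu(current_pgd, ...) */
static unsigned int mmu_context_reg;	/* stands in for *R_MMU_CONTEXT            */

static void switch_mm_sketch(struct mm_sketch *prev, struct mm_sketch *next)
{
	if (prev == next)
		return;	/* same address space: MMU state is already right */

	/* A real switch: publish the new pgd for the fault handlers,
	 * then retag the MMU; entries of other contexts may stay in
	 * the TLB because lookups compare the page_id tag. */
	current_pgd_cache = next->pgd;
	mmu_context_reg = next->page_id;
}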
