Diffstat (limited to 'arch/arc/mm')
-rw-r--r--   arch/arc/mm/cache_arc700.c   281
-rw-r--r--   arch/arc/mm/fault.c            8
-rw-r--r--   arch/arc/mm/init.c            34
-rw-r--r--   arch/arc/mm/tlb.c             91
-rw-r--r--   arch/arc/mm/tlbex.S           14
5 files changed, 228 insertions, 200 deletions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 5a1259cd948..353b202c37c 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -73,37 +73,9 @@ #include <asm/cachectl.h> #include <asm/setup.h> -/* Instruction cache related Auxiliary registers */ -#define ARC_REG_IC_BCR 0x77 /* Build Config reg */ -#define ARC_REG_IC_IVIC 0x10 -#define ARC_REG_IC_CTRL 0x11 -#define ARC_REG_IC_IVIL 0x19 -#if (CONFIG_ARC_MMU_VER > 2) -#define ARC_REG_IC_PTAG 0x1E -#endif - -/* Bit val in IC_CTRL */ -#define IC_CTRL_CACHE_DISABLE 0x1 - -/* Data cache related Auxiliary registers */ -#define ARC_REG_DC_BCR 0x72 /* Build Config reg */ -#define ARC_REG_DC_IVDC 0x47 -#define ARC_REG_DC_CTRL 0x48 -#define ARC_REG_DC_IVDL 0x4A -#define ARC_REG_DC_FLSH 0x4B -#define ARC_REG_DC_FLDL 0x4C -#if (CONFIG_ARC_MMU_VER > 2) -#define ARC_REG_DC_PTAG 0x5C -#endif - -/* Bit val in DC_CTRL */ -#define DC_CTRL_INV_MODE_FLUSH 0x40 -#define DC_CTRL_FLUSH_STATUS 0x100 - -char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) +char *arc_cache_mumbojumbo(int c, char *buf, int len) { int n = 0; - unsigned int c = smp_processor_id(); #define PR_CACHE(p, enb, str) \ { \ @@ -169,78 +141,114 @@ void read_decode_cache_bcr(void) */ void arc_cache_init(void) { - unsigned int cpu = smp_processor_id(); - struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; - struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; - unsigned int dcache_does_alias, temp; + unsigned int __maybe_unused cpu = smp_processor_id(); + struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc; char str[256]; printk(arc_cache_mumbojumbo(0, str, sizeof(str))); - if (!ic->ver) - goto chk_dc; - #ifdef CONFIG_ARC_HAS_ICACHE - /* 1. Confirm some of I-cache params which Linux assumes */ - if (ic->line_len != ARC_ICACHE_LINE_LEN) - panic("Cache H/W doesn't match kernel Config"); - - if (ic->ver != CONFIG_ARC_MMU_VER) - panic("Cache ver doesn't match MMU ver\n"); + ic = &cpuinfo_arc700[cpu].icache; + if (ic->ver) { + if (ic->line_len != L1_CACHE_BYTES) + panic("ICache line [%d] != kernel Config [%d]", + ic->line_len, L1_CACHE_BYTES); + + if (ic->ver != CONFIG_ARC_MMU_VER) + panic("Cache ver [%d] doesn't match MMU ver [%d]\n", + ic->ver, CONFIG_ARC_MMU_VER); + } #endif - /* Enable/disable I-Cache */ - temp = read_aux_reg(ARC_REG_IC_CTRL); +#ifdef CONFIG_ARC_HAS_DCACHE + dc = &cpuinfo_arc700[cpu].dcache; + if (dc->ver) { + unsigned int dcache_does_alias; -#ifdef CONFIG_ARC_HAS_ICACHE - temp &= ~IC_CTRL_CACHE_DISABLE; -#else - temp |= IC_CTRL_CACHE_DISABLE; -#endif + if (dc->line_len != L1_CACHE_BYTES) + panic("DCache line [%d] != kernel Config [%d]", + dc->line_len, L1_CACHE_BYTES); - write_aux_reg(ARC_REG_IC_CTRL, temp); + /* check for D-Cache aliasing */ + dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; -chk_dc: - if (!dc->ver) - return; + if (dcache_does_alias && !cache_is_vipt_aliasing()) + panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); + else if (!dcache_does_alias && cache_is_vipt_aliasing()) + panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); + } +#endif +} -#ifdef CONFIG_ARC_HAS_DCACHE - if (dc->line_len != ARC_DCACHE_LINE_LEN) - panic("Cache H/W doesn't match kernel Config"); +#define OP_INV 0x1 +#define OP_FLUSH 0x2 +#define OP_FLUSH_N_INV 0x3 +#define OP_INV_IC 0x4 - /* check for D-Cache aliasing */ - dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; +/* + * Common Helper for Line Operations on {I,D}-Cache + */ +static inline void __cache_line_loop(unsigned long paddr, unsigned long 
vaddr, + unsigned long sz, const int cacheop) +{ + unsigned int aux_cmd, aux_tag; + int num_lines; + const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE; - if (dcache_does_alias && !cache_is_vipt_aliasing()) - panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); - else if (!dcache_does_alias && cache_is_vipt_aliasing()) - panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); + if (cacheop == OP_INV_IC) { + aux_cmd = ARC_REG_IC_IVIL; +#if (CONFIG_ARC_MMU_VER > 2) + aux_tag = ARC_REG_IC_PTAG; #endif + } + else { + /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ + aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; +#if (CONFIG_ARC_MMU_VER > 2) + aux_tag = ARC_REG_DC_PTAG; +#endif + } - /* Set the default Invalidate Mode to "simpy discard dirty lines" - * as this is more frequent then flush before invalidate - * Ofcourse we toggle this default behviour when desired + /* Ensure we properly floor/ceil the non-line aligned/sized requests + * and have @paddr - aligned to cache line and integral @num_lines. + * This however can be avoided for page sized since: + * -@paddr will be cache-line aligned already (being page aligned) + * -@sz will be integral multiple of line size (being page sized). */ - temp = read_aux_reg(ARC_REG_DC_CTRL); - temp &= ~DC_CTRL_INV_MODE_FLUSH; + if (!full_page_op) { + sz += paddr & ~CACHE_LINE_MASK; + paddr &= CACHE_LINE_MASK; + vaddr &= CACHE_LINE_MASK; + } -#ifdef CONFIG_ARC_HAS_DCACHE - /* Enable D-Cache: Clear Bit 0 */ - write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE); + num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); + +#if (CONFIG_ARC_MMU_VER <= 2) + /* MMUv2 and before: paddr contains stuffed vaddrs bits */ + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; #else - /* Flush D cache */ - write_aux_reg(ARC_REG_DC_FLSH, 0x1); - /* Disable D cache */ - write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE); + /* if V-P const for loop, PTAG can be written once outside loop */ + if (full_page_op) + write_aux_reg(aux_tag, paddr); #endif - return; + while (num_lines-- > 0) { +#if (CONFIG_ARC_MMU_VER > 2) + /* MMUv3, cache ops require paddr seperately */ + if (!full_page_op) { + write_aux_reg(aux_tag, paddr); + paddr += L1_CACHE_BYTES; + } + + write_aux_reg(aux_cmd, vaddr); + vaddr += L1_CACHE_BYTES; +#else + write_aux_reg(aux_cmd, paddr); + paddr += L1_CACHE_BYTES; +#endif + } } -#define OP_INV 0x1 -#define OP_FLUSH 0x2 -#define OP_FLUSH_N_INV 0x3 - #ifdef CONFIG_ARC_HAS_DCACHE /*************************************************************** @@ -289,53 +297,6 @@ static inline void __dc_entire_op(const int cacheop) write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); } -/* - * Per Line Operation on D-Cache - * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete - * It's sole purpose is to help gcc generate ZOL - * (aliasing VIPT dcache flushing needs both vaddr and paddr) - */ -static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, - unsigned long sz, const int aux_reg) -{ - int num_lines; - - /* Ensure we properly floor/ceil the non-line aligned/sized requests - * and have @paddr - aligned to cache line and integral @num_lines. - * This however can be avoided for page sized since: - * -@paddr will be cache-line aligned already (being page aligned) - * -@sz will be integral multiple of line size (being page sized). 
- */ - if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - sz += paddr & ~DCACHE_LINE_MASK; - paddr &= DCACHE_LINE_MASK; - vaddr &= DCACHE_LINE_MASK; - } - - num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN); - -#if (CONFIG_ARC_MMU_VER <= 2) - paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; -#endif - - while (num_lines-- > 0) { -#if (CONFIG_ARC_MMU_VER > 2) - /* - * Just as for I$, in MMU v3, D$ ops also require - * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops - */ - write_aux_reg(ARC_REG_DC_PTAG, paddr); - - write_aux_reg(aux_reg, vaddr); - vaddr += ARC_DCACHE_LINE_LEN; -#else - /* paddr contains stuffed vaddrs bits */ - write_aux_reg(aux_reg, paddr); -#endif - paddr += ARC_DCACHE_LINE_LEN; - } -} - /* For kernel mappings cache operation: index is same as paddr */ #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) @@ -346,7 +307,6 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, unsigned long sz, const int cacheop) { unsigned long flags, tmp = tmp; - int aux; local_irq_save(flags); @@ -361,12 +321,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH); } - if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */ - aux = ARC_REG_DC_IVDL; - else - aux = ARC_REG_DC_FLDL; - - __dc_line_loop(paddr, vaddr, sz, aux); + __cache_line_loop(paddr, vaddr, sz, cacheop); if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ wait_for_flush(); @@ -434,46 +389,13 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, /*********************************************************** * Machine specific helper for per line I-Cache invalidate. */ -static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, +static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, unsigned long sz) { unsigned long flags; - int num_lines; - - /* - * Ensure we properly floor/ceil the non-line aligned/sized requests: - * However page sized flushes can be compile time optimised. - * -@paddr will be cache-line aligned already (being page aligned) - * -@sz will be integral multiple of line size (being page sized). 
- */ - if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - sz += paddr & ~ICACHE_LINE_MASK; - paddr &= ICACHE_LINE_MASK; - vaddr &= ICACHE_LINE_MASK; - } - - num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); - -#if (CONFIG_ARC_MMU_VER <= 2) - /* bits 17:13 of vaddr go as bits 4:0 of paddr */ - paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; -#endif local_irq_save(flags); - while (num_lines-- > 0) { -#if (CONFIG_ARC_MMU_VER > 2) - /* tag comes from phy addr */ - write_aux_reg(ARC_REG_IC_PTAG, paddr); - - /* index bits come from vaddr */ - write_aux_reg(ARC_REG_IC_IVIL, vaddr); - vaddr += ARC_ICACHE_LINE_LEN; -#else - /* paddr contains stuffed vaddrs bits */ - write_aux_reg(ARC_REG_IC_IVIL, paddr); -#endif - paddr += ARC_ICACHE_LINE_LEN; - } + __cache_line_loop(paddr, vaddr, sz, OP_INV_IC); local_irq_restore(flags); } @@ -483,6 +405,23 @@ static inline void __ic_entire_inv(void) read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ } +struct ic_line_inv_vaddr_ipi { + unsigned long paddr, vaddr; + int sz; +}; + +static void __ic_line_inv_vaddr_helper(void *info) +{ + struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info; + __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); +} + +static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, + unsigned long sz) +{ + struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz}; + on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1); +} #else #define __ic_entire_inv() @@ -631,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) */ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) { - unsigned long flags; - - local_irq_save(flags); - __ic_line_inv_vaddr(paddr, vaddr, len); __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); - local_irq_restore(flags); + __ic_line_inv_vaddr(paddr, vaddr, len); } /* wrapper to compile time eliminate alignment checks in flush loop */ diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index d63f3de0cd5..9c69552350c 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -17,7 +17,7 @@ #include <asm/pgalloc.h> #include <asm/mmu.h> -static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) +static int handle_vmalloc_fault(unsigned long address) { /* * Synchronize this task's top level page-table @@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; - pgd = pgd_offset_fast(mm, address); + pgd = pgd_offset_fast(current->active_mm, address); pgd_k = pgd_offset_k(address); if (!pgd_present(*pgd_k)) @@ -52,7 +52,7 @@ bad_area: return 1; } -void do_page_fault(struct pt_regs *regs, unsigned long address) +void do_page_fault(unsigned long address, struct pt_regs *regs) { struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; @@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address) * nothing more. 
*/ if (address >= VMALLOC_START && address <= VMALLOC_END) { - ret = handle_vmalloc_fault(mm, address); + ret = handle_vmalloc_fault(address); if (unlikely(ret)) goto bad_area_nosemaphore; else diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 81279ec73a6..523412369f7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -10,6 +10,9 @@ #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/memblock.h> +#ifdef CONFIG_BLK_DEV_INITRD +#include <linux/initrd.h> +#endif #include <linux/swap.h> #include <linux/module.h> #include <asm/page.h> @@ -42,6 +45,24 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz)); } +#ifdef CONFIG_BLK_DEV_INITRD +static int __init early_initrd(char *p) +{ + unsigned long start, size; + char *endp; + + start = memparse(p, &endp); + if (*endp == ',') { + size = memparse(endp + 1, NULL); + + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(start + size); + } + return 0; +} +early_param("initrd", early_initrd); +#endif + /* * First memory setup routine called from setup_arch() * 1. setup swapper's mm @init_mm @@ -80,6 +101,12 @@ void __init setup_arch_memory(void) memblock_reserve(CONFIG_LINUX_LINK_BASE, __pa(_end) - CONFIG_LINUX_LINK_BASE); +#ifdef CONFIG_BLK_DEV_INITRD + /*------------- reserve initrd image -----------------------*/ + if (initrd_start) + memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); +#endif + memblock_dump_all(); /*-------------- node setup --------------------------------*/ @@ -125,10 +152,3 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif - -#ifdef CONFIG_OF_FLATTREE -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - pr_err("%s(%llx, %llx)\n", __func__, start, end); -} -#endif /* CONFIG_OF_FLATTREE */ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 71cb26df425..e1acf0ce564 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -100,7 +100,7 @@ /* A copy of the ASID from the PID reg is kept in asid_cache */ -unsigned int asid_cache = MM_CTXT_FIRST_CYCLE; +DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE; /* * Utility Routine to erase a J-TLB entry @@ -274,6 +274,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm) void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { + const unsigned int cpu = smp_processor_id(); unsigned long flags; /* If range @start to @end is more than 32 TLB entries deep, @@ -297,9 +298,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_irq_save(flags); - if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) { + if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { while (start < end) { - tlb_entry_erase(start | hw_pid(vma->vm_mm)); + tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); start += PAGE_SIZE; } } @@ -346,6 +347,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { + const unsigned int cpu = smp_processor_id(); unsigned long flags; /* Note that it is critical that interrupts are DISABLED between @@ -353,14 +355,87 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) */ local_irq_save(flags); - if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) { - tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm)); + if (asid_mm(vma->vm_mm, cpu) 
!= MM_CTXT_NO_ASID) { + tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); utlb_invalidate(); } local_irq_restore(flags); } +#ifdef CONFIG_SMP + +struct tlb_args { + struct vm_area_struct *ta_vma; + unsigned long ta_start; + unsigned long ta_end; +}; + +static inline void ipi_flush_tlb_page(void *arg) +{ + struct tlb_args *ta = arg; + + local_flush_tlb_page(ta->ta_vma, ta->ta_start); +} + +static inline void ipi_flush_tlb_range(void *arg) +{ + struct tlb_args *ta = arg; + + local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); +} + +static inline void ipi_flush_tlb_kernel_range(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + + local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); +} + +void flush_tlb_all(void) +{ + on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm, + mm, 1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + struct tlb_args ta = { + .ta_vma = vma, + .ta_start = uaddr + }; + + on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct tlb_args ta = { + .ta_vma = vma, + .ta_start = start, + .ta_end = end + }; + + on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct tlb_args ta = { + .ta_start = start, + .ta_end = end + }; + + on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); +} +#endif + /* * Routine to create a TLB entry */ @@ -400,7 +475,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) local_irq_save(flags); - tlb_paranoid_check(vma->vm_mm->context.asid, address); + tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address); address &= PAGE_MASK; @@ -610,9 +685,9 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, struct pt_regs *regs) { int set, way, n; - unsigned int pd0[4], pd1[4]; /* assume max 4 ways */ unsigned long flags, is_valid; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; + unsigned int pd0[mmu->ways], pd1[mmu->ways]; local_irq_save(flags); @@ -637,7 +712,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, continue; /* Scan the set for duplicate ways: needs a nested loop */ - for (way = 0; way < mmu->ways; way++) { + for (way = 0; way < mmu->ways - 1; way++) { if (!pd0[way]) continue; diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index cf7d7d9ad69..79bfc81358c 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S @@ -260,7 +260,7 @@ ARCFP_CODE ;Fast Path Code, candidate for ICCM ; I-TLB Miss Exception Handler ;----------------------------------------------------------------------------- -ARC_ENTRY EV_TLBMissI +ENTRY(EV_TLBMissI) TLBMISS_FREEUP_REGS @@ -293,13 +293,13 @@ ARC_ENTRY EV_TLBMissI TLBMISS_RESTORE_REGS rtie -ARC_EXIT EV_TLBMissI +END(EV_TLBMissI) ;----------------------------------------------------------------------------- ; D-TLB Miss Exception Handler ;----------------------------------------------------------------------------- -ARC_ENTRY EV_TLBMissD +ENTRY(EV_TLBMissD) TLBMISS_FREEUP_REGS @@ -369,8 +369,8 @@ do_slow_path_pf: EXCEPTION_PROLOGUE ; ------- setup args for Linux Page fault Hanlder --------- - mov_s r0, sp - lr r1, [efa] + mov_s r1, sp + lr r0, [efa] ; We don't want exceptions to be disabled while 
the fault is handled.
 	; Now that we have saved the context we return from exception hence
@@ -381,6 +381,4 @@ do_slow_path_pf:
 	bl   do_page_fault
 	b    ret_from_exception
-ARC_EXIT EV_TLBMissD
-
-ARC_ENTRY EV_TLBMissB	; Bogus entry to measure sz of DTLBMiss hdlr
+END(EV_TLBMissD)
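
Note on the SMP side of the patch: both the new __ic_line_inv_vaddr() and the flush_tlb_*() wrappers follow the standard kernel cross-call pattern, i.e. pack the arguments of the per-CPU operation into a small struct, hand a void * helper to on_each_cpu() / on_each_cpu_mask(), and let each CPU run the pre-existing local_* routine on itself. A minimal sketch of that shape, using illustrative names rather than the ones in the patch:

    #include <linux/smp.h>
    #include <asm/tlbflush.h>

    struct maint_args {
            unsigned long start;
            unsigned long end;
    };

    /* runs on every CPU, in IPI context with IRQs disabled */
    static void do_maint_local(void *info)
    {
            struct maint_args *a = info;

            local_flush_tlb_kernel_range(a->start, a->end);
    }

    void do_maint_everywhere(unsigned long start, unsigned long end)
    {
            struct maint_args a = { .start = start, .end = end };

            /* wait == 1: don't return until all CPUs have finished */
            on_each_cpu(do_maint_local, &a, 1);
    }

On a non-SMP build on_each_cpu() simply runs the callback on the local CPU, so uniprocessor behaviour is unchanged; on_each_cpu_mask(mm_cpumask(mm), ...) is the narrower variant used above for the mm-scoped flushes, so CPUs that never ran the mm are not interrupted.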

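The new early_initrd() handler in arch/arc/mm/init.c mirrors what other architectures do: memparse() accepts a size with an optional K/M/G suffix, the physical start address is turned into a virtual one with __va(), and setup_arch_memory() then pulls the image out of the free-memory pool with memblock_reserve() so the page allocator never hands those pages out. A hypothetical boot command line using it (address and size chosen purely for illustration):

    console=ttyARC0,115200n8 root=/dev/ram0 initrd=0x82000000,8M

Here 0x82000000 is the physical load address of the initrd image and the text after the comma is its size; memparse() turns "8M" into 8 << 20 bytes.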