Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--   arch/xtensa/mm/Makefile     1
-rw-r--r--   arch/xtensa/mm/cache.c     14
-rw-r--r--   arch/xtensa/mm/fault.c      4
-rw-r--r--   arch/xtensa/mm/highmem.c   72
-rw-r--r--   arch/xtensa/mm/init.c     358
-rw-r--r--   arch/xtensa/mm/misc.S       4
-rw-r--r--   arch/xtensa/mm/mmu.c       90
-rw-r--r--   arch/xtensa/mm/tlb.c      170
8 files changed, 569 insertions, 144 deletions
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f0b646d2f84..f54f78e24d7 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -4,3 +4,4 @@
 
 obj-y			:= init.o cache.o misc.o
 obj-$(CONFIG_MMU)	+= fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM)	+= highmem.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 81edeab82d1..63cbb867dad 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -59,6 +59,10 @@
  *
  */
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 /*
@@ -118,7 +122,7 @@ void flush_dcache_page(struct page *page)
  * For now, flush the whole cache. FIXME??
  */
 
-void flush_cache_range(struct vm_area_struct* vma,
+void local_flush_cache_range(struct vm_area_struct *vma,
 		       unsigned long start, unsigned long end)
 {
 	__flush_invalidate_dcache_all();
@@ -132,7 +136,7 @@ void flush_cache_range(struct vm_area_struct* vma,
  * alias versions of the cache flush functions.
  */
 
-void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
+void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 		      unsigned long pfn)
 {
 	/* Note that we have to use the 'alias' address to avoid multi-hit */
@@ -159,8 +163,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
 	/* Invalidate old entry in TLBs */
 
-	invalidate_itlb_mapping(addr);
-	invalidate_dtlb_mapping(addr);
+	flush_tlb_page(vma, addr);
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
@@ -180,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #else
 	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
 	    && (vma->vm_flags & VM_EXEC) != 0) {
-		unsigned long paddr = (unsigned long) page_address(page);
+		unsigned long paddr = (unsigned long)kmap_atomic(page);
 		__flush_dcache_page(paddr);
 		__invalidate_icache_page(paddr);
 		set_bit(PG_arch_1, &page->flags);
+		kunmap_atomic((void *)paddr);
 	}
 #endif
 }
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 4b7bc8db170..b57c4f91f48 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,7 +21,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
-unsigned long asid_cache = ASID_USER_FIRST;
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
 
 #undef DEBUG_PAGE_FAULT
@@ -72,6 +72,8 @@ void do_page_fault(struct pt_regs *regs)
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 00000000000..17a8c0d6fd1
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+	int type;
+
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int idx, type;
+
+	if (kvaddr >= (void *)FIXADDR_START &&
+	    kvaddr < (void *)FIXADDR_TOP) {
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
+		pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+		local_flush_tlb_kernel_range((unsigned long)kvaddr,
+					     (unsigned long)kvaddr + PAGE_SIZE);
+
+		kmap_atomic_idx_pop();
+	}
+
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+	unsigned long kmap_vstart;
+
+	/* cache the first kmap pte */
+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 7a5156ffebb..77ed20209ca 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,6 +8,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  *
  * Chris Zankel	<chris@zankel.net>
  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
@@ -27,11 +29,133 @@
 #include <asm/bootparam.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+	unsigned i;
+
+	pr_debug("Sysmem:\n");
+	for (i = 0; i < sysmem.nr_banks; ++i)
+		pr_debug("  0x%08lx - 0x%08lx (%ldK)\n",
+			 sysmem.bank[i].start, sysmem.bank[i].end,
+			 (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+	unsigned i;
+	struct meminfo *it = NULL;
+
+	for (i = 0; i < sysmem.nr_banks; ++i)
+		if (sysmem.bank[i].start <= start)
+			it = sysmem.bank + i;
+		else
+			break;
+	return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+	unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+	if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+		return -ENOMEM;
+	if (to != from)
+		memmove(to, from, n * sizeof(struct meminfo));
+	sysmem.nr_banks += to - from;
+	return 0;
+}
+
+/*
+ * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+	unsigned i;
+	struct meminfo *it = NULL;
+	unsigned long sz;
+	unsigned long bank_sz = 0;
+
+	if (start == end ||
+	    (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+		pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+			start, end - start);
+		return -EINVAL;
+	}
+
+	start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
+	sz = end - start;
+
+	it = find_bank(start);
+
+	if (it)
+		bank_sz = it->end - it->start;
+
+	if (it && bank_sz >= start - it->start) {
+		if (end - it->start > bank_sz)
+			it->end = end;
+		else
+			return 0;
+	} else {
+		if (!it)
+			it = sysmem.bank;
+		else
+			++it;
+
+		if (it - sysmem.bank < sysmem.nr_banks &&
+		    it->start - start <= sz) {
+			it->start = start;
+			if (it->end - it->start < sz)
+				it->end = end;
+			else
+				return 0;
+		} else {
+			if (move_banks(it + 1, it) < 0) {
+				pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+					start, end - start);
+				return -EINVAL;
+			}
+			it->start = start;
+			it->end = end;
+			return 0;
+		}
+	}
+	sz = it->end - it->start;
+	for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+		if (sysmem.bank[i].start - it->start <= sz) {
+			if (sz < sysmem.bank[i].end - it->start)
+				it->end = sysmem.bank[i].end;
+		} else {
+			break;
+		}
+
+	move_banks(it + 1, sysmem.bank + i);
+	return 0;
+}
 
 /*
  * mem_reserve(start, end, must_exist)
  *
  * Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not exist
+ * memory map is not altered.
  *
  * Parameters:
  *  start	Start of region,
@@ -39,58 +163,74 @@
  *  must_exist	Must exist in memory pool.
  *
  * Returns:
- *  0 (memory area couldn't be mapped)
- * -1 (success)
+ *  0 (success)
+ *  < 0 (error)
  */
 
 int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 {
-	int i;
-
-	if (start == end)
-		return 0;
+	struct meminfo *it;
+	struct meminfo *rm = NULL;
+	unsigned long sz;
+	unsigned long bank_sz = 0;
 
 	start = start & PAGE_MASK;
 	end = PAGE_ALIGN(end);
+	sz = end - start;
+	if (!sz)
+		return -EINVAL;
 
-	for (i = 0; i < sysmem.nr_banks; i++)
-		if (start < sysmem.bank[i].end
-		    && end >= sysmem.bank[i].start)
-			break;
+	it = find_bank(start);
+
+	if (it)
+		bank_sz = it->end - it->start;
 
-	if (i == sysmem.nr_banks) {
-		if (must_exist)
-			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
-				"not in any region!\n", start, end);
-		return 0;
+	if ((!it || end - it->start > bank_sz) && must_exist) {
+		pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+			start, end);
+		return -EINVAL;
 	}
 
-	if (start > sysmem.bank[i].start) {
-		if (end < sysmem.bank[i].end) {
-			/* split entry */
-			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
-				panic("meminfo overflow\n");
-			sysmem.bank[sysmem.nr_banks].start = end;
-			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
-			sysmem.nr_banks++;
+	if (it && start - it->start <= bank_sz) {
+		if (start == it->start) {
+			if (end - it->start < bank_sz) {
+				it->start = end;
+				return 0;
+			} else {
+				rm = it;
+			}
+		} else {
+			it->end = start;
+			if (end - it->start < bank_sz)
+				return add_sysmem_bank(end,
+						       it->start + bank_sz);
+			++it;
 		}
-		sysmem.bank[i].end = start;
+	}
 
-	} else if (end < sysmem.bank[i].end) {
-		sysmem.bank[i].start = end;
+	if (!it)
+		it = sysmem.bank;
 
-	} else {
-		/* remove entry */
-		sysmem.nr_banks--;
-		sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-		sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+	for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+		if (it->end - start <= sz) {
+			if (!rm)
+				rm = it;
+		} else {
+			if (it->start - start < sz)
+				it->start = end;
+			break;
+		}
 	}
-	return -1;
+
+	if (rm)
+		move_banks(rm, it);
+	return 0;
 }
 
 
 /*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
  */
 
 void __init bootmem_init(void)
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
 	unsigned long bootmap_start, bootmap_size;
 	int i;
 
+	sysmem_dump();
 	max_low_pfn = max_pfn = 0;
 	min_low_pfn = ~0;
 
@@ -142,28 +283,27 @@ void __init bootmem_init(void)
 
 	/* Add all remaining memory pieces into the bootmem map */
 
-	for (i=0; i<sysmem.nr_banks; i++)
-		free_bootmem(sysmem.bank[i].start,
-			     sysmem.bank[i].end - sysmem.bank[i].start);
+	for (i = 0; i < sysmem.nr_banks; i++) {
+		if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+			unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+						sysmem.bank[i].end);
+			free_bootmem(sysmem.bank[i].start,
+				     end - sysmem.bank[i].start);
+		}
+	}
 
 }
 
 
 void __init zones_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES];
-	int i;
-
 	/* All pages are DMA-able, so we put them all in the DMA zone. */
-
-	zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
-	for (i = 1; i < MAX_NR_ZONES; i++)
-		zones_size[i] = 0;
-
+	unsigned long zones_size[MAX_NR_ZONES] = {
+		[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+		[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
-
+	};
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
@@ -173,50 +313,38 @@ void __init zones_init(void)
 
 void __init mem_init(void)
 {
-	unsigned long codesize, reservedpages, datasize, initsize;
-	unsigned long highmemsize, tmp, ram;
-
-	max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET;
-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-	highmemsize = 0;
-
 #ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
-#endif
+	unsigned long tmp;
 
-	totalram_pages += free_all_bootmem();
+	reset_all_zones_managed_pages();
+	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+		free_highmem_page(pfn_to_page(tmp));
+#endif
 
-	reservedpages = ram = 0;
-	for (tmp = 0; tmp < max_mapnr; tmp++) {
-		ram++;
-		if (PageReserved(mem_map+tmp))
-			reservedpages++;
-	}
+	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
 
-	codesize = (unsigned long) _etext - (unsigned long) _stext;
-	datasize = (unsigned long) _edata - (unsigned long) _sdata;
-	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
-	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
-	       "%ldk data, %ldk init %ldk highmem)\n",
-	       nr_free_pages() << (PAGE_SHIFT-10),
-	       ram << (PAGE_SHIFT-10),
-	       codesize >> 10,
-	       reservedpages << (PAGE_SHIFT-10),
-	       datasize >> 10,
-	       initsize >> 10,
-	       highmemsize >> 10);
-}
+	free_all_bootmem();
 
-void
-free_reserved_mem(void *start, void *end)
-{
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page((unsigned long)start);
-		totalram_pages++;
-	}
+	mem_init_print_info(NULL);
+	pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+#endif
+		"    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n"
+		"    lowmem  : 0x%08x - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+		VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+		PAGE_OFFSET, PAGE_OFFSET +
+		(max_low_pfn - min_low_pfn) * PAGE_SIZE,
+		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -224,16 +352,62 @@ extern int initrd_is_mapped;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (initrd_is_mapped) {
-		free_reserved_mem((void*)start, (void*)end);
-		printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
-	}
+	if (initrd_is_mapped)
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
 
 void free_initmem(void)
 {
-	free_reserved_mem(__init_begin, __init_end);
-	printk("Freeing unused kernel memory: %zuk freed\n",
-	       (__init_end - __init_begin) >> 10);
+	free_initmem_default(-1);
+}
+
+static void __init parse_memmap_one(char *p)
+{
+	char *oldp;
+	unsigned long start_at, mem_size;
+
+	if (!p)
+		return;
+
+	oldp = p;
+	mem_size = memparse(p, &p);
+	if (p == oldp)
+		return;
+
+	switch (*p) {
+	case '@':
+		start_at = memparse(p + 1, &p);
+		add_sysmem_bank(start_at, start_at + mem_size);
+		break;
+
+	case '$':
+		start_at = memparse(p + 1, &p);
+		mem_reserve(start_at, start_at + mem_size, 0);
+		break;
+
+	case 0:
+		mem_reserve(mem_size, 0, 0);
+		break;
+
+	default:
+		pr_warn("Unrecognized memmap syntax: %s\n", p);
+		break;
+	}
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+	while (str) {
+		char *k = strchr(str, ',');
+
+		if (k)
+			*k++ = 0;
+
+		parse_memmap_one(str);
+		str = k;
+	}
+
+	return 0;
 }
+early_param("memmap", parse_memmap_opt);
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index d97ed1ba7b0..1f68558dbcc 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -140,7 +140,7 @@ ENTRY(clear_user_page)
 
 	/* Setup a temporary DTLB with the color of the VPN */
 
-	movi	a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+	movi	a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
 	movi	a5, TLBTEMP_BASE_1			# virt
 	add	a6, a2, a4				# ppn
 	add	a2, a5, a3				# add 'color'
@@ -194,7 +194,7 @@ ENTRY(copy_user_page)
 	or	a9, a9, a8
 	slli	a4, a4, PAGE_SHIFT
 	s32i	a9, a5, PAGE_FLAGS
-	movi	a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+	movi	a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
 
 	beqz	a6, 1f
 
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 0f77f9d3bb8..3429b483d9f 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -3,6 +3,7 @@
  *
  * Extracted from init.c
  */
+#include <linux/bootmem.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/string.h>
@@ -13,27 +14,82 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
+#include <asm/initialize_mmu.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+	if (pmd_none(*pmd)) {
+		unsigned i;
+		pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+		for (i = 0; i < 1024; i++)
+			pte_clear(NULL, 0, pte + i);
+
+		set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+		pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+			 __func__, vaddr, pmd, pte);
+		return pte;
+	} else {
+		return pte_offset_kernel(pmd, 0);
+	}
+}
+
+static void __init fixedrange_init(void)
+{
+	BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+	init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
 
 void __init paging_init(void)
 {
 	memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+	fixedrange_init();
+	pkmap_page_table = init_pmd(PKMAP_BASE);
+	kmap_init();
+#endif
 }
 
 /*
  * Flush the mmu and reset associated register to default values.
  */
-void __init init_mmu(void)
+void init_mmu(void)
 {
-	/* Writing zeros to the <t>TLBCFG special registers ensure
-	 * that valid values exist in the register. For existing
-	 * PGSZID<w> fields, zero selects the first element of the
-	 * page-size array. For nonexistent PGSZID<w> fields, zero is
-	 * the best value to write. Also, when changing PGSZID<w>
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+	/*
+	 * Writing zeros to the instruction and data TLBCFG special
+	 * registers ensure that valid values exist in the register.
+	 *
+	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
+	 * zero is the best value to write. Also, when changing PGSZID<w>
 	 * fields, the corresponding TLB must be flushed.
 	 */
 	set_itlbcfg_register(0);
 	set_dtlbcfg_register(0);
-	flush_tlb_all();
+#endif
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+	/*
+	 * Update the IO area mapping in case xtensa_kio_paddr has changed
+	 */
+	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+			XCHAL_KIO_CACHED_VADDR + 6);
+	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+			XCHAL_KIO_CACHED_VADDR + 6);
+	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+			XCHAL_KIO_BYPASS_VADDR + 6);
+	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+			XCHAL_KIO_BYPASS_VADDR + 6);
+#endif
+
+	local_flush_tlb_all();
 
 	/* Set rasid register to a known value. */
 
@@ -46,23 +102,3 @@ void __init init_mmu(void)
 	 */
 	set_ptevaddr_register(PGTABLE_START);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void *addr)
-{
-	pte_t *ptep = (pte_t *)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-					  PAGE_SIZE, PAGE_SIZE,
-					  SLAB_HWCACHE_ALIGN,
-					  pgd_ctor);
-}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 5411aa67c68..5ece856c572 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void)
 }
 
-void flush_tlb_all (void)
+void local_flush_tlb_all(void)
 {
 	__flush_itlb_all();
 	__flush_dtlb_all();
 }
@@ -60,19 +60,23 @@
  * a new context will be assigned to it.
  */
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
+	int cpu = smp_processor_id();
+
 	if (mm == current->active_mm) {
 		unsigned long flags;
-		local_save_flags(flags);
-		__get_new_mmu_context(mm);
-		__load_mmu_context(mm);
+		local_irq_save(flags);
+		mm->context.asid[cpu] = NO_CONTEXT;
+		activate_context(mm, cpu);
 		local_irq_restore(flags);
+	} else {
+		mm->context.asid[cpu] = NO_CONTEXT;
+		mm->context.cpu = -1;
 	}
-	else
-		mm->context = 0;
 }
 
+
 #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
 #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
 #if _ITLB_ENTRIES > _DTLB_ENTRIES
@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm)
 # define _TLB_ENTRIES _DTLB_ENTRIES
 #endif
 
-void flush_tlb_range (struct vm_area_struct *vma,
-		      unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
 {
+	int cpu = smp_processor_id();
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long flags;
 
-	if (mm->context == NO_CONTEXT)
+	if (mm->context.asid[cpu] == NO_CONTEXT)
 		return;
 
 #if 0
 	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
-			(unsigned long)mm->context, start, end);
+			(unsigned long)mm->context.asid[cpu], start, end);
 #endif
-	local_save_flags(flags);
+	local_irq_save(flags);
 
 	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
 		int oldpid = get_rasid_register();
-		set_rasid_register (ASID_INSERT(mm->context));
+
+		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 		start &= PAGE_MASK;
 		if (vma->vm_flags & VM_EXEC)
 			while(start < end) {
@@ -114,23 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
 
 		set_rasid_register(oldpid);
 	} else {
-		flush_tlb_mm(mm);
+		local_flush_tlb_mm(mm);
 	}
 	local_irq_restore(flags);
 }
 
-void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	int cpu = smp_processor_id();
 	struct mm_struct* mm = vma->vm_mm;
 	unsigned long flags;
 	int oldpid;
 
-	if(mm->context == NO_CONTEXT)
+	if (mm->context.asid[cpu] == NO_CONTEXT)
 		return;
 
-	local_save_flags(flags);
+	local_irq_save(flags);
 
 	oldpid = get_rasid_register();
+	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 
 	if (vma->vm_flags & VM_EXEC)
 		invalidate_itlb_mapping(page);
@@ -140,3 +148,131 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
 
 	local_irq_restore(flags);
 }
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+		start &= PAGE_MASK;
+		while (start < end) {
+			invalidate_itlb_mapping(start);
+			invalidate_dtlb_mapping(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		local_flush_tlb_all();
+	}
+}
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+	struct task_struct *task = get_current();
+	struct mm_struct *mm = task->mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (!mm)
+		mm = task->active_mm;
+	pgd = pgd_offset(mm, vaddr);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+	pmd = pmd_offset(pgd, vaddr);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
+	pte = pte_offset_map(pmd, vaddr);
+	if (!pte)
+		return 0;
+	return pte_val(*pte);
+}
+
+enum {
+	TLB_SUSPICIOUS = 1,
+	TLB_INSANE = 2,
+};
+
+static void tlb_insane(void)
+{
+	BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+	WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or PTE is
+ * marked as non-present. Non-present PTE and the page with non-zero refcount
+ * and zero mapcount is normal for batched TLB flush operation. Zero refcount
+ * means that the page was freed prematurely. Non-zero mapcount is unusual,
+ * but does not necessary means an error, thus marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+	unsigned tlbidx = w | (e << PAGE_SHIFT);
+	unsigned r0 = dtlb ?
+		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+	unsigned pte = get_pte_for_vaddr(vpn);
+	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+	unsigned tlb_asid = r0 & ASID_MASK;
+	bool kernel = tlb_asid == 1;
+	int rc = 0;
+
+	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+		       dtlb ? 'D' : 'I', w, e, vpn,
+		       kernel ? "kernel" : "user");
+		rc |= TLB_INSANE;
+	}
+
+	if (tlb_asid == mm_asid) {
+		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+			read_itlb_translation(tlbidx);
+		if ((pte ^ r1) & PAGE_MASK) {
+			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+			if (pte == 0 || !pte_present(__pte(pte))) {
+				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+				pr_err("page refcount: %d, mapcount: %d\n",
+				       page_count(p),
+				       page_mapcount(p));
+				if (!page_count(p))
+					rc |= TLB_INSANE;
+				else if (page_mapped(p))
+					rc |= TLB_SUSPICIOUS;
+			} else {
+				rc |= TLB_INSANE;
+			}
+		}
+	}
+	return rc;
+}
+
+void check_tlb_sanity(void)
+{
+	unsigned long flags;
+	unsigned w, e;
+	int bug = 0;
+
+	local_irq_save(flags);
+	for (w = 0; w < DTLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, true);
+	for (w = 0; w < ITLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, false);
+	if (bug & TLB_INSANE)
+		tlb_insane();
+	if (bug & TLB_SUSPICIOUS)
+		tlb_suspicious();
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */
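A minimal usage sketch, not part of the commit above: the caller zero_highmem_page() below is hypothetical and only illustrates how the kmap_atomic()/kunmap_atomic() pair introduced in highmem.c is consumed. kunmap_atomic() is the generic linux/highmem.h wrapper that ends up calling the __kunmap_atomic() defined here.

/*
 * Hypothetical caller, for illustration only: zero a page that may
 * live in highmem via the per-CPU atomic kmap added by this series.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void zero_highmem_page(struct page *page)
{
	/* Disables pagefaults; lowmem pages short-circuit to page_address(). */
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);

	/* Clears the fixmap pte and flushes its TLB entry on this CPU. */
	kunmap_atomic(vaddr);
}

On the boot-time side, parse_memmap_one() above accepts "size@start" (added as a bank via add_sysmem_bank()), "size$start" (reserved via mem_reserve()), and a bare "size" limit, so command lines such as memmap=64M@0x60000000 or memmap=16M$0x6c000000 (addresses here are examples only) adjust the sysmem map before bootmem_init() runs.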
