Diffstat (limited to 'arch/mips/mm/init.c')
-rw-r--r--   arch/mips/mm/init.c   407
1 file changed, 167 insertions, 240 deletions
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9e29ba9205f..6e4413330e3 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -8,10 +8,12 @@
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  */
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -25,7 +27,11 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/kcore.h>
+#include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
@@ -38,47 +44,22 @@
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
-/* Atomicity and interruptability */
-#ifdef CONFIG_MIPS_MT_SMTC
-
-#include <asm/mipsmtregs.h>
-
-#define ENTER_CRITICAL(flags) \
-        { \
-        unsigned int mvpflags; \
-        local_irq_save(flags);\
-        mvpflags = dvpe()
-#define EXIT_CRITICAL(flags) \
-        evpe(mvpflags); \
-        local_irq_restore(flags); \
-        }
-#else
-
-#define ENTER_CRITICAL(flags) local_irq_save(flags)
-#define EXIT_CRITICAL(flags) local_irq_restore(flags)
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-unsigned long highstart_pfn, highend_pfn;
-
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
  * where we have to avoid VCED / VECI exceptions for good performance at
  * any price.  Since page is never written to after the initialization we
  * don't have to care about aliases on other CPUs.
  */
 unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
  */
-unsigned long setup_zero_pages(void)
+void setup_zero_pages(void)
 {
-        unsigned int order;
-        unsigned long size;
+        unsigned int order, i;
         struct page *page;
 
         if (cpu_has_vce)
@@ -92,40 +73,13 @@ unsigned long setup_zero_pages(void)
         page = virt_to_page((void *)empty_zero_page);
         split_page(page, order);
-        while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
-                SetPageReserved(page);
-                page++;
-        }
-
-        size = PAGE_SIZE << order;
-        zero_page_mask = (size - 1) & PAGE_MASK;
-
-        return 1UL << order;
-}
-
-/*
- * These are almost like kmap_atomic / kunmap_atmic except they take an
- * additional address argument as the hint.
- */
-
-#define kmap_get_fixmap_pte(vaddr) \
-        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
-#ifdef CONFIG_MIPS_MT_SMTC
-static pte_t *kmap_coherent_pte;
-static void __init kmap_coherent_init(void)
-{
-        unsigned long vaddr;
+        for (i = 0; i < (1 << order); i++, page++)
+                mark_page_reserved(page);
 
-        /* cache the first coherent kmap pte */
-        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
-        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
-#else
-static inline void kmap_coherent_init(void) {}
-#endif
 
-static inline void *kmap_coherent(struct page *page, unsigned long addr)
+static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 {
         enum fixed_addresses idx;
         unsigned long vaddr, flags, entrylo;
@@ -133,59 +87,52 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr)
         pte_t pte;
         int tlbidx;
 
-        inc_preempt_count();
+        BUG_ON(Page_dcache_dirty(page));
+
+        pagefault_disable();
         idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
-#ifdef CONFIG_MIPS_MT_SMTC
-        idx += FIX_N_COLOURS * smp_processor_id();
-#endif
+        idx += in_interrupt() ? FIX_N_COLOURS : 0;
         vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-        pte = mk_pte(page, PAGE_KERNEL);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+        pte = mk_pte(page, prot);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
         entrylo = pte.pte_high;
 #else
-        entrylo = pte_val(pte) >> 6;
+        entrylo = pte_to_entrylo(pte_val(pte));
 #endif
 
-        ENTER_CRITICAL(flags);
+        local_irq_save(flags);
         old_ctx = read_c0_entryhi();
         write_c0_entryhi(vaddr & (PAGE_MASK << 1));
         write_c0_entrylo0(entrylo);
         write_c0_entrylo1(entrylo);
-#ifdef CONFIG_MIPS_MT_SMTC
-        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
-        /* preload TLB instead of local_flush_tlb_one() */
-        mtc0_tlbw_hazard();
-        tlb_probe();
-        tlb_probe_hazard();
-        tlbidx = read_c0_index();
-        mtc0_tlbw_hazard();
-        if (tlbidx < 0)
-                tlb_write_random();
-        else
-                tlb_write_indexed();
-#else
         tlbidx = read_c0_wired();
         write_c0_wired(tlbidx + 1);
         write_c0_index(tlbidx);
         mtc0_tlbw_hazard();
         tlb_write_indexed();
-#endif
         tlbw_use_hazard();
         write_c0_entryhi(old_ctx);
-        EXIT_CRITICAL(flags);
+        local_irq_restore(flags);
 
         return (void*) vaddr;
 }
 
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+        return __kmap_pgprot(page, addr, PAGE_KERNEL);
+}
+
+void *kmap_noncoherent(struct page *page, unsigned long addr)
+{
+        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+}
 
-static inline void kunmap_coherent(struct page *page)
+void kunmap_coherent(void)
 {
-#ifndef CONFIG_MIPS_MT_SMTC
         unsigned int wired;
         unsigned long flags, old_ctx;
 
-        ENTER_CRITICAL(flags);
+        local_irq_save(flags);
         old_ctx = read_c0_entryhi();
         wired = read_c0_wired() - 1;
         write_c0_wired(wired);
@@ -197,64 +144,73 @@ static inline void kunmap_coherent(struct page *page)
         tlb_write_indexed();
         tlbw_use_hazard();
         write_c0_entryhi(old_ctx);
-        EXIT_CRITICAL(flags);
-#endif
-        dec_preempt_count();
-        preempt_check_resched();
+        local_irq_restore(flags);
+        pagefault_enable();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+        unsigned long vaddr, struct vm_area_struct *vma)
+{
+        void *vfrom, *vto;
+
+        vto = kmap_atomic(to);
+        if (cpu_has_dc_aliases &&
+            page_mapped(from) && !Page_dcache_dirty(from)) {
+                vfrom = kmap_coherent(from, vaddr);
+                copy_page(vto, vfrom);
+                kunmap_coherent();
+        } else {
+                vfrom = kmap_atomic(from);
+                copy_page(vto, vfrom);
+                kunmap_atomic(vfrom);
+        }
+        if ((!cpu_has_ic_fills_f_dc) ||
+            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+                flush_data_cache_page((unsigned long)vto);
+        kunmap_atomic(vto);
+        /* Make sure this page is cleared on other CPU's too before using it */
+        smp_wmb();
 }
 
 void copy_to_user_page(struct vm_area_struct *vma,
         struct page *page, unsigned long vaddr, void *dst, const void *src,
         unsigned long len)
 {
-        if (cpu_has_dc_aliases) {
+        if (cpu_has_dc_aliases &&
+            page_mapped(page) && !Page_dcache_dirty(page)) {
                 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(vto, src, len);
-                kunmap_coherent(page);
-        } else
+                kunmap_coherent();
+        } else {
                 memcpy(dst, src, len);
+                if (cpu_has_dc_aliases)
+                        SetPageDcacheDirty(page);
+        }
         if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                 flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
 
-EXPORT_SYMBOL(copy_to_user_page);
-
 void copy_from_user_page(struct vm_area_struct *vma,
         struct page *page, unsigned long vaddr, void *dst, const void *src,
         unsigned long len)
 {
-        if (cpu_has_dc_aliases) {
-                void *vfrom =
-                        kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+        if (cpu_has_dc_aliases &&
+            page_mapped(page) && !Page_dcache_dirty(page)) {
+                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(dst, vfrom, len);
-                kunmap_coherent(page);
-        } else
+                kunmap_coherent();
+        } else {
                 memcpy(dst, src, len);
+                if (cpu_has_dc_aliases)
+                        SetPageDcacheDirty(page);
+        }
 }
-
-EXPORT_SYMBOL(copy_from_user_page);
-
-
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-static void __init kmap_init(void)
-{
-        unsigned long kmap_vstart;
-
-        /* cache the first kmap pte */
-        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
-        kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
+EXPORT_SYMBOL_GPL(copy_from_user_page);
 
 void __init fixrange_init(unsigned long start, unsigned long end,
         pgd_t *pgd_base)
 {
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_HIGHMEM
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
@@ -268,16 +224,15 @@ void __init fixrange_init(unsigned long start, unsigned long end,
         k = __pmd_offset(vaddr);
         pgd = pgd_base + i;
 
-        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                 pud = (pud_t *)pgd;
-                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                         pmd = (pmd_t *)pud;
-                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                 if (pmd_none(*pmd)) {
                                         pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                         set_pmd(pmd, __pmd((unsigned long)pte));
-                                        if (pte != pte_offset_kernel(pmd, 0))
-                                                BUG();
+                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                 }
                                 vaddr += PMD_SIZE;
                         }
@@ -289,18 +244,21 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-extern void pagetable_init(void);
-
-static int __init page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
 {
         int i;
 
         for (i = 0; i < boot_mem_map.nr_map; i++) {
                 unsigned long addr, end;
 
-                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+                switch (boot_mem_map.map[i].type) {
+                case BOOT_MEM_RAM:
+                case BOOT_MEM_INIT_RAM:
+                        break;
+                default: /* not usable memory */
                         continue;
+                }
 
                 addr = PFN_UP(boot_mem_map.map[i].addr);
                 end = PFN_DOWN(boot_mem_map.map[i].addr +
@@ -315,135 +273,86 @@
 void __init paging_init(void)
 {
-        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-        unsigned long max_dma, high, low;
-#ifndef CONFIG_FLATMEM
-        unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
-        unsigned long i, j, pfn;
-#endif
+        unsigned long max_zone_pfns[MAX_NR_ZONES];
+        unsigned long lastpfn __maybe_unused;
 
         pagetable_init();
 
 #ifdef CONFIG_HIGHMEM
         kmap_init();
 #endif
-        kmap_coherent_init();
-
-        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-        low = max_low_pfn;
-        high = highend_pfn;
-
-#ifdef CONFIG_ISA
-        if (low < max_dma)
-                zones_size[ZONE_DMA] = low;
-        else {
-                zones_size[ZONE_DMA] = max_dma;
-                zones_size[ZONE_NORMAL] = low - max_dma;
-        }
-#else
-        zones_size[ZONE_DMA] = low;
+#ifdef CONFIG_ZONE_DMA
+        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
+        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+        lastpfn = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-        if (cpu_has_dc_aliases) {
-                printk(KERN_WARNING "This processor doesn't support highmem.");
-                if (high - low)
-                        printk(" %ldk highmem ignored", high - low);
-                printk("\n");
-        } else
-                zones_size[ZONE_HIGHMEM] = high - low;
+        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+        lastpfn = highend_pfn;
+
+        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
+                printk(KERN_WARNING "This processor doesn't support highmem."
+                       " %ldk highmem ignored\n",
+                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+                lastpfn = max_low_pfn;
+        }
 #endif
 
-#ifdef CONFIG_FLATMEM
-        free_area_init(zones_size);
-#else
-        pfn = 0;
-        for (i = 0; i < MAX_NR_ZONES; i++)
-                for (j = 0; j < zones_size[i]; j++, pfn++)
-                        if (!page_is_ram(pfn))
-                                zholes_size[i]++;
-        free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+        free_area_init_nodes(max_zone_pfns);
 }
 
-static struct kcore_list kcore_mem, kcore_vmalloc;
 #ifdef CONFIG_64BIT
 static struct kcore_list kcore_kseg0;
 #endif
 
-void __init mem_init(void)
+static inline void mem_init_free_highmem(void)
 {
-        unsigned long codesize, reservedpages, datasize, initsize;
-        unsigned long tmp, ram;
+#ifdef CONFIG_HIGHMEM
+        unsigned long tmp;
 
+        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+                struct page *page = pfn_to_page(tmp);
+
+                if (!page_is_ram(tmp))
+                        SetPageReserved(page);
+                else
+                        free_highmem_page(page);
+        }
+#endif
+}
+
+void __init mem_init(void)
+{
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
 #endif
-        max_mapnr = highend_pfn;
+        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
 #else
        max_mapnr = max_low_pfn;
 #endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
-        totalram_pages += free_all_bootmem();
-        totalram_pages -= setup_zero_pages();   /* Setup zeroed pages.  */
-
-        reservedpages = ram = 0;
-        for (tmp = 0; tmp < max_low_pfn; tmp++)
-                if (page_is_ram(tmp)) {
-                        ram++;
-                        if (PageReserved(pfn_to_page(tmp)))
-                                reservedpages++;
-                }
-        num_physpages = ram;
-
-#ifdef CONFIG_HIGHMEM
-        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-                struct page *page = mem_map + tmp;
-
-                if (!page_is_ram(tmp)) {
-                        SetPageReserved(page);
-                        continue;
-                }
-                ClearPageReserved(page);
-#ifdef CONFIG_LIMITED_DMA
-                set_page_address(page, lowmem_page_address(page));
-#endif
-                init_page_count(page);
-                __free_page(page);
-                totalhigh_pages++;
-        }
-        totalram_pages += totalhigh_pages;
-        num_physpages += totalhigh_pages;
-#endif
-
-        codesize = (unsigned long) &_etext - (unsigned long) &_text;
-        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+        free_all_bootmem();
+        setup_zero_pages();     /* Setup zeroed pages.  */
+        mem_init_free_highmem();
+        mem_init_print_info(NULL);
 
 #ifdef CONFIG_64BIT
         if ((unsigned long) &_text > (unsigned long) CKSEG0)
                 /* The -4 is a hack so that user tools don't have to handle
                    the overflow.  */
-                kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+                kclist_add(&kcore_kseg0, (void *) CKSEG0,
+                                0x80000000 - 4, KCORE_TEXT);
 #endif
-        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-                   VMALLOC_END-VMALLOC_START);
-
-        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
-               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-               ram << (PAGE_SHIFT-10),
-               codesize >> 10,
-               reservedpages << (PAGE_SHIFT-10),
-               datasize >> 10,
-               initsize >> 10,
-               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-static void free_init_pages(char *what, unsigned long begin, unsigned long end)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 {
         unsigned long pfn;
 
@@ -451,11 +360,8 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
                 struct page *page = pfn_to_page(pfn);
                 void *addr = phys_to_virt(PFN_PHYS(pfn));
 
-                ClearPageReserved(page);
-                init_page_count(page);
                 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
-                __free_page(page);
-                totalram_pages++;
+                free_reserved_page(page);
         }
         printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
@@ -463,23 +369,44 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        free_init_pages("initrd memory",
-                        virt_to_phys((void *)start),
-                        virt_to_phys((void *)end));
+        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+                           "initrd");
 }
 #endif
 
-extern unsigned long prom_free_prom_memory(void);
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
-void free_initmem(void)
+void __init_refok free_initmem(void)
 {
-        unsigned long freed;
+        prom_free_prom_memory();
+        /*
+         * Let the platform define a specific function to free the
+         * init section since EVA may have used any possible mapping
+         * between virtual and physical addresses.
+         */
+        if (free_init_pages_eva)
+                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+        else
+                free_initmem_default(POISON_FREE_INITMEM);
+}
 
-        freed = prom_free_prom_memory();
-        if (freed)
-                printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+unsigned long pgd_current[NR_CPUS];
+#endif
 
-        free_init_pages("unused kernel memory",
-                        __pa_symbol(&__init_begin),
-                        __pa_symbol(&__init_end));
-}
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
+ * are constants.  So we use the variants from asm-offset.h until that gcc
+ * will officially be retired.
+ *
+ * Align swapper_pg_dir in to 64K, allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers.  If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space.  So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
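The colour-keyed fixmap trick in __kmap_pgprot() above is worth spelling out: the low bits of the user address's page frame number select one of FIX_N_COLOURS fixmap slots, so the temporary kernel mapping lands on the same cache colour as the user mapping and VCED/VECI aliasing is avoided. The standalone sketch below is not part of the patch; PAGE_SHIFT and FIX_N_COLOURS are hard-coded here for illustration (4 KB pages, 8 colours). It reproduces only the slot-index calculation, including the second bank of slots the patch reserves for interrupt context via in_interrupt().

    #include <stdio.h>

    #define PAGE_SHIFT     12   /* 4 KB pages, as on typical MIPS configs */
    #define FIX_N_COLOURS   8   /* up to 8 cache colours, per the comment in the file */

    /* Mirror of the index calculation in __kmap_pgprot(). */
    static unsigned int colour_slot(unsigned long uaddr, int in_interrupt)
    {
            /* the low PFN bits give the page's cache colour */
            unsigned int idx = (uaddr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);

            /* the patch parks interrupt-context mappings in a second bank */
            return idx + (in_interrupt ? FIX_N_COLOURS : 0);
    }

    int main(void)
    {
            printf("0x12345000          -> slot %u\n", colour_slot(0x12345000UL, 0)); /* 5 */
            printf("0x12346000          -> slot %u\n", colour_slot(0x12346000UL, 0)); /* 6 */
            printf("0x12345000 (in irq) -> slot %u\n", colour_slot(0x12345000UL, 1)); /* 13 */
            return 0;
    }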

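The free_init_pages_eva hook added at the end of the diff lets a platform whose EVA layout breaks the usual identity mapping substitute its own routine for freeing the init section. A hypothetical registration could look like the sketch below; eva_to_phys() is an invented helper (it would have to return the physical address, as an unsigned long, of an EVA virtual address), while free_init_pages() and free_init_pages_eva are the symbols from the patch.

    /* Hypothetical example only: not from the patch.  A platform with a
     * non-identity EVA mapping overrides how the init section is freed.
     */
    static void my_platform_free_init_pages(void *begin, void *end)
    {
            /* translate EVA virtual addresses to physical before freeing */
            free_init_pages("unused kernel memory",
                            eva_to_phys(begin), eva_to_phys(end));
    }

    static int __init my_platform_mem_setup(void)
    {
            /* runs before free_initmem(), which consults this pointer */
            free_init_pages_eva = my_platform_free_init_pages;
            return 0;
    }
    arch_initcall(my_platform_mem_setup);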