Diffstat (limited to 'arch/mips/mm/init.c')
-rw-r--r--	arch/mips/mm/init.c	423
1 file changed, 263 insertions(+), 160 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4ee91c9a556..6e4413330e3 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -8,11 +8,12 @@
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/config.h>
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -24,36 +25,41 @@
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/kcore.h>
 
+#include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
+#include <asm/kmap_types.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-unsigned long highstart_pfn, highend_pfn;
+#include <asm/fixmap.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
  * where we have to avoid VCED / VECI exceptions for good performance at
  * any price. Since page is never written to after the initialization we
  * don't have to care about aliases on other CPUs.
  */
 unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
  */
-unsigned long setup_zero_pages(void)
+void setup_zero_pages(void)
 {
-	unsigned long order, size;
+	unsigned int order, i;
 	struct page *page;
 
 	if (cpu_has_vce)
@@ -65,41 +71,146 @@ unsigned long setup_zero_pages(void)
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page(empty_zero_page);
-	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
-		SetPageReserved(page);
-		set_page_count(page, 1);
-		page++;
-	}
+	page = virt_to_page((void *)empty_zero_page);
+	split_page(page, order);
+	for (i = 0; i < (1 << order); i++, page++)
+		mark_page_reserved(page);
 
-	size = PAGE_SIZE << order;
-	zero_page_mask = (size - 1) & PAGE_MASK;
+	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
+}
 
-	return 1UL << order;
+static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr, flags, entrylo;
+	unsigned long old_ctx;
+	pte_t pte;
+	int tlbidx;
+
+	BUG_ON(Page_dcache_dirty(page));
+
+	pagefault_disable();
+	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+	idx += in_interrupt() ? FIX_N_COLOURS : 0;
+	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+	pte = mk_pte(page, prot);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+	entrylo = pte.pte_high;
+#else
+	entrylo = pte_to_entrylo(pte_val(pte));
+#endif
+
+	local_irq_save(flags);
+	old_ctx = read_c0_entryhi();
+	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+	write_c0_entrylo0(entrylo);
+	write_c0_entrylo1(entrylo);
+	tlbidx = read_c0_wired();
+	write_c0_wired(tlbidx + 1);
+	write_c0_index(tlbidx);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	local_irq_restore(flags);
+
+	return (void*) vaddr;
 }
 
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+	return __kmap_pgprot(page, addr, PAGE_KERNEL);
+}
+
+void *kmap_noncoherent(struct page *page, unsigned long addr)
+{
+	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+}
 
-#define kmap_get_fixmap_pte(vaddr)					\
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+void kunmap_coherent(void)
+{
+	unsigned int wired;
+	unsigned long flags, old_ctx;
+
+	local_irq_save(flags);
+	old_ctx = read_c0_entryhi();
+	wired = read_c0_wired() - 1;
+	write_c0_wired(wired);
+	write_c0_index(wired);
+	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	local_irq_restore(flags);
+	pagefault_enable();
+}
 
-static void __init kmap_init(void)
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	unsigned long kmap_vstart;
+	void *vfrom, *vto;
+
+	vto = kmap_atomic(to);
+	if (cpu_has_dc_aliases &&
+	    page_mapped(from) && !Page_dcache_dirty(from)) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent();
+	} else {
+		vfrom = kmap_atomic(from);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom);
+	}
+	if ((!cpu_has_ic_fills_f_dc) ||
+	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		flush_data_cache_page((unsigned long)vto);
+	kunmap_atomic(vto);
+	/* Make sure this page is cleared on other CPU's too before using it */
+	smp_wmb();
+}
 
-	/* cache the first kmap pte */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent();
+	} else {
+		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
+	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
 
-	kmap_prot = PAGE_KERNEL;
+void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent();
+	} else {
+		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 }
+EXPORT_SYMBOL_GPL(copy_from_user_page);
 
-#ifdef CONFIG_32BIT
 void __init fixrange_init(unsigned long start, unsigned long end,
 	pgd_t *pgd_base)
 {
+#ifdef CONFIG_HIGHMEM
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -113,16 +224,15 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 	k = __pmd_offset(vaddr);
 	pgd = pgd_base + i;
 
-	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
 		pud = (pud_t *)pgd;
-		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
 			pmd = (pmd_t *)pud;
-			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					set_pmd(pmd, __pmd(pte));
-					if (pte != pte_offset_kernel(pmd, 0))
-						BUG();
+					set_pmd(pmd, __pmd((unsigned long)pte));
+					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
 				vaddr += PMD_SIZE;
 			}
@@ -130,64 +240,25 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 		}
 		j = 0;
 	}
-}
-#endif /* CONFIG_32BIT */
-#endif /* CONFIG_HIGHMEM */
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-extern void pagetable_init(void);
-
-void __init paging_init(void)
-{
-	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-	unsigned long max_dma, high, low;
-
-	pagetable_init();
-
-#ifdef CONFIG_HIGHMEM
-	kmap_init();
 #endif
-
-	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	low = max_low_pfn;
-	high = highend_pfn;
-
-#ifdef CONFIG_ISA
-	if (low < max_dma)
-		zones_size[ZONE_DMA] = low;
-	else {
-		zones_size[ZONE_DMA] = max_dma;
-		zones_size[ZONE_NORMAL] = low - max_dma;
-	}
-#else
-	zones_size[ZONE_DMA] = low;
-#endif
-
-#ifdef CONFIG_HIGHMEM
-	if (cpu_has_dc_aliases) {
-		printk(KERN_WARNING "This processor doesn't support highmem.");
-		if (high - low)
-			printk(" %ldk highmem ignored", high - low);
-		printk("\n");
-	} else
-		zones_size[ZONE_HIGHMEM] = high - low;
-#endif
-
-	free_area_init(zones_size);
 }
 
-#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
-#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-
-static inline int page_is_ram(unsigned long pagenr)
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+int page_is_ram(unsigned long pagenr)
 {
 	int i;
 
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long addr, end;
 
-		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			/* not usable memory */
 			continue;
+		}
 
 		addr = PFN_UP(boot_mem_map.map[i].addr);
 		end = PFN_DOWN(boot_mem_map.map[i].addr +
@@ -200,110 +271,142 @@ static inline int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-void __init mem_init(void)
+void __init paging_init(void)
 {
-	unsigned long codesize, reservedpages, datasize, initsize;
-	unsigned long tmp, ram;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long lastpfn __maybe_unused;
+
+	pagetable_init();
 
 #ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
+	kmap_init();
 #endif
-	max_mapnr = num_physpages = highend_pfn;
-#else
-	max_mapnr = num_physpages = max_low_pfn;
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	lastpfn = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+	lastpfn = highend_pfn;
+
+	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
+		printk(KERN_WARNING "This processor doesn't support highmem."
+		       " %ldk highmem ignored\n",
+		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+		lastpfn = max_low_pfn;
+	}
 #endif
-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
-	totalram_pages += free_all_bootmem();
-	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */
+
+	free_area_init_nodes(max_zone_pfns);
+}
 
-	reservedpages = ram = 0;
-	for (tmp = 0; tmp < max_low_pfn; tmp++)
-		if (page_is_ram(tmp)) {
-			ram++;
-			if (PageReserved(mem_map+tmp))
-				reservedpages++;
-		}
+#ifdef CONFIG_64BIT
+static struct kcore_list kcore_kseg0;
+#endif
 
+static inline void mem_init_free_highmem(void)
+{
 #ifdef CONFIG_HIGHMEM
+	unsigned long tmp;
+
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-		struct page *page = mem_map + tmp;
+		struct page *page = pfn_to_page(tmp);
 
-		if (!page_is_ram(tmp)) {
+		if (!page_is_ram(tmp))
 			SetPageReserved(page);
-			continue;
-		}
-		ClearPageReserved(page);
-#ifdef CONFIG_LIMITED_DMA
-		set_page_address(page, lowmem_page_address(page));
-#endif
-		set_page_count(page, 1);
-		__free_page(page);
-		totalhigh_pages++;
+		else
+			free_highmem_page(page);
 	}
-	totalram_pages += totalhigh_pages;
 #endif
+}
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_DISCONTIGMEM
+#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
+#endif
+	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+#else
+	max_mapnr = max_low_pfn;
+#endif
+	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+	free_all_bootmem();
+	setup_zero_pages();	/* Setup zeroed pages.  */
+	mem_init_free_highmem();
+	mem_init_print_info(NULL);
 
-	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
-	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-	       ram << (PAGE_SHIFT-10),
-	       codesize >> 10,
-	       reservedpages << (PAGE_SHIFT-10),
-	       datasize >> 10,
-	       initsize >> 10,
-	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+#ifdef CONFIG_64BIT
+	if ((unsigned long) &_text > (unsigned long) CKSEG0)
+		/* The -4 is a hack so that user tools don't have to handle
+		   the overflow.  */
+		kclist_add(&kcore_kseg0, (void *) CKSEG0,
+				0x80000000 - 4, KCORE_TEXT);
+#endif
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+	unsigned long pfn;
+
+	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+		struct page *page = pfn_to_page(pfn);
+		void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+		free_reserved_page(page);
+	}
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_64BIT
-	/* Switch from KSEG0 to XKPHYS addresses */
-	start = (unsigned long)phys_to_virt(CPHYSADDR(start));
-	end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
-	if (start < end)
-		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
-		       (end - start) >> 10);
-
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		set_page_count(virt_to_page(start), 1);
-		free_page(start);
-		totalram_pages++;
-	}
+	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+			   "initrd");
 }
 #endif
 
-extern unsigned long prom_free_prom_memory(void);
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
-void free_initmem(void)
+void __init_refok free_initmem(void)
 {
-	unsigned long addr, page, freed;
+	prom_free_prom_memory();
+	/*
+	 * Let the platform define a specific function to free the
+	 * init section since EVA may have used any possible mapping
+	 * between virtual and physical addresses.
	 */
+	if (free_init_pages_eva)
+		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+	else
+		free_initmem_default(POISON_FREE_INITMEM);
+}
 
-	freed = prom_free_prom_memory();
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+unsigned long pgd_current[NR_CPUS];
+#endif
 
-	addr = (unsigned long) &__init_begin;
-	while (addr < (unsigned long) &__init_end) {
-#ifdef CONFIG_64BIT
-		page = PAGE_OFFSET | CPHYSADDR(addr);
-#else
-		page = addr;
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
+ * are constants. So we use the variants from asm-offset.h until that gcc
+ * will officially be retired.
+ *
+ * Align swapper_pg_dir in to 64K, allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space. So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 #endif
-		ClearPageReserved(virt_to_page(page));
-		set_page_count(virt_to_page(page), 1);
-		free_page(page);
-		totalram_pages++;
-		freed += PAGE_SIZE;
-		addr += PAGE_SIZE;
-	}
-	printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
-	       freed >> 10);
-}
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
