Diffstat (limited to 'arch/microblaze/mm')
 -rw-r--r--  arch/microblaze/mm/Makefile     |   1
 -rw-r--r--  arch/microblaze/mm/consistent.c |  18
 -rw-r--r--  arch/microblaze/mm/fault.c      |  50
 -rw-r--r--  arch/microblaze/mm/highmem.c    |  88
 -rw-r--r--  arch/microblaze/mm/init.c       | 238
 -rw-r--r--  arch/microblaze/mm/pgtable.c    |  40
6 files changed, 314 insertions, 121 deletions
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 09c49ed8723..7313bd8acbb 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -5,3 +5,4 @@
 obj-y := consistent.o init.o
 
 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 5a59dad62bd..e10ad930895 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -13,7 +13,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -37,7 +37,7 @@
 #include <asm/pgalloc.h>
 #include <linux/io.h>
 #include <linux/hardirq.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <asm/mmu.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -59,7 +59,7 @@
  * uncached region. This will no doubt cause big problems if memory allocated
  * here is not also freed properly. -- JW
  */
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 {
 	unsigned long order, vaddr;
 	void *ret;
@@ -102,8 +102,7 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 # endif
 	if ((unsigned int)ret > cpuinfo.dcache_base &&
 				(unsigned int)ret < cpuinfo.dcache_high)
-		printk(KERN_WARNING
-			"ERROR: Your cache coherent area is CACHED!!!\n");
+		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
 
 	/* dma_handle is same as physical (shadowed) address */
 	*dma_handle = (dma_addr_t)ret;
@@ -118,7 +117,7 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 	ret = (void *)va;
 
 	/* This gives us the real physical address of the first page. */
-	*dma_handle = pa = virt_to_bus((void *)vaddr);
+	*dma_handle = pa = __virt_to_phys(vaddr);
 #endif
 
 	/*
@@ -177,8 +176,7 @@ void consistent_free(size_t size, void *vaddr)
 
 	page = virt_to_page(vaddr);
 	do {
-		ClearPageReserved(page);
-		__free_page(page);
+		__free_reserved_page(page);
 		page++;
 	} while (size -= PAGE_SIZE);
 #else
@@ -195,9 +193,7 @@ void consistent_free(size_t size, void *vaddr)
 			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
 			if (pfn_valid(pfn)) {
 				page = pfn_to_page(pfn);
-
-				ClearPageReserved(page);
-				__free_page(page);
+				__free_reserved_page(page);
 			}
 		}
 		vaddr += PAGE_SIZE;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 57bd2a09610..fa4cf52aa7a 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -32,8 +32,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/system.h>
+#include <linux/mmu_context.h>
 #include <linux/uaccess.h>
 #include <asm/exceptions.h>
 
@@ -48,7 +47,7 @@ static int store_updates_sp(struct pt_regs *regs)
 {
 	unsigned int inst;
 
-	if (get_user(inst, (unsigned int *)regs->pc))
+	if (get_user(inst, (unsigned int __user *)regs->pc))
 		return 0;
 	/* check for 1 in the rD field */
 	if (((inst >> 21) & 0x1f) != 1)
@@ -93,13 +92,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	regs->ear = address;
 	regs->esr = error_code;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
 	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
-		printk(KERN_WARNING "kernel task_size exceed");
+		pr_warn("kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 	}
 
@@ -113,13 +113,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 
 		/* in_atomic() in user mode is really bad, as is
 		   current->mm == NULL. */
-		printk(KERN_EMERG "Page fault in user mode with "
-		       "in_atomic(), mm = %p\n", mm);
-		printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
+		pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
+			 mm);
+		pr_emerg("r15 = %lx MSR = %lx\n",
 			 regs->r15, regs->msr);
 		die("Weird page fault", regs, SIGSEGV);
 	}
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -139,6 +142,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -197,6 +201,7 @@ good_area:
 	if (unlikely(is_write)) {
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
 		/* protection fault */
@@ -211,7 +216,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -219,11 +228,28 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (unlikely(fault & VM_FAULT_MAJOR))
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (unlikely(fault & VM_FAULT_MAJOR))
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
+
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
 	 * just need pte's created via handle_mm_fault()
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
new file mode 100644
index 00000000000..5a92576fad9
--- /dev/null
+++ b/arch/microblaze/mm/highmem.c
@@ -0,0 +1,88 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+#include <asm/tlbflush.h>
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+
+	unsigned long vaddr;
+	int idx, type;
+
+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	local_flush_tlb_page(NULL, vaddr);
+
+	return (void *) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	int type;
+
+	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+		pagefault_enable();
+		return;
+	}
+
+	type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
+#endif
+	kmap_atomic_idx_pop();
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index c8437866d3b..77bc7c7e652 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -17,12 +17,14 @@
 #include <linux/pfn.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/export.h>
 
 #include <asm/page.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 /* Use for MMU and noMMU because of PCI generic code */
 int mem_init_done;
@@ -32,8 +34,6 @@ unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
 
 #else
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
@@ -45,9 +45,45 @@ char *klimit = _end;
  */
 unsigned long memory_start;
 EXPORT_SYMBOL(memory_start);
-unsigned long memory_end; /* due to mm/nommu.c */
 unsigned long memory_size;
 EXPORT_SYMBOL(memory_size);
+unsigned long lowmem_size;
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
+pgprot_t kmap_prot;
+EXPORT_SYMBOL(kmap_prot);
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr);
+}
+
+static void __init highmem_init(void)
+{
+	pr_debug("%x\n", (u32)PKMAP_BASE);
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+	kmap_prot = PAGE_KERNEL;
+}
+
+static void highmem_setup(void)
+{
+	unsigned long pfn;
+
+	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		/* FIXME not sure about */
+		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
+			free_highmem_page(page);
+	}
+}
+#endif /* CONFIG_HIGHMEM */
 
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
@@ -55,17 +91,28 @@ EXPORT_SYMBOL(memory_size);
 static void __init paging_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES];
+#ifdef CONFIG_MMU
+	int idx;
+
+	/* Setup fixmaps */
+	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
+		clear_fixmap(idx);
+#endif
 
 	/* Clean every zones */
 	memset(zones_size, 0, sizeof(zones_size));
 
-	/*
-	 * old: we can DMA to/from any address.put all page into ZONE_DMA
-	 * We use only ZONE_NORMAL
-	 */
-	zones_size[ZONE_NORMAL] = max_mapnr;
+#ifdef CONFIG_HIGHMEM
+	highmem_init();
 
-	free_area_init(zones_size);
+	zones_size[ZONE_DMA] = max_low_pfn;
+	zones_size[ZONE_HIGHMEM] = max_pfn;
+#else
+	zones_size[ZONE_DMA] = max_pfn;
+#endif
+
+	/* We don't have holes in memory map */
+	free_area_init_nodes(zones_size);
 }
 
 void __init setup_memory(void)
@@ -79,32 +126,31 @@ void __init setup_memory(void)
 	/* Find main memory where is the kernel */
 	for_each_memblock(memory, reg) {
 		memory_start = (u32)reg->base;
-		memory_end = (u32) reg->base + reg->size;
+		lowmem_size = reg->size;
 		if ((memory_start <= (u32)_text) &&
-			((u32)_text <= memory_end)) {
-			memory_size = memory_end - memory_start;
+			((u32)_text <= (memory_start + lowmem_size - 1))) {
+			memory_size = lowmem_size;
 			PAGE_OFFSET = memory_start;
-			printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
-				"size 0x%08x\n", __func__, (u32) memory_start,
-					(u32) memory_end, (u32) memory_size);
+			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
+				__func__, (u32) memory_start,
+				(u32) memory_size);
 			break;
 		}
 	}
 
-	if (!memory_start || !memory_end) {
-		panic("%s: Missing memory setting 0x%08x-0x%08x\n",
-			__func__, (u32) memory_start, (u32) memory_end);
+	if (!memory_start || !memory_size) {
+		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
+			__func__, (u32) memory_start, (u32) memory_size);
 	}
 
 	/* reservation of region where is the kernel */
 	kernel_align_start = PAGE_DOWN((u32)_text);
 	/* ALIGN can be remove because _end in vmlinux.lds.S is align */
 	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-	memblock_reserve(kernel_align_start, kernel_align_size);
-	printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
+	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
 		__func__, kernel_align_start, kernel_align_start
 			+ kernel_align_size, kernel_align_size);
-
+	memblock_reserve(kernel_align_start, kernel_align_size);
 #endif
 	/*
 	 * Kernel:
@@ -114,18 +160,19 @@ void __init setup_memory(void)
 	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
 	 * max_low_pfn
 	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
-	 * num_physpages - number of all pages
 	 */
 
 	/* memory start is from the kernel end (aligned) to higher addr */
 	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
 	/* RAM is assumed contiguous */
-	num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
-	max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;
+	max_mapnr = memory_size >> PAGE_SHIFT;
+	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
+	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
 
-	printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
-	printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
-	printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
+	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
+	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
+	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
+	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
 	/*
 	 * Find an area to use for the bootmem bitmap.
@@ -138,66 +185,81 @@ void __init setup_memory(void)
 		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
 	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
 
+	/* Add active regions with valid PFNs */
+	for_each_memblock(memory, reg) {
+		unsigned long start_pfn, end_pfn;
+
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
+		memblock_set_node(start_pfn << PAGE_SHIFT,
+				  (end_pfn - start_pfn) << PAGE_SHIFT,
+				  &memblock.memory, 0);
+	}
+
 	/* free bootmem is whole main memory */
-	free_bootmem(memory_start, memory_size);
+	free_bootmem_with_active_regions(0, max_low_pfn);
 
 	/* reserve allocate blocks */
 	for_each_memblock(reserved, reg) {
-		pr_debug("reserved - 0x%08x-0x%08x\n",
-			 (u32) reg->base, (u32) reg->size);
-		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		unsigned long top = reg->base + reg->size - 1;
+
+		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
+			 (u32) reg->base, (u32) reg->size, top,
+			 memory_start + lowmem_size - 1);
+
+		if (top <= (memory_start + lowmem_size - 1)) {
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		} else if (reg->base < (memory_start + lowmem_size - 1)) {
+			unsigned long trunc_size = memory_start + lowmem_size -
						reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
+		}
 	}
+
+	/* XXX need to clip this if using highmem? */
+	sparse_memory_present_with_active_regions(0);
+
 #ifdef CONFIG_MMU
 	init_bootmem_done = 1;
 #endif
 	paging_init();
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-	unsigned long addr;
-
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	int pages = 0;
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-		pages++;
-	}
-	printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
-					(int)(pages * (PAGE_SIZE / 1024)));
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
 
 void free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long)(&__init_begin),
-			(unsigned long)(&__init_end));
+	free_initmem_default(-1);
 }
 
 void __init mem_init(void)
 {
-	high_memory = (void *)__va(memory_end);
+	high_memory = (void *)__va(memory_start + lowmem_size - 1);
+
 	/* this will put all memory onto the freelists */
-	totalram_pages += free_all_bootmem();
+	free_all_bootmem();
+#ifdef CONFIG_HIGHMEM
+	highmem_setup();
+#endif
 
-	printk(KERN_INFO "Memory: %luk/%luk available\n",
-	       nr_free_pages() << (PAGE_SHIFT-10),
-	       num_physpages << (PAGE_SHIFT-10));
+	mem_init_print_info(NULL);
+#ifdef CONFIG_MMU
+	pr_info("Kernel virtual memory layout:\n");
+	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
+		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
+	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
+		ioremap_bot, ioremap_base);
+	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
+		(unsigned long)VMALLOC_START, VMALLOC_END);
+#endif
 	mem_init_done = 1;
 }
 
@@ -227,7 +289,6 @@ static void mm_cmdline_setup(void)
 		maxmem = memparse(p, &p);
 		if (maxmem && memory_size > maxmem) {
 			memory_size = maxmem;
-			memory_end = memory_start + memory_size;
 			memblock.memory.regions[0].size = memory_size;
 		}
 	}
@@ -267,19 +328,30 @@ asmlinkage void __init mmu_init(void)
 	unsigned int kstart, ksize;
 
 	if (!memblock.reserved.cnt) {
-		printk(KERN_EMERG "Error memory count\n");
+		pr_emerg("Error memory count\n");
 		machine_restart(NULL);
 	}
 
-	if ((u32) memblock.memory.regions[0].size < 0x1000000) {
-		printk(KERN_EMERG "Memory must be greater than 16MB\n");
+	if ((u32) memblock.memory.regions[0].size < 0x400000) {
+		pr_emerg("Memory must be greater than 4MB\n");
 		machine_restart(NULL);
 	}
+
+	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
+		pr_emerg("Kernel size is greater than memory node\n");
+		machine_restart(NULL);
+	}
+
 	/* Find main memory where the kernel is */
 	memory_start = (u32) memblock.memory.regions[0].base;
-	memory_end = (u32) memblock.memory.regions[0].base +
-			(u32) memblock.memory.regions[0].size;
-	memory_size = memory_end - memory_start;
+	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
+
+	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
+		lowmem_size = CONFIG_LOWMEM_SIZE;
+#ifndef CONFIG_HIGHMEM
+		memory_size = lowmem_size;
+#endif
+	}
 
 	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
 
@@ -294,10 +366,11 @@ asmlinkage void __init mmu_init(void)
 
 #if defined(CONFIG_BLK_DEV_INITRD)
 	/* Remove the init RAM disk from the available memory. */
-/*	if (initrd_start) {
-		mem_pieces_remove(&phys_avail, __pa(initrd_start),
-			initrd_end - initrd_start, 1);
-	}*/
+	if (initrd_start) {
+		unsigned long size;
+		size = initrd_end - initrd_start;
+		memblock_reserve(__virt_to_phys(initrd_start), size);
+	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 	/* Initialize the MMU hardware */
@@ -306,15 +379,20 @@ asmlinkage void __init mmu_init(void)
 	/* Map in all of RAM starting at CONFIG_KERNEL_START */
 	mapin_ram();
 
-#ifdef HIGHMEM_START_BOOL
-	ioremap_base = HIGHMEM_START;
+	/* Extend vmalloc and ioremap area as big as possible */
+#ifdef CONFIG_HIGHMEM
+	ioremap_base = ioremap_bot = PKMAP_BASE;
 #else
-	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
-#endif /* CONFIG_HIGHMEM */
-	ioremap_bot = ioremap_base;
+	ioremap_base = ioremap_bot = FIXADDR_START;
+#endif
 
 	/* Initialize the context management stuff */
 	mmu_context_init();
+
+	/* Shortly after that, the entire linear mapping will be available */
+	/* This will also cause that unflatten device tree will be allocated
+	 * inside 768MB limit */
+	memblock_set_current_limit(memory_start + lowmem_size - 1);
 }
 
 /* This is only called until mem_init is done. */
@@ -325,11 +403,11 @@ void __init *early_get_page(void)
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
 		/*
-		 * Mem start + 32MB -> here is limit
+		 * Mem start + kernel_tlb -> here is limit
 		 * because of mem mapping from head.S
 		 */
 		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					memory_start + 0x2000000));
+					memory_start + kernel_tlb));
 	}
 	return p;
 }
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 59bf2335a4c..4f4520e779a 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -26,8 +26,8 @@
  *
  */
 
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
@@ -37,18 +37,12 @@
 #include <linux/io.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>
-
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#include <asm/fixmap.h>
 
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);
 
-/* The maximum lowmem defaults to 768Mb, but this can be configured to
- * another value.
- */
-#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
-
 #ifndef CONFIG_SMP
 struct pgtable_cache_struct quicklists;
 #endif
@@ -75,13 +69,13 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
 	 *
 	 * However, allow remap of rootfs: TBD
	 */
+
 	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
-		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
-		p < virt_to_phys((unsigned long)__bss_stop))) {
-		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
-			" is RAM lr %p\n", (unsigned long)p,
-			__builtin_return_address(0));
+		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
+		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
+		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
+			(unsigned long)p, __builtin_return_address(0));
 		return NULL;
 	}
 
@@ -132,9 +126,10 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
-	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+	if ((__force void *)addr > high_memory &&
+			(unsigned long) addr < ioremap_bot)
 		vfree((void *) (PAGE_MASK & (unsigned long) addr));
 }
 EXPORT_SYMBOL(iounmap);
@@ -156,8 +151,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
 		if (unlikely(mem_init_done))
-			flush_HPTE(0, va, pmd_val(*pd));
-			/* flush_HPTE(0, va, pg); */
+			_tlbie(va);
 	}
 	return err;
 }
@@ -171,7 +165,7 @@ void __init mapin_ram(void)
 
 	v = CONFIG_KERNEL_START;
 	p = memory_start;
-	for (s = 0; s < memory_size; s += PAGE_SIZE) {
+	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
 		f = _PAGE_PRESENT | _PAGE_ACCESSED |
 				_PAGE_SHARED | _PAGE_HWEXEC;
 		if ((char *) v < _stext || (char *) v >= _etext)
@@ -254,3 +248,13 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	}
 	return pte;
 }
+
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses)
+		BUG();
+
+	map_page(address, phys, pgprot_val(flags));
+}
