Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--	arch/arm/mm/init.c	884
1 file changed, 423 insertions, 461 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c168f322ef8..659c75d808d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -7,60 +7,107 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/ptrace.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
-
+#include <linux/of_fdt.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
+#include <linux/sizes.h>
+
+#include <asm/cp15.h>
 #include <asm/mach-types.h>
-#include <asm/hardware.h>
+#include <asm/memblock.h>
+#include <asm/prom.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
+#include "mm.h"
+
+#ifdef CONFIG_CPU_CP15_MMU
+unsigned long __init __clear_cr(unsigned long mask)
+{
+	cr_alignment = cr_alignment & ~mask;
+	return cr_alignment;
+}
+#endif
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static phys_addr_t phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static int __init early_initrd(char *p)
+{
+	phys_addr_t start;
+	unsigned long size;
+	char *endp;
 
-/*
- * The sole use of this is to pass memory configuration
- * data from paging_init to mem_init.
- */
-static struct meminfo meminfo __initdata = { 0, };
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+	return 0;
+}
+early_param("initrd", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
+ * This keeps memory configuration data used by a couple memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by arm_add_memory().
  */
-struct page *empty_zero_page;
-
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node;
+	int shared = 0, cached = 0, slab = 0;
+	struct memblock_region *reg;
 
 	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	show_free_areas(filter);
 
-	for_each_online_node(node) {
+	for_each_memblock (memory, reg) {
+		unsigned int pfn1, pfn2;
 		struct page *page, *end;
 
-		page = NODE_MEM_MAP(node);
-		end = page + NODE_DATA(node)->node_spanned_pages;
+		pfn1 = memblock_region_memory_base_pfn(reg);
+		pfn2 = memblock_region_memory_end_pfn(reg);
+
+		page = pfn_to_page(pfn1);
+		end = pfn_to_page(pfn2 - 1) + 1;
 
 		do {
 			total++;
@@ -74,8 +121,9 @@ void show_mem(void)
 				free++;
 			else
 				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
+			pfn1++;
+			page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -86,469 +134,259 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+			       unsigned long *max_high)
 {
-	return pmd_offset(pgd, virt);
+	*max_low = PFN_DOWN(memblock_get_current_limit());
+	*min = PFN_UP(memblock_start_of_DRAM());
+	*max_high = PFN_DOWN(memblock_end_of_DRAM());
 }
 
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
-	return pmd_off(pgd_offset_k(virt), virt);
-}
+#ifdef CONFIG_ZONE_DMA
 
-#define for_each_nodebank(iter,mi,no) \
-	for (iter = 0; iter < mi->nr_banks; iter++) \
-		if (mi->bank[iter].node == no)
+phys_addr_t arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
 
 /*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd. Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
  */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
-{
-	unsigned int start_pfn, bank, bootmap_pfn;
-
-	start_pfn = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_nodebank(bank, mi, node) {
-		unsigned int start, end;
-
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end = (mi->bank[bank].size +
-		       mi->bank[bank].start) >> PAGE_SHIFT;
-
-		if (end < start_pfn)
-			continue;
+phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
-
-	if (bootmap_pfn == 0)
-		BUG();
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+	unsigned long dma_size)
+{
+	if (size[0] <= dma_size)
+		return;
 
-	return bootmap_pfn;
+	size[ZONE_NORMAL] = size[0] - dma_size;
+	size[ZONE_DMA] = dma_size;
+	hole[ZONE_NORMAL] = hole[0];
+	hole[ZONE_DMA] = 0;
 }
+#endif
 
-static int __init check_initrd(struct meminfo *mi)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
-
-			bank_end = mi->bank[i].start + mi->bank[i].size;
-
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
-		}
-	}
-
-	if (initrd_node == -1) {
-		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, end);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
-
-	return initrd_node;
 }
 
-/*
- * Reserve the various regions of node 0
- */
-static __init void reserve_node_zero(pg_data_t *pgdat)
+static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
-	unsigned long res_size = 0;
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	struct memblock_region *reg;
 
 	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
+	 * initialise the zones.
 	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
-#else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
-#endif
+	memset(zone_size, 0, sizeof(zone_size));
 
 	/*
-	 * Reserve the page tables. These are already in use,
-	 * and can only be in node 0.
+	 * The memory size has already been determined. If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
 	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t));
+	zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+	zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
 
 	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
 	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
 
-	/*
-	 * These should likewise go elsewhere. They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
+#ifdef CONFIG_HIGHMEM
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
+#endif
+	}
 
-#ifdef CONFIG_SA1111
+#ifdef CONFIG_ZONE_DMA
 	/*
-	 * Because of the SA1111 DMA bug, we want to preserve our
-	 * precious DMA-able memory...
+	 * Adjust the sizes according to any special requirements for
+	 * this machine type.
 	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	if (arm_dma_zone_size)
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
 #endif
 
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
-}
-
-void __init build_mem_type_table(void);
-void __init create_mapping(struct map_desc *md);
+	free_area_init_node(0, zone_size, min, zhole_size);
+}
 
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+int pfn_valid(unsigned long pfn)
 {
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long start_pfn, end_pfn, boot_pfn;
-	unsigned int boot_pages;
-	pg_data_t *pgdat;
-	int i;
-
-	start_pfn = -1UL;
-	end_pfn = 0;
-
-	/*
-	 * Calculate the pfn range, and map the memory banks for this node.
-	 */
-	for_each_nodebank(i, mi, node) {
-		unsigned long start, end;
-		struct map_desc map;
-
-		start = mi->bank[i].start >> PAGE_SHIFT;
-		end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
-
-		if (start_pfn > start)
-			start_pfn = start;
-		if (end_pfn < end)
-			end_pfn = end;
+	return memblock_is_memory(__pfn_to_phys(pfn));
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
 
-		map.pfn = __phys_to_pfn(mi->bank[i].start);
-		map.virtual = __phys_to_virt(mi->bank[i].start);
-		map.length = mi->bank[i].size;
-		map.type = MT_MEMORY;
+#ifndef CONFIG_SPARSEMEM
+static void __init arm_memory_present(void)
+{
+}
+#else
+static void __init arm_memory_present(void)
+{
+	struct memblock_region *reg;
 
-		create_mapping(&map);
-	}
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
+}
+#endif
 
-	/*
-	 * If there is no memory in this node, ignore it.
-	 */
-	if (end_pfn == 0)
-		return end_pfn;
+static bool arm_memblock_steal_permitted = true;
 
-	/*
-	 * Allocate the bootmem bitmap page.
-	 */
-	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t phys;
 
-	/*
-	 * Initialise the bootmem allocator for this node, handing the
-	 * memory banks over to bootmem.
-	 */
-	node_set_online(node);
-	pgdat = NODE_DATA(node);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+	BUG_ON(!arm_memblock_steal_permitted);
 
-	for_each_nodebank(i, mi, node)
-		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	memblock_free(phys, size);
+	memblock_remove(phys, size);
 
-	/*
-	 * Reserve the bootmem bitmap for this node.
-	 */
-	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-			     boot_pages << PAGE_SHIFT);
+	return phys;
+}
 
+void __init arm_memblock_init(const struct machine_desc *mdesc)
+{
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_sdata), _end - _sdata);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
 #ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * If the initrd is in this node, reserve its memory.
-	 */
-	if (node == initrd_node) {
-		reserve_bootmem_node(pgdat, phys_initrd_start,
-				     phys_initrd_size);
+	/* FDT scan will populate initrd_start */
+	if (initrd_start && !phys_initrd_size) {
+		phys_initrd_start = __virt_to_phys(initrd_start);
+		phys_initrd_size = initrd_end - initrd_start;
+	}
+	initrd_start = initrd_end = 0;
+	if (phys_initrd_size &&
+	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+		/* Now convert initrd to virtual addresses */
 		initrd_start = __phys_to_virt(phys_initrd_start);
 		initrd_end = initrd_start + phys_initrd_size;
 	}
 #endif
 
-	/*
-	 * Finally, reserve any node zero regions.
-	 */
-	if (node == 0)
-		reserve_node_zero(pgdat);
+	arm_mm_memblock_reserve();
 
-	/*
-	 * initialise the zones within this node.
-	 */
-	memset(zone_size, 0, sizeof(zone_size));
-	memset(zhole_size, 0, sizeof(zhole_size));
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
 
-	/*
-	 * The size of this node has already been determined. If we need
-	 * to do anything fancy with the allocation of this memory to the
-	 * zones, now is the time to do it.
-	 */
-	zone_size[0] = end_pfn - start_pfn;
-
-	/*
-	 * For each bank in this node, calculate the size of the holes.
-	 *  holes = node_size - sum(bank_sizes_in_node)
-	 */
-	zhole_size[0] = zone_size[0];
-	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+	early_init_fdt_scan_reserved_mem();
 
 	/*
-	 * Adjust the sizes according to any special requirements for
-	 * this machine type.
+	 * reserve memory for DMA contigouos allocations,
+	 * must come from DMA area inside low memory
 	 */
-	arch_adjust_zones(node, zone_size, zhole_size);
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
 
-	free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);
-
-	return end_pfn;
+	arm_memblock_steal_permitted = false;
+	memblock_dump_all();
 }
 
-static void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(void)
 {
-	unsigned long addr, memend_pfn = 0;
-	int node, initrd_node, i;
+	unsigned long min, max_low, max_high;
 
-	/*
-	 * Invalidate the node number for empty or invalid memory banks
-	 */
-	for (i = 0; i < mi->nr_banks; i++)
-		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
-			mi->bank[i].node = -1;
-
-	memcpy(&meminfo, mi, sizeof(meminfo));
+	memblock_allow_resize();
+	max_low = max_high = 0;
 
-	/*
-	 * Clear out all the mappings below the kernel image.
-	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-#ifdef CONFIG_XIP_KERNEL
-	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
-#endif
-	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
+	find_limits(&min, &max_low, &max_high);
 
 	/*
-	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
 	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
-	     addr < VMALLOC_END; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
+	arm_memory_present();
 
 	/*
-	 * Locate which node contains the ramdisk image, if any.
+	 * sparse_init() needs the bootmem allocator up and running.
 	 */
-	initrd_node = check_initrd(mi);
+	sparse_init();
 
 	/*
-	 * Run through each node initialising the bootmem allocator.
+	 * Now free the memory - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	for_each_node(node) {
-		unsigned long end_pfn;
-
-		end_pfn = bootmem_init_node(node, initrd_node, mi);
-
-		/*
-		 * Remember the highest memory PFN.
-		 */
-		if (end_pfn > memend_pfn)
-			memend_pfn = end_pfn;
-	}
-
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	zone_sizes_init(min, max_low, max_high);
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block. If we can get rid of it, we
 	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-	 * the system, not the maximum PFN.
-	 */
-	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
-}
-
-/*
- * Set up device the mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. (Do it by code inspection!)
- */
-static void __init devicemaps_init(struct machine_desc *mdesc)
-{
-	struct map_desc map;
-	unsigned long addr;
-	void *vectors;
-
-	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PGDIR_MASK);
-	map.virtual = MODULE_START;
-	map.length = ((unsigned long)&_etext - map.virtual + ~PGDIR_MASK) & PGDIR_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
-	/*
-	 * Map the cache flushing regions.
-	 */
-#ifdef FLUSH_BASE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
-	map.virtual = FLUSH_BASE;
-	map.length = PGDIR_SIZE;
-	map.type = MT_CACHECLEAN;
-	create_mapping(&map);
-#endif
-#ifdef FLUSH_BASE_MINICACHE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + PGDIR_SIZE);
-	map.virtual = FLUSH_BASE_MINICACHE;
-	map.length = PGDIR_SIZE;
-	map.type = MT_MINICLEAN;
-	create_mapping(&map);
-#endif
-
-	flush_cache_all();
-	local_flush_tlb_all();
-
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
-	/*
-	 * Create a mapping for the machine vectors at the high-vectors
-	 * location (0xffff0000). If we aren't using high-vectors, also
-	 * create a mapping at the low-vectors virtual address.
-	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
-	map.virtual = 0xffff0000;
-	map.length = PAGE_SIZE;
-	map.type = MT_HIGH_VECTORS;
-	create_mapping(&map);
-
-	if (!vectors_high()) {
-		map.virtual = 0;
-		map.type = MT_LOW_VECTORS;
-		create_mapping(&map);
-	}
-
-	/*
-	 * Ask the machine support to map in the statically mapped devices.
-	 */
-	if (mdesc->map_io)
-		mdesc->map_io();
-
-	/*
-	 * Finally flush the tlb again - this ensures that we're in a
-	 * consistent state wrt the writebuffer if the writebuffer needs
-	 * draining. After this point, we can start to touch devices
-	 * again.
 	 */
-	local_flush_tlb_all();
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
 /*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
  */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
-{
-	void *zero_page;
-
-	build_mem_type_table();
-	bootmem_init(mi);
-	devicemaps_init(mdesc);
-
-	top_pmd = pmd_off_k(0xffff0000);
-
-	/*
-	 * allocate the zero page. Note that we count on this going ok.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	memzero(zero_page, PAGE_SIZE);
-	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
-}
-
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
+static inline void poison_init_mem(void *s, size_t count)
 {
-	unsigned int size = (end - addr) >> 10;
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
-		ClearPageReserved(page);
-		set_page_count(page, 1);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+	u32 *p = (u32 *)s;
+	for (; count != 0; count -= 4)
+		*p++ = 0xe7fddef0;
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
 	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
 
 	/*
 	 * Convert to physical addresses, and
@@ -562,40 +400,116 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
 	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		memblock_free_early(pg, pgend - pg);
 }
 
 /*
  * The mem_map array can get very big. Free the unused area of the memory map.
  */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(void)
 {
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
+	unsigned long start, prev_end = 0;
+	struct memblock_region *reg;
 
 	/*
-	 * [FIXME] This relies on each bank being in address order. This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
 	 */
-	for_each_nodebank(i, mi, node) {
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks. "
-				"Not freeing memmap.\n");
-			break;
-		}
+	for_each_memblock(memory, reg) {
+		start = memblock_region_memory_base_pfn(reg);
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		start = min(start,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
 
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
 		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+		if (prev_end && prev_end < start)
+			free_memmap(prev_end, start);
+
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
+				 MAX_ORDER_NR_PAGES);
+	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+		free_memmap(prev_end,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
-
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
 	}
+#endif
 }
 
 /*
@@ -605,51 +519,92 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
  */
 void __init mem_init(void)
 {
-	unsigned int codepages, datapages, initpages;
-	int i, node;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
+#endif
 
-	codepages = &_etext - &_text;
-	datapages = &_end - &__data_start;
-	initpages = &__init_end - &__init_begin;
+	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
-#ifndef CONFIG_DISCONTIGMEM
-	max_mapnr = virt_to_page(high_memory) - mem_map;
+	/* this will put all unused low memory onto the freelists */
+	free_unused_memmap();
+	free_all_bootmem();
+
+#ifdef CONFIG_SA1111
+	/* now that our DMA memory is actually so designated, we can free it */
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
-	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
+	free_highpages();
 
-		free_unused_memmap_node(node, &meminfo);
+	mem_init_print_info(NULL);
 
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-#ifdef CONFIG_SA1111
-	/* now that our DMA memory is actually so designated, we can free it */
-	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
+			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+#ifdef CONFIG_MODULES
+			MLM(MODULES_VADDR, MODULES_END),
+#endif
+
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_sdata, _edata),
+			MLK_ROUNDUP(__bss_start, __bss_stop));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
 
 	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
 	 */
-	printk(KERN_INFO "Memory:");
-
-	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
-	}
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+	BUG_ON(TASK_SIZE > MODULES_VADDR);
+#endif
 
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codepages >> 10, datapages >> 10, initpages >> 10);
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+#endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -662,11 +617,16 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	if (!machine_is_integrator() && !machine_is_cintegrator()) {
-		free_area((unsigned long)(&__init_begin),
-			  (unsigned long)(&__init_end),
-			  "init");
-	}
+#ifdef CONFIG_HAVE_TCM
+	extern char __tcm_start, __tcm_end;
+
+	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+#endif
+
+	poison_init_mem(__init_begin, __init_end - __init_begin);
+	if (!machine_is_integrator() && !machine_is_cintegrator())
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -675,8 +635,10 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		free_area(start, end, "initrd");
+	if (!keep_initrd) {
+		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
+	}
}
 
 static int __init keepinitrd_setup(char *__unused)
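Usage note (not part of the commit): the arm_memblock_steal() helper added above is meant to be called from a machine's ->reserve() hook, which arm_memblock_init() invokes before it clears arm_memblock_steal_permitted. A minimal, hypothetical board-support sketch follows; the machine name, hook name, saved variable and the 16 MiB/1 MiB sizes are illustrative only.

/* Hypothetical example: steal 16 MiB of contiguous RAM for a private pool. */
#include <linux/init.h>
#include <linux/sizes.h>
#include <asm/memblock.h>
#include <asm/mach/arch.h>

static phys_addr_t example_pool_base __initdata;

static void __init example_reserve(void)
{
	/* Runs from the ->reserve() hook, while stealing is still permitted. */
	example_pool_base = arm_memblock_steal(SZ_16M, SZ_1M);
}

MACHINE_START(EXAMPLE, "example board")
	/* ... other machine_desc fields ... */
	.reserve	= example_reserve,
MACHINE_END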
